problem_id (stringlengths 18-22) | source (stringclasses 1) | task_type (stringclasses 1) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_14876 | rasdani/github-patches | git_diff | pyca__cryptography-2445 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cryptography.x509.Name implements __eq__ but not __hash__
--- END ISSUE ---
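
Why the missing `__hash__` matters: in Python 3, a class that overrides `__eq__` without defining `__hash__` gets `__hash__ = None`, so its instances cannot be placed in a `set` or used as `dict` keys, and in Python 2 the inherited identity hash silently disagrees with `__eq__`. A minimal sketch of the usual fix, using a hypothetical stand-in class rather than the cryptography code:

```python
# Illustrative only: `Pair` is a stand-in, not cryptography.x509.Name.
class Pair(object):
    def __init__(self, oid, value):
        self.oid = oid
        self.value = value

    def __eq__(self, other):
        if not isinstance(other, Pair):
            return NotImplemented
        return (self.oid, self.value) == (other.oid, other.value)

    # The conventional fix: hash exactly the fields that __eq__ compares,
    # so objects that compare equal also hash equal.
    def __hash__(self):
        return hash((self.oid, self.value))


assert Pair("2.5.4.3", u"example") == Pair("2.5.4.3", u"example")
assert len({Pair("2.5.4.3", u"example"), Pair("2.5.4.3", u"example")}) == 1
```
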
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/x509/name.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.x509.oid import ObjectIdentifier
11
12
13 class NameAttribute(object):
14 def __init__(self, oid, value):
15 if not isinstance(oid, ObjectIdentifier):
16 raise TypeError(
17 "oid argument must be an ObjectIdentifier instance."
18 )
19
20 if not isinstance(value, six.text_type):
21 raise TypeError(
22 "value argument must be a text type."
23 )
24
25 self._oid = oid
26 self._value = value
27
28 oid = utils.read_only_property("_oid")
29 value = utils.read_only_property("_value")
30
31 def __eq__(self, other):
32 if not isinstance(other, NameAttribute):
33 return NotImplemented
34
35 return (
36 self.oid == other.oid and
37 self.value == other.value
38 )
39
40 def __ne__(self, other):
41 return not self == other
42
43 def __repr__(self):
44 return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
45
46
47 class Name(object):
48 def __init__(self, attributes):
49 self._attributes = attributes
50
51 def get_attributes_for_oid(self, oid):
52 return [i for i in self if i.oid == oid]
53
54 def __eq__(self, other):
55 if not isinstance(other, Name):
56 return NotImplemented
57
58 return self._attributes == other._attributes
59
60 def __ne__(self, other):
61 return not self == other
62
63 def __iter__(self):
64 return iter(self._attributes)
65
66 def __len__(self):
67 return len(self._attributes)
68
69 def __repr__(self):
70 return "<Name({0!r})>".format(self._attributes)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py
--- a/src/cryptography/x509/name.py
+++ b/src/cryptography/x509/name.py
@@ -40,6 +40,9 @@
def __ne__(self, other):
return not self == other
+ def __hash__(self):
+ return hash((self.oid, self.value))
+
def __repr__(self):
return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
@@ -60,6 +63,11 @@
def __ne__(self, other):
return not self == other
+ def __hash__(self):
+ # TODO: this is relatively expensive, if this looks like a bottleneck
+ # for you, consider optimizing!
+ return hash(tuple(self._attributes))
+
def __iter__(self):
return iter(self._attributes)
|
{"golden_diff": "diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py\n--- a/src/cryptography/x509/name.py\n+++ b/src/cryptography/x509/name.py\n@@ -40,6 +40,9 @@\n def __ne__(self, other):\n return not self == other\n \n+ def __hash__(self):\n+ return hash((self.oid, self.value))\n+\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n \n@@ -60,6 +63,11 @@\n def __ne__(self, other):\n return not self == other\n \n+ def __hash__(self):\n+ # TODO: this is relatively expensive, if this looks like a bottleneck\n+ # for you, consider optimizing!\n+ return hash(tuple(self._attributes))\n+\n def __iter__(self):\n return iter(self._attributes)\n", "issue": "cryptography.x509.Name implements __eq__ but not __hash__\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.oid, self.value))\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}]}
| 845 | 215 |
gh_patches_debug_19283 | rasdani/github-patches | git_diff | conda__conda-1735 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Race condition for root environment detection
Periodically, when two conda processes are running at the same time, it is possible to see a race condition on determining whether the root environment is writable. Notice how the following produces two different configs from the same setup:
```
$ conda info & conda info
Current conda install:
platform : osx-64
conda version : 3.18.3
conda-build version : 1.18.0
python version : 2.7.10.final.0
requests version : 2.8.1
root environment : /Users/pelson/miniconda (read only)
default environment : /Users/pelson/miniconda
envs directories : /Users/pelson/.conda/envs
/Users/pelson/envs
/Users/pelson/miniconda/envs
package cache : /Users/pelson/.conda/envs/.pkgs
/Users/pelson/envs/.pkgs
/Users/pelson/miniconda/pkgs
...
Current conda install:
platform : osx-64
conda version : 3.18.3
conda-build version : 1.18.0
python version : 2.7.10.final.0
requests version : 2.8.1
root environment : /Users/pelson/miniconda (writable)
default environment : /Users/pelson/miniconda
envs directories : /Users/pelson/miniconda/envs
package cache : /Users/pelson/miniconda/pkgs
...
```
The offending line is in https://github.com/conda/conda/blob/master/conda/config.py#L135-L143 and https://github.com/conda/conda/blob/master/conda/utils.py#L43-L54. My assumption is that the `.conda-try-write` is being removed by the other process, and the exception is being raised in the `finally` block.
--- END ISSUE ---
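
The race is on a shared, fixed filename: both processes create and then unlink the same `.conda-try-write` path, so one process's `os.unlink` can fail after the other process has already removed the file. Giving each process its own uniquely named temporary file removes the shared state entirely; a minimal sketch of that approach (the project's actual patch, reproduced further down, does essentially this with `tempfile.TemporaryFile`):

```python
import tempfile
from os.path import isdir


def try_write(dir_path):
    """Return True if dir_path is writable, using a per-process temp file."""
    assert isdir(dir_path)
    try:
        # tempfile picks a unique name on every call, so two concurrent
        # conda processes never create or unlink the same file.
        with tempfile.TemporaryFile(prefix='.conda-try-write', dir=dir_path) as fo:
            fo.write(b'This is a test file.\n')
            return True
    except (IOError, OSError):
        return False
```
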
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/utils.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import logging
4 import sys
5 import hashlib
6 import collections
7 from functools import partial
8 from os.path import abspath, isdir, join
9 import os
10
11 log = logging.getLogger(__name__)
12 stderrlog = logging.getLogger('stderrlog')
13
14 def can_open(file):
15 """
16 Return True if the given ``file`` can be opened for writing
17 """
18 try:
19 fp = open(file, "ab")
20 fp.close()
21 return True
22 except IOError:
23 stderrlog.info("Unable to open %s\n" % file)
24 return False
25
26
27 def can_open_all(files):
28 """
29 Return True if all of the provided ``files`` can be opened
30 """
31 for f in files:
32 if not can_open(f):
33 return False
34 return True
35
36
37 def can_open_all_files_in_prefix(prefix, files):
38 """
39 Returns True if all ``files`` at a given ``prefix`` can be opened
40 """
41 return can_open_all((os.path.join(prefix, f) for f in files))
42
43 def try_write(dir_path):
44 assert isdir(dir_path)
45 try:
46 try:
47 with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:
48 fo.write(b'This is a test file.\n')
49 return True
50 finally:
51 # XXX: If this raises an exception it will also return False
52 os.unlink(join(dir_path, '.conda-try-write'))
53 except (IOError, OSError):
54 return False
55
56
57 def hashsum_file(path, mode='md5'):
58 h = hashlib.new(mode)
59 with open(path, 'rb') as fi:
60 while True:
61 chunk = fi.read(262144) # process chunks of 256KB
62 if not chunk:
63 break
64 h.update(chunk)
65 return h.hexdigest()
66
67
68 def md5_file(path):
69 return hashsum_file(path, 'md5')
70
71
72 def url_path(path):
73 path = abspath(path)
74 if sys.platform == 'win32':
75 path = '/' + path.replace(':', '|').replace('\\', '/')
76 return 'file://%s' % path
77
78
79 def human_bytes(n):
80 """
81 Return the number of bytes n in more human readable form.
82 """
83 if n < 1024:
84 return '%d B' % n
85 k = n/1024
86 if k < 1024:
87 return '%d KB' % round(k)
88 m = k/1024
89 if m < 1024:
90 return '%.1f MB' % m
91 g = m/1024
92 return '%.2f GB' % g
93
94
95 class memoized(object):
96 """Decorator. Caches a function's return value each time it is called.
97 If called later with the same arguments, the cached value is returned
98 (not reevaluated).
99 """
100 def __init__(self, func):
101 self.func = func
102 self.cache = {}
103 def __call__(self, *args, **kw):
104 newargs = []
105 for arg in args:
106 if isinstance(arg, list):
107 newargs.append(tuple(arg))
108 elif not isinstance(arg, collections.Hashable):
109 # uncacheable. a list, for instance.
110 # better to not cache than blow up.
111 return self.func(*args, **kw)
112 else:
113 newargs.append(arg)
114 newargs = tuple(newargs)
115 key = (newargs, frozenset(kw.items()))
116 if key in self.cache:
117 return self.cache[key]
118 else:
119 value = self.func(*args, **kw)
120 self.cache[key] = value
121 return value
122
123
124 # For instance methods only
125 class memoize(object): # 577452
126 def __init__(self, func):
127 self.func = func
128 def __get__(self, obj, objtype=None):
129 if obj is None:
130 return self.func
131 return partial(self, obj)
132 def __call__(self, *args, **kw):
133 obj = args[0]
134 try:
135 cache = obj.__cache
136 except AttributeError:
137 cache = obj.__cache = {}
138 key = (self.func, args[1:], frozenset(kw.items()))
139 try:
140 res = cache[key]
141 except KeyError:
142 res = cache[key] = self.func(*args, **kw)
143 return res
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/utils.py b/conda/utils.py
--- a/conda/utils.py
+++ b/conda/utils.py
@@ -5,8 +5,10 @@
import hashlib
import collections
from functools import partial
-from os.path import abspath, isdir, join
+from os.path import abspath, isdir
import os
+import tempfile
+
log = logging.getLogger(__name__)
stderrlog = logging.getLogger('stderrlog')
@@ -43,13 +45,10 @@
def try_write(dir_path):
assert isdir(dir_path)
try:
- try:
- with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:
- fo.write(b'This is a test file.\n')
- return True
- finally:
- # XXX: If this raises an exception it will also return False
- os.unlink(join(dir_path, '.conda-try-write'))
+ with tempfile.TemporaryFile(prefix='.conda-try-write',
+ dir=dir_path) as fo:
+ fo.write(b'This is a test file.\n')
+ return True
except (IOError, OSError):
return False
|
{"golden_diff": "diff --git a/conda/utils.py b/conda/utils.py\n--- a/conda/utils.py\n+++ b/conda/utils.py\n@@ -5,8 +5,10 @@\n import hashlib\n import collections\n from functools import partial\n-from os.path import abspath, isdir, join\n+from os.path import abspath, isdir\n import os\n+import tempfile\n+\n \n log = logging.getLogger(__name__)\n stderrlog = logging.getLogger('stderrlog')\n@@ -43,13 +45,10 @@\n def try_write(dir_path):\n assert isdir(dir_path)\n try:\n- try:\n- with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:\n- fo.write(b'This is a test file.\\n')\n- return True\n- finally:\n- # XXX: If this raises an exception it will also return False\n- os.unlink(join(dir_path, '.conda-try-write'))\n+ with tempfile.TemporaryFile(prefix='.conda-try-write',\n+ dir=dir_path) as fo:\n+ fo.write(b'This is a test file.\\n')\n+ return True\n except (IOError, OSError):\n return False\n", "issue": "Race condition for root environment detection\nPeriodically, when two conda processes are running at the same time, it is possible to see a race condition on determining whether the root environment is writable. Notice how the following produces two different configs from the same setup:\n\n```\n$ conda info & conda info \n\nCurrent conda install:\n\n platform : osx-64\n conda version : 3.18.3\n conda-build version : 1.18.0\n python version : 2.7.10.final.0\n requests version : 2.8.1\n root environment : /Users/pelson/miniconda (read only)\n default environment : /Users/pelson/miniconda\n envs directories : /Users/pelson/.conda/envs\n /Users/pelson/envs\n /Users/pelson/miniconda/envs\n package cache : /Users/pelson/.conda/envs/.pkgs\n /Users/pelson/envs/.pkgs\n /Users/pelson/miniconda/pkgs\n...\n\nCurrent conda install:\n\n platform : osx-64\n conda version : 3.18.3\n conda-build version : 1.18.0\n python version : 2.7.10.final.0\n requests version : 2.8.1\n root environment : /Users/pelson/miniconda (writable)\n default environment : /Users/pelson/miniconda\n envs directories : /Users/pelson/miniconda/envs\n package cache : /Users/pelson/miniconda/pkgs\n...\n```\n\nThe offending line is in https://github.com/conda/conda/blob/master/conda/config.py#L135-L143 and https://github.com/conda/conda/blob/master/conda/utils.py#L43-L54. 
My assumption is that the `.conda-try-write` is being removed by the other process, and the exception is being raised in the `finally` block.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport sys\nimport hashlib\nimport collections\nfrom functools import partial\nfrom os.path import abspath, isdir, join\nimport os\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndef can_open(file):\n \"\"\"\n Return True if the given ``file`` can be opened for writing\n \"\"\"\n try:\n fp = open(file, \"ab\")\n fp.close()\n return True\n except IOError:\n stderrlog.info(\"Unable to open %s\\n\" % file)\n return False\n\n\ndef can_open_all(files):\n \"\"\"\n Return True if all of the provided ``files`` can be opened\n \"\"\"\n for f in files:\n if not can_open(f):\n return False\n return True\n\n\ndef can_open_all_files_in_prefix(prefix, files):\n \"\"\"\n Returns True if all ``files`` at a given ``prefix`` can be opened\n \"\"\"\n return can_open_all((os.path.join(prefix, f) for f in files))\n\ndef try_write(dir_path):\n assert isdir(dir_path)\n try:\n try:\n with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n return True\n finally:\n # XXX: If this raises an exception it will also return False\n os.unlink(join(dir_path, '.conda-try-write'))\n except (IOError, OSError):\n return False\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef url_path(path):\n path = abspath(path)\n if sys.platform == 'win32':\n path = '/' + path.replace(':', '|').replace('\\\\', '/')\n return 'file://%s' % path\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(kw.items()))\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(kw.items()))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n", "path": "conda/utils.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport sys\nimport hashlib\nimport collections\nfrom functools import partial\nfrom os.path import abspath, isdir\nimport os\nimport tempfile\n\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndef can_open(file):\n \"\"\"\n Return True if the given ``file`` can be opened for writing\n \"\"\"\n try:\n fp = open(file, \"ab\")\n fp.close()\n return True\n except IOError:\n stderrlog.info(\"Unable to open %s\\n\" % file)\n return False\n\n\ndef can_open_all(files):\n \"\"\"\n Return True if all of the provided ``files`` can be opened\n \"\"\"\n for f in files:\n if not can_open(f):\n return False\n return True\n\n\ndef can_open_all_files_in_prefix(prefix, files):\n \"\"\"\n Returns True if all ``files`` at a given ``prefix`` can be opened\n \"\"\"\n return can_open_all((os.path.join(prefix, f) for f in files))\n\ndef try_write(dir_path):\n assert isdir(dir_path)\n try:\n with tempfile.TemporaryFile(prefix='.conda-try-write',\n dir=dir_path) as fo:\n fo.write(b'This is a test file.\\n')\n return True\n except (IOError, OSError):\n return False\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef url_path(path):\n path = abspath(path)\n if sys.platform == 'win32':\n path = '/' + path.replace(':', '|').replace('\\\\', '/')\n return 'file://%s' % path\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(kw.items()))\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(kw.items()))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n", "path": "conda/utils.py"}]}
| 2,003 | 261 |
gh_patches_debug_33670 | rasdani/github-patches | git_diff | getredash__redash-1792 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alert notifications fail (sometime) with a SQLAlchemy error
### Issue Summary
After migrating to 1.0.1 alerts are notified only sporadically to Slack (I cannot pinpoint exactly the conditions in which it happens).
### Steps to Reproduce
1. Create a failing alert for an existing query
2. Add your email and a Slack destination to your alert (the order seems to matter: add first the email destination then the slack destination)
3. Refresh the query such that the alert would trigger
4. You will receive an alert email but no slack alert
5. In the back-end `celery_error.log` you will see an error message like
```
[2017-04-18 13:13:58,184: ERROR/Worker-4] redash.tasks.check_alerts_for_query[fb4fee06-3318-44f7-8aaf-7984d792a793]: Error with processing destination
Traceback (most recent call last):
File "/opt/redash/redash.1.0.1.b2833/redash/tasks/alerts.py", line 23, in notify_subscriptions
subscription.notify(alert, alert.query_rel, subscription.user, new_state, current_app, host)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 237, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 584, in get
value = self.callable_(state, passive)
File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/strategies.py", line 530, in _load_for_state
(orm_util.state_str(state), self.key)
DetachedInstanceError: Parent instance <AlertSubscription at 0x7fcd934f04d0> is not bound to a Session; lazy load operation of attribute 'user' cannot proceed
```
I think this error is caused by the `subscription.user` call.
My guess is that it is eventually caused by [`models.db.session.commit()`](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L51).
This "might" be connected to #1706 .
### Technical details:
* Redash Version: 1.0.1+b2833
* Browser/OS: (not relevant, error is in the backend)
* How did you install Redash: ec2 from ami (in 2015), recently upgraded from 0.12 to 1.0.1
--- END ISSUE ---
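
The traceback shows the standard SQLAlchemy failure mode: once an object is detached from its `Session` (or the session's contents were expired by `commit()`), touching a not-yet-loaded lazy relationship such as `subscription.user` has no session to run the query on and raises `DetachedInstanceError`. A small self-contained sketch of the failure and one common mitigation, eager loading; the models are hypothetical stand-ins rather than Redash's, and the API shown is SQLAlchemy 1.4+ style:

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, joinedload, relationship

Base = declarative_base()


class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)


class Subscription(Base):
    __tablename__ = "subscriptions"
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey("users.id"))
    user = relationship(User)  # lazy-loaded by default


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Subscription(user=User(name="alice")))
    session.commit()

with Session(engine) as session:
    sub = session.query(Subscription).first()
# `sub` is now detached; evaluating `sub.user` here would raise
# DetachedInstanceError because the lazy load has no session to use.

# Mitigation: load the relationship eagerly while the session is still open.
with Session(engine) as session:
    sub = (
        session.query(Subscription)
        .options(joinedload(Subscription.user))
        .first()
    )
print(sub.user.name)  # safe: "user" was populated before the session closed
```
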
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/tasks/general.py`
Content:
```
1 import requests
2 from celery.utils.log import get_task_logger
3 from flask_mail import Message
4 from redash.worker import celery
5 from redash.version_check import run_version_check
6 from redash import models, mail, settings
7
8 logger = get_task_logger(__name__)
9
10
11 @celery.task(name="redash.tasks.record_event")
12 def record_event(raw_event):
13 event = models.Event.record(raw_event)
14 models.db.session.commit()
15
16 for hook in settings.EVENT_REPORTING_WEBHOOKS:
17 logger.debug("Forwarding event to: %s", hook)
18 try:
19 data = {
20 "schema": "iglu:io.redash.webhooks/event/jsonschema/1-0-0",
21 "data": event.to_dict()
22 }
23 response = requests.post(hook, json=data)
24 if response.status_code != 200:
25 logger.error("Failed posting to %s: %s", hook, response.content)
26 except Exception:
27 logger.exception("Failed posting to %s", hook)
28
29
30 @celery.task(name="redash.tasks.version_check")
31 def version_check():
32 run_version_check()
33
34
35 @celery.task(name="redash.tasks.subscribe")
36 def subscribe(form):
37 logger.info("Subscribing to: [security notifications=%s], [newsletter=%s]", form['security_notifications'], form['newsletter'])
38 data = {
39 'admin_name': form['name'],
40 'admin_email': form['email'],
41 'org_name': form['org_name'],
42 'security_notifications': form['security_notifications'],
43 'newsletter': form['newsletter']
44 }
45 requests.post('https://beacon.redash.io/subscribe', json=data)
46
47
48 @celery.task(name="redash.tasks.send_mail")
49 def send_mail(to, subject, html, text):
50 from redash.wsgi import app
51
52 try:
53 with app.app_context():
54 message = Message(recipients=to,
55 subject=subject,
56 html=html,
57 body=text)
58
59 mail.send(message)
60 except Exception:
61 logger.exception('Failed sending message: %s', message.subject)
62
```
Path: `redash/destinations/email.py`
Content:
```
1 import logging
2
3 from flask_mail import Message
4 from redash import mail, settings
5 from redash.destinations import *
6
7
8 class Email(BaseDestination):
9
10 @classmethod
11 def configuration_schema(cls):
12 return {
13 "type": "object",
14 "properties": {
15 "addresses": {
16 "type": "string"
17 },
18 "subject_template": {
19 "type": "string",
20 "default": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE,
21 "title": "Subject Template"
22 }
23 },
24 "required": ["addresses"]
25 }
26
27 @classmethod
28 def icon(cls):
29 return 'fa-envelope'
30
31 def notify(self, alert, query, user, new_state, app, host, options):
32 recipients = [email for email in options.get('addresses', '').split(',') if email]
33
34 if not recipients:
35 logging.warning("No emails given. Skipping send.")
36
37 html = """
38 Check <a href="{host}/alerts/{alert_id}">alert</a> / check <a href="{host}/queries/{query_id}">query</a>.
39 """.format(host=host, alert_id=alert.id, query_id=query.id)
40 logging.debug("Notifying: %s", recipients)
41
42 try:
43 with app.app_context():
44 alert_name = alert.name.encode('utf-8', 'ignore')
45 state = new_state.upper()
46 subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)
47 message = Message(
48 recipients=recipients,
49 subject=subject_template.format(alert_name=alert_name, state=state),
50 html=html
51 )
52 mail.send(message)
53 except Exception:
54 logging.exception("Mail send error.")
55
56 register(Email)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/destinations/email.py b/redash/destinations/email.py
--- a/redash/destinations/email.py
+++ b/redash/destinations/email.py
@@ -40,16 +40,15 @@
logging.debug("Notifying: %s", recipients)
try:
- with app.app_context():
- alert_name = alert.name.encode('utf-8', 'ignore')
- state = new_state.upper()
- subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)
- message = Message(
- recipients=recipients,
- subject=subject_template.format(alert_name=alert_name, state=state),
- html=html
- )
- mail.send(message)
+ alert_name = alert.name.encode('utf-8', 'ignore')
+ state = new_state.upper()
+ subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)
+ message = Message(
+ recipients=recipients,
+ subject=subject_template.format(alert_name=alert_name, state=state),
+ html=html
+ )
+ mail.send(message)
except Exception:
logging.exception("Mail send error.")
diff --git a/redash/tasks/general.py b/redash/tasks/general.py
--- a/redash/tasks/general.py
+++ b/redash/tasks/general.py
@@ -1,9 +1,10 @@
import requests
+
from celery.utils.log import get_task_logger
from flask_mail import Message
-from redash.worker import celery
+from redash import mail, models, settings
from redash.version_check import run_version_check
-from redash import models, mail, settings
+from redash.worker import celery
logger = get_task_logger(__name__)
@@ -50,12 +51,11 @@
from redash.wsgi import app
try:
- with app.app_context():
- message = Message(recipients=to,
- subject=subject,
- html=html,
- body=text)
+ message = Message(recipients=to,
+ subject=subject,
+ html=html,
+ body=text)
- mail.send(message)
+ mail.send(message)
except Exception:
logger.exception('Failed sending message: %s', message.subject)
|
{"golden_diff": "diff --git a/redash/destinations/email.py b/redash/destinations/email.py\n--- a/redash/destinations/email.py\n+++ b/redash/destinations/email.py\n@@ -40,16 +40,15 @@\n logging.debug(\"Notifying: %s\", recipients)\n \n try:\n- with app.app_context():\n- alert_name = alert.name.encode('utf-8', 'ignore')\n- state = new_state.upper()\n- subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n- message = Message(\n- recipients=recipients,\n- subject=subject_template.format(alert_name=alert_name, state=state),\n- html=html\n- )\n- mail.send(message)\n+ alert_name = alert.name.encode('utf-8', 'ignore')\n+ state = new_state.upper()\n+ subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n+ message = Message(\n+ recipients=recipients,\n+ subject=subject_template.format(alert_name=alert_name, state=state),\n+ html=html\n+ )\n+ mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n \ndiff --git a/redash/tasks/general.py b/redash/tasks/general.py\n--- a/redash/tasks/general.py\n+++ b/redash/tasks/general.py\n@@ -1,9 +1,10 @@\n import requests\n+\n from celery.utils.log import get_task_logger\n from flask_mail import Message\n-from redash.worker import celery\n+from redash import mail, models, settings\n from redash.version_check import run_version_check\n-from redash import models, mail, settings\n+from redash.worker import celery\n \n logger = get_task_logger(__name__)\n \n@@ -50,12 +51,11 @@\n from redash.wsgi import app\n \n try:\n- with app.app_context():\n- message = Message(recipients=to,\n- subject=subject,\n- html=html,\n- body=text)\n+ message = Message(recipients=to,\n+ subject=subject,\n+ html=html,\n+ body=text)\n \n- mail.send(message)\n+ mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "issue": "Alert notifications fail (sometime) with a SQLAlchemy error\n### Issue Summary\r\n\r\nAfter migrating to 1.0.1 alerts are notified only sporadically to Slack (I cannot pinpoint exactly the conditions in which it happens).\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a failing alert for an existing query\r\n2. Add your email and a Slack destination to your alert (the order seems to matter: add first the email destination then the slack destination)\r\n3. Refresh the query such that the alert would trigger\r\n4. You will receive an alert email but no slack alert\r\n5. 
In the back-end `celery_error.log` you will see an error message like\r\n```\r\n[2017-04-18 13:13:58,184: ERROR/Worker-4] redash.tasks.check_alerts_for_query[fb4fee06-3318-44f7-8aaf-7984d792a793]: Error with processing destination\r\nTraceback (most recent call last):\r\n File \"/opt/redash/redash.1.0.1.b2833/redash/tasks/alerts.py\", line 23, in notify_subscriptions\r\n subscription.notify(alert, alert.query_rel, subscription.user, new_state, current_app, host)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py\", line 237, in __get__\r\n return self.impl.get(instance_state(instance), dict_)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py\", line 584, in get\r\n value = self.callable_(state, passive)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/strategies.py\", line 530, in _load_for_state\r\n (orm_util.state_str(state), self.key)\r\nDetachedInstanceError: Parent instance <AlertSubscription at 0x7fcd934f04d0> is not bound to a Session; lazy load operation of attribute 'user' cannot proceed\r\n```\r\n\r\nI think this error is caused by the `subscription.user` call.\r\nMy guess is that it is eventually caused by [`models.db.session.commit()`](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L51). \r\n\r\nThis \"might\" be connected to #1706 .\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.1+b2833\r\n* Browser/OS: (not relevant, error is in the backend)\r\n* How did you install Redash: ec2 from ami (in 2015), recently upgraded from 0.12 to 1.0.1\r\n\n", "before_files": [{"content": "import requests\nfrom celery.utils.log import get_task_logger\nfrom flask_mail import Message\nfrom redash.worker import celery\nfrom redash.version_check import run_version_check\nfrom redash import models, mail, settings\n\nlogger = get_task_logger(__name__)\n\n\[email protected](name=\"redash.tasks.record_event\")\ndef record_event(raw_event):\n event = models.Event.record(raw_event)\n models.db.session.commit()\n\n for hook in settings.EVENT_REPORTING_WEBHOOKS:\n logger.debug(\"Forwarding event to: %s\", hook)\n try:\n data = {\n \"schema\": \"iglu:io.redash.webhooks/event/jsonschema/1-0-0\",\n \"data\": event.to_dict()\n }\n response = requests.post(hook, json=data)\n if response.status_code != 200:\n logger.error(\"Failed posting to %s: %s\", hook, response.content)\n except Exception:\n logger.exception(\"Failed posting to %s\", hook)\n\n\[email protected](name=\"redash.tasks.version_check\")\ndef version_check():\n run_version_check()\n\n\[email protected](name=\"redash.tasks.subscribe\")\ndef subscribe(form):\n logger.info(\"Subscribing to: [security notifications=%s], [newsletter=%s]\", form['security_notifications'], form['newsletter'])\n data = {\n 'admin_name': form['name'],\n 'admin_email': form['email'],\n 'org_name': form['org_name'],\n 'security_notifications': form['security_notifications'],\n 'newsletter': form['newsletter']\n }\n requests.post('https://beacon.redash.io/subscribe', json=data)\n\n\[email protected](name=\"redash.tasks.send_mail\")\ndef send_mail(to, subject, html, text):\n from redash.wsgi import app\n\n try:\n with app.app_context():\n message = Message(recipients=to,\n subject=subject,\n html=html,\n body=text)\n\n mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "path": "redash/tasks/general.py"}, {"content": "import logging\n\nfrom flask_mail import Message\nfrom redash import mail, settings\nfrom 
redash.destinations import *\n\n\nclass Email(BaseDestination):\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"addresses\": {\n \"type\": \"string\"\n },\n \"subject_template\": {\n \"type\": \"string\",\n \"default\": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE,\n \"title\": \"Subject Template\"\n }\n },\n \"required\": [\"addresses\"]\n }\n\n @classmethod\n def icon(cls):\n return 'fa-envelope'\n\n def notify(self, alert, query, user, new_state, app, host, options):\n recipients = [email for email in options.get('addresses', '').split(',') if email]\n\n if not recipients:\n logging.warning(\"No emails given. Skipping send.\")\n\n html = \"\"\"\n Check <a href=\"{host}/alerts/{alert_id}\">alert</a> / check <a href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=host, alert_id=alert.id, query_id=query.id)\n logging.debug(\"Notifying: %s\", recipients)\n\n try:\n with app.app_context():\n alert_name = alert.name.encode('utf-8', 'ignore')\n state = new_state.upper()\n subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n message = Message(\n recipients=recipients,\n subject=subject_template.format(alert_name=alert_name, state=state),\n html=html\n )\n mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n\nregister(Email)\n", "path": "redash/destinations/email.py"}], "after_files": [{"content": "import requests\n\nfrom celery.utils.log import get_task_logger\nfrom flask_mail import Message\nfrom redash import mail, models, settings\nfrom redash.version_check import run_version_check\nfrom redash.worker import celery\n\nlogger = get_task_logger(__name__)\n\n\[email protected](name=\"redash.tasks.record_event\")\ndef record_event(raw_event):\n event = models.Event.record(raw_event)\n models.db.session.commit()\n\n for hook in settings.EVENT_REPORTING_WEBHOOKS:\n logger.debug(\"Forwarding event to: %s\", hook)\n try:\n data = {\n \"schema\": \"iglu:io.redash.webhooks/event/jsonschema/1-0-0\",\n \"data\": event.to_dict()\n }\n response = requests.post(hook, json=data)\n if response.status_code != 200:\n logger.error(\"Failed posting to %s: %s\", hook, response.content)\n except Exception:\n logger.exception(\"Failed posting to %s\", hook)\n\n\[email protected](name=\"redash.tasks.version_check\")\ndef version_check():\n run_version_check()\n\n\[email protected](name=\"redash.tasks.subscribe\")\ndef subscribe(form):\n logger.info(\"Subscribing to: [security notifications=%s], [newsletter=%s]\", form['security_notifications'], form['newsletter'])\n data = {\n 'admin_name': form['name'],\n 'admin_email': form['email'],\n 'org_name': form['org_name'],\n 'security_notifications': form['security_notifications'],\n 'newsletter': form['newsletter']\n }\n requests.post('https://beacon.redash.io/subscribe', json=data)\n\n\[email protected](name=\"redash.tasks.send_mail\")\ndef send_mail(to, subject, html, text):\n from redash.wsgi import app\n\n try:\n message = Message(recipients=to,\n subject=subject,\n html=html,\n body=text)\n\n mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "path": "redash/tasks/general.py"}, {"content": "import logging\n\nfrom flask_mail import Message\nfrom redash import mail, settings\nfrom redash.destinations import *\n\n\nclass Email(BaseDestination):\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n 
\"addresses\": {\n \"type\": \"string\"\n },\n \"subject_template\": {\n \"type\": \"string\",\n \"default\": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE,\n \"title\": \"Subject Template\"\n }\n },\n \"required\": [\"addresses\"]\n }\n\n @classmethod\n def icon(cls):\n return 'fa-envelope'\n\n def notify(self, alert, query, user, new_state, app, host, options):\n recipients = [email for email in options.get('addresses', '').split(',') if email]\n\n if not recipients:\n logging.warning(\"No emails given. Skipping send.\")\n\n html = \"\"\"\n Check <a href=\"{host}/alerts/{alert_id}\">alert</a> / check <a href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=host, alert_id=alert.id, query_id=query.id)\n logging.debug(\"Notifying: %s\", recipients)\n\n try:\n alert_name = alert.name.encode('utf-8', 'ignore')\n state = new_state.upper()\n subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n message = Message(\n recipients=recipients,\n subject=subject_template.format(alert_name=alert_name, state=state),\n html=html\n )\n mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n\nregister(Email)\n", "path": "redash/destinations/email.py"}]}
| 1,901 | 503 |
gh_patches_debug_31457 | rasdani/github-patches | git_diff | mozmeao__snippets-service-1206 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create Freq Capping Goals for Jobs
Create Global Frequency Capping goals for Jobs (introduced in #1034).
Goals:
- Total impressions
- Total clicks and button clicks (combined)
- Total blocks
A `Job` can have multiple goals (freq capping, datetime), first one reached will un-publish the `Job`.
Requires #1036. Also `Job`s with Freq Cap goals must get data from Redash more often (TBD).
## Use case:
- Publish a Snippet for 1M impressions. It's fine it snippet gets 5K-10K impressions more.
## Questions
- [x] How do we handle Jobs with Freq Capping when no Telemetry data is available?
- Jobs that have impression, block or click limits set and no metrics data updated for at least 24 hours will be marked Complete.
--- END ISSUE ---
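
The answer recorded under the last question is a concrete rule: a published Job that has at least one non-zero limit, but whose metrics have not been updated for 24 hours, should be marked complete. A plain-Python restatement of that rule, with dictionaries standing in for Job rows (the field names mirror the Django model used in the patch below):

```python
from datetime import datetime, timedelta

LIMIT_FIELDS = ("limit_impressions", "limit_clicks", "limit_blocks")


def should_complete_for_missing_metrics(job, now=None):
    """True if the job has any cap set but its metrics are older than 24h."""
    now = now or datetime.utcnow()
    has_any_limit = any(job[field] for field in LIMIT_FIELDS)
    metrics_stale = job["metric_last_update"] < now - timedelta(days=1)
    return has_any_limit and metrics_stale


jobs = [
    {"id": 1, "limit_impressions": 1000000, "limit_clicks": 0, "limit_blocks": 0,
     "metric_last_update": datetime.utcnow() - timedelta(hours=30)},
    {"id": 2, "limit_impressions": 0, "limit_clicks": 0, "limit_blocks": 0,
     "metric_last_update": datetime.utcnow() - timedelta(hours=30)},
]

print([j["id"] for j in jobs if should_complete_for_missing_metrics(j)])  # -> [1]
```
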
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/management/commands/update_jobs.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib.auth import get_user_model
4 from django.core.management.base import BaseCommand
5 from django.db import transaction
6 from django.db.models import F, Q
7
8 from snippets.base.models import Job
9
10
11 class Command(BaseCommand):
12 args = "(no args)"
13 help = "Update Jobs"
14
15 @transaction.atomic
16 def handle(self, *args, **options):
17 now = datetime.utcnow()
18 user = get_user_model().objects.get_or_create(username='snippets_bot')[0]
19 count_total_completed = 0
20
21 # Publish Scheduled Jobs with `publish_start` before now or without
22 # publish_start.
23 jobs = Job.objects.filter(status=Job.SCHEDULED).filter(
24 Q(publish_start__lte=now) | Q(publish_start=None)
25 )
26 count_published = jobs.count()
27 for job in jobs:
28 job.change_status(
29 status=Job.PUBLISHED,
30 user=user,
31 reason='Published start date reached.',
32 )
33
34 # Disable Published Jobs with `publish_end` before now.
35 jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)
36 count_publication_end = jobs.count()
37 count_total_completed += count_publication_end
38
39 for job in jobs:
40 job.change_status(
41 status=Job.COMPLETED,
42 user=user,
43 reason='Publication end date reached.',
44 )
45
46 # Disable Jobs that reached Impression, Click or Block limits.
47 count_limit = {}
48 for limit in ['impressions', 'clicks', 'blocks']:
49 jobs = (Job.objects
50 .filter(status=Job.PUBLISHED)
51 .exclude(**{f'limit_{limit}': 0})
52 .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))
53 for job in jobs:
54 job.change_status(
55 status=Job.COMPLETED,
56 user=user,
57 reason=f'Limit reached: {limit}.',
58 )
59
60 count_limit[limit] = jobs.count()
61 count_total_completed += count_limit[limit]
62
63 count_running = Job.objects.filter(status=Job.PUBLISHED).count()
64
65 self.stdout.write(
66 f'Jobs Published: {count_published}\n'
67 f'Jobs Completed: {count_total_completed}\n'
68 f' - Reached Publication End Date: {count_publication_end}\n'
69 f' - Reached Impressions Limit: {count_limit["impressions"]}\n'
70 f' - Reached Clicks Limit: {count_limit["clicks"]}\n'
71 f' - Reached Blocks Limit: {count_limit["blocks"]}\n'
72 f'Total Jobs Running: {count_running}\n'
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py
--- a/snippets/base/management/commands/update_jobs.py
+++ b/snippets/base/management/commands/update_jobs.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
@@ -60,6 +60,23 @@
count_limit[limit] = jobs.count()
count_total_completed += count_limit[limit]
+ # Disable Jobs that have Impression, Click or Block limits but don't
+ # have metrics data for at least 24h. This is to handle cases where the
+ # Metrics Pipeline is broken.
+ yesterday = datetime.utcnow() - timedelta(days=1)
+ jobs = (Job.objects
+ .filter(status=Job.PUBLISHED)
+ .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)
+ .filter(metric_last_update__lt=yesterday))
+ for job in jobs:
+ job.change_status(
+ status=Job.COMPLETED,
+ user=user,
+ reason=f'Premature termination due to missing metrics.',
+ )
+ count_premature_termination = jobs.count()
+ count_total_completed += count_premature_termination
+
count_running = Job.objects.filter(status=Job.PUBLISHED).count()
self.stdout.write(
@@ -69,5 +86,6 @@
f' - Reached Impressions Limit: {count_limit["impressions"]}\n'
f' - Reached Clicks Limit: {count_limit["clicks"]}\n'
f' - Reached Blocks Limit: {count_limit["blocks"]}\n'
+ f' - Premature Termination due to missing metrics: {count_premature_termination}\n'
f'Total Jobs Running: {count_running}\n'
)
|
{"golden_diff": "diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py\n--- a/snippets/base/management/commands/update_jobs.py\n+++ b/snippets/base/management/commands/update_jobs.py\n@@ -1,4 +1,4 @@\n-from datetime import datetime\n+from datetime import datetime, timedelta\n \n from django.contrib.auth import get_user_model\n from django.core.management.base import BaseCommand\n@@ -60,6 +60,23 @@\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n \n+ # Disable Jobs that have Impression, Click or Block limits but don't\n+ # have metrics data for at least 24h. This is to handle cases where the\n+ # Metrics Pipeline is broken.\n+ yesterday = datetime.utcnow() - timedelta(days=1)\n+ jobs = (Job.objects\n+ .filter(status=Job.PUBLISHED)\n+ .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n+ .filter(metric_last_update__lt=yesterday))\n+ for job in jobs:\n+ job.change_status(\n+ status=Job.COMPLETED,\n+ user=user,\n+ reason=f'Premature termination due to missing metrics.',\n+ )\n+ count_premature_termination = jobs.count()\n+ count_total_completed += count_premature_termination\n+\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n \n self.stdout.write(\n@@ -69,5 +86,6 @@\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n+ f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "issue": "Create Freq Capping Goals for Jobs\nCreate Global Frequency Capping goals for Jobs (introduced in #1034). \r\n\r\nGoals:\r\n - Total impressions\r\n - Total clicks and button clicks (combined)\r\n - Total blocks\r\n\r\nA `Job` can have multiple goals (freq capping, datetime), first one reached will un-publish the `Job`.\r\n\r\nRequires #1036. Also `Job`s with Freq Cap goals must get data from Redash more often (TBD).\r\n\r\n## Use case:\r\n - Publish a Snippet for 1M impressions. It's fine it snippet gets 5K-10K impressions more.\r\n\r\n\r\n## Questions\r\n - [x] How do we handle Jobs with Freq Capping when no Telemetry data is available? 
\r\n - Jobs that have impression, block or click limits set and no metrics data updated for at least 24 hours will be marked Complete.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += 
count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}]}
| 1,176 | 440 |
gh_patches_debug_10443 | rasdani/github-patches | git_diff | conan-io__conan-4096 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conan Server 1.10 is broken
#### Description
Hi!
I'm trying to start the conan server, but it seems to be broken.
$ conan_server
> Traceback (most recent call last):
> File "/opt/miniconda3/bin/conan_server", line 7, in <module>
> from conans.conan_server import run
> File "/opt/miniconda3/lib/python3.6/site-packages/conans/conan_server.py", line 1, in <module>
> from conans.server.server_launcher import main
> File "/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py", line 53, in <module>
> launcher = ServerLauncher()
> File "/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py", line 47, in __init__
>     server_capabilities, only_v1=False)
> TypeError: __init__() got an unexpected keyword argument 'only_v1'
#### Environment
Conan Versions: 1.10
Python version: 3.6, 3.7
OS: Linux Ubuntu 18.04
#### Steps to reproduce
pip3 install conan==1.10.0
conan_server
or using Conan Docker Tools:
BUILD_CONAN_SERVER_IMAGE=1 python build.py
docker run -t -p 9300:9300 --name conan_server conanio/conan_server:latest
- [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [X] I've specified the Conan version, operating system version and any tool that can be relevant.
- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/server/server_launcher.py`
Content:
```
1 #!/usr/bin/python
2 import os
3 from conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator
4 from conans.server.conf import get_server_store
5 from conans.server.rest.server import ConanServer
6 from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager
7 from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager
8 from conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION
9 from conans.server.plugin_loader import load_authentication_plugin
10 from conans.model.version import Version
11 from conans.server.migrate import migrate_and_get_server_config
12 from conans import __version__ as SERVER_VERSION, REVISIONS
13 from conans.paths import conan_expand_user
14 from conans import SERVER_CAPABILITIES
15
16
17 class ServerLauncher(object):
18 def __init__(self):
19 user_folder = conan_expand_user("~")
20 server_folder = os.path.join(user_folder, '.conan_server')
21
22 server_config = migrate_and_get_server_config(user_folder)
23 custom_auth = server_config.custom_authenticator
24 if custom_auth:
25 authenticator = load_authentication_plugin(server_folder, custom_auth)
26 else:
27 authenticator = BasicAuthenticator(dict(server_config.users))
28
29 authorizer = BasicAuthorizer(server_config.read_permissions,
30 server_config.write_permissions)
31 credentials_manager = JWTCredentialsManager(server_config.jwt_secret,
32 server_config.jwt_expire_time)
33
34 updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,
35 server_config.authorize_timeout)
36
37 server_store = get_server_store(server_config.disk_storage_path,
38 server_config.public_url,
39 updown_auth_manager=updown_auth_manager)
40
41 server_capabilities = SERVER_CAPABILITIES
42 server_capabilities.append(REVISIONS)
43
44 self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,
45 authorizer, authenticator, server_store,
46 Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),
47 server_capabilities, only_v1=False)
48
49 def launch(self):
50 self.ra.run(host="0.0.0.0")
51
52
53 launcher = ServerLauncher()
54 app = launcher.ra.root_app
55
56
57 def main(*args):
58 launcher.launch()
59
60
61 if __name__ == "__main__":
62 main()
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/server/server_launcher.py b/conans/server/server_launcher.py
--- a/conans/server/server_launcher.py
+++ b/conans/server/server_launcher.py
@@ -44,7 +44,7 @@
self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,
authorizer, authenticator, server_store,
Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),
- server_capabilities, only_v1=False)
+ server_capabilities)
def launch(self):
self.ra.run(host="0.0.0.0")
|
{"golden_diff": "diff --git a/conans/server/server_launcher.py b/conans/server/server_launcher.py\n--- a/conans/server/server_launcher.py\n+++ b/conans/server/server_launcher.py\n@@ -44,7 +44,7 @@\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n- server_capabilities, only_v1=False)\n+ server_capabilities)\n \n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n", "issue": "Conan Server 1.10 is broken\n#### Description\r\nHi!\r\n\r\nI'm trying to start conan server but seems like is broken.\r\n\r\n $ conan_server\r\n\r\n> Traceback (most recent call last):\r\n> File \"/opt/miniconda3/bin/conan_server\", line 7, in <module>\r\n> from conans.conan_server import run\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/conan_server.py\", line 1, in <module>\r\n> from conans.server.server_launcher import main\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py\", line 53, in <module>\r\n> launcher = ServerLauncher()\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py\", line 47, in __init__\r\n server_capabilities, only_v1=False)\r\n> TypeError: __init__() got an unexpected keyword argument 'only_v1'\r\n\r\n#### Environment\r\nConan Versions: 1.10\r\nPython version: 3.6, 3.7\r\nOS: Linux Ubuntu 18.04\r\n\r\n#### Steps to reproduce\r\n pip3 install conan==1.10.0\r\n conan_server\r\n\r\nor using Conan Docker Tools:\r\n\r\n BUILD_CONAN_SERVER_IMAGE=1 python build.py\r\n docker run -t -p 9300:9300 --name conan_server conanio/conan_server:latest \r\n\r\n\r\n- [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [X] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python\nimport os\nfrom conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nfrom conans.server.conf import get_server_store\nfrom conans.server.rest.server import ConanServer\nfrom conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager\nfrom conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION\nfrom conans.server.plugin_loader import load_authentication_plugin\nfrom conans.model.version import Version\nfrom conans.server.migrate import migrate_and_get_server_config\nfrom conans import __version__ as SERVER_VERSION, REVISIONS\nfrom conans.paths import conan_expand_user\nfrom conans import SERVER_CAPABILITIES\n\n\nclass ServerLauncher(object):\n def __init__(self):\n user_folder = conan_expand_user(\"~\")\n server_folder = os.path.join(user_folder, '.conan_server')\n\n server_config = migrate_and_get_server_config(user_folder)\n custom_auth = server_config.custom_authenticator\n if custom_auth:\n authenticator = load_authentication_plugin(server_folder, custom_auth)\n else:\n authenticator = BasicAuthenticator(dict(server_config.users))\n\n authorizer = BasicAuthorizer(server_config.read_permissions,\n server_config.write_permissions)\n credentials_manager = JWTCredentialsManager(server_config.jwt_secret,\n server_config.jwt_expire_time)\n\n updown_auth_manager = 
JWTUpDownAuthManager(server_config.updown_secret,\n server_config.authorize_timeout)\n\n server_store = get_server_store(server_config.disk_storage_path,\n server_config.public_url,\n updown_auth_manager=updown_auth_manager)\n\n server_capabilities = SERVER_CAPABILITIES\n server_capabilities.append(REVISIONS)\n\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n server_capabilities, only_v1=False)\n\n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n\n\nlauncher = ServerLauncher()\napp = launcher.ra.root_app\n\n\ndef main(*args):\n launcher.launch()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "conans/server/server_launcher.py"}], "after_files": [{"content": "#!/usr/bin/python\nimport os\nfrom conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nfrom conans.server.conf import get_server_store\nfrom conans.server.rest.server import ConanServer\nfrom conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager\nfrom conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION\nfrom conans.server.plugin_loader import load_authentication_plugin\nfrom conans.model.version import Version\nfrom conans.server.migrate import migrate_and_get_server_config\nfrom conans import __version__ as SERVER_VERSION, REVISIONS\nfrom conans.paths import conan_expand_user\nfrom conans import SERVER_CAPABILITIES\n\n\nclass ServerLauncher(object):\n def __init__(self):\n user_folder = conan_expand_user(\"~\")\n server_folder = os.path.join(user_folder, '.conan_server')\n\n server_config = migrate_and_get_server_config(user_folder)\n custom_auth = server_config.custom_authenticator\n if custom_auth:\n authenticator = load_authentication_plugin(server_folder, custom_auth)\n else:\n authenticator = BasicAuthenticator(dict(server_config.users))\n\n authorizer = BasicAuthorizer(server_config.read_permissions,\n server_config.write_permissions)\n credentials_manager = JWTCredentialsManager(server_config.jwt_secret,\n server_config.jwt_expire_time)\n\n updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,\n server_config.authorize_timeout)\n\n server_store = get_server_store(server_config.disk_storage_path,\n server_config.public_url,\n updown_auth_manager=updown_auth_manager)\n\n server_capabilities = SERVER_CAPABILITIES\n server_capabilities.append(REVISIONS)\n\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n server_capabilities)\n\n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n\n\nlauncher = ServerLauncher()\napp = launcher.ra.root_app\n\n\ndef main(*args):\n launcher.launch()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "conans/server/server_launcher.py"}]}
| 1,259 | 125 |
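Regarding the fix above (conan-io/conan#4096): the launcher simply stops forwarding the `only_v1` keyword that `ConanServer.__init__` no longer accepts. As a general defensive pattern, not something Conan itself does, a caller can filter keywords against a callable's signature before forwarding them; the `ConanServer` class below is a stand-in for illustration, not the real one.

```python
import inspect

def filter_supported_kwargs(callable_obj, **kwargs):
    """Drop keyword arguments that the target callable does not accept."""
    params = inspect.signature(callable_obj).parameters
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return kwargs  # target takes **kwargs, pass everything through
    return {name: value for name, value in kwargs.items() if name in params}

class ConanServer:  # stand-in for the real class, whose __init__ dropped only_v1
    def __init__(self, port, server_capabilities):
        self.port, self.server_capabilities = port, server_capabilities

kwargs = filter_supported_kwargs(ConanServer, server_capabilities=["revisions"], only_v1=False)
server = ConanServer(9300, **kwargs)  # only_v1 is silently discarded
```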
gh_patches_debug_9464
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1358
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ability to specify SMTP port
Some relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.
Add ability to specify SMTP port
Some relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/mailu/internal/views/postfix.py`
Content:
```
1 from mailu import models
2 from mailu.internal import internal
3
4 import flask
5 import re
6
7
8 @internal.route("/postfix/domain/<domain_name>")
9 def postfix_mailbox_domain(domain_name):
10 if re.match("^\[.*\]$", domain_name):
11 return flask.abort(404)
12 domain = models.Domain.query.get(domain_name) or \
13 models.Alternative.query.get(domain_name) or \
14 flask.abort(404)
15 return flask.jsonify(domain.name)
16
17
18 @internal.route("/postfix/mailbox/<path:email>")
19 def postfix_mailbox_map(email):
20 user = models.User.query.get(email) or flask.abort(404)
21 return flask.jsonify(user.email)
22
23
24 @internal.route("/postfix/alias/<path:alias>")
25 def postfix_alias_map(alias):
26 localpart, domain_name = models.Email.resolve_domain(alias)
27 if localpart is None:
28 return flask.jsonify(domain_name)
29 destination = models.Email.resolve_destination(localpart, domain_name)
30 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
31
32
33 @internal.route("/postfix/transport/<path:email>")
34 def postfix_transport(email):
35 if email == '*' or re.match("(^|.*@)\[.*\]$", email):
36 return flask.abort(404)
37 localpart, domain_name = models.Email.resolve_domain(email)
38 relay = models.Relay.query.get(domain_name) or flask.abort(404)
39 return flask.jsonify("smtp:[{}]".format(relay.smtp))
40
41
42 @internal.route("/postfix/sender/login/<path:sender>")
43 def postfix_sender_login(sender):
44 localpart, domain_name = models.Email.resolve_domain(sender)
45 if localpart is None:
46 return flask.abort(404)
47 destination = models.Email.resolve_destination(localpart, domain_name, True)
48 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
49
50
51 @internal.route("/postfix/sender/access/<path:sender>")
52 def postfix_sender_access(sender):
53 """ Simply reject any sender that pretends to be from a local domain
54 """
55 if not is_void_address(sender):
56 localpart, domain_name = models.Email.resolve_domain(sender)
57 return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404)
58 else:
59 return flask.abort(404)
60
61
62 def is_void_address(email):
63 '''True if the email is void (null) email address.
64 '''
65 if email.startswith('<') and email.endswith('>'):
66 email = email[1:-1]
67 # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's
68 # consider void any such thing.
69 return '@' not in email
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py
--- a/core/admin/mailu/internal/views/postfix.py
+++ b/core/admin/mailu/internal/views/postfix.py
@@ -36,7 +36,11 @@
return flask.abort(404)
localpart, domain_name = models.Email.resolve_domain(email)
relay = models.Relay.query.get(domain_name) or flask.abort(404)
- return flask.jsonify("smtp:[{}]".format(relay.smtp))
+ ret = "smtp:[{0}]".format(relay.smtp)
+ if ":" in relay.smtp:
+ split = relay.smtp.split(':')
+ ret = "smtp:[{0}]:{1}".format(split[0], split[1])
+ return flask.jsonify(ret)
@internal.route("/postfix/sender/login/<path:sender>")
|
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -36,7 +36,11 @@\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n- return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n+ ret = \"smtp:[{0}]\".format(relay.smtp)\n+ if \":\" in relay.smtp:\n+ split = relay.smtp.split(':')\n+ ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n+ return flask.jsonify(ret)\n \n \n @internal.route(\"/postfix/sender/login/<path:sender>\")\n", "issue": "Add ability to specify SMTP port\nSome relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.\nAdd ability to specify SMTP port\nSome relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; 
so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n ret = \"smtp:[{0}]\".format(relay.smtp)\n if \":\" in relay.smtp:\n split = relay.smtp.split(':')\n ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n return flask.jsonify(ret)\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}]}
| 1,101 | 197 |
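On the Mailu fix above: the transport endpoint now splits an optional `:port` suffix off the stored relay host and emits Postfix's `smtp:[host]:port` form. Below is a standalone sketch of that parsing (my own simplification; like the patch, it does not handle IPv6 literals, which also contain colons).

```python
def postfix_transport_entry(relay_smtp):
    """Build a Postfix transport value such as 'smtp:[mail.example.com]:2525'."""
    if ":" in relay_smtp:
        host, port = relay_smtp.split(":", 1)
        return "smtp:[{0}]:{1}".format(host, port)
    return "smtp:[{0}]".format(relay_smtp)

assert postfix_transport_entry("mail.example.com") == "smtp:[mail.example.com]"
assert postfix_transport_entry("mail.example.com:2525") == "smtp:[mail.example.com]:2525"
```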
gh_patches_debug_27614
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-2326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some Errors cannot render their exceptions during the creation of portal
This is a follow-up for #2203.
An exception of the following type cannot be rendered in Plone 5.2:
```
ValueError
Traceback (innermost last):
Module ZServer.ZPublisher.Publish, line 144, in publish
Module ZPublisher.mapply, line 85, in mapply
Module Products.PDBDebugMode.runcall, line 68, in pdb_runcall
Module ZServer.ZPublisher.Publish, line 44, in call_object
Module Products.CMFPlone.browser.admin, line 271, in __call__
Module Products.CMFPlone.factory, line 146, in addPloneSite
Module Products.GenericSetup.tool, line 388, in runAllImportStepsFromProfile
__traceback_info__: profile-Products.CMFPlone:plone
Module Products.GenericSetup.tool, line 1433, in _runImportStepsFromContext
Module Products.GenericSetup.tool, line 1245, in _doRunImportStep
__traceback_info__: rolemap
Module Products.GenericSetup.rolemap, line 102, in importRolemap
Module <string>, line 8, in manage_permission
Module AccessControl.requestmethod, line 79, in _curried
Module OFS.role, line 86, in manage_permission
Module AccessControl.rolemanager, line 224, in manage_permission
ValueError: The permission <em>Use Database Methods</em> is invalid.
```
The issue is that the portal and the main_template exist during rendering of ExceptionView, but the main_template cannot be rendered because the context of the exception is not the Plone site but the Application. Thus the exception template cannot be rendered properly. To fix that, I suggest we try to render main_template and switch to the basic_template when that fails.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/exceptions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from AccessControl import getSecurityManager
3 from Products.Five import BrowserView
4 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
5 from zExceptions.ExceptionFormatter import format_exception
6 from zope.component.hooks import getSite
7
8 import json
9 import sys
10
11
12 class ExceptionView(BrowserView):
13 basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')
14
15 def is_manager(self):
16 return getSecurityManager().checkPermission(
17 'Manage portal', self.context)
18
19 def __call__(self):
20 exception = self.context
21 self.context = self.__parent__
22 request = self.request
23
24 error_type = exception.__class__.__name__
25 exc_type, value, traceback = sys.exc_info()
26 error_tb = ''.join(
27 format_exception(exc_type, value, traceback, as_html=True))
28 request.response.setStatus(exc_type)
29
30 # Indicate exception as JSON
31 if "text/html" not in request.getHeader('Accept', ''):
32 request.response.setHeader("Content-Type", "application/json")
33 return json.dumps({
34 'error_type': error_type,
35 })
36
37 if getSite() is None:
38 # We cannot get the site, so we cannot render our nice template
39 template = self.basic_template
40 else:
41 # Use a simplified template if main_template is not available
42 try:
43 self.context.unrestrictedTraverse('main_template')
44 except:
45 template = self.basic_template
46 else:
47 template = self.index
48
49 # Render page with user-facing error notice
50 request.set('disable_border', True)
51 request.set('disable_plone.leftcolumn', True)
52 request.set('disable_plone.rightcolumn', True)
53
54 return template(
55 error_type=error_type,
56 error_tb=error_tb,
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py
--- a/Products/CMFPlone/browser/exceptions.py
+++ b/Products/CMFPlone/browser/exceptions.py
@@ -3,7 +3,6 @@
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zExceptions.ExceptionFormatter import format_exception
-from zope.component.hooks import getSite
import json
import sys
@@ -34,24 +33,16 @@
'error_type': error_type,
})
- if getSite() is None:
- # We cannot get the site, so we cannot render our nice template
- template = self.basic_template
- else:
- # Use a simplified template if main_template is not available
- try:
- self.context.unrestrictedTraverse('main_template')
- except:
- template = self.basic_template
- else:
- template = self.index
-
# Render page with user-facing error notice
request.set('disable_border', True)
request.set('disable_plone.leftcolumn', True)
request.set('disable_plone.rightcolumn', True)
- return template(
- error_type=error_type,
- error_tb=error_tb,
- )
+ try:
+ return self.index(
+ error_type=error_type,
+ error_tb=error_tb)
+ except:
+ return self.basic_template(
+ error_type=error_type,
+ error_tb=error_tb)
|
{"golden_diff": "diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py\n--- a/Products/CMFPlone/browser/exceptions.py\n+++ b/Products/CMFPlone/browser/exceptions.py\n@@ -3,7 +3,6 @@\n from Products.Five import BrowserView\n from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n from zExceptions.ExceptionFormatter import format_exception\n-from zope.component.hooks import getSite\n \n import json\n import sys\n@@ -34,24 +33,16 @@\n 'error_type': error_type,\n })\n \n- if getSite() is None:\n- # We cannot get the site, so we cannot render our nice template\n- template = self.basic_template\n- else:\n- # Use a simplified template if main_template is not available\n- try:\n- self.context.unrestrictedTraverse('main_template')\n- except:\n- template = self.basic_template\n- else:\n- template = self.index\n-\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n \n- return template(\n- error_type=error_type,\n- error_tb=error_tb,\n- )\n+ try:\n+ return self.index(\n+ error_type=error_type,\n+ error_tb=error_tb)\n+ except:\n+ return self.basic_template(\n+ error_type=error_type,\n+ error_tb=error_tb)\n", "issue": "Some Errors cannot render their exceptions during the creation of portal\nThis is a follow-up for #2203.\r\n\r\nA exception of the following type cannot rendered in Plone 5.2:\r\n\r\n```\r\nValueError\r\n\r\nTraceback (innermost last):\r\n\r\n Module ZServer.ZPublisher.Publish, line 144, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module Products.PDBDebugMode.runcall, line 68, in pdb_runcall\r\n Module ZServer.ZPublisher.Publish, line 44, in call_object\r\n Module Products.CMFPlone.browser.admin, line 271, in __call__\r\n Module Products.CMFPlone.factory, line 146, in addPloneSite\r\n Module Products.GenericSetup.tool, line 388, in runAllImportStepsFromProfile\r\n __traceback_info__: profile-Products.CMFPlone:plone\r\n Module Products.GenericSetup.tool, line 1433, in _runImportStepsFromContext\r\n Module Products.GenericSetup.tool, line 1245, in _doRunImportStep\r\n __traceback_info__: rolemap\r\n Module Products.GenericSetup.rolemap, line 102, in importRolemap\r\n Module <string>, line 8, in manage_permission\r\n Module AccessControl.requestmethod, line 79, in _curried\r\n Module OFS.role, line 86, in manage_permission\r\n Module AccessControl.rolemanager, line 224, in manage_permission\r\n\r\nValueError: The permission <em>Use Database Methods</em> is invalid. \r\n```\r\n\r\nThe issue is that the portal and the main_template exist during rendering of ExceptionView but the main_template cannot be rendered because the context of the exception is not the Plone site but the Application. Thus the exception-template cannot be rendered properly. 
To fix that i suggest we try to render main_template and switch to the basic_template when that fails.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\nfrom zope.component.hooks import getSite\n\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n if getSite() is None:\n # We cannot get the site, so we cannot render our nice template\n template = self.basic_template\n else:\n # Use a simplified template if main_template is not available\n try:\n self.context.unrestrictedTraverse('main_template')\n except:\n template = self.basic_template\n else:\n template = self.index\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n return template(\n error_type=error_type,\n error_tb=error_tb,\n )\n", "path": "Products/CMFPlone/browser/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\n\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n try:\n return self.index(\n error_type=error_type,\n error_tb=error_tb)\n except:\n return self.basic_template(\n error_type=error_type,\n error_tb=error_tb)\n", "path": "Products/CMFPlone/browser/exceptions.py"}]}
| 1,185 | 352 |
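On the CMFPlone fix above: rather than probing for the site and for `main_template` up front, the exception view now attempts the full error page and falls back to the bare-bones template only when rendering raises. A minimal sketch of that render-with-fallback pattern, using plain callables in place of the real Zope page templates, is below.

```python
def render_error_page(full_template, basic_template, **context):
    """Try the themed error page first; fall back to the minimal one on any failure."""
    try:
        return full_template(**context)
    except Exception:
        # main_template may be unreachable (e.g. the acquisition context is the
        # Zope application root, not the Plone site), so use a template with no
        # dependencies on the portal.
        return basic_template(**context)

def broken_full_template(**context):
    raise ValueError("main_template cannot be rendered from this context")

def basic_template(**context):
    return "<h1>{error_type}</h1>".format(**context)

print(render_error_page(broken_full_template, basic_template, error_type="ValueError"))
```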
gh_patches_debug_2405
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-1221
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Published package is missing discovery files in discovery_cache
Many thanks to @wyk9787 for noticing this and reaching out.
All calls to `discovery.build()` using `2.0.0` fail with "unknown api name or version".
```python
from googleapiclient import discovery
client = discovery.build("cloudprofiler", "v2")
```
This is because the published package has no `discovery_cache/documents` directory.
1. `python3 -m venv env`
2. `source env/bin/activate`
3. `python3 -m pip install google-api-python-client`
4. `ls env/lib/python*/site-packages/googleapiclient/discovery_cache`
```
busunkim@busunkim:~/github$ ls env/lib/python*/site-packages/googleapiclient/discovery_cache
appengine_memcache.py base.py file_cache.py __init__.py __pycache__
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (3, 6):
25 print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr)
26 sys.exit(1)
27
28 import io
29 import os
30 from setuptools import setup
31
32 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
33
34 install_requires = [
35 "httplib2>=0.15.0,<1dev",
36 "google-auth>=1.16.0,<2dev",
37 "google-auth-httplib2>=0.0.3",
38 "google-api-core>=1.21.0,<2dev",
39 "six>=1.13.0,<2dev",
40 "uritemplate>=3.0.0,<4dev",
41 ]
42
43 package_root = os.path.abspath(os.path.dirname(__file__))
44
45 readme_filename = os.path.join(package_root, "README.md")
46 with io.open(readme_filename, encoding="utf-8") as readme_file:
47 readme = readme_file.read()
48
49 version = "2.0.1"
50
51 setup(
52 name="google-api-python-client",
53 version=version,
54 description="Google API Client Library for Python",
55 long_description=readme,
56 long_description_content_type='text/markdown',
57 author="Google LLC",
58 author_email="[email protected]",
59 url="https://github.com/googleapis/google-api-python-client/",
60 install_requires=install_requires,
61 python_requires=">=3.6",
62 packages=packages,
63 package_data={},
64 license="Apache 2.0",
65 keywords="google api client",
66 classifiers=[
67 "Programming Language :: Python :: 3",
68 "Programming Language :: Python :: 3.6",
69 "Programming Language :: Python :: 3.7",
70 "Programming Language :: Python :: 3.8",
71 "Programming Language :: Python :: 3.9",
72 "Development Status :: 5 - Production/Stable",
73 "Intended Audience :: Developers",
74 "License :: OSI Approved :: Apache Software License",
75 "Operating System :: OS Independent",
76 "Topic :: Internet :: WWW/HTTP",
77 ],
78 )
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -60,7 +60,7 @@
install_requires=install_requires,
python_requires=">=3.6",
packages=packages,
- package_data={},
+ package_data={"googleapiclient": ["discovery_cache/documents/*.json"]},
license="Apache 2.0",
keywords="google api client",
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,7 +60,7 @@\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n- package_data={},\n+ package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n", "issue": "Published package is missing discovery files in discovery_cache\nMany thanks to the @wyk9787 for noticing this and reaching out.\r\n\r\nAll calls to `discovery.build()` using `2.0.0` fail with \"unknown api name or version\".\r\n\r\n```python\r\nfrom googleapiclient import discovery\r\n\r\nclient = discovery.build(\"cloudprofiler\", \"v2\")\r\n```\r\n\r\n\r\nThis is because the published package has no `discovery_cache/documents` directory. \r\n\r\n1. `python3 -m venv env`\r\n2. `source env/bin/activate`\r\n3. `python3 -m pip install google-api-python-client`\r\n4. `ls env/lib/python*/site-packages/googleapiclient/discovery_cache`\r\n\r\n```\r\nbusunkim@busunkim:~/github$ ls env/lib/python*/site-packages/googleapiclient/discovery_cache\r\nappengine_memcache.py base.py file_cache.py __init__.py __pycache__\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n 
\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
| 1,245 | 97 |
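For the packaging fix above: the discovery JSON documents are plain data files, and setuptools only ships non-Python files that are explicitly declared, so a wheel built with an empty `package_data` omits the whole `discovery_cache/documents` directory. A generic illustration of the declaration follows; `example-package` is a placeholder name, not the real project metadata.

```python
# setup.py (excerpt) -- generic illustration of shipping bundled data files
from setuptools import setup

setup(
    name="example-package",
    packages=["googleapiclient", "googleapiclient.discovery_cache"],
    # Without this, only .py files are packaged and the bundled JSON
    # discovery documents never reach the installed site-packages.
    package_data={"googleapiclient": ["discovery_cache/documents/*.json"]},
)
```

For source distributions this is typically paired with a MANIFEST.in entry such as `recursive-include googleapiclient/discovery_cache/documents *.json`.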
gh_patches_debug_9729
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-1494
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
can't stay logged in
from @amaltson via [Twitter](https://twitter.com/amaltson/status/332678066985893889):
> trying to log in with GitHub, each time I go to my account page, I get logged out..
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/security/authentication.py`
Content:
```
1 """Defines website authentication helpers.
2 """
3 import rfc822
4 import time
5
6 from aspen import Response
7 from gittip.security import csrf
8 from gittip.security.user import User
9
10
11 BEGINNING_OF_EPOCH = rfc822.formatdate(0)
12 TIMEOUT = 60 * 60 * 24 * 7 # one week
13 ROLES = ['anonymous', 'authenticated', 'owner', 'admin']
14 ROLES_SHOULD_BE = "It should be one of: {}.".format(', '.join(ROLES))
15
16
17 class NoMinimumRoleSpecified(Exception):
18 def __str__(self):
19 return "There is no minimum_role specified in the simplate at {}. {}" \
20 .format(self.args[0], ROLES_SHOULD_BE)
21
22 class BadMinimumRole(Exception):
23 def __str__(self):
24 return "The minimum_role specific in {} is bad: {}. {}" \
25 .format(self.args[0], self.args[1], ROLES_SHOULD_BE)
26
27
28 def inbound(request):
29 """Authenticate from a cookie or an API key in basic auth.
30 """
31 user = None
32 if 'Authorization' in request.headers:
33 header = request.headers['authorization']
34 if header.startswith('Basic '):
35 creds = header[len('Basic '):].decode('base64')
36 token, ignored = creds.split(':')
37 user = User.from_api_key(token)
38
39 # We don't require CSRF if they basically authenticated.
40 csrf_token = csrf._get_new_csrf_key()
41 request.headers.cookie['csrf_token'] = csrf_token
42 request.headers['X-CSRF-TOKEN'] = csrf_token
43 if 'Referer' not in request.headers:
44 request.headers['Referer'] = \
45 'https://%s/' % csrf._get_host(request)
46 elif 'session' in request.headers.cookie:
47 token = request.headers.cookie['session'].value
48 user = User.from_session_token(token)
49
50 if user is None:
51 user = User()
52 request.context['user'] = user
53
54
55 def check_role(request):
56 """Given a request object, possibly raise Response(403).
57 """
58
59 # XXX We can't use this yet because we don't have an inbound Aspen hook
60 # that fires after the first page of the simplate is exec'd.
61
62 context = request.context
63 path = request.line.uri.path
64
65 if 'minimum_role' not in context:
66 raise NoMinimumRoleSpecified(request.fs)
67
68 minimum_role = context['minimum_role']
69 if minimum_role not in ROLES:
70 raise BadMinimumRole(request.fs, minimum_role)
71
72 user = context['user']
73 highest_role = user.get_highest_role(path.get('username', None))
74 if ROLES.index(highest_role) < ROLES.index(minimum_role):
75 request.redirect('..')
76
77
78 def outbound(response):
79 if 'user' in response.request.context:
80 user = response.request.context['user']
81 if not isinstance(user, User):
82 raise Response(400, "If you define 'user' in a simplate it has to "
83 "be a User instance.")
84 else:
85 user = User()
86
87 if user.ANON: # user is anonymous
88 if 'session' not in response.request.headers.cookie:
89 # no cookie in the request, don't set one on response
90 return
91 else:
92 # expired cookie in the request, instruct browser to delete it
93 response.headers.cookie['session'] = ''
94 expires = 0
95 else: # user is authenticated
96 response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache
97 response.headers.cookie['session'] = user.participant.session_token
98 expires = time.time() + TIMEOUT
99 user.keep_signed_in_until(expires)
100
101 cookie = response.headers.cookie['session']
102 # I am not setting domain, because it is supposed to default to what we
103 # want: the domain of the object requested.
104 #cookie['domain']
105 cookie['path'] = '/'
106 cookie['expires'] = rfc822.formatdate(expires)
107 cookie['httponly'] = "Yes, please."
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gittip/security/authentication.py b/gittip/security/authentication.py
--- a/gittip/security/authentication.py
+++ b/gittip/security/authentication.py
@@ -3,6 +3,7 @@
import rfc822
import time
+import gittip
from aspen import Response
from gittip.security import csrf
from gittip.security.user import User
@@ -105,3 +106,5 @@
cookie['path'] = '/'
cookie['expires'] = rfc822.formatdate(expires)
cookie['httponly'] = "Yes, please."
+ if gittip.canonical_scheme == 'https':
+ cookie['secure'] = "Yes, please."
|
{"golden_diff": "diff --git a/gittip/security/authentication.py b/gittip/security/authentication.py\n--- a/gittip/security/authentication.py\n+++ b/gittip/security/authentication.py\n@@ -3,6 +3,7 @@\n import rfc822\n import time\n \n+import gittip\n from aspen import Response\n from gittip.security import csrf\n from gittip.security.user import User\n@@ -105,3 +106,5 @@\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n+ if gittip.canonical_scheme == 'https':\n+ cookie['secure'] = \"Yes, please.\"\n", "issue": "can't stay logged in\nfrom @amaltson via [Twitter](https://twitter.com/amaltson/status/332678066985893889):\n\n> trying to log in with GitHub, each time I go to my account page, I get logged out.. \n\n", "before_files": [{"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport rfc822\nimport time\n\nfrom aspen import Response\nfrom gittip.security import csrf\nfrom gittip.security.user import User\n\n\nBEGINNING_OF_EPOCH = rfc822.formatdate(0)\nTIMEOUT = 60 * 60 * 24 * 7 # one week\nROLES = ['anonymous', 'authenticated', 'owner', 'admin']\nROLES_SHOULD_BE = \"It should be one of: {}.\".format(', '.join(ROLES))\n\n\nclass NoMinimumRoleSpecified(Exception):\n def __str__(self):\n return \"There is no minimum_role specified in the simplate at {}. {}\" \\\n .format(self.args[0], ROLES_SHOULD_BE)\n\nclass BadMinimumRole(Exception):\n def __str__(self):\n return \"The minimum_role specific in {} is bad: {}. {}\" \\\n .format(self.args[0], self.args[1], ROLES_SHOULD_BE)\n\n\ndef inbound(request):\n \"\"\"Authenticate from a cookie or an API key in basic auth.\n \"\"\"\n user = None\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if header.startswith('Basic '):\n creds = header[len('Basic '):].decode('base64')\n token, ignored = creds.split(':')\n user = User.from_api_key(token)\n\n # We don't require CSRF if they basically authenticated.\n csrf_token = csrf._get_new_csrf_key()\n request.headers.cookie['csrf_token'] = csrf_token\n request.headers['X-CSRF-TOKEN'] = csrf_token\n if 'Referer' not in request.headers:\n request.headers['Referer'] = \\\n 'https://%s/' % csrf._get_host(request)\n elif 'session' in request.headers.cookie:\n token = request.headers.cookie['session'].value\n user = User.from_session_token(token)\n\n if user is None:\n user = User()\n request.context['user'] = user\n\n\ndef check_role(request):\n \"\"\"Given a request object, possibly raise Response(403).\n \"\"\"\n\n # XXX We can't use this yet because we don't have an inbound Aspen hook\n # that fires after the first page of the simplate is exec'd.\n\n context = request.context\n path = request.line.uri.path\n\n if 'minimum_role' not in context:\n raise NoMinimumRoleSpecified(request.fs)\n\n minimum_role = context['minimum_role']\n if minimum_role not in ROLES:\n raise BadMinimumRole(request.fs, minimum_role)\n\n user = context['user']\n highest_role = user.get_highest_role(path.get('username', None))\n if ROLES.index(highest_role) < ROLES.index(minimum_role):\n request.redirect('..')\n\n\ndef outbound(response):\n if 'user' in response.request.context:\n user = response.request.context['user']\n if not isinstance(user, User):\n raise Response(400, \"If you define 'user' in a simplate it has to \"\n \"be a User instance.\")\n else:\n user = User()\n\n if user.ANON: # user is anonymous\n if 'session' not in response.request.headers.cookie:\n # no cookie in the request, don't set one on response\n 
return\n else:\n # expired cookie in the request, instruct browser to delete it\n response.headers.cookie['session'] = ''\n expires = 0\n else: # user is authenticated\n response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache\n response.headers.cookie['session'] = user.participant.session_token\n expires = time.time() + TIMEOUT\n user.keep_signed_in_until(expires)\n\n cookie = response.headers.cookie['session']\n # I am not setting domain, because it is supposed to default to what we\n # want: the domain of the object requested.\n #cookie['domain']\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n", "path": "gittip/security/authentication.py"}], "after_files": [{"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport rfc822\nimport time\n\nimport gittip\nfrom aspen import Response\nfrom gittip.security import csrf\nfrom gittip.security.user import User\n\n\nBEGINNING_OF_EPOCH = rfc822.formatdate(0)\nTIMEOUT = 60 * 60 * 24 * 7 # one week\nROLES = ['anonymous', 'authenticated', 'owner', 'admin']\nROLES_SHOULD_BE = \"It should be one of: {}.\".format(', '.join(ROLES))\n\n\nclass NoMinimumRoleSpecified(Exception):\n def __str__(self):\n return \"There is no minimum_role specified in the simplate at {}. {}\" \\\n .format(self.args[0], ROLES_SHOULD_BE)\n\nclass BadMinimumRole(Exception):\n def __str__(self):\n return \"The minimum_role specific in {} is bad: {}. {}\" \\\n .format(self.args[0], self.args[1], ROLES_SHOULD_BE)\n\n\ndef inbound(request):\n \"\"\"Authenticate from a cookie or an API key in basic auth.\n \"\"\"\n user = None\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if header.startswith('Basic '):\n creds = header[len('Basic '):].decode('base64')\n token, ignored = creds.split(':')\n user = User.from_api_key(token)\n\n # We don't require CSRF if they basically authenticated.\n csrf_token = csrf._get_new_csrf_key()\n request.headers.cookie['csrf_token'] = csrf_token\n request.headers['X-CSRF-TOKEN'] = csrf_token\n if 'Referer' not in request.headers:\n request.headers['Referer'] = \\\n 'https://%s/' % csrf._get_host(request)\n elif 'session' in request.headers.cookie:\n token = request.headers.cookie['session'].value\n user = User.from_session_token(token)\n\n if user is None:\n user = User()\n request.context['user'] = user\n\n\ndef check_role(request):\n \"\"\"Given a request object, possibly raise Response(403).\n \"\"\"\n\n # XXX We can't use this yet because we don't have an inbound Aspen hook\n # that fires after the first page of the simplate is exec'd.\n\n context = request.context\n path = request.line.uri.path\n\n if 'minimum_role' not in context:\n raise NoMinimumRoleSpecified(request.fs)\n\n minimum_role = context['minimum_role']\n if minimum_role not in ROLES:\n raise BadMinimumRole(request.fs, minimum_role)\n\n user = context['user']\n highest_role = user.get_highest_role(path.get('username', None))\n if ROLES.index(highest_role) < ROLES.index(minimum_role):\n request.redirect('..')\n\n\ndef outbound(response):\n if 'user' in response.request.context:\n user = response.request.context['user']\n if not isinstance(user, User):\n raise Response(400, \"If you define 'user' in a simplate it has to \"\n \"be a User instance.\")\n else:\n user = User()\n\n if user.ANON: # user is anonymous\n if 'session' not in response.request.headers.cookie:\n # no cookie in the request, don't set one on response\n return\n else:\n # expired cookie in the 
request, instruct browser to delete it\n response.headers.cookie['session'] = ''\n expires = 0\n else: # user is authenticated\n response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache\n response.headers.cookie['session'] = user.participant.session_token\n expires = time.time() + TIMEOUT\n user.keep_signed_in_until(expires)\n\n cookie = response.headers.cookie['session']\n # I am not setting domain, because it is supposed to default to what we\n # want: the domain of the object requested.\n #cookie['domain']\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n if gittip.canonical_scheme == 'https':\n cookie['secure'] = \"Yes, please.\"\n", "path": "gittip/security/authentication.py"}]}
| 1,434 | 160 |
gh_patches_debug_31388
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3100
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issues with proxies
I've started Conan server in my organization's local network. For external sites (like github) proxy must be used, but not for computers in local network.
So, I have there env variables:
```
HTTP_PROXY=http://proxy.domain:3128
HTTP_PROXY=https://proxy.domain:3128
NO_PROXY=localhost,127.0.0.1,*.domain,192.168.*
```
With these variables I can create packages with recipes in my local cache (and this recipes can download external sources), but cannot download recipes and prebuilt packages from organization's Conan server.
I've tried to set same settings in conan.conf, but had no luck. Also I've tried to use Conan server domain name and/or IP address in NO_PROXY with same result:
```
> conan search -r <remotename> "OpenCV*"
ERROR: Permission denied for user: '<username>'. [Remote: <remotename>]
```
But if I disable all proxies (comment in conan.conf and unset env variables), it works just fine:
```
Existing package recipes:
OpenCV/3.3.1@lukyanets/testing
```
Conan v. 1.0.1, OS: Arch Linux, CentOS 6, Windows 10.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/rest/conan_requester.py`
Content:
```
1 import fnmatch
2 import os
3
4 from conans.util.files import save
5
6
7 class ConanRequester(object):
8
9 def __init__(self, requester, client_cache, timeout):
10 self.proxies = client_cache.conan_config.proxies or {}
11 self._no_proxy_match = [el.strip() for el in
12 self.proxies.pop("no_proxy_match", "").split(",")]
13 self._timeout_seconds = timeout
14
15 # Retrocompatibility with deprecated no_proxy
16 # Account for the requests NO_PROXY env variable, not defined as a proxy like http=
17 no_proxy = self.proxies.pop("no_proxy", None)
18 if no_proxy:
19 os.environ["NO_PROXY"] = no_proxy
20
21 self._requester = requester
22 self._client_cache = client_cache
23
24 if not os.path.exists(self._client_cache.cacert_path):
25 from conans.client.rest.cacert import cacert
26 save(self._client_cache.cacert_path, cacert)
27
28 if not os.path.exists(client_cache.client_cert_path):
29 self._client_certificates = None
30 else:
31 if os.path.exists(client_cache.client_cert_key_path):
32 # Requests can accept a tuple with cert and key, or just an string with a
33 # file having both
34 self._client_certificates = (client_cache.client_cert_path,
35 client_cache.client_cert_key_path)
36 else:
37 self._client_certificates = client_cache.client_cert_path
38
39 def _should_skip_proxy(self, url):
40
41 for entry in self._no_proxy_match:
42 if fnmatch.fnmatch(url, entry):
43 return True
44
45 return False
46
47 def _add_kwargs(self, url, kwargs):
48 if kwargs.get("verify", None) is True:
49 kwargs["verify"] = self._client_cache.cacert_path
50 else:
51 kwargs["verify"] = False
52 kwargs["cert"] = self._client_certificates
53 if self.proxies:
54 if not self._should_skip_proxy(url):
55 kwargs["proxies"] = self.proxies
56 if self._timeout_seconds:
57 kwargs["timeout"] = self._timeout_seconds
58 return kwargs
59
60 def get(self, url, **kwargs):
61 return self._requester.get(url, **self._add_kwargs(url, kwargs))
62
63 def put(self, url, **kwargs):
64 return self._requester.put(url, **self._add_kwargs(url, kwargs))
65
66 def delete(self, url, **kwargs):
67 return self._requester.delete(url, **self._add_kwargs(url, kwargs))
68
69 def post(self, url, **kwargs):
70 return self._requester.post(url, **self._add_kwargs(url, kwargs))
71
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/rest/conan_requester.py b/conans/client/rest/conan_requester.py
--- a/conans/client/rest/conan_requester.py
+++ b/conans/client/rest/conan_requester.py
@@ -9,7 +9,7 @@
def __init__(self, requester, client_cache, timeout):
self.proxies = client_cache.conan_config.proxies or {}
self._no_proxy_match = [el.strip() for el in
- self.proxies.pop("no_proxy_match", "").split(",")]
+ self.proxies.pop("no_proxy_match", "").split(",") if el]
self._timeout_seconds = timeout
# Retrocompatibility with deprecated no_proxy
@@ -58,14 +58,28 @@
return kwargs
def get(self, url, **kwargs):
- return self._requester.get(url, **self._add_kwargs(url, kwargs))
+ return self._call_method("get", url, **kwargs)
def put(self, url, **kwargs):
- return self._requester.put(url, **self._add_kwargs(url, kwargs))
+ return self._call_method("put", url, **kwargs)
def delete(self, url, **kwargs):
- return self._requester.delete(url, **self._add_kwargs(url, kwargs))
+ return self._call_method("delete", url, **kwargs)
def post(self, url, **kwargs):
- return self._requester.post(url, **self._add_kwargs(url, kwargs))
-
+ return self._call_method("post", url, **kwargs)
+
+ def _call_method(self, method, url, **kwargs):
+ popped = False
+ if self.proxies or self._no_proxy_match:
+ old_env = dict(os.environ)
+ # Clean the proxies from the environ and use the conan specified proxies
+ for var_name in ("http_proxy", "https_proxy", "no_proxy"):
+ popped = popped or os.environ.pop(var_name, None)
+ popped = popped or os.environ.pop(var_name.upper(), None)
+ try:
+ return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs))
+ finally:
+ if popped:
+ os.environ.clear()
+ os.environ.update(old_env)
|
{"golden_diff": "diff --git a/conans/client/rest/conan_requester.py b/conans/client/rest/conan_requester.py\n--- a/conans/client/rest/conan_requester.py\n+++ b/conans/client/rest/conan_requester.py\n@@ -9,7 +9,7 @@\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n- self.proxies.pop(\"no_proxy_match\", \"\").split(\",\")]\n+ self.proxies.pop(\"no_proxy_match\", \"\").split(\",\") if el]\n self._timeout_seconds = timeout\n \n # Retrocompatibility with deprecated no_proxy\n@@ -58,14 +58,28 @@\n return kwargs\n \n def get(self, url, **kwargs):\n- return self._requester.get(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"get\", url, **kwargs)\n \n def put(self, url, **kwargs):\n- return self._requester.put(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"put\", url, **kwargs)\n \n def delete(self, url, **kwargs):\n- return self._requester.delete(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"delete\", url, **kwargs)\n \n def post(self, url, **kwargs):\n- return self._requester.post(url, **self._add_kwargs(url, kwargs))\n-\n+ return self._call_method(\"post\", url, **kwargs)\n+\n+ def _call_method(self, method, url, **kwargs):\n+ popped = False\n+ if self.proxies or self._no_proxy_match:\n+ old_env = dict(os.environ)\n+ # Clean the proxies from the environ and use the conan specified proxies\n+ for var_name in (\"http_proxy\", \"https_proxy\", \"no_proxy\"):\n+ popped = popped or os.environ.pop(var_name, None)\n+ popped = popped or os.environ.pop(var_name.upper(), None)\n+ try:\n+ return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs))\n+ finally:\n+ if popped:\n+ os.environ.clear()\n+ os.environ.update(old_env)\n", "issue": "Issues with proxies\nI've started Conan server in my organization's local network. For external sites (like github) proxy must be used, but not for computers in local network.\r\nSo, I have there env variables:\r\n```\r\nHTTP_PROXY=http://proxy.domain:3128\r\nHTTP_PROXY=https://proxy.domain:3128\r\nNO_PROXY=localhost,127.0.0.1,*.domain,192.168.*\r\n```\r\nWith these variables I can create packages with recipes in my local cache (and this recipes can download external sources), but cannot download recipes and prebuilt packages from organization's Conan server.\r\n\r\nI've tried to set same settings in conan.conf, but had no luck. Also I've tried to use Conan server domain name and/or IP address in NO_PROXY with same result:\r\n```\r\n> conan search -r <remotename> \"OpenCV*\"\r\nERROR: Permission denied for user: '<username>'. [Remote: <remotename>]\r\n```\r\n\r\nBut if I disable all proxies (comment in conan.conf and unset env variables), it works just fine:\r\n```\r\nExisting package recipes:\r\n\r\nOpenCV/3.3.1@lukyanets/testing\r\n```\r\n\r\nConan v. 
1.0.1, OS: Arch Linux, CentOS 6, Windows 10.\n", "before_files": [{"content": "import fnmatch\nimport os\n\nfrom conans.util.files import save\n\n\nclass ConanRequester(object):\n\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n self.proxies.pop(\"no_proxy_match\", \"\").split(\",\")]\n self._timeout_seconds = timeout\n\n # Retrocompatibility with deprecated no_proxy\n # Account for the requests NO_PROXY env variable, not defined as a proxy like http=\n no_proxy = self.proxies.pop(\"no_proxy\", None)\n if no_proxy:\n os.environ[\"NO_PROXY\"] = no_proxy\n\n self._requester = requester\n self._client_cache = client_cache\n\n if not os.path.exists(self._client_cache.cacert_path):\n from conans.client.rest.cacert import cacert\n save(self._client_cache.cacert_path, cacert)\n\n if not os.path.exists(client_cache.client_cert_path):\n self._client_certificates = None\n else:\n if os.path.exists(client_cache.client_cert_key_path):\n # Requests can accept a tuple with cert and key, or just an string with a\n # file having both\n self._client_certificates = (client_cache.client_cert_path,\n client_cache.client_cert_key_path)\n else:\n self._client_certificates = client_cache.client_cert_path\n\n def _should_skip_proxy(self, url):\n\n for entry in self._no_proxy_match:\n if fnmatch.fnmatch(url, entry):\n return True\n\n return False\n\n def _add_kwargs(self, url, kwargs):\n if kwargs.get(\"verify\", None) is True:\n kwargs[\"verify\"] = self._client_cache.cacert_path\n else:\n kwargs[\"verify\"] = False\n kwargs[\"cert\"] = self._client_certificates\n if self.proxies:\n if not self._should_skip_proxy(url):\n kwargs[\"proxies\"] = self.proxies\n if self._timeout_seconds:\n kwargs[\"timeout\"] = self._timeout_seconds\n return kwargs\n\n def get(self, url, **kwargs):\n return self._requester.get(url, **self._add_kwargs(url, kwargs))\n\n def put(self, url, **kwargs):\n return self._requester.put(url, **self._add_kwargs(url, kwargs))\n\n def delete(self, url, **kwargs):\n return self._requester.delete(url, **self._add_kwargs(url, kwargs))\n\n def post(self, url, **kwargs):\n return self._requester.post(url, **self._add_kwargs(url, kwargs))\n\n", "path": "conans/client/rest/conan_requester.py"}], "after_files": [{"content": "import fnmatch\nimport os\n\nfrom conans.util.files import save\n\n\nclass ConanRequester(object):\n\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n self.proxies.pop(\"no_proxy_match\", \"\").split(\",\") if el]\n self._timeout_seconds = timeout\n\n # Retrocompatibility with deprecated no_proxy\n # Account for the requests NO_PROXY env variable, not defined as a proxy like http=\n no_proxy = self.proxies.pop(\"no_proxy\", None)\n if no_proxy:\n os.environ[\"NO_PROXY\"] = no_proxy\n\n self._requester = requester\n self._client_cache = client_cache\n\n if not os.path.exists(self._client_cache.cacert_path):\n from conans.client.rest.cacert import cacert\n save(self._client_cache.cacert_path, cacert)\n\n if not os.path.exists(client_cache.client_cert_path):\n self._client_certificates = None\n else:\n if os.path.exists(client_cache.client_cert_key_path):\n # Requests can accept a tuple with cert and key, or just an string with a\n # file having both\n self._client_certificates = (client_cache.client_cert_path,\n client_cache.client_cert_key_path)\n else:\n 
self._client_certificates = client_cache.client_cert_path\n\n def _should_skip_proxy(self, url):\n\n for entry in self._no_proxy_match:\n if fnmatch.fnmatch(url, entry):\n return True\n\n return False\n\n def _add_kwargs(self, url, kwargs):\n if kwargs.get(\"verify\", None) is True:\n kwargs[\"verify\"] = self._client_cache.cacert_path\n else:\n kwargs[\"verify\"] = False\n kwargs[\"cert\"] = self._client_certificates\n if self.proxies:\n if not self._should_skip_proxy(url):\n kwargs[\"proxies\"] = self.proxies\n if self._timeout_seconds:\n kwargs[\"timeout\"] = self._timeout_seconds\n return kwargs\n\n def get(self, url, **kwargs):\n return self._call_method(\"get\", url, **kwargs)\n\n def put(self, url, **kwargs):\n return self._call_method(\"put\", url, **kwargs)\n\n def delete(self, url, **kwargs):\n return self._call_method(\"delete\", url, **kwargs)\n\n def post(self, url, **kwargs):\n return self._call_method(\"post\", url, **kwargs)\n\n def _call_method(self, method, url, **kwargs):\n popped = False\n if self.proxies or self._no_proxy_match:\n old_env = dict(os.environ)\n # Clean the proxies from the environ and use the conan specified proxies\n for var_name in (\"http_proxy\", \"https_proxy\", \"no_proxy\"):\n popped = popped or os.environ.pop(var_name, None)\n popped = popped or os.environ.pop(var_name.upper(), None)\n try:\n return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs))\n finally:\n if popped:\n os.environ.clear()\n os.environ.update(old_env)\n", "path": "conans/client/rest/conan_requester.py"}]}
| 1,259 | 507 |
gh_patches_debug_1142
|
rasdani/github-patches
|
git_diff
|
nipy__nipype-2852
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nipype/conftest.py should be excluded from API documentation
### Summary
The auto-generated API docs include `conftest.py`, which has a fixture. Pytest has turned calling a fixture directly into an error, and apparently the fixture is getting called when the docs are generated.
This is what's currently breaking the Circle builds.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/build_interface_docs.py`
Content:
```
1 #!/usr/bin/env python
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 et:
4 """Script to auto-generate interface docs.
5 """
6 from __future__ import print_function, unicode_literals
7 # stdlib imports
8 import os
9 import sys
10
11 # *****************************************************************************
12 if __name__ == '__main__':
13 nipypepath = os.path.abspath('..')
14 sys.path.insert(1, nipypepath)
15 # local imports
16 from interfacedocgen import InterfaceHelpWriter
17 package = 'nipype'
18 outdir = os.path.join('interfaces', 'generated')
19 docwriter = InterfaceHelpWriter(package)
20 # Packages that should not be included in generated API docs.
21 docwriter.package_skip_patterns += [
22 '\.external$',
23 '\.fixes$',
24 '\.utils$',
25 '\.pipeline',
26 '\.testing',
27 '\.caching',
28 '\.scripts',
29 ]
30 # Modules that should not be included in generated API docs.
31 docwriter.module_skip_patterns += [
32 '\.version$',
33 '\.interfaces\.base$',
34 '\.interfaces\.matlab$',
35 '\.interfaces\.rest$',
36 '\.interfaces\.pymvpa$',
37 '\.interfaces\.slicer\.generate_classes$',
38 '\.interfaces\.spm\.base$',
39 '\.interfaces\.traits',
40 '\.pipeline\.alloy$',
41 '\.pipeline\.s3_node_wrapper$',
42 '\.testing',
43 '\.scripts',
44 ]
45 docwriter.class_skip_patterns += [
46 'AFNICommand',
47 'ANTS',
48 'FSLCommand',
49 'FS',
50 'Info',
51 '^SPM',
52 'Tester',
53 'Spec$',
54 'Numpy'
55 # NipypeTester raises an
56 # exception when instantiated in
57 # InterfaceHelpWriter.generate_api_doc
58 'NipypeTester',
59 ]
60 docwriter.write_api_docs(outdir)
61 docwriter.write_index(outdir, 'gen', relative_to='interfaces')
62 print('%d files written' % len(docwriter.written_modules))
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py
--- a/tools/build_interface_docs.py
+++ b/tools/build_interface_docs.py
@@ -41,6 +41,7 @@
'\.pipeline\.s3_node_wrapper$',
'\.testing',
'\.scripts',
+ '\.conftest',
]
docwriter.class_skip_patterns += [
'AFNICommand',
|
{"golden_diff": "diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py\n--- a/tools/build_interface_docs.py\n+++ b/tools/build_interface_docs.py\n@@ -41,6 +41,7 @@\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n+ '\\.conftest',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n", "issue": "nipype/conftest.py should be excluded from API documentation\n### Summary\r\n\r\nThe auto-generated API docs include `conftest.py`, which has a fixture. Pytest has turned calling a fixture directly into an error, and apparently the fixture is getting called when the docs are generated.\r\n\r\nThis is what's currently breaking the Circle builds.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n 
'\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n '\\.conftest',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}]}
| 915 | 89 |
gh_patches_debug_32346
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-1800
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Arch Linux tests
The Arch Linux tests are failing due to new versions of pip needing to use the `--break-system-packages` flag to install packages globally. We should update the CI to use a virtualenv here.
CI: try to bring back Arch Linux CI
Let's see if this works now with virtualenv setup
<!-- Please make sure to read the testing and linting instructions at https://github.com/pwndbg/pwndbg/blob/dev/DEVELOPING.md before creating a PR -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/wrappers/readelf.py`
Content:
```
1 from enum import Enum
2 from typing import Dict
3 from typing import List
4
5 import pwndbg.wrappers
6
7 cmd_name = "readelf"
8
9
10 class RelocationType(Enum):
11 # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf
12 # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h
13 JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT
14 GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT
15 IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE
16
17
18 @pwndbg.wrappers.OnlyWithCommand(cmd_name)
19 def get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]:
20 # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO
21 cmd = get_got_entry.cmd + ["--relocs", "--wide", local_path]
22 readelf_out = pwndbg.wrappers.call_cmd(cmd)
23
24 entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}
25 for line in readelf_out.splitlines():
26 if not line or not line[0].isdigit():
27 continue
28 category = line.split()[2]
29 # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt
30 for c in RelocationType:
31 if c.name in category:
32 entries[c].append(line)
33 return entries
34
```
Path: `gdbinit.py`
Content:
```
1 import cProfile
2 import glob
3 import locale
4 import os
5 import sys
6 import time
7 from glob import glob
8 from os import environ
9 from os import path
10
11 _profiler = cProfile.Profile()
12
13 _start_time = None
14 if environ.get("PWNDBG_PROFILE") == "1":
15 _start_time = time.time()
16 _profiler.enable()
17
18 directory, file = path.split(__file__)
19 directory = path.expanduser(directory)
20 directory = path.abspath(directory)
21
22 # Get virtualenv's site-packages path
23 venv_path = os.environ.get("PWNDBG_VENV_PATH")
24 if not venv_path:
25 venv_path = os.path.join(directory, ".venv")
26
27 if not os.path.exists(venv_path):
28 print(f"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh")
29 sys.exit(1)
30
31 site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0]
32
33 # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)
34 bin_path = os.path.join(venv_path, "bin")
35 os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH")
36
37 # Add gdb-pt-dump directory to sys.path so it can be imported
38 gdbpt = path.join(directory, "gdb-pt-dump")
39 sys.path.append(directory)
40 sys.path.append(site_pkgs_path)
41 sys.path.append(gdbpt)
42
43 # warn if the user has different encoding than utf-8
44 encoding = locale.getpreferredencoding()
45
46 if encoding != "UTF-8":
47 print("******")
48 print(f"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.")
49 print("You might try launching GDB with:")
50 print(" LC_CTYPE=C.UTF-8 gdb")
51 print(
52 "If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command"
53 )
54 print("******")
55
56 environ["PWNLIB_NOTERM"] = "1"
57
58 import pwndbg # noqa: F401
59 import pwndbg.profiling
60
61 pwndbg.profiling.init(_profiler, _start_time)
62 if environ.get("PWNDBG_PROFILE") == "1":
63 pwndbg.profiling.profiler.stop("pwndbg-load.pstats")
64 pwndbg.profiling.profiler.start()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gdbinit.py b/gdbinit.py
--- a/gdbinit.py
+++ b/gdbinit.py
@@ -2,6 +2,7 @@
import glob
import locale
import os
+import site
import sys
import time
from glob import glob
@@ -30,6 +31,14 @@
site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0]
+# add virtualenv's site-packages to sys.path and run .pth files
+site.addsitedir(site_pkgs_path)
+
+# remove existing, system-level site-packages from sys.path
+for site_packages in site.getsitepackages():
+ if site_packages in sys.path:
+ sys.path.remove(site_packages)
+
# Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)
bin_path = os.path.join(venv_path, "bin")
os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH")
@@ -37,7 +46,6 @@
# Add gdb-pt-dump directory to sys.path so it can be imported
gdbpt = path.join(directory, "gdb-pt-dump")
sys.path.append(directory)
-sys.path.append(site_pkgs_path)
sys.path.append(gdbpt)
# warn if the user has different encoding than utf-8
diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py
--- a/pwndbg/wrappers/readelf.py
+++ b/pwndbg/wrappers/readelf.py
@@ -23,7 +23,7 @@
entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}
for line in readelf_out.splitlines():
- if not line or not line[0].isdigit():
+ if not line or not line[0].isdigit() or " " not in line:
continue
category = line.split()[2]
# TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt
|
{"golden_diff": "diff --git a/gdbinit.py b/gdbinit.py\n--- a/gdbinit.py\n+++ b/gdbinit.py\n@@ -2,6 +2,7 @@\n import glob\n import locale\n import os\n+import site\n import sys\n import time\n from glob import glob\n@@ -30,6 +31,14 @@\n \n site_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n \n+# add virtualenv's site-packages to sys.path and run .pth files\n+site.addsitedir(site_pkgs_path)\n+\n+# remove existing, system-level site-packages from sys.path\n+for site_packages in site.getsitepackages():\n+ if site_packages in sys.path:\n+ sys.path.remove(site_packages)\n+\n # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\n bin_path = os.path.join(venv_path, \"bin\")\n os.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n@@ -37,7 +46,6 @@\n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n sys.path.append(directory)\n-sys.path.append(site_pkgs_path)\n sys.path.append(gdbpt)\n \n # warn if the user has different encoding than utf-8\ndiff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py\n--- a/pwndbg/wrappers/readelf.py\n+++ b/pwndbg/wrappers/readelf.py\n@@ -23,7 +23,7 @@\n \n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n- if not line or not line[0].isdigit():\n+ if not line or not line[0].isdigit() or \" \" not in line:\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n", "issue": "Fix Arch Linux tests\nThe Arch Linux tests are failing due to new versions of pip needing to use the `--break-system-packages` flag to install packages globally. 
We should update the CI to use a virtualenv here.\nCI: try to bring back Arch Linux CI\nLet's see if this works now with virtualenv setup\r\n\r\n<!-- Please make sure to read the testing and linting instructions at https://github.com/pwndbg/pwndbg/blob/dev/DEVELOPING.md before creating a PR -->\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Dict\nfrom typing import List\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\n\nclass RelocationType(Enum):\n # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf\n # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h\n JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT\n GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT\n IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE\n\n\[email protected](cmd_name)\ndef get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]:\n # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO\n cmd = get_got_entry.cmd + [\"--relocs\", \"--wide\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n if not line or not line[0].isdigit():\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n for c in RelocationType:\n if c.name in category:\n entries[c].append(line)\n return entries\n", "path": "pwndbg/wrappers/readelf.py"}, {"content": "import cProfile\nimport glob\nimport locale\nimport os\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\ndirectory, file = path.split(__file__)\ndirectory = path.expanduser(directory)\ndirectory = path.abspath(directory)\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\nif not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\nsite_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n# Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\nbin_path = os.path.join(venv_path, \"bin\")\nos.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n# Add gdb-pt-dump directory to sys.path so it can be imported\ngdbpt = path.join(directory, \"gdb-pt-dump\")\nsys.path.append(directory)\nsys.path.append(site_pkgs_path)\nsys.path.append(gdbpt)\n\n# warn if the user has different encoding than utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. 
pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}], "after_files": [{"content": "from enum import Enum\nfrom typing import Dict\nfrom typing import List\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\n\nclass RelocationType(Enum):\n # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf\n # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h\n JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT\n GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT\n IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE\n\n\[email protected](cmd_name)\ndef get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]:\n # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO\n cmd = get_got_entry.cmd + [\"--relocs\", \"--wide\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n if not line or not line[0].isdigit() or \" \" not in line:\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n for c in RelocationType:\n if c.name in category:\n entries[c].append(line)\n return entries\n", "path": "pwndbg/wrappers/readelf.py"}, {"content": "import cProfile\nimport glob\nimport locale\nimport os\nimport site\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\ndirectory, file = path.split(__file__)\ndirectory = path.expanduser(directory)\ndirectory = path.abspath(directory)\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\nif not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\nsite_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n# add virtualenv's site-packages to sys.path and run .pth files\nsite.addsitedir(site_pkgs_path)\n\n# remove existing, system-level site-packages from sys.path\nfor site_packages in site.getsitepackages():\n if site_packages in sys.path:\n sys.path.remove(site_packages)\n\n# Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\nbin_path = os.path.join(venv_path, \"bin\")\nos.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n# Add gdb-pt-dump directory to sys.path so it can be imported\ngdbpt = path.join(directory, \"gdb-pt-dump\")\nsys.path.append(directory)\nsys.path.append(gdbpt)\n\n# warn if the user has different encoding than 
utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}]}
| 1,523 | 479 |
gh_patches_debug_13716
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-340
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Return type ambiguous when extracting image features
The return type of the extracted image features (if the features file is present or not) is different. If the file is present then it returns a numpy.ndarray type object otherwise it returns a torch.autograd.variable.Variable object.
( https://github.com/facebookresearch/ParlAI/blob/3d86ccdbb4d87002cc6c4782afd0ee5277e742f1/parlai/core/image_featurizers.py#L149 )
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/core/image_featurizers.py`
Content:
```
1 # All rights reserved.
2 # This source code is licensed under the BSD-style license found in the
3 # LICENSE file in the root directory of this source tree. An additional grant
4 # of patent rights can be found in the PATENTS file in the same directory.
5
6 import parlai.core.build_data as build_data
7
8 import os
9 import copy
10 import numpy as np
11 from PIL import Image
12
13 _greyscale = ' .,:;crsA23hHG#98&@'
14
15 class ImageLoader():
16 """Extract image feature using pretrained CNN network.
17 """
18 def __init__(self, opt):
19 self.opt = copy.deepcopy(opt)
20 self.netCNN = None
21
22 def init_cnn(self):
23 """Lazy initialization of preprocessor model in case we don't need any image preprocessing."""
24 try:
25 import torch
26 except ModuleNotFoundError:
27 raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')
28 from torch.autograd import Variable
29 import torchvision
30 import torchvision.transforms as transforms
31 import torch.nn as nn
32
33 opt = self.opt
34 self.image_size = opt['image_size']
35 self.crop_size = opt['image_cropsize']
36 self.datatype = opt['datatype']
37 self.image_mode = opt['image_mode']
38
39 opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
40 self.use_cuda = opt['cuda']
41
42 if self.use_cuda:
43 print('[ Using CUDA ]')
44 torch.cuda.set_device(opt['gpu'])
45
46 cnn_type, layer_num = self.image_mode_switcher()
47
48 # initialize the pretrained CNN using pytorch.
49 CNN = getattr(torchvision.models, cnn_type)
50
51 # cut off the additional layer.
52 self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])
53
54 # initialize the transform function using torch vision.
55 self.transform = transforms.Compose([
56 transforms.Scale(self.image_size),
57 transforms.CenterCrop(self.crop_size),
58 transforms.ToTensor(),
59 transforms.Normalize(mean=[0.485, 0.456, 0.406],
60 std=[0.229, 0.224, 0.225])
61 ])
62
63 # container for single image
64 self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)
65
66 if self.use_cuda:
67 self.cuda()
68 self.xs = self.xs.cuda()
69
70 # make self.xs variable.
71 self.xs = Variable(self.xs)
72
73 def cuda(self):
74 self.netCNN.cuda()
75
76 def save(self, feature, path):
77 feature = feature.cpu().data.numpy()
78 np.save(path, feature)
79
80 def image_mode_switcher(self):
81 switcher = {
82 'resnet152': ['resnet152', -1],
83 'resnet101': ['resnet101', -1],
84 'resnet50': ['resnet50', -1],
85 'resnet34': ['resnet34', -1],
86 'resnet18': ['resnet18', -1],
87 'resnet152_spatial': ['resnet152', -2],
88 'resnet101_spatial': ['resnet101', -2],
89 'resnet50_spatial': ['resnet50', -2],
90 'resnet34_spatial': ['resnet34', -2],
91 'resnet18_spatial': ['resnet18', -2],
92 }
93
94 if self.image_mode not in switcher:
95 raise NotImplementedError('image preprocessing mode' +
96 '{} not supported yet'.format(self.image_mode))
97
98 return switcher.get(self.image_mode)
99
100 def extract(self, image, path):
101 # check whether initlize CNN network.
102 if not self.netCNN:
103 self.init_cnn()
104
105 self.xs.data.copy_(self.transform(image))
106 # extract the image feature
107 feature = self.netCNN(self.xs)
108 # save the feature
109 self.save(feature, path)
110 return feature
111
112 def img_to_ascii(self, path):
113 im = Image.open(path)
114 im.thumbnail((60, 40), Image.BICUBIC)
115 im = im.convert('L')
116 asc = []
117 for y in range(0, im.size[1]):
118 for x in range(0, im.size[0]):
119 lum = 255 - im.getpixel((x, y))
120 asc.append(_greyscale[lum * len(_greyscale) // 256])
121 asc.append('\n')
122 return ''.join(asc)
123
124 def load(self, path):
125 opt = self.opt
126 mode = opt.get('image_mode', 'raw')
127 if mode is None or mode == 'none':
128 # don't need to load images
129 return None
130 elif mode == 'raw':
131 # raw just returns RGB values
132 return Image.open(path).convert('RGB')
133 elif mode == 'ascii':
134 # convert images to ascii ¯\_(ツ)_/¯
135 return self.img_to_ascii(path)
136 else:
137 # otherwise, looks for preprocessed version under 'mode' directory
138 prepath, imagefn = os.path.split(path)
139
140 dpath = os.path.join(prepath, mode)
141
142 if not os.path.exists(dpath):
143 build_data.make_dir(dpath)
144
145 imagefn = imagefn + '.npy'
146 new_path = os.path.join(prepath, mode, imagefn)
147
148 if not os.path.isfile(new_path):
149 return self.extract(Image.open(path).convert('RGB'), new_path)
150 else:
151 return np.load(new_path)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py
--- a/parlai/core/image_featurizers.py
+++ b/parlai/core/image_featurizers.py
@@ -74,7 +74,6 @@
self.netCNN.cuda()
def save(self, feature, path):
- feature = feature.cpu().data.numpy()
np.save(path, feature)
def image_mode_switcher(self):
@@ -105,6 +104,7 @@
self.xs.data.copy_(self.transform(image))
# extract the image feature
feature = self.netCNN(self.xs)
+ feature = feature.cpu().data.numpy()
# save the feature
self.save(feature, path)
return feature
|
{"golden_diff": "diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py\n--- a/parlai/core/image_featurizers.py\n+++ b/parlai/core/image_featurizers.py\n@@ -74,7 +74,6 @@\n self.netCNN.cuda()\n \n def save(self, feature, path):\n- feature = feature.cpu().data.numpy()\n np.save(path, feature)\n \n def image_mode_switcher(self):\n@@ -105,6 +104,7 @@\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n+ feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n", "issue": "Return type ambiguous when extracting image features\nThe return type of the extracted image features (if the features file is present or not) is different. If the file is present then it returns a numpy.ndarray type object otherwise it returns a torch.autograd.variable.Variable object.\r\n( https://github.com/facebookresearch/ParlAI/blob/3d86ccdbb4d87002cc6c4782afd0ee5277e742f1/parlai/core/image_featurizers.py#L149 )\n", "before_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n feature = feature.cpu().data.numpy()\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 
'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}], "after_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return 
Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}]}
| 1,979 | 175 |
gh_patches_debug_13533
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-306
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve error message when attempting to run non-existent hook
Hook id in `/.pre-commit-config.yaml` doesn't exist in the included repository
(From https://github.com/pre-commit/pre-commit-hooks/issues/37)
It should probably suggest updating hooks or checking the spelling of the hookid
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/repository.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import shutil
5 from collections import defaultdict
6
7 from cached_property import cached_property
8
9 from pre_commit import git
10 from pre_commit.clientlib.validate_config import is_local_hooks
11 from pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA
12 from pre_commit.jsonschema_extensions import apply_defaults
13 from pre_commit.languages.all import languages
14 from pre_commit.languages.helpers import environment_dir
15 from pre_commit.manifest import Manifest
16 from pre_commit.prefixed_command_runner import PrefixedCommandRunner
17
18
19 logger = logging.getLogger('pre_commit')
20
21
22 class Repository(object):
23 def __init__(self, repo_config, repo_path_getter):
24 self.repo_config = repo_config
25 self.repo_path_getter = repo_path_getter
26 self.__installed = False
27
28 @classmethod
29 def create(cls, config, store):
30 if is_local_hooks(config):
31 return LocalRepository(config)
32 else:
33 repo_path_getter = store.get_repo_path_getter(
34 config['repo'], config['sha']
35 )
36 return cls(config, repo_path_getter)
37
38 @cached_property
39 def repo_url(self):
40 return self.repo_config['repo']
41
42 @cached_property
43 def sha(self):
44 return self.repo_config['sha']
45
46 @cached_property
47 def languages(self):
48 return set(
49 (hook['language'], hook['language_version'])
50 for _, hook in self.hooks
51 )
52
53 @cached_property
54 def additional_dependencies(self):
55 dep_dict = defaultdict(lambda: defaultdict(set))
56 for _, hook in self.hooks:
57 dep_dict[hook['language']][hook['language_version']].update(
58 hook.get('additional_dependencies', []),
59 )
60 return dep_dict
61
62 @cached_property
63 def hooks(self):
64 # TODO: merging in manifest dicts is a smell imo
65 return tuple(
66 (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))
67 for hook in self.repo_config['hooks']
68 )
69
70 @cached_property
71 def manifest(self):
72 return Manifest(self.repo_path_getter)
73
74 @cached_property
75 def cmd_runner(self):
76 return PrefixedCommandRunner(self.repo_path_getter.repo_path)
77
78 def require_installed(self):
79 if self.__installed:
80 return
81
82 self.install()
83 self.__installed = True
84
85 def install(self):
86 """Install the hook repository."""
87 def language_is_installed(language_name, language_version):
88 language = languages[language_name]
89 directory = environment_dir(
90 language.ENVIRONMENT_DIR, language_version,
91 )
92 return (
93 directory is None or
94 self.cmd_runner.exists(directory, '.installed')
95 )
96
97 if not all(
98 language_is_installed(language_name, language_version)
99 for language_name, language_version in self.languages
100 ):
101 logger.info(
102 'Installing environment for {0}.'.format(self.repo_url)
103 )
104 logger.info('Once installed this environment will be reused.')
105 logger.info('This may take a few minutes...')
106
107 for language_name, language_version in self.languages:
108 language = languages[language_name]
109 if language_is_installed(language_name, language_version):
110 continue
111
112 directory = environment_dir(
113 language.ENVIRONMENT_DIR, language_version,
114 )
115 # There's potentially incomplete cleanup from previous runs
116 # Clean it up!
117 if self.cmd_runner.exists(directory):
118 shutil.rmtree(self.cmd_runner.path(directory))
119
120 language.install_environment(
121 self.cmd_runner, language_version,
122 self.additional_dependencies[language_name][language_version],
123 )
124 # Touch the .installed file (atomic) to indicate we've installed
125 open(self.cmd_runner.path(directory, '.installed'), 'w').close()
126
127 def run_hook(self, hook, file_args):
128 """Run a hook.
129
130 Args:
131 hook - Hook dictionary
132 file_args - List of files to run
133 """
134 self.require_installed()
135 return languages[hook['language']].run_hook(
136 self.cmd_runner, hook, file_args,
137 )
138
139
140 class LocalRepository(Repository):
141 def __init__(self, repo_config):
142 super(LocalRepository, self).__init__(repo_config, None)
143
144 @cached_property
145 def hooks(self):
146 return tuple(
147 (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))
148 for hook in self.repo_config['hooks']
149 )
150
151 @cached_property
152 def cmd_runner(self):
153 return PrefixedCommandRunner(git.get_root())
154
155 @cached_property
156 def sha(self):
157 raise NotImplementedError
158
159 @cached_property
160 def manifest(self):
161 raise NotImplementedError
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -61,7 +61,16 @@
@cached_property
def hooks(self):
- # TODO: merging in manifest dicts is a smell imo
+ for hook in self.repo_config['hooks']:
+ if hook['id'] not in self.manifest.hooks:
+ logger.error(
+ '`{0}` is not present in repository {1}. '
+ 'Typo? Perhaps it is introduced in a newer version? '
+ 'Often `pre-commit autoupdate` fixes this.'.format(
+ hook['id'], self.repo_config['repo'],
+ )
+ )
+ exit(1)
return tuple(
(hook['id'], dict(self.manifest.hooks[hook['id']], **hook))
for hook in self.repo_config['hooks']
|
{"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -61,7 +61,16 @@\n \n @cached_property\n def hooks(self):\n- # TODO: merging in manifest dicts is a smell imo\n+ for hook in self.repo_config['hooks']:\n+ if hook['id'] not in self.manifest.hooks:\n+ logger.error(\n+ '`{0}` is not present in repository {1}. '\n+ 'Typo? Perhaps it is introduced in a newer version? '\n+ 'Often `pre-commit autoupdate` fixes this.'.format(\n+ hook['id'], self.repo_config['repo'],\n+ )\n+ )\n+ exit(1)\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n", "issue": "Improve error message when attempting to run non-existent hook\nHook id in `/.pre-commit-config.yaml` doesn't exist in the included repository\n\n(From https://github.com/pre-commit/pre-commit-hooks/issues/37)\n\nIt should probably suggest updating hooks or checking the spelling of the hookid\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nfrom collections import defaultdict\n\nfrom cached_property import cached_property\n\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import is_local_hooks\nfrom pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA\nfrom pre_commit.jsonschema_extensions import apply_defaults\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nlogger = logging.getLogger('pre_commit')\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n if is_local_hooks(config):\n return LocalRepository(config)\n else:\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def additional_dependencies(self):\n dep_dict = defaultdict(lambda: defaultdict(set))\n for _, hook in self.hooks:\n dep_dict[hook['language']][hook['language_version']].update(\n hook.get('additional_dependencies', []),\n )\n return dep_dict\n\n @cached_property\n def hooks(self):\n # TODO: merging in manifest dicts is a smell imo\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n def language_is_installed(language_name, language_version):\n language = languages[language_name]\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n return (\n directory is None or\n self.cmd_runner.exists(directory, '.installed')\n )\n\n if not all(\n 
language_is_installed(language_name, language_version)\n for language_name, language_version in self.languages\n ):\n logger.info(\n 'Installing environment for {0}.'.format(self.repo_url)\n )\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if language_is_installed(language_name, language_version):\n continue\n\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if self.cmd_runner.exists(directory):\n shutil.rmtree(self.cmd_runner.path(directory))\n\n language.install_environment(\n self.cmd_runner, language_version,\n self.additional_dependencies[language_name][language_version],\n )\n # Touch the .installed file (atomic) to indicate we've installed\n open(self.cmd_runner.path(directory, '.installed'), 'w').close()\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n\n\nclass LocalRepository(Repository):\n def __init__(self, repo_config):\n super(LocalRepository, self).__init__(repo_config, None)\n\n @cached_property\n def hooks(self):\n return tuple(\n (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(git.get_root())\n\n @cached_property\n def sha(self):\n raise NotImplementedError\n\n @cached_property\n def manifest(self):\n raise NotImplementedError\n", "path": "pre_commit/repository.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nfrom collections import defaultdict\n\nfrom cached_property import cached_property\n\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import is_local_hooks\nfrom pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA\nfrom pre_commit.jsonschema_extensions import apply_defaults\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nlogger = logging.getLogger('pre_commit')\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n if is_local_hooks(config):\n return LocalRepository(config)\n else:\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def additional_dependencies(self):\n dep_dict = defaultdict(lambda: defaultdict(set))\n for _, hook in self.hooks:\n dep_dict[hook['language']][hook['language_version']].update(\n hook.get('additional_dependencies', []),\n )\n return dep_dict\n\n @cached_property\n def hooks(self):\n for hook in 
self.repo_config['hooks']:\n if hook['id'] not in self.manifest.hooks:\n logger.error(\n '`{0}` is not present in repository {1}. '\n 'Typo? Perhaps it is introduced in a newer version? '\n 'Often `pre-commit autoupdate` fixes this.'.format(\n hook['id'], self.repo_config['repo'],\n )\n )\n exit(1)\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n def language_is_installed(language_name, language_version):\n language = languages[language_name]\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n return (\n directory is None or\n self.cmd_runner.exists(directory, '.installed')\n )\n\n if not all(\n language_is_installed(language_name, language_version)\n for language_name, language_version in self.languages\n ):\n logger.info(\n 'Installing environment for {0}.'.format(self.repo_url)\n )\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if language_is_installed(language_name, language_version):\n continue\n\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if self.cmd_runner.exists(directory):\n shutil.rmtree(self.cmd_runner.path(directory))\n\n language.install_environment(\n self.cmd_runner, language_version,\n self.additional_dependencies[language_name][language_version],\n )\n # Touch the .installed file (atomic) to indicate we've installed\n open(self.cmd_runner.path(directory, '.installed'), 'w').close()\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n\n\nclass LocalRepository(Repository):\n def __init__(self, repo_config):\n super(LocalRepository, self).__init__(repo_config, None)\n\n @cached_property\n def hooks(self):\n return tuple(\n (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(git.get_root())\n\n @cached_property\n def sha(self):\n raise NotImplementedError\n\n @cached_property\n def manifest(self):\n raise NotImplementedError\n", "path": "pre_commit/repository.py"}]}
| 1,687 | 205 |
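The golden diff above turns a bare `KeyError` on an unknown hook id into an actionable error message. A small sketch of what that looks like in practice; the repository URL, sha and misspelt hook id below are made up for illustration.

```python
# Illustrative config with a misspelt hook id (all values are placeholders).
repo_config = {
    'repo': 'https://github.com/pre-commit/pre-commit-hooks',
    'sha': 'a1b2c3d',
    'hooks': [{'id': 'trailing-whitespac'}],   # typo for 'trailing-whitespace'
}
# Before the patch, accessing Repository(...).hooks raised KeyError from the
# manifest lookup.  With the patch, it logs:
#   `trailing-whitespac` is not present in repository
#   https://github.com/pre-commit/pre-commit-hooks. Typo? Perhaps it is
#   introduced in a newer version? Often `pre-commit autoupdate` fixes this.
# and exits with status 1.
```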
gh_patches_debug_36673
|
rasdani/github-patches
|
git_diff
|
translate__pootle-3883
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
list_language no longer has an idea of a changeid
We dropped change_id concept with 2.7.0 and now we have the idea of `revision`. Previously I could get a list of active languages since a `change_id` using `list_languages`. Now however I cannot. I used `change_id` in the past to allow me to decide which languages to sync. I also use it to record in a log which languages have been active since the last changeid.
So this is to either
1. Bring back `--revision` to `list_languages` so we can see languages active since some revision
2. Explain what other process could be used to gather this info
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/management/commands/list_projects.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
12
13 from django.core.management.base import NoArgsCommand
14
15 from pootle_project.models import Project
16
17
18 class Command(NoArgsCommand):
19
20 def handle_noargs(self, **options):
21 self.list_projects(**options)
22
23 def list_projects(self, **options):
24 """List all projects on the server."""
25
26 for project in Project.objects.all():
27 self.stdout.write(project.code)
28
```
Path: `pootle/apps/pootle_app/management/commands/list_languages.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
12
13 from optparse import make_option
14
15 from django.core.management.base import NoArgsCommand
16
17
18 class Command(NoArgsCommand):
19 option_list = NoArgsCommand.option_list + (
20 make_option('--project', action='append', dest='projects',
21 help='Limit to PROJECTS'),
22 )
23 help = "List language codes."
24
25 def handle_noargs(self, **options):
26 self.list_languages(**options)
27
28 def list_languages(self, **options):
29 """List all languages on the server or the given projects."""
30 projects = options.get('projects', [])
31
32 from pootle_translationproject.models import TranslationProject
33 tps = TranslationProject.objects.distinct()
34 tps = tps.exclude(language__code='templates').order_by('language__code')
35
36 if projects:
37 tps = tps.filter(project__code__in=projects)
38
39 for lang in tps.values_list('language__code', flat=True):
40 self.stdout.write(lang)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/pootle_app/management/commands/list_languages.py b/pootle/apps/pootle_app/management/commands/list_languages.py
--- a/pootle/apps/pootle_app/management/commands/list_languages.py
+++ b/pootle/apps/pootle_app/management/commands/list_languages.py
@@ -19,6 +19,9 @@
option_list = NoArgsCommand.option_list + (
make_option('--project', action='append', dest='projects',
help='Limit to PROJECTS'),
+ make_option("--modified-since", action="store", dest="modified_since",
+ type=int,
+ help="Only process translations newer than specified revision"),
)
help = "List language codes."
@@ -33,6 +36,10 @@
tps = TranslationProject.objects.distinct()
tps = tps.exclude(language__code='templates').order_by('language__code')
+ revision = options.get("modified_since", 0)
+ if revision:
+ tps = tps.filter(submission__id__gt=revision)
+
if projects:
tps = tps.filter(project__code__in=projects)
diff --git a/pootle/apps/pootle_app/management/commands/list_projects.py b/pootle/apps/pootle_app/management/commands/list_projects.py
--- a/pootle/apps/pootle_app/management/commands/list_projects.py
+++ b/pootle/apps/pootle_app/management/commands/list_projects.py
@@ -10,12 +10,20 @@
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
+from optparse import make_option
+
from django.core.management.base import NoArgsCommand
from pootle_project.models import Project
class Command(NoArgsCommand):
+ option_list = NoArgsCommand.option_list + (
+ make_option("--modified-since",
+ action="store", dest="modified_since", type=int,
+ help="Only process translations newer than specified revision"
+ ),
+ )
def handle_noargs(self, **options):
self.list_projects(**options)
@@ -23,5 +31,15 @@
def list_projects(self, **options):
"""List all projects on the server."""
- for project in Project.objects.all():
- self.stdout.write(project.code)
+ revision = options.get("modified_since", 0)
+ if revision:
+ from pootle_translationproject.models import TranslationProject
+ tps = TranslationProject.objects.filter(submission__id__gt=revision) \
+ .distinct().values("project__code")
+
+ for tp in tps:
+ self.stdout.write(tp["project__code"])
+
+ else:
+ for project in Project.objects.all():
+ self.stdout.write(project.code)
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/list_languages.py b/pootle/apps/pootle_app/management/commands/list_languages.py\n--- a/pootle/apps/pootle_app/management/commands/list_languages.py\n+++ b/pootle/apps/pootle_app/management/commands/list_languages.py\n@@ -19,6 +19,9 @@\n option_list = NoArgsCommand.option_list + (\n make_option('--project', action='append', dest='projects',\n help='Limit to PROJECTS'),\n+ make_option(\"--modified-since\", action=\"store\", dest=\"modified_since\",\n+ type=int,\n+ help=\"Only process translations newer than specified revision\"),\n )\n help = \"List language codes.\"\n \n@@ -33,6 +36,10 @@\n tps = TranslationProject.objects.distinct()\n tps = tps.exclude(language__code='templates').order_by('language__code')\n \n+ revision = options.get(\"modified_since\", 0)\n+ if revision:\n+ tps = tps.filter(submission__id__gt=revision)\n+\n if projects:\n tps = tps.filter(project__code__in=projects)\n \ndiff --git a/pootle/apps/pootle_app/management/commands/list_projects.py b/pootle/apps/pootle_app/management/commands/list_projects.py\n--- a/pootle/apps/pootle_app/management/commands/list_projects.py\n+++ b/pootle/apps/pootle_app/management/commands/list_projects.py\n@@ -10,12 +10,20 @@\n import os\n os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n \n+from optparse import make_option\n+\n from django.core.management.base import NoArgsCommand\n \n from pootle_project.models import Project\n \n \n class Command(NoArgsCommand):\n+ option_list = NoArgsCommand.option_list + (\n+ make_option(\"--modified-since\",\n+ action=\"store\", dest=\"modified_since\", type=int,\n+ help=\"Only process translations newer than specified revision\"\n+ ),\n+ )\n \n def handle_noargs(self, **options):\n self.list_projects(**options)\n@@ -23,5 +31,15 @@\n def list_projects(self, **options):\n \"\"\"List all projects on the server.\"\"\"\n \n- for project in Project.objects.all():\n- self.stdout.write(project.code)\n+ revision = options.get(\"modified_since\", 0)\n+ if revision:\n+ from pootle_translationproject.models import TranslationProject\n+ tps = TranslationProject.objects.filter(submission__id__gt=revision) \\\n+ .distinct().values(\"project__code\")\n+\n+ for tp in tps:\n+ self.stdout.write(tp[\"project__code\"])\n+\n+ else:\n+ for project in Project.objects.all():\n+ self.stdout.write(project.code)\n", "issue": "list_language no longer has an idea of a changeid\nWe dropped change_id concept with 2.7.0 and now we have the idea of `revision`. Previously I could get a list of active languages since a `change_id` using `list_languages`. Now however I cannot. I used `change_id` in the past to allow me to decide which languages to sync. I also use it to record in a log which languages have been active since the last changeid.\n\nSo this is to either\n1. Bring back `--revision` to `list_languages` so we can see languages active since some revision\n2. Explain what other process could be used to gather this info\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom django.core.management.base import NoArgsCommand\n\nfrom pootle_project.models import Project\n\n\nclass Command(NoArgsCommand):\n\n def handle_noargs(self, **options):\n self.list_projects(**options)\n\n def list_projects(self, **options):\n \"\"\"List all projects on the server.\"\"\"\n\n for project in Project.objects.all():\n self.stdout.write(project.code)\n", "path": "pootle/apps/pootle_app/management/commands/list_projects.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom django.core.management.base import NoArgsCommand\n\n\nclass Command(NoArgsCommand):\n option_list = NoArgsCommand.option_list + (\n make_option('--project', action='append', dest='projects',\n help='Limit to PROJECTS'),\n )\n help = \"List language codes.\"\n\n def handle_noargs(self, **options):\n self.list_languages(**options)\n\n def list_languages(self, **options):\n \"\"\"List all languages on the server or the given projects.\"\"\"\n projects = options.get('projects', [])\n\n from pootle_translationproject.models import TranslationProject\n tps = TranslationProject.objects.distinct()\n tps = tps.exclude(language__code='templates').order_by('language__code')\n\n if projects:\n tps = tps.filter(project__code__in=projects)\n\n for lang in tps.values_list('language__code', flat=True):\n self.stdout.write(lang)\n", "path": "pootle/apps/pootle_app/management/commands/list_languages.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom django.core.management.base import NoArgsCommand\n\nfrom pootle_project.models import Project\n\n\nclass Command(NoArgsCommand):\n option_list = NoArgsCommand.option_list + (\n make_option(\"--modified-since\",\n action=\"store\", dest=\"modified_since\", type=int,\n help=\"Only process translations newer than specified revision\"\n ),\n )\n\n def handle_noargs(self, **options):\n self.list_projects(**options)\n\n def list_projects(self, **options):\n \"\"\"List all projects on the server.\"\"\"\n\n revision = options.get(\"modified_since\", 0)\n if revision:\n from pootle_translationproject.models import TranslationProject\n tps = TranslationProject.objects.filter(submission__id__gt=revision) \\\n .distinct().values(\"project__code\")\n\n for tp in tps:\n self.stdout.write(tp[\"project__code\"])\n\n else:\n for project in Project.objects.all():\n self.stdout.write(project.code)\n", "path": "pootle/apps/pootle_app/management/commands/list_projects.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. 
It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom optparse import make_option\n\nfrom django.core.management.base import NoArgsCommand\n\n\nclass Command(NoArgsCommand):\n option_list = NoArgsCommand.option_list + (\n make_option('--project', action='append', dest='projects',\n help='Limit to PROJECTS'),\n make_option(\"--modified-since\", action=\"store\", dest=\"modified_since\",\n type=int,\n help=\"Only process translations newer than specified revision\"),\n )\n help = \"List language codes.\"\n\n def handle_noargs(self, **options):\n self.list_languages(**options)\n\n def list_languages(self, **options):\n \"\"\"List all languages on the server or the given projects.\"\"\"\n projects = options.get('projects', [])\n\n from pootle_translationproject.models import TranslationProject\n tps = TranslationProject.objects.distinct()\n tps = tps.exclude(language__code='templates').order_by('language__code')\n\n revision = options.get(\"modified_since\", 0)\n if revision:\n tps = tps.filter(submission__id__gt=revision)\n\n if projects:\n tps = tps.filter(project__code__in=projects)\n\n for lang in tps.values_list('language__code', flat=True):\n self.stdout.write(lang)\n", "path": "pootle/apps/pootle_app/management/commands/list_languages.py"}]}
| 1,040 | 627 |
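With the diff above applied, both commands accept `--modified-since` and filter on submissions whose id is greater than the given revision. A sketch of driving them through Django's management API; the revision number and project code are illustrative, and a shell invocation with the same flag behaves identically.

```python
# Sketch only: assumes a configured Pootle/Django environment.
from django.core.management import call_command

# Language codes with submissions newer than revision 1200, limited to one project.
call_command('list_languages', modified_since=1200, projects=['tutorial'])

# Project codes with submissions newer than the same revision.
call_command('list_projects', modified_since=1200)
```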
gh_patches_debug_42065
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-690
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove clash between adhocracy4 library names
- prevents override of global var if same library name is used from
different bundles
fixes #687
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/maps/fields.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2 from jsonfield.fields import JSONField, JSONFormField
3
4 from .validators import GeoJSONFormFieldValidator
5
6
7 class GeoJSONFormField(JSONFormField):
8
9 def __init__(self, *args, **kwargs):
10 geom_type = kwargs.pop('geom_type')
11 required = kwargs.pop('required')
12 kwargs.setdefault(
13 'validators', [GeoJSONFormFieldValidator(geom_type, required)])
14 super().__init__(*args, **kwargs)
15
16
17 class GeoJSONField(JSONField):
18 description = _("Geometry as GeoJSON")
19 form_class = GeoJSONFormField
20 dim = 2
21 geom_type = 'GEOMETRY'
22
23 '''
24 def __init__(self, *args, **kwargs):
25 required = kwargs.pop('required', True)
26 kwargs.setdefault(
27 'validators', [GeoJSONFormFieldValidator(
28 self.geom_type, required)])
29 super().__init__(*args, **kwargs)
30 '''
31
32 def formfield(self, **kwargs):
33 kwargs.setdefault('geom_type', self.geom_type)
34 return super(GeoJSONField, self).formfield(**kwargs)
35
36
37 class GeometryField(GeoJSONField):
38 pass
39
40
41 class PointField(GeometryField):
42 geom_type = 'POINT'
43
44
45 class MultiPolygonField(GeoJSONField):
46 geom_type = 'MULTIPOLYGON'
47
```
Path: `euth/maps/forms.py`
Content:
```
1 from django import forms
2
3 from . import models
4 from .widgets import MapChoosePointWidget
5
6
7 class MapIdeaForm(forms.ModelForm):
8
9 def __init__(self, *args, **kwargs):
10 self.settings = kwargs.pop('settings_instance')
11 super().__init__(*args, **kwargs)
12 self.fields['point'].widget = MapChoosePointWidget(
13 polygon=self.settings.polygon)
14
15 class Meta:
16 model = models.MapIdea
17 fields = ['name', 'description', 'image', 'point']
18
```
Path: `euth/maps/validators.py`
Content:
```
1 import json
2
3 from django.core.exceptions import ValidationError
4 from django.utils.translation import ugettext_lazy as _
5
6
7 class GeoJSONFormFieldValidator(object):
8
9 def __init__(self, geom_type, required):
10 self.geom_type = geom_type
11 self.required = required
12
13 def __call__(self, value):
14 if isinstance(value, str):
15 value = json.loads(value)
16 err_msg = None
17 json_type = value.get('type')
18 if json_type == 'Feature' and self.geom_type == 'GEOMETRY':
19 geom_type = value.get('geometry').get('type')
20 is_geometry = geom_type in (
21 "Point", "MultiPoint", "LineString", "MultiLineString",
22 "Polygon", "MultiPolygon", "GeometryCollection"
23 )
24 if not is_geometry:
25 err_msg = '%s is not a valid GeoJSON geometry type' % geom_type
26 elif json_type == 'FeatureCollection':
27 if len(value.get('features')) == 0 and self.required:
28 err_msg = _('Field can not be empty')
29
30 if err_msg:
31 raise ValidationError(err_msg)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/maps/fields.py b/euth/maps/fields.py
--- a/euth/maps/fields.py
+++ b/euth/maps/fields.py
@@ -1,17 +1,22 @@
+from django.core import validators as django_validators
from django.utils.translation import ugettext_lazy as _
from jsonfield.fields import JSONField, JSONFormField
-from .validators import GeoJSONFormFieldValidator
-
class GeoJSONFormField(JSONFormField):
def __init__(self, *args, **kwargs):
- geom_type = kwargs.pop('geom_type')
- required = kwargs.pop('required')
- kwargs.setdefault(
- 'validators', [GeoJSONFormFieldValidator(geom_type, required)])
+ required_message = kwargs.pop('required_message')
super().__init__(*args, **kwargs)
+ self.default_error_messages['required'] = required_message
+
+ def to_python(self, value):
+ empty_featureset = '{"type":"FeatureCollection","features":[]}'
+ if (value not in django_validators.EMPTY_VALUES and not
+ value == empty_featureset):
+ return super().to_python(value)
+ else:
+ return None
class GeoJSONField(JSONField):
@@ -20,17 +25,8 @@
dim = 2
geom_type = 'GEOMETRY'
- '''
- def __init__(self, *args, **kwargs):
- required = kwargs.pop('required', True)
- kwargs.setdefault(
- 'validators', [GeoJSONFormFieldValidator(
- self.geom_type, required)])
- super().__init__(*args, **kwargs)
- '''
-
def formfield(self, **kwargs):
- kwargs.setdefault('geom_type', self.geom_type)
+ kwargs.setdefault('required_message', self.required_message)
return super(GeoJSONField, self).formfield(**kwargs)
@@ -40,7 +36,9 @@
class PointField(GeometryField):
geom_type = 'POINT'
+ required_message = _('Please add a Marker on the map')
class MultiPolygonField(GeoJSONField):
geom_type = 'MULTIPOLYGON'
+ required_message = _('Please draw a Polygon on the map')
diff --git a/euth/maps/forms.py b/euth/maps/forms.py
--- a/euth/maps/forms.py
+++ b/euth/maps/forms.py
@@ -1,4 +1,5 @@
from django import forms
+from django.utils.translation import ugettext as _
from . import models
from .widgets import MapChoosePointWidget
@@ -11,6 +12,8 @@
super().__init__(*args, **kwargs)
self.fields['point'].widget = MapChoosePointWidget(
polygon=self.settings.polygon)
+ self.fields['point'].error_messages['required'] = _(
+ 'Please locate your proposal on the map.')
class Meta:
model = models.MapIdea
diff --git a/euth/maps/validators.py b/euth/maps/validators.py
deleted file mode 100644
--- a/euth/maps/validators.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import json
-
-from django.core.exceptions import ValidationError
-from django.utils.translation import ugettext_lazy as _
-
-
-class GeoJSONFormFieldValidator(object):
-
- def __init__(self, geom_type, required):
- self.geom_type = geom_type
- self.required = required
-
- def __call__(self, value):
- if isinstance(value, str):
- value = json.loads(value)
- err_msg = None
- json_type = value.get('type')
- if json_type == 'Feature' and self.geom_type == 'GEOMETRY':
- geom_type = value.get('geometry').get('type')
- is_geometry = geom_type in (
- "Point", "MultiPoint", "LineString", "MultiLineString",
- "Polygon", "MultiPolygon", "GeometryCollection"
- )
- if not is_geometry:
- err_msg = '%s is not a valid GeoJSON geometry type' % geom_type
- elif json_type == 'FeatureCollection':
- if len(value.get('features')) == 0 and self.required:
- err_msg = _('Field can not be empty')
-
- if err_msg:
- raise ValidationError(err_msg)
|
{"golden_diff": "diff --git a/euth/maps/fields.py b/euth/maps/fields.py\n--- a/euth/maps/fields.py\n+++ b/euth/maps/fields.py\n@@ -1,17 +1,22 @@\n+from django.core import validators as django_validators\n from django.utils.translation import ugettext_lazy as _\n from jsonfield.fields import JSONField, JSONFormField\n \n-from .validators import GeoJSONFormFieldValidator\n-\n \n class GeoJSONFormField(JSONFormField):\n \n def __init__(self, *args, **kwargs):\n- geom_type = kwargs.pop('geom_type')\n- required = kwargs.pop('required')\n- kwargs.setdefault(\n- 'validators', [GeoJSONFormFieldValidator(geom_type, required)])\n+ required_message = kwargs.pop('required_message')\n super().__init__(*args, **kwargs)\n+ self.default_error_messages['required'] = required_message\n+\n+ def to_python(self, value):\n+ empty_featureset = '{\"type\":\"FeatureCollection\",\"features\":[]}'\n+ if (value not in django_validators.EMPTY_VALUES and not\n+ value == empty_featureset):\n+ return super().to_python(value)\n+ else:\n+ return None\n \n \n class GeoJSONField(JSONField):\n@@ -20,17 +25,8 @@\n dim = 2\n geom_type = 'GEOMETRY'\n \n- '''\n- def __init__(self, *args, **kwargs):\n- required = kwargs.pop('required', True)\n- kwargs.setdefault(\n- 'validators', [GeoJSONFormFieldValidator(\n- self.geom_type, required)])\n- super().__init__(*args, **kwargs)\n- '''\n-\n def formfield(self, **kwargs):\n- kwargs.setdefault('geom_type', self.geom_type)\n+ kwargs.setdefault('required_message', self.required_message)\n return super(GeoJSONField, self).formfield(**kwargs)\n \n \n@@ -40,7 +36,9 @@\n \n class PointField(GeometryField):\n geom_type = 'POINT'\n+ required_message = _('Please add a Marker on the map')\n \n \n class MultiPolygonField(GeoJSONField):\n geom_type = 'MULTIPOLYGON'\n+ required_message = _('Please draw a Polygon on the map')\ndiff --git a/euth/maps/forms.py b/euth/maps/forms.py\n--- a/euth/maps/forms.py\n+++ b/euth/maps/forms.py\n@@ -1,4 +1,5 @@\n from django import forms\n+from django.utils.translation import ugettext as _\n \n from . 
import models\n from .widgets import MapChoosePointWidget\n@@ -11,6 +12,8 @@\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = MapChoosePointWidget(\n polygon=self.settings.polygon)\n+ self.fields['point'].error_messages['required'] = _(\n+ 'Please locate your proposal on the map.')\n \n class Meta:\n model = models.MapIdea\ndiff --git a/euth/maps/validators.py b/euth/maps/validators.py\ndeleted file mode 100644\n--- a/euth/maps/validators.py\n+++ /dev/null\n@@ -1,31 +0,0 @@\n-import json\n-\n-from django.core.exceptions import ValidationError\n-from django.utils.translation import ugettext_lazy as _\n-\n-\n-class GeoJSONFormFieldValidator(object):\n-\n- def __init__(self, geom_type, required):\n- self.geom_type = geom_type\n- self.required = required\n-\n- def __call__(self, value):\n- if isinstance(value, str):\n- value = json.loads(value)\n- err_msg = None\n- json_type = value.get('type')\n- if json_type == 'Feature' and self.geom_type == 'GEOMETRY':\n- geom_type = value.get('geometry').get('type')\n- is_geometry = geom_type in (\n- \"Point\", \"MultiPoint\", \"LineString\", \"MultiLineString\",\n- \"Polygon\", \"MultiPolygon\", \"GeometryCollection\"\n- )\n- if not is_geometry:\n- err_msg = '%s is not a valid GeoJSON geometry type' % geom_type\n- elif json_type == 'FeatureCollection':\n- if len(value.get('features')) == 0 and self.required:\n- err_msg = _('Field can not be empty')\n-\n- if err_msg:\n- raise ValidationError(err_msg)\n", "issue": "Remove clash between adhocracy4 library names\n - prevents override of global var if same library name is used from\r\n different bundles\r\n\r\nfixes #687 \n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\nfrom jsonfield.fields import JSONField, JSONFormField\n\nfrom .validators import GeoJSONFormFieldValidator\n\n\nclass GeoJSONFormField(JSONFormField):\n\n def __init__(self, *args, **kwargs):\n geom_type = kwargs.pop('geom_type')\n required = kwargs.pop('required')\n kwargs.setdefault(\n 'validators', [GeoJSONFormFieldValidator(geom_type, required)])\n super().__init__(*args, **kwargs)\n\n\nclass GeoJSONField(JSONField):\n description = _(\"Geometry as GeoJSON\")\n form_class = GeoJSONFormField\n dim = 2\n geom_type = 'GEOMETRY'\n\n '''\n def __init__(self, *args, **kwargs):\n required = kwargs.pop('required', True)\n kwargs.setdefault(\n 'validators', [GeoJSONFormFieldValidator(\n self.geom_type, required)])\n super().__init__(*args, **kwargs)\n '''\n\n def formfield(self, **kwargs):\n kwargs.setdefault('geom_type', self.geom_type)\n return super(GeoJSONField, self).formfield(**kwargs)\n\n\nclass GeometryField(GeoJSONField):\n pass\n\n\nclass PointField(GeometryField):\n geom_type = 'POINT'\n\n\nclass MultiPolygonField(GeoJSONField):\n geom_type = 'MULTIPOLYGON'\n", "path": "euth/maps/fields.py"}, {"content": "from django import forms\n\nfrom . 
import models\nfrom .widgets import MapChoosePointWidget\n\n\nclass MapIdeaForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = MapChoosePointWidget(\n polygon=self.settings.polygon)\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'point']\n", "path": "euth/maps/forms.py"}, {"content": "import json\n\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass GeoJSONFormFieldValidator(object):\n\n def __init__(self, geom_type, required):\n self.geom_type = geom_type\n self.required = required\n\n def __call__(self, value):\n if isinstance(value, str):\n value = json.loads(value)\n err_msg = None\n json_type = value.get('type')\n if json_type == 'Feature' and self.geom_type == 'GEOMETRY':\n geom_type = value.get('geometry').get('type')\n is_geometry = geom_type in (\n \"Point\", \"MultiPoint\", \"LineString\", \"MultiLineString\",\n \"Polygon\", \"MultiPolygon\", \"GeometryCollection\"\n )\n if not is_geometry:\n err_msg = '%s is not a valid GeoJSON geometry type' % geom_type\n elif json_type == 'FeatureCollection':\n if len(value.get('features')) == 0 and self.required:\n err_msg = _('Field can not be empty')\n\n if err_msg:\n raise ValidationError(err_msg)\n", "path": "euth/maps/validators.py"}], "after_files": [{"content": "from django.core import validators as django_validators\nfrom django.utils.translation import ugettext_lazy as _\nfrom jsonfield.fields import JSONField, JSONFormField\n\n\nclass GeoJSONFormField(JSONFormField):\n\n def __init__(self, *args, **kwargs):\n required_message = kwargs.pop('required_message')\n super().__init__(*args, **kwargs)\n self.default_error_messages['required'] = required_message\n\n def to_python(self, value):\n empty_featureset = '{\"type\":\"FeatureCollection\",\"features\":[]}'\n if (value not in django_validators.EMPTY_VALUES and not\n value == empty_featureset):\n return super().to_python(value)\n else:\n return None\n\n\nclass GeoJSONField(JSONField):\n description = _(\"Geometry as GeoJSON\")\n form_class = GeoJSONFormField\n dim = 2\n geom_type = 'GEOMETRY'\n\n def formfield(self, **kwargs):\n kwargs.setdefault('required_message', self.required_message)\n return super(GeoJSONField, self).formfield(**kwargs)\n\n\nclass GeometryField(GeoJSONField):\n pass\n\n\nclass PointField(GeometryField):\n geom_type = 'POINT'\n required_message = _('Please add a Marker on the map')\n\n\nclass MultiPolygonField(GeoJSONField):\n geom_type = 'MULTIPOLYGON'\n required_message = _('Please draw a Polygon on the map')\n", "path": "euth/maps/fields.py"}, {"content": "from django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom . import models\nfrom .widgets import MapChoosePointWidget\n\n\nclass MapIdeaForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n self.settings = kwargs.pop('settings_instance')\n super().__init__(*args, **kwargs)\n self.fields['point'].widget = MapChoosePointWidget(\n polygon=self.settings.polygon)\n self.fields['point'].error_messages['required'] = _(\n 'Please locate your proposal on the map.')\n\n class Meta:\n model = models.MapIdea\n fields = ['name', 'description', 'image', 'point']\n", "path": "euth/maps/forms.py"}, {"content": null, "path": "euth/maps/validators.py"}]}
| 1,132 | 947 |
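After the patch above, an empty `FeatureCollection` posted for the point field is coerced to `None` by `GeoJSONFormField.to_python`, so the field's own required message fires instead of the deleted validator. A sketch of that behaviour; the form data is made up and `map_settings` stands in for any object exposing the `.polygon` the widget needs.

```python
# Sketch of the patched validation path; map_settings is a hypothetical object
# with a .polygon attribute, and the posted data is illustrative.
from euth.maps.forms import MapIdeaForm

data = {
    'name': 'Bike racks',
    'description': 'More parking near the station',
    'point': '{"type":"FeatureCollection","features":[]}',  # treated as empty
}
form = MapIdeaForm(data=data, settings_instance=map_settings)
assert not form.is_valid()
assert form.errors['point'] == ['Please locate your proposal on the map.']
```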
gh_patches_debug_24312
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-864
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an option to the bug_classifier script to classify a specific bug
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/bug_classifier.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import os
5 from logging import INFO, basicConfig, getLogger
6
7 import numpy as np
8 import requests
9
10 from bugbug import bugzilla
11 from bugbug.models import get_model_class
12 from bugbug.utils import download_check_etag, zstd_decompress
13
14 MODELS_WITH_TYPE = ("component",)
15
16 basicConfig(level=INFO)
17 logger = getLogger(__name__)
18
19
20 def classify_bugs(model_name, classifier):
21 if classifier != "default":
22 assert (
23 model_name in MODELS_WITH_TYPE
24 ), f"{classifier} is not a valid classifier type for {model_name}"
25
26 model_file_name = f"{model_name}{classifier}model"
27 model_name = f"{model_name}_{classifier}"
28 else:
29 model_file_name = f"{model_name}model"
30
31 if not os.path.exists(model_file_name):
32 logger.info(f"{model_file_name} does not exist. Downloading the model....")
33 try:
34 download_check_etag(
35 f"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst",
36 f"{model_file_name}.zst",
37 )
38 except requests.HTTPError:
39 logger.error(
40 f"A pre-trained model is not available, you will need to train it yourself using the trainer script"
41 )
42 raise SystemExit(1)
43
44 zstd_decompress(model_file_name)
45 assert os.path.exists(model_file_name), "Decompressed file doesn't exist"
46
47 model_class = get_model_class(model_name)
48 model = model_class.load(model_file_name)
49
50 for bug in bugzilla.get_bugs():
51 print(
52 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} '
53 )
54
55 if model.calculate_importance:
56 probas, importance = model.classify(
57 bug, probabilities=True, importances=True
58 )
59
60 feature_names = model.get_human_readable_feature_names()
61
62 model.print_feature_importances(
63 importance["importances"], feature_names, class_probabilities=probas
64 )
65 else:
66 probas = model.classify(bug, probabilities=True, importances=False)
67
68 if np.argmax(probas) == 1:
69 print(f"Positive! {probas}")
70 else:
71 print(f"Negative! {probas}")
72 input()
73
74
75 def main():
76 description = "Perform evaluation on bugs using the specified model"
77 parser = argparse.ArgumentParser(description=description)
78
79 parser.add_argument("model", help="Which model to use for evaluation")
80 parser.add_argument(
81 "--classifier",
82 help="Type of the classifier. Only used for component classification.",
83 choices=["default", "nn"],
84 default="default",
85 )
86
87 args = parser.parse_args()
88
89 classify_bugs(args.model, args.classifier)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py
--- a/scripts/bug_classifier.py
+++ b/scripts/bug_classifier.py
@@ -17,7 +17,7 @@
logger = getLogger(__name__)
-def classify_bugs(model_name, classifier):
+def classify_bugs(model_name, classifier, bug_id):
if classifier != "default":
assert (
model_name in MODELS_WITH_TYPE
@@ -47,7 +47,13 @@
model_class = get_model_class(model_name)
model = model_class.load(model_file_name)
- for bug in bugzilla.get_bugs():
+ if bug_id:
+ bugs = bugzilla.get(bug_id).values()
+ assert bugs, f"A bug with a bug id of {bug_id} was not found"
+ else:
+ bugs = bugzilla.get_bugs()
+
+ for bug in bugs:
print(
f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} '
)
@@ -83,7 +89,8 @@
choices=["default", "nn"],
default="default",
)
+ parser.add_argument("--bug-id", help="Classify the given bug id")
args = parser.parse_args()
- classify_bugs(args.model, args.classifier)
+ classify_bugs(args.model, args.classifier, args.bug_id)
|
{"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -17,7 +17,7 @@\n logger = getLogger(__name__)\n \n \n-def classify_bugs(model_name, classifier):\n+def classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n@@ -47,7 +47,13 @@\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n \n- for bug in bugzilla.get_bugs():\n+ if bug_id:\n+ bugs = bugzilla.get(bug_id).values()\n+ assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n+ else:\n+ bugs = bugzilla.get_bugs()\n+\n+ for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n@@ -83,7 +89,8 @@\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n+ parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n \n args = parser.parse_args()\n \n- classify_bugs(args.model, args.classifier)\n+ classify_bugs(args.model, args.classifier, args.bug_id)\n", "issue": "Add an option to the bug_classifier script to classify a specific bug\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_human_readable_feature_names()\n\n model.print_feature_importances(\n importance[\"importances\"], feature_names, class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier)\n", "path": "scripts/bug_classifier.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://index.taskcluster.net/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n if bug_id:\n bugs = bugzilla.get(bug_id).values()\n assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n else:\n bugs = bugzilla.get_bugs()\n\n for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_human_readable_feature_names()\n\n model.print_feature_importances(\n importance[\"importances\"], feature_names, class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier, args.bug_id)\n", "path": "scripts/bug_classifier.py"}]}
| 1,064 | 311 |
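The diff above threads a `--bug-id` option through to `classify_bugs`, which then fetches just that bug instead of iterating over the local bugs database. A sketch of calling the patched entry point directly; the model name and bug id are illustrative, and the import assumes the script is importable from the repository root.

```python
# Sketch only: "defect" and the bug id are placeholders.
from scripts.bug_classifier import classify_bugs

classify_bugs("defect", "default", "1547800")  # downloads the model if missing,
                                               # then classifies only this bug
```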
gh_patches_debug_25031
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CalendarEventDevice is deprecated
Hi there,
Latest HA beta `core-2022.5.0` shows this within the logs:
```
CalendarEventDevice is deprecated, modify WasteCollectionCalendar to extend CalendarEntity
```
Just wanted to let you know 🙂
More information can be found here:
https://github.com/home-assistant/core/pull/68843
And here:
https://developers.home-assistant.io/docs/core/entity/calendar/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/calendar.py`
Content:
```
1 """Calendar platform support for Waste Collection Schedule."""
2
3 import logging
4 from datetime import timedelta
5
6 from homeassistant.components.calendar import CalendarEventDevice
7
8 _LOGGER = logging.getLogger(__name__)
9
10
11 async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
12 """Set up calendar platform."""
13 # We only want this platform to be set up via discovery.
14 if discovery_info is None:
15 return
16
17 entities = []
18
19 api = discovery_info["api"]
20
21 for scraper in api.scrapers:
22 entities.append(WasteCollectionCalendar(api, scraper))
23
24 async_add_entities(entities)
25
26
27 class WasteCollectionCalendar(CalendarEventDevice):
28 """Calendar entity class."""
29
30 def __init__(self, api, scraper):
31 self._api = api
32 self._scraper = scraper
33 self._attr_unique_id = scraper.unique_id + "_calendar"
34
35 @property
36 def name(self):
37 """Return entity name."""
38 return self._scraper.calendar_title
39
40 @property
41 def event(self):
42 """Return next collection event."""
43 collections = self._scraper.get_upcoming(count=1, include_today=True)
44 if len(collections) == 0:
45 return None
46 else:
47 return self._convert(collections[0])
48
49 async def async_get_events(self, hass, start_date, end_date):
50 """Return all events within specified time span."""
51 collections = []
52 for a in self._scraper.get_upcoming(include_today=True):
53 if a.date >= start_date.date() and a.date <= end_date.date():
54 collections.append(self._convert(a))
55 return collections
56
57 def _convert(self, collection):
58 """Convert an collection into a Home Assistant calendar event."""
59 return {
60 "uid": f"{self._scraper.calendar_title}-{collection.date.isoformat()}-{collection.type}",
61 "summary": collection.type,
62 "start": {"date": collection.date.isoformat()},
63 "end": {"date": (collection.date + timedelta(days=1)).isoformat()},
64 "allDay": True,
65 }
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py
--- a/custom_components/waste_collection_schedule/calendar.py
+++ b/custom_components/waste_collection_schedule/calendar.py
@@ -3,7 +3,7 @@
import logging
from datetime import timedelta
-from homeassistant.components.calendar import CalendarEventDevice
+from homeassistant.components.calendar import CalendarEntity, CalendarEvent
_LOGGER = logging.getLogger(__name__)
@@ -24,7 +24,7 @@
async_add_entities(entities)
-class WasteCollectionCalendar(CalendarEventDevice):
+class WasteCollectionCalendar(CalendarEntity):
"""Calendar entity class."""
def __init__(self, api, scraper):
@@ -56,10 +56,8 @@
def _convert(self, collection):
"""Convert an collection into a Home Assistant calendar event."""
- return {
- "uid": f"{self._scraper.calendar_title}-{collection.date.isoformat()}-{collection.type}",
- "summary": collection.type,
- "start": {"date": collection.date.isoformat()},
- "end": {"date": (collection.date + timedelta(days=1)).isoformat()},
- "allDay": True,
- }
+ return CalendarEvent(
+ summary=collection.type,
+ start=collection.date,
+ end=collection.date + timedelta(days=1),
+ )
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py\n--- a/custom_components/waste_collection_schedule/calendar.py\n+++ b/custom_components/waste_collection_schedule/calendar.py\n@@ -3,7 +3,7 @@\n import logging\n from datetime import timedelta\n \n-from homeassistant.components.calendar import CalendarEventDevice\n+from homeassistant.components.calendar import CalendarEntity, CalendarEvent\n \n _LOGGER = logging.getLogger(__name__)\n \n@@ -24,7 +24,7 @@\n async_add_entities(entities)\n \n \n-class WasteCollectionCalendar(CalendarEventDevice):\n+class WasteCollectionCalendar(CalendarEntity):\n \"\"\"Calendar entity class.\"\"\"\n \n def __init__(self, api, scraper):\n@@ -56,10 +56,8 @@\n \n def _convert(self, collection):\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n- return {\n- \"uid\": f\"{self._scraper.calendar_title}-{collection.date.isoformat()}-{collection.type}\",\n- \"summary\": collection.type,\n- \"start\": {\"date\": collection.date.isoformat()},\n- \"end\": {\"date\": (collection.date + timedelta(days=1)).isoformat()},\n- \"allDay\": True,\n- }\n+ return CalendarEvent(\n+ summary=collection.type,\n+ start=collection.date,\n+ end=collection.date + timedelta(days=1),\n+ )\n", "issue": "CalendarEventDevice is deprecated\nHi there,\r\n\r\nLatest HA beta `core-2022.5.0` shows this within the logs:\r\n```\r\nCalendarEventDevice is deprecated, modify WasteCollectionCalendar to extend CalendarEntity\r\n```\r\n\r\nJust wanted to let you know \ud83d\ude42\r\n\r\nMore information can be found here:\r\nhttps://github.com/home-assistant/core/pull/68843\r\n\r\nAnd here:\r\nhttps://developers.home-assistant.io/docs/core/entity/calendar/\n", "before_files": [{"content": "\"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n\nimport logging\nfrom datetime import timedelta\n\nfrom homeassistant.components.calendar import CalendarEventDevice\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up calendar platform.\"\"\"\n # We only want this platform to be set up via discovery.\n if discovery_info is None:\n return\n\n entities = []\n\n api = discovery_info[\"api\"]\n\n for scraper in api.scrapers:\n entities.append(WasteCollectionCalendar(api, scraper))\n\n async_add_entities(entities)\n\n\nclass WasteCollectionCalendar(CalendarEventDevice):\n \"\"\"Calendar entity class.\"\"\"\n\n def __init__(self, api, scraper):\n self._api = api\n self._scraper = scraper\n self._attr_unique_id = scraper.unique_id + \"_calendar\"\n\n @property\n def name(self):\n \"\"\"Return entity name.\"\"\"\n return self._scraper.calendar_title\n\n @property\n def event(self):\n \"\"\"Return next collection event.\"\"\"\n collections = self._scraper.get_upcoming(count=1, include_today=True)\n if len(collections) == 0:\n return None\n else:\n return self._convert(collections[0])\n\n async def async_get_events(self, hass, start_date, end_date):\n \"\"\"Return all events within specified time span.\"\"\"\n collections = []\n for a in self._scraper.get_upcoming(include_today=True):\n if a.date >= start_date.date() and a.date <= end_date.date():\n collections.append(self._convert(a))\n return collections\n\n def _convert(self, collection):\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return {\n \"uid\": 
f\"{self._scraper.calendar_title}-{collection.date.isoformat()}-{collection.type}\",\n \"summary\": collection.type,\n \"start\": {\"date\": collection.date.isoformat()},\n \"end\": {\"date\": (collection.date + timedelta(days=1)).isoformat()},\n \"allDay\": True,\n }\n", "path": "custom_components/waste_collection_schedule/calendar.py"}], "after_files": [{"content": "\"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n\nimport logging\nfrom datetime import timedelta\n\nfrom homeassistant.components.calendar import CalendarEntity, CalendarEvent\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up calendar platform.\"\"\"\n # We only want this platform to be set up via discovery.\n if discovery_info is None:\n return\n\n entities = []\n\n api = discovery_info[\"api\"]\n\n for scraper in api.scrapers:\n entities.append(WasteCollectionCalendar(api, scraper))\n\n async_add_entities(entities)\n\n\nclass WasteCollectionCalendar(CalendarEntity):\n \"\"\"Calendar entity class.\"\"\"\n\n def __init__(self, api, scraper):\n self._api = api\n self._scraper = scraper\n self._attr_unique_id = scraper.unique_id + \"_calendar\"\n\n @property\n def name(self):\n \"\"\"Return entity name.\"\"\"\n return self._scraper.calendar_title\n\n @property\n def event(self):\n \"\"\"Return next collection event.\"\"\"\n collections = self._scraper.get_upcoming(count=1, include_today=True)\n if len(collections) == 0:\n return None\n else:\n return self._convert(collections[0])\n\n async def async_get_events(self, hass, start_date, end_date):\n \"\"\"Return all events within specified time span.\"\"\"\n collections = []\n for a in self._scraper.get_upcoming(include_today=True):\n if a.date >= start_date.date() and a.date <= end_date.date():\n collections.append(self._convert(a))\n return collections\n\n def _convert(self, collection):\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return CalendarEvent(\n summary=collection.type,\n start=collection.date,\n end=collection.date + timedelta(days=1),\n )\n", "path": "custom_components/waste_collection_schedule/calendar.py"}]}
| 924 | 295 |
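The calendar migration in the record above amounts to swapping a hand-built event dict for Home Assistant's `CalendarEvent` dataclass plus simple date arithmetic for all-day events. The sketch below re-expresses only that conversion step with stdlib stand-ins, so it runs without Home Assistant installed: `CollectionEvent` is a hypothetical mirror of the `homeassistant.components.calendar.CalendarEvent` fields used in the diff, and `Collection` stands in for the scraper's collection objects.

```python
from dataclasses import dataclass
from datetime import date, timedelta


@dataclass
class Collection:
    """Stand-in for one scraped waste-collection entry (type + pickup date)."""
    type: str
    date: date


@dataclass
class CollectionEvent:
    """Hypothetical mirror of the CalendarEvent fields used in the diff above."""
    summary: str
    start: date
    end: date


def convert(collection: Collection) -> CollectionEvent:
    # All-day events span [date, date + 1 day), exactly as in the golden diff.
    return CollectionEvent(
        summary=collection.type,
        start=collection.date,
        end=collection.date + timedelta(days=1),
    )


print(convert(Collection(type="residual waste", date=date(2022, 5, 4))))
```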
gh_patches_debug_17685
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-3906
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mypy invalid syntax error with python 2
### Which version of dd-trace-py are you using?
v1.2.0
### Which version of pip are you using?
22.1.2
### Which version of the libraries are you using?
```
ddtrace==1.2.0
mypy[python2]==0.961
```
### How can we reproduce your problem?
1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce
2. Run `docker run --rm -it (docker build .)`
### What is the result that you get?
```
usr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax
Found 1 error in 1 file (errors prevented further checking)
```
### What is the result that you expected?
```
Success: no issues found in 1 source file
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/redis/patch.py`
Content:
```
1 import redis
2
3 from ddtrace import config
4 from ddtrace.vendor import wrapt
5
6 from ...internal.utils.formats import stringify_cache_args
7 from ...pin import Pin
8 from ..trace_utils import unwrap
9 from .util import _trace_redis_cmd
10 from .util import _trace_redis_execute_pipeline
11
12
13 config._add("redis", dict(_default_service="redis"))
14
15
16 def patch():
17 """Patch the instrumented methods
18
19 This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top
20 of Redis and StrictRedis. However, it means that any "import redis.Redis" won't be instrumented.
21 """
22 if getattr(redis, "_datadog_patch", False):
23 return
24 setattr(redis, "_datadog_patch", True)
25
26 _w = wrapt.wrap_function_wrapper
27
28 if redis.VERSION < (3, 0, 0):
29 _w("redis", "StrictRedis.execute_command", traced_execute_command)
30 _w("redis", "StrictRedis.pipeline", traced_pipeline)
31 _w("redis", "Redis.pipeline", traced_pipeline)
32 _w("redis.client", "BasePipeline.execute", traced_execute_pipeline)
33 _w("redis.client", "BasePipeline.immediate_execute_command", traced_execute_command)
34 else:
35 _w("redis", "Redis.execute_command", traced_execute_command)
36 _w("redis", "Redis.pipeline", traced_pipeline)
37 _w("redis.client", "Pipeline.execute", traced_execute_pipeline)
38 _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command)
39 if redis.VERSION >= (4, 2, 0):
40 from .asyncio_patch import traced_async_execute_command
41 from .asyncio_patch import traced_async_execute_pipeline
42
43 _w("redis.asyncio.client", "Redis.execute_command", traced_async_execute_command)
44 _w("redis.asyncio.client", "Redis.pipeline", traced_pipeline)
45 _w("redis.asyncio.client", "Pipeline.execute", traced_async_execute_pipeline)
46 _w("redis.asyncio.client", "Pipeline.immediate_execute_command", traced_async_execute_command)
47 Pin(service=None).onto(redis.StrictRedis)
48
49
50 def unpatch():
51 if getattr(redis, "_datadog_patch", False):
52 setattr(redis, "_datadog_patch", False)
53
54 if redis.VERSION < (3, 0, 0):
55 unwrap(redis.StrictRedis, "execute_command")
56 unwrap(redis.StrictRedis, "pipeline")
57 unwrap(redis.Redis, "pipeline")
58 unwrap(redis.client.BasePipeline, "execute")
59 unwrap(redis.client.BasePipeline, "immediate_execute_command")
60 else:
61 unwrap(redis.Redis, "execute_command")
62 unwrap(redis.Redis, "pipeline")
63 unwrap(redis.client.Pipeline, "execute")
64 unwrap(redis.client.Pipeline, "immediate_execute_command")
65 if redis.VERSION >= (4, 2, 0):
66 unwrap(redis.asyncio.client.Redis, "execute_command")
67 unwrap(redis.asyncio.client.Redis, "pipeline")
68 unwrap(redis.asyncio.client.Pipeline, "execute")
69 unwrap(redis.asyncio.client.Pipeline, "immediate_execute_command")
70
71
72 #
73 # tracing functions
74 #
75 def traced_execute_command(func, instance, args, kwargs):
76 pin = Pin.get_from(instance)
77 if not pin or not pin.enabled():
78 return func(*args, **kwargs)
79
80 with _trace_redis_cmd(pin, config.redis, instance, args):
81 return func(*args, **kwargs)
82
83
84 def traced_pipeline(func, instance, args, kwargs):
85 pipeline = func(*args, **kwargs)
86 pin = Pin.get_from(instance)
87 if pin:
88 pin.onto(pipeline)
89 return pipeline
90
91
92 def traced_execute_pipeline(func, instance, args, kwargs):
93 pin = Pin.get_from(instance)
94 if not pin or not pin.enabled():
95 return func(*args, **kwargs)
96
97 cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]
98 resource = "\n".join(cmds)
99 with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):
100 return func(*args, **kwargs)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py
--- a/ddtrace/contrib/redis/patch.py
+++ b/ddtrace/contrib/redis/patch.py
@@ -1,4 +1,5 @@
import redis
+from six import PY3
from ddtrace import config
from ddtrace.vendor import wrapt
@@ -36,7 +37,8 @@
_w("redis", "Redis.pipeline", traced_pipeline)
_w("redis.client", "Pipeline.execute", traced_execute_pipeline)
_w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command)
- if redis.VERSION >= (4, 2, 0):
+ # Avoid mypy invalid syntax errors when parsing Python 2 files
+ if PY3 and redis.VERSION >= (4, 2, 0):
from .asyncio_patch import traced_async_execute_command
from .asyncio_patch import traced_async_execute_pipeline
|
{"golden_diff": "diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py\n--- a/ddtrace/contrib/redis/patch.py\n+++ b/ddtrace/contrib/redis/patch.py\n@@ -1,4 +1,5 @@\n import redis\n+from six import PY3\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -36,7 +37,8 @@\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n- if redis.VERSION >= (4, 2, 0):\n+ # Avoid mypy invalid syntax errors when parsing Python 2 files\n+ if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n", "issue": "mypy invalid syntax error with python 2\n### Which version of dd-trace-py are you using?\r\n\r\nv1.2.0\r\n\r\n### Which version of pip are you using?\r\n\r\n22.1.2\r\n\r\n### Which version of the libraries are you using?\r\n\r\n```\r\nddtrace==1.2.0\r\nmypy[python2]==0.961\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce\r\n2. Run `docker run --rm -it (docker build .)`\r\n\r\n### What is the result that you get?\r\n\r\n```\r\nusr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\n```\r\nSuccess: no issues found in 1 source file\r\n```\r\n\n", "before_files": [{"content": "import redis\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", dict(_default_service=\"redis\"))\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. 
However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n if redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}], "after_files": [{"content": "import redis\nfrom six import PY3\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", dict(_default_service=\"redis\"))\n\n\ndef 
patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n # Avoid mypy invalid syntax errors when parsing Python 2 files\n if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}]}
| 1,564 | 213 |
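The dd-trace-py fix above works by making the Python-3-only import unreachable when the file is read in Python 2 mode, so a checker such as mypy never has to parse the `async def` syntax inside `asyncio_patch.py`. Below is a minimal, dependency-free sketch of that guarding pattern; the real patch checks `six.PY3`, while this sketch uses an equivalent `sys.version_info` test, and the `patch_*` names are hypothetical.

```python
import sys

# The real patch uses ``six.PY3``; ``sys.version_info`` is an equivalent,
# stdlib-only guard that type checkers also recognise as a version check.
PY3 = sys.version_info[0] >= 3


def patch_sync():
    print("sync redis client patched")


def patch_all():
    patch_sync()
    if PY3:
        # Imported only on Python 3, so a Python 2 parse (e.g. mypy run with
        # a 2.x python_version) never reads any async-only source.
        import asyncio  # placeholder for the async-only asyncio_patch module

        print("async redis client patched via", asyncio.__name__)


if __name__ == "__main__":
    patch_all()
```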
gh_patches_debug_23956
|
rasdani/github-patches
|
git_diff
|
xorbitsai__inference-407
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: Asking to pad but the tokenizer does not have a padding token
### Describe the bug
When using Llama-2 PyTorch model for embedding creation, there's an issue:
----> 1 model.create_embedding("Once upon a time, there was a very old computer")
File ~/inference/xinference/client.py:324, in RESTfulGenerateModelHandle.create_embedding(self, input)
322 response = requests.post(url, json=request_body)
323 if response.status_code != 200:
--> 324 raise RuntimeError(
325 f"Failed to create the embeddings, detail: {response.json()['detail']}"
326 )
328 response_data = response.json()
329 return response_data
RuntimeError: Failed to create the embeddings, detail: [address=127.0.0.1:35477, pid=951303] Asking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xinference/model/llm/pytorch/llama_2.py`
Content:
```
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional
16
17 from ..llm_family import LLMFamilyV1, LLMSpecV1
18 from .core import PytorchChatModel, PytorchModel, PytorchModelConfig
19
20
21 class LlamaPytorchModel(PytorchModel):
22 def __init__(
23 self,
24 model_uid: str,
25 model_family: "LLMFamilyV1",
26 model_spec: "LLMSpecV1",
27 quantization: str,
28 model_path: str,
29 pytorch_model_config: Optional[PytorchModelConfig] = None,
30 ):
31 super().__init__(
32 model_uid,
33 model_family,
34 model_spec,
35 quantization,
36 model_path,
37 pytorch_model_config=pytorch_model_config,
38 )
39
40 def _load_model(self, kwargs: dict):
41 model, tokenizer = super()._load_model(kwargs)
42 model.config.eos_token_id = tokenizer.eos_token_id
43 model.config.pad_token_id = tokenizer.pad_token_id
44 return model, tokenizer
45
46 @classmethod
47 def match(cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1") -> bool:
48 if llm_spec.model_format != "pytorch":
49 return False
50 if "llama-2" not in llm_family.model_name:
51 return False
52 if "generate" not in llm_family.model_ability:
53 return False
54 return True
55
56
57 class LlamaPytorchChatModel(PytorchChatModel):
58 def __init__(
59 self,
60 model_uid: str,
61 model_family: "LLMFamilyV1",
62 model_spec: "LLMSpecV1",
63 quantization: str,
64 model_path: str,
65 pytorch_model_config: Optional["PytorchModelConfig"] = None,
66 ):
67 super().__init__(
68 model_uid,
69 model_family,
70 model_spec,
71 quantization,
72 model_path,
73 pytorch_model_config=pytorch_model_config,
74 )
75 self._use_fast_tokenizer = False
76
77 def _load_model(self, kwargs: dict):
78 model, tokenizer = super()._load_model(kwargs)
79 model.config.eos_token_id = tokenizer.eos_token_id
80 model.config.pad_token_id = tokenizer.pad_token_id
81 return model, tokenizer
82
83 @classmethod
84 def match(cls, llm_family: "LLMFamilyV1", llm_spec: "LLMSpecV1") -> bool:
85 if llm_spec.model_format != "pytorch":
86 return False
87 if "llama-2" not in llm_family.model_name:
88 return False
89 if "chat" not in llm_family.model_ability:
90 return False
91 return True
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xinference/model/llm/pytorch/llama_2.py b/xinference/model/llm/pytorch/llama_2.py
--- a/xinference/model/llm/pytorch/llama_2.py
+++ b/xinference/model/llm/pytorch/llama_2.py
@@ -39,6 +39,9 @@
def _load_model(self, kwargs: dict):
model, tokenizer = super()._load_model(kwargs)
+ # Llama has no pad token by default
+ # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125
+ tokenizer.pad_token = tokenizer.eos_token
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
return model, tokenizer
@@ -76,6 +79,9 @@
def _load_model(self, kwargs: dict):
model, tokenizer = super()._load_model(kwargs)
+ # Llama has no pad token by default
+ # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125
+ tokenizer.pad_token = tokenizer.eos_token
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
return model, tokenizer
|
{"golden_diff": "diff --git a/xinference/model/llm/pytorch/llama_2.py b/xinference/model/llm/pytorch/llama_2.py\n--- a/xinference/model/llm/pytorch/llama_2.py\n+++ b/xinference/model/llm/pytorch/llama_2.py\n@@ -39,6 +39,9 @@\n \n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n+ # Llama has no pad token by default\n+ # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125\n+ tokenizer.pad_token = tokenizer.eos_token\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n@@ -76,6 +79,9 @@\n \n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n+ # Llama has no pad token by default\n+ # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125\n+ tokenizer.pad_token = tokenizer.eos_token\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n", "issue": "BUG: Asking to pad but the tokenizer does not have a padding token\n### Describe the bug\r\nWhen using Llama-2 PyTorch model for embedding creation, there's an issue:\r\n\r\n----> 1 model.create_embedding(\"Once upon a time, there was a very old computer\")\r\n\r\nFile ~/inference/xinference/client.py:324, in RESTfulGenerateModelHandle.create_embedding(self, input)\r\n 322 response = requests.post(url, json=request_body)\r\n 323 if response.status_code != 200:\r\n--> 324 raise RuntimeError(\r\n 325 f\"Failed to create the embeddings, detail: {response.json()['detail']}\"\r\n 326 )\r\n 328 response_data = response.json()\r\n 329 return response_data\r\n\r\nRuntimeError: Failed to create the embeddings, detail: [address=127.0.0.1:35477, pid=951303] Asking to pad but the tokenizer does not have a padding token. 
Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nfrom ..llm_family import LLMFamilyV1, LLMSpecV1\nfrom .core import PytorchChatModel, PytorchModel, PytorchModelConfig\n\n\nclass LlamaPytorchModel(PytorchModel):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n pytorch_model_config: Optional[PytorchModelConfig] = None,\n ):\n super().__init__(\n model_uid,\n model_family,\n model_spec,\n quantization,\n model_path,\n pytorch_model_config=pytorch_model_config,\n )\n\n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"pytorch\":\n return False\n if \"llama-2\" not in llm_family.model_name:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return True\n\n\nclass LlamaPytorchChatModel(PytorchChatModel):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n pytorch_model_config: Optional[\"PytorchModelConfig\"] = None,\n ):\n super().__init__(\n model_uid,\n model_family,\n model_spec,\n quantization,\n model_path,\n pytorch_model_config=pytorch_model_config,\n )\n self._use_fast_tokenizer = False\n\n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"pytorch\":\n return False\n if \"llama-2\" not in llm_family.model_name:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return True\n", "path": "xinference/model/llm/pytorch/llama_2.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional\n\nfrom 
..llm_family import LLMFamilyV1, LLMSpecV1\nfrom .core import PytorchChatModel, PytorchModel, PytorchModelConfig\n\n\nclass LlamaPytorchModel(PytorchModel):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n pytorch_model_config: Optional[PytorchModelConfig] = None,\n ):\n super().__init__(\n model_uid,\n model_family,\n model_spec,\n quantization,\n model_path,\n pytorch_model_config=pytorch_model_config,\n )\n\n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n # Llama has no pad token by default\n # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125\n tokenizer.pad_token = tokenizer.eos_token\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"pytorch\":\n return False\n if \"llama-2\" not in llm_family.model_name:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return True\n\n\nclass LlamaPytorchChatModel(PytorchChatModel):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n pytorch_model_config: Optional[\"PytorchModelConfig\"] = None,\n ):\n super().__init__(\n model_uid,\n model_family,\n model_spec,\n quantization,\n model_path,\n pytorch_model_config=pytorch_model_config,\n )\n self._use_fast_tokenizer = False\n\n def _load_model(self, kwargs: dict):\n model, tokenizer = super()._load_model(kwargs)\n # Llama has no pad token by default\n # https://github.com/huggingface/transformers/blob/07998ef39926b76d3f6667025535d0859eed61c3/docs/source/en/llm_tutorial.md?plain=1#L125\n tokenizer.pad_token = tokenizer.eos_token\n model.config.eos_token_id = tokenizer.eos_token_id\n model.config.pad_token_id = tokenizer.pad_token_id\n return model, tokenizer\n\n @classmethod\n def match(cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\") -> bool:\n if llm_spec.model_format != \"pytorch\":\n return False\n if \"llama-2\" not in llm_family.model_name:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return True\n", "path": "xinference/model/llm/pytorch/llama_2.py"}]}
| 1,433 | 394 |
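The two-line workaround in the Llama-2 record above can be exercised on its own: a tokenizer that ships without a pad token raises exactly the error quoted in the issue as soon as `padding=True` is requested, and aliasing EOS as the pad token resolves it. The sketch assumes the `transformers` package is available; the checkpoint id is purely illustrative, since any Llama-style tokenizer without a pad token behaves the same way.

```python
from transformers import AutoTokenizer  # assumption: transformers is installed

# Illustrative checkpoint id; any tokenizer that ships without a pad token
# (Llama / Llama-2 style) reproduces the "Asking to pad ..." error.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

if tokenizer.pad_token is None:
    # Same workaround as the golden diff: reuse the EOS token for padding.
    tokenizer.pad_token = tokenizer.eos_token

batch = tokenizer(
    ["Once upon a time, there was a very old computer", "short text"],
    padding=True,
)
print([len(ids) for ids in batch["input_ids"]])  # equal lengths after padding
```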
gh_patches_debug_9221
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-2030
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multiple mutable rev warnings issued on `autoupdate`
when running `pre-commit autoupdate` I get 2 warnings per mutable rev, when I expected 0 see #974
```sh
~/projects/pytest-cov pre-commit-autoupdate pipx run pre-commit autoupdate
[WARNING] The 'rev' field of repo 'https://github.com/pre-commit/pre-commit-hooks' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
[WARNING] The 'rev' field of repo 'https://github.com/timothycrosley/isort' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
[WARNING] The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
[WARNING] The 'rev' field of repo 'https://github.com/pre-commit/pre-commit-hooks' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
[WARNING] The 'rev' field of repo 'https://github.com/timothycrosley/isort' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
[WARNING] The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.
Updating https://github.com/pre-commit/pre-commit-hooks ... updating master -> v4.0.1.
Updating https://github.com/timothycrosley/isort ... updating master -> 5.9.3.
Updating https://gitlab.com/pycqa/flake8 ... updating master -> 3.9.2.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/migrate_config.py`
Content:
```
1 import re
2 import textwrap
3
4 import yaml
5
6 from pre_commit.clientlib import load_config
7 from pre_commit.util import yaml_load
8
9
10 def _is_header_line(line: str) -> bool:
11 return line.startswith(('#', '---')) or not line.strip()
12
13
14 def _migrate_map(contents: str) -> str:
15 if isinstance(yaml_load(contents), list):
16 # Find the first non-header line
17 lines = contents.splitlines(True)
18 i = 0
19 # Only loop on non empty configuration file
20 while i < len(lines) and _is_header_line(lines[i]):
21 i += 1
22
23 header = ''.join(lines[:i])
24 rest = ''.join(lines[i:])
25
26 # If they are using the "default" flow style of yaml, this operation
27 # will yield a valid configuration
28 try:
29 trial_contents = f'{header}repos:\n{rest}'
30 yaml_load(trial_contents)
31 contents = trial_contents
32 except yaml.YAMLError:
33 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}'
34
35 return contents
36
37
38 def _migrate_sha_to_rev(contents: str) -> str:
39 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
40
41
42 def migrate_config(config_file: str, quiet: bool = False) -> int:
43 # ensure that the configuration is a valid pre-commit configuration
44 load_config(config_file)
45
46 with open(config_file) as f:
47 orig_contents = contents = f.read()
48
49 contents = _migrate_map(contents)
50 contents = _migrate_sha_to_rev(contents)
51
52 if contents != orig_contents:
53 with open(config_file, 'w') as f:
54 f.write(contents)
55
56 print('Configuration has been migrated.')
57 elif not quiet:
58 print('Configuration is already migrated.')
59 return 0
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -3,7 +3,6 @@
import yaml
-from pre_commit.clientlib import load_config
from pre_commit.util import yaml_load
@@ -40,9 +39,6 @@
def migrate_config(config_file: str, quiet: bool = False) -> int:
- # ensure that the configuration is a valid pre-commit configuration
- load_config(config_file)
-
with open(config_file) as f:
orig_contents = contents = f.read()
|
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -3,7 +3,6 @@\n \n import yaml\n \n-from pre_commit.clientlib import load_config\n from pre_commit.util import yaml_load\n \n \n@@ -40,9 +39,6 @@\n \n \n def migrate_config(config_file: str, quiet: bool = False) -> int:\n- # ensure that the configuration is a valid pre-commit configuration\n- load_config(config_file)\n-\n with open(config_file) as f:\n orig_contents = contents = f.read()\n", "issue": "multiple mutable rev warnings issued on `autoupdate`\nwhen running `pre-commit autoupdate` I get 2 warnings per mutable rev, when I expected 0 see #974\r\n\r\n```sh\r\n~/projects/pytest-cov \ue0b0 \ue0a0 pre-commit-autoupdate \ue0b0 pipx run pre-commit autoupdate \r\n[WARNING] The 'rev' field of repo 'https://github.com/pre-commit/pre-commit-hooks' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\n[WARNING] The 'rev' field of repo 'https://github.com/timothycrosley/isort' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\n[WARNING] The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\n[WARNING] The 'rev' field of repo 'https://github.com/pre-commit/pre-commit-hooks' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\n[WARNING] The 'rev' field of repo 'https://github.com/timothycrosley/isort' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\n[WARNING] The 'rev' field of repo 'https://gitlab.com/pycqa/flake8' appears to be a mutable reference (moving tag / branch). Mutable references are never updated after first install and are not supported. See https://pre-commit.com/#using-the-latest-version-for-a-repository for more details. Hint: `pre-commit autoupdate` often fixes this.\r\nUpdating https://github.com/pre-commit/pre-commit-hooks ... updating master -> v4.0.1.\r\nUpdating https://github.com/timothycrosley/isort ... updating master -> 5.9.3.\r\nUpdating https://gitlab.com/pycqa/flake8 ... 
updating master -> 3.9.2.\r\n```\n", "before_files": [{"content": "import re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.util import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n # ensure that the configuration is a valid pre-commit configuration\n load_config(config_file)\n\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}], "after_files": [{"content": "import re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.util import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
| 1,430 | 146 |
gh_patches_debug_7821
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1324
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AsyncPG Instrumentation Span Names are too long when using query string
**Is your feature request related to a problem?**
Not a problem per se however, the `asyncpg` instrumentation uses sets span names as the query string which results in some very messing looking trace names in jaeger, datadog, etc and outright doesn't work with promscale due to long queries exhaust the available bytes for btree indexes.
**Describe the solution you'd like**
- The ability to change the name of the span with a hook or something similar. The `httpx` instrumentation provides hooks that receive the span and the name can be updated there.
- Just use a shorter or truncated version of the query as the name.
Which alternative solutions or features have you considered?
Not using the `asyncpg` instrumentation and manually instrumenting specific queries.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing PostgreSQL queries made by the
17 `asyncpg <https://magicstack.github.io/asyncpg/current/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 import asyncpg
25 from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor
26
27 # You can optionally pass a custom TracerProvider to AsyncPGInstrumentor.instrument()
28 AsyncPGInstrumentor().instrument()
29 conn = await asyncpg.connect(user='user', password='password',
30 database='database', host='127.0.0.1')
31 values = await conn.fetch('''SELECT 42;''')
32
33 API
34 ---
35 """
36
37 from typing import Collection
38
39 import asyncpg
40 import wrapt
41
42 from opentelemetry import trace
43 from opentelemetry.instrumentation.asyncpg.package import _instruments
44 from opentelemetry.instrumentation.asyncpg.version import __version__
45 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
46 from opentelemetry.instrumentation.utils import unwrap
47 from opentelemetry.semconv.trace import (
48 DbSystemValues,
49 NetTransportValues,
50 SpanAttributes,
51 )
52 from opentelemetry.trace import SpanKind
53 from opentelemetry.trace.status import Status, StatusCode
54
55
56 def _hydrate_span_from_args(connection, query, parameters) -> dict:
57 """Get network and database attributes from connection."""
58 span_attributes = {
59 SpanAttributes.DB_SYSTEM: DbSystemValues.POSTGRESQL.value
60 }
61
62 # connection contains _params attribute which is a namedtuple ConnectionParameters.
63 # https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L68
64
65 params = getattr(connection, "_params", None)
66 dbname = getattr(params, "database", None)
67 if dbname:
68 span_attributes[SpanAttributes.DB_NAME] = dbname
69 user = getattr(params, "user", None)
70 if user:
71 span_attributes[SpanAttributes.DB_USER] = user
72
73 # connection contains _addr attribute which is either a host/port tuple, or unix socket string
74 # https://magicstack.github.io/asyncpg/current/_modules/asyncpg/connection.html
75 addr = getattr(connection, "_addr", None)
76 if isinstance(addr, tuple):
77 span_attributes[SpanAttributes.NET_PEER_NAME] = addr[0]
78 span_attributes[SpanAttributes.NET_PEER_PORT] = addr[1]
79 span_attributes[
80 SpanAttributes.NET_TRANSPORT
81 ] = NetTransportValues.IP_TCP.value
82 elif isinstance(addr, str):
83 span_attributes[SpanAttributes.NET_PEER_NAME] = addr
84 span_attributes[
85 SpanAttributes.NET_TRANSPORT
86 ] = NetTransportValues.UNIX.value
87
88 if query is not None:
89 span_attributes[SpanAttributes.DB_STATEMENT] = query
90
91 if parameters is not None and len(parameters) > 0:
92 span_attributes["db.statement.parameters"] = str(parameters)
93
94 return span_attributes
95
96
97 class AsyncPGInstrumentor(BaseInstrumentor):
98 def __init__(self, capture_parameters=False):
99 super().__init__()
100 self.capture_parameters = capture_parameters
101 self._tracer = None
102
103 def instrumentation_dependencies(self) -> Collection[str]:
104 return _instruments
105
106 def _instrument(self, **kwargs):
107 tracer_provider = kwargs.get("tracer_provider")
108 self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)
109
110 for method in [
111 "Connection.execute",
112 "Connection.executemany",
113 "Connection.fetch",
114 "Connection.fetchval",
115 "Connection.fetchrow",
116 ]:
117 wrapt.wrap_function_wrapper(
118 "asyncpg.connection", method, self._do_execute
119 )
120
121 def _uninstrument(self, **__):
122 for method in [
123 "execute",
124 "executemany",
125 "fetch",
126 "fetchval",
127 "fetchrow",
128 ]:
129 unwrap(asyncpg.Connection, method)
130
131 async def _do_execute(self, func, instance, args, kwargs):
132
133 exception = None
134 params = getattr(instance, "_params", {})
135 name = args[0] if args[0] else params.get("database", "postgresql")
136
137 with self._tracer.start_as_current_span(
138 name, kind=SpanKind.CLIENT
139 ) as span:
140 if span.is_recording():
141 span_attributes = _hydrate_span_from_args(
142 instance,
143 args[0],
144 args[1:] if self.capture_parameters else None,
145 )
146 for attribute, value in span_attributes.items():
147 span.set_attribute(attribute, value)
148
149 try:
150 result = await func(*args, **kwargs)
151 except Exception as exc: # pylint: disable=W0703
152 exception = exc
153 raise
154 finally:
155 if span.is_recording() and exception is not None:
156 span.set_status(Status(StatusCode.ERROR))
157
158 return result
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py
@@ -134,6 +134,11 @@
params = getattr(instance, "_params", {})
name = args[0] if args[0] else params.get("database", "postgresql")
+ try:
+ name = name.split()[0]
+ except IndexError:
+ name = ""
+
with self._tracer.start_as_current_span(
name, kind=SpanKind.CLIENT
) as span:
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py\n@@ -134,6 +134,11 @@\n params = getattr(instance, \"_params\", {})\n name = args[0] if args[0] else params.get(\"database\", \"postgresql\")\n \n+ try:\n+ name = name.split()[0]\n+ except IndexError:\n+ name = \"\"\n+\n with self._tracer.start_as_current_span(\n name, kind=SpanKind.CLIENT\n ) as span:\n", "issue": "AsyncPG Instrumentation Span Names are too long when using query string\n**Is your feature request related to a problem?**\r\nNot a problem per se however, the `asyncpg` instrumentation uses sets span names as the query string which results in some very messing looking trace names in jaeger, datadog, etc and outright doesn't work with promscale due to long queries exhaust the available bytes for btree indexes.\r\n\r\n**Describe the solution you'd like**\r\n- The ability to change the name of the span with a hook or something similar. The `httpx` instrumentation provides hooks that receive the span and the name can be updated there.\r\n- Just use a shorter or truncated version of the query as the name.\r\n\r\nWhich alternative solutions or features have you considered?\r\nNot using the `asyncpg` instrumentation and manually instrumenting specific queries.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing PostgreSQL queries made by the\n`asyncpg <https://magicstack.github.io/asyncpg/current/>`_ library.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n import asyncpg\n from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor\n\n # You can optionally pass a custom TracerProvider to AsyncPGInstrumentor.instrument()\n AsyncPGInstrumentor().instrument()\n conn = await asyncpg.connect(user='user', password='password',\n database='database', host='127.0.0.1')\n values = await conn.fetch('''SELECT 42;''')\n\nAPI\n---\n\"\"\"\n\nfrom typing import Collection\n\nimport asyncpg\nimport wrapt\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.asyncpg.package import _instruments\nfrom opentelemetry.instrumentation.asyncpg.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import (\n DbSystemValues,\n NetTransportValues,\n SpanAttributes,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _hydrate_span_from_args(connection, query, parameters) -> dict:\n \"\"\"Get network and database attributes from connection.\"\"\"\n span_attributes = {\n SpanAttributes.DB_SYSTEM: DbSystemValues.POSTGRESQL.value\n }\n\n # connection contains _params attribute which is a namedtuple ConnectionParameters.\n # https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L68\n\n params = getattr(connection, \"_params\", None)\n dbname = getattr(params, \"database\", None)\n if dbname:\n span_attributes[SpanAttributes.DB_NAME] = dbname\n user = getattr(params, \"user\", None)\n if user:\n span_attributes[SpanAttributes.DB_USER] = user\n\n # connection contains _addr attribute which is either a host/port tuple, or unix socket string\n # https://magicstack.github.io/asyncpg/current/_modules/asyncpg/connection.html\n addr = getattr(connection, \"_addr\", None)\n if isinstance(addr, tuple):\n span_attributes[SpanAttributes.NET_PEER_NAME] = addr[0]\n span_attributes[SpanAttributes.NET_PEER_PORT] = addr[1]\n span_attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n elif isinstance(addr, str):\n span_attributes[SpanAttributes.NET_PEER_NAME] = addr\n span_attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.UNIX.value\n\n if query is not None:\n span_attributes[SpanAttributes.DB_STATEMENT] = query\n\n if parameters is not None and len(parameters) > 0:\n span_attributes[\"db.statement.parameters\"] = str(parameters)\n\n return span_attributes\n\n\nclass AsyncPGInstrumentor(BaseInstrumentor):\n def __init__(self, capture_parameters=False):\n super().__init__()\n self.capture_parameters = capture_parameters\n self._tracer = None\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n for method in [\n \"Connection.execute\",\n \"Connection.executemany\",\n \"Connection.fetch\",\n \"Connection.fetchval\",\n \"Connection.fetchrow\",\n ]:\n wrapt.wrap_function_wrapper(\n \"asyncpg.connection\", method, self._do_execute\n )\n\n def _uninstrument(self, **__):\n for method in [\n \"execute\",\n \"executemany\",\n \"fetch\",\n \"fetchval\",\n \"fetchrow\",\n ]:\n unwrap(asyncpg.Connection, method)\n\n async def _do_execute(self, func, instance, args, kwargs):\n\n exception = None\n params = getattr(instance, \"_params\", {})\n name = args[0] if args[0] else params.get(\"database\", 
\"postgresql\")\n\n with self._tracer.start_as_current_span(\n name, kind=SpanKind.CLIENT\n ) as span:\n if span.is_recording():\n span_attributes = _hydrate_span_from_args(\n instance,\n args[0],\n args[1:] if self.capture_parameters else None,\n )\n for attribute, value in span_attributes.items():\n span.set_attribute(attribute, value)\n\n try:\n result = await func(*args, **kwargs)\n except Exception as exc: # pylint: disable=W0703\n exception = exc\n raise\n finally:\n if span.is_recording() and exception is not None:\n span.set_status(Status(StatusCode.ERROR))\n\n return result\n", "path": "instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing PostgreSQL queries made by the\n`asyncpg <https://magicstack.github.io/asyncpg/current/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n import asyncpg\n from opentelemetry.instrumentation.asyncpg import AsyncPGInstrumentor\n\n # You can optionally pass a custom TracerProvider to AsyncPGInstrumentor.instrument()\n AsyncPGInstrumentor().instrument()\n conn = await asyncpg.connect(user='user', password='password',\n database='database', host='127.0.0.1')\n values = await conn.fetch('''SELECT 42;''')\n\nAPI\n---\n\"\"\"\n\nfrom typing import Collection\n\nimport asyncpg\nimport wrapt\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.asyncpg.package import _instruments\nfrom opentelemetry.instrumentation.asyncpg.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import (\n DbSystemValues,\n NetTransportValues,\n SpanAttributes,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _hydrate_span_from_args(connection, query, parameters) -> dict:\n \"\"\"Get network and database attributes from connection.\"\"\"\n span_attributes = {\n SpanAttributes.DB_SYSTEM: DbSystemValues.POSTGRESQL.value\n }\n\n # connection contains _params attribute which is a namedtuple ConnectionParameters.\n # https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L68\n\n params = getattr(connection, \"_params\", None)\n dbname = getattr(params, \"database\", None)\n if dbname:\n span_attributes[SpanAttributes.DB_NAME] = dbname\n user = getattr(params, \"user\", None)\n if user:\n span_attributes[SpanAttributes.DB_USER] = user\n\n # connection contains _addr attribute which is either a host/port tuple, or unix socket string\n # https://magicstack.github.io/asyncpg/current/_modules/asyncpg/connection.html\n addr = getattr(connection, \"_addr\", None)\n if isinstance(addr, tuple):\n span_attributes[SpanAttributes.NET_PEER_NAME] = addr[0]\n span_attributes[SpanAttributes.NET_PEER_PORT] = addr[1]\n span_attributes[\n 
SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n elif isinstance(addr, str):\n span_attributes[SpanAttributes.NET_PEER_NAME] = addr\n span_attributes[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.UNIX.value\n\n if query is not None:\n span_attributes[SpanAttributes.DB_STATEMENT] = query\n\n if parameters is not None and len(parameters) > 0:\n span_attributes[\"db.statement.parameters\"] = str(parameters)\n\n return span_attributes\n\n\nclass AsyncPGInstrumentor(BaseInstrumentor):\n def __init__(self, capture_parameters=False):\n super().__init__()\n self.capture_parameters = capture_parameters\n self._tracer = None\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n for method in [\n \"Connection.execute\",\n \"Connection.executemany\",\n \"Connection.fetch\",\n \"Connection.fetchval\",\n \"Connection.fetchrow\",\n ]:\n wrapt.wrap_function_wrapper(\n \"asyncpg.connection\", method, self._do_execute\n )\n\n def _uninstrument(self, **__):\n for method in [\n \"execute\",\n \"executemany\",\n \"fetch\",\n \"fetchval\",\n \"fetchrow\",\n ]:\n unwrap(asyncpg.Connection, method)\n\n async def _do_execute(self, func, instance, args, kwargs):\n\n exception = None\n params = getattr(instance, \"_params\", {})\n name = args[0] if args[0] else params.get(\"database\", \"postgresql\")\n\n try:\n name = name.split()[0]\n except IndexError:\n name = \"\"\n\n with self._tracer.start_as_current_span(\n name, kind=SpanKind.CLIENT\n ) as span:\n if span.is_recording():\n span_attributes = _hydrate_span_from_args(\n instance,\n args[0],\n args[1:] if self.capture_parameters else None,\n )\n for attribute, value in span_attributes.items():\n span.set_attribute(attribute, value)\n\n try:\n result = await func(*args, **kwargs)\n except Exception as exc: # pylint: disable=W0703\n exception = exc\n raise\n finally:\n if span.is_recording() and exception is not None:\n span.set_status(Status(StatusCode.ERROR))\n\n return result\n", "path": "instrumentation/opentelemetry-instrumentation-asyncpg/src/opentelemetry/instrumentation/asyncpg/__init__.py"}]}
| 1,980 | 211 |
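The golden diff in the record above keeps asyncpg span names short by retaining only the first whitespace-separated token of the query string (falling back to an empty string). A minimal standalone sketch of that truncation, assuming a hypothetical helper name `_truncate_span_name`; the patched code performs the same logic inline in `_do_execute`:

```python
# Minimal sketch of the span-name truncation from the patch above; the helper
# name and the sample strings are illustrative assumptions only.
def _truncate_span_name(name: str) -> str:
    """Keep only the first whitespace-separated token, or "" if there is none."""
    try:
        return name.split()[0]
    except IndexError:
        # name was empty or all whitespace
        return ""


if __name__ == "__main__":
    # A full query collapses to its leading keyword, so span names stay short.
    assert _truncate_span_name("SELECT 42;") == "SELECT"
    # The database-name fallback is a single token and passes through unchanged.
    assert _truncate_span_name("postgresql") == "postgresql"
    # Empty or all-whitespace input degrades to "" instead of raising.
    assert _truncate_span_name("   ") == ""
    print("span-name truncation matches the behaviour added by the golden diff")
```

Under this scheme a statement such as `SELECT * FROM users WHERE id = $1` produces a span named `SELECT`, which addresses the overly long trace names described in the issue.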
gh_patches_debug_26064
|
rasdani/github-patches
|
git_diff
|
holoviz__holoviews-671
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dependencies missing
Hi,
I think that the holoviews pip package does not correctly state its dependencies. These are packages that holoviews complained about not finding when I tried importing it:
- jinja2
- nbformat
- nbconvert
- matplotlib
After installing them manually via pip, I can import holoviews fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import sys, os
4 try:
5 from setuptools import setup
6 except ImportError:
7 from distutils.core import setup
8
9
10 setup_args = {}
11 install_requires = ['param>=1.3.2', 'numpy>=1.0']
12 extras_require={}
13
14 # Notebook dependencies of IPython 3
15 extras_require['notebook-dependencies'] = ['ipython', 'pyzmq', 'jinja2', 'tornado',
16 'jsonschema', 'ipython', 'pygments']
17 # IPython Notebook + matplotlib + Lancet
18 extras_require['recommended'] = (extras_require['notebook-dependencies']
19 + ['matplotlib', 'lancet-ioam'])
20 # Additional, useful third-party packages
21 extras_require['extras'] = (['pandas', 'seaborn', 'mpld3', 'bokeh']
22 + extras_require['recommended'])
23 # Everything including cyordereddict (optimization) and nosetests
24 extras_require['all'] = (extras_require['recommended']
25 + extras_require['extras']
26 + ['cyordereddict', 'nose'])
27
28 setup_args.update(dict(
29 name='holoviews',
30 version="1.4.3",
31 install_requires = install_requires,
32 extras_require = extras_require,
33 description='Stop plotting your data - annotate your data and let it visualize itself.',
34 long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',
35 author= "Jean-Luc Stevens and Philipp Rudiger",
36 author_email= "[email protected]",
37 maintainer= "IOAM",
38 maintainer_email= "[email protected]",
39 platforms=['Windows', 'Mac OS X', 'Linux'],
40 license='BSD',
41 url='http://ioam.github.com/holoviews/',
42 packages = ["holoviews",
43 "holoviews.core",
44 "holoviews.core.data",
45 "holoviews.element",
46 "holoviews.interface",
47 "holoviews.ipython",
48 "holoviews.operation",
49 "holoviews.plotting",
50 "holoviews.plotting.mpl",
51 "holoviews.plotting.bokeh",
52 "holoviews.plotting.widgets"],
53 package_data={'holoviews.ipython': ['*.html'],
54 'holoviews.plotting.mpl': ['*.mplstyle', '*.jinja', '*.js'],
55 'holoviews.plotting.bokeh': ['*.js', '*.css'],
56 'holoviews.plotting.widgets': ['*.jinja', '*.js', '*.css']},
57 classifiers = [
58 "License :: OSI Approved :: BSD License",
59 "Development Status :: 5 - Production/Stable",
60 "Programming Language :: Python :: 2.7",
61 "Programming Language :: Python :: 3.3",
62 "Programming Language :: Python :: 3.4",
63 "Operating System :: OS Independent",
64 "Intended Audience :: Science/Research",
65 "Intended Audience :: Developers",
66 "Natural Language :: English",
67 "Topic :: Scientific/Engineering",
68 "Topic :: Software Development :: Libraries"]
69 ))
70
71 def check_pseudo_package(path):
72 """
73 Verifies that a fake subpackage path for assets (notebooks, svgs,
74 pngs etc) both exists and is populated with files.
75 """
76 if not os.path.isdir(path):
77 raise Exception("Please make sure pseudo-package %s exists." % path)
78 else:
79 assets = os.listdir(path)
80 if len(assets) == 0:
81 raise Exception("Please make sure pseudo-package %s is populated." % path)
82
83
84 if __name__=="__main__":
85
86 if 'HOLOVIEWS_RELEASE' in os.environ:
87 # Make sure to create these directories and populate them before upload
88 setup_args['packages'] += ["holoviews.assets", 'holoviews.notebooks']
89
90 # Add unit tests
91 setup_args['packages'].append('holoviews.tests')
92
93 setup_args['package_data']['holoviews.assets'] = ['*.png', '*.svg', '*.rst']
94 setup_args['package_data']['holoviews.notebooks'] = ['*.ipynb', '*.npy']
95
96 if ('upload' in sys.argv) or ('sdist' in sys.argv):
97 check_pseudo_package(os.path.join('.', 'holoviews', 'tests'))
98 check_pseudo_package(os.path.join('.', 'holoviews', 'assets'))
99 check_pseudo_package(os.path.join('.', 'holoviews', 'notebooks'))
100
101 import holoviews
102 holoviews.__version__.verify(setup_args['version'])
103
104 setup(**setup_args)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
# Notebook dependencies of IPython 3
extras_require['notebook-dependencies'] = ['ipython', 'pyzmq', 'jinja2', 'tornado',
- 'jsonschema', 'ipython', 'pygments']
+ 'jsonschema', 'notebook', 'pygments']
# IPython Notebook + matplotlib + Lancet
extras_require['recommended'] = (extras_require['notebook-dependencies']
+ ['matplotlib', 'lancet-ioam'])
@@ -101,4 +101,22 @@
import holoviews
holoviews.__version__.verify(setup_args['version'])
+
+ if 'install' in sys.argv:
+ header = "HOLOVIEWS INSTALLATION INFORMATION"
+ bars = "="*len(header)
+
+ extras = '\n'.join('holoviews[%s]' % e for e in setup_args['extras_require'])
+
+ print("%s\n%s\n%s" % (bars, header, bars))
+
+ print("\nHoloViews supports the following installation types:\n")
+ print("%s\n" % extras)
+ print("Users should consider using one of these options.\n")
+ print("By default only a core installation is performed and ")
+ print("only the minimal set of dependencies are fetched.\n\n")
+ print("For more information please visit http://holoviews.org/install.html\n")
+ print(bars+'\n')
+
+
setup(**setup_args)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,7 @@\n \n # Notebook dependencies of IPython 3\n extras_require['notebook-dependencies'] = ['ipython', 'pyzmq', 'jinja2', 'tornado',\n- 'jsonschema', 'ipython', 'pygments']\n+ 'jsonschema', 'notebook', 'pygments']\n # IPython Notebook + matplotlib + Lancet\n extras_require['recommended'] = (extras_require['notebook-dependencies']\n + ['matplotlib', 'lancet-ioam'])\n@@ -101,4 +101,22 @@\n import holoviews\n holoviews.__version__.verify(setup_args['version'])\n \n+\n+ if 'install' in sys.argv:\n+ header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n+ bars = \"=\"*len(header)\n+\n+ extras = '\\n'.join('holoviews[%s]' % e for e in setup_args['extras_require'])\n+\n+ print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n+\n+ print(\"\\nHoloViews supports the following installation types:\\n\")\n+ print(\"%s\\n\" % extras)\n+ print(\"Users should consider using one of these options.\\n\")\n+ print(\"By default only a core installation is performed and \")\n+ print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n+ print(\"For more information please visit http://holoviews.org/install.html\\n\")\n+ print(bars+'\\n')\n+\n+\n setup(**setup_args)\n", "issue": "Dependencies missing\nHi,\n\nI think that the holoviews pip package does not correctly state its dependencies. These are packages that holoviews complained about not finding when I tried importing it:\n- jinja2 \n- nbformat \n- nbconvert \n- matplotlib\n\nAfter installing them manually via pip, I can import holoviews fine.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport sys, os\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nsetup_args = {}\ninstall_requires = ['param>=1.3.2', 'numpy>=1.0']\nextras_require={}\n\n# Notebook dependencies of IPython 3\nextras_require['notebook-dependencies'] = ['ipython', 'pyzmq', 'jinja2', 'tornado',\n 'jsonschema', 'ipython', 'pygments']\n# IPython Notebook + matplotlib + Lancet\nextras_require['recommended'] = (extras_require['notebook-dependencies']\n + ['matplotlib', 'lancet-ioam'])\n# Additional, useful third-party packages\nextras_require['extras'] = (['pandas', 'seaborn', 'mpld3', 'bokeh']\n + extras_require['recommended'])\n# Everything including cyordereddict (optimization) and nosetests\nextras_require['all'] = (extras_require['recommended']\n + extras_require['extras']\n + ['cyordereddict', 'nose'])\n\nsetup_args.update(dict(\n name='holoviews',\n version=\"1.4.3\",\n install_requires = install_requires,\n extras_require = extras_require,\n description='Stop plotting your data - annotate your data and let it visualize itself.',\n long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',\n author= \"Jean-Luc Stevens and Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer= \"IOAM\",\n maintainer_email= \"[email protected]\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='http://ioam.github.com/holoviews/',\n packages = [\"holoviews\",\n \"holoviews.core\",\n \"holoviews.core.data\",\n \"holoviews.element\",\n \"holoviews.interface\",\n \"holoviews.ipython\",\n \"holoviews.operation\",\n \"holoviews.plotting\",\n \"holoviews.plotting.mpl\",\n \"holoviews.plotting.bokeh\",\n \"holoviews.plotting.widgets\"],\n package_data={'holoviews.ipython': ['*.html'],\n 'holoviews.plotting.mpl': ['*.mplstyle', '*.jinja', '*.js'],\n 'holoviews.plotting.bokeh': ['*.js', 
'*.css'],\n 'holoviews.plotting.widgets': ['*.jinja', '*.js', '*.css']},\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"]\n))\n\ndef check_pseudo_package(path):\n \"\"\"\n Verifies that a fake subpackage path for assets (notebooks, svgs,\n pngs etc) both exists and is populated with files.\n \"\"\"\n if not os.path.isdir(path):\n raise Exception(\"Please make sure pseudo-package %s exists.\" % path)\n else:\n assets = os.listdir(path)\n if len(assets) == 0:\n raise Exception(\"Please make sure pseudo-package %s is populated.\" % path)\n\n\nif __name__==\"__main__\":\n\n if 'HOLOVIEWS_RELEASE' in os.environ:\n # Make sure to create these directories and populate them before upload\n setup_args['packages'] += [\"holoviews.assets\", 'holoviews.notebooks']\n\n # Add unit tests\n setup_args['packages'].append('holoviews.tests')\n\n setup_args['package_data']['holoviews.assets'] = ['*.png', '*.svg', '*.rst']\n setup_args['package_data']['holoviews.notebooks'] = ['*.ipynb', '*.npy']\n\n if ('upload' in sys.argv) or ('sdist' in sys.argv):\n check_pseudo_package(os.path.join('.', 'holoviews', 'tests'))\n check_pseudo_package(os.path.join('.', 'holoviews', 'assets'))\n check_pseudo_package(os.path.join('.', 'holoviews', 'notebooks'))\n\n import holoviews\n holoviews.__version__.verify(setup_args['version'])\n\n setup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport sys, os\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nsetup_args = {}\ninstall_requires = ['param>=1.3.2', 'numpy>=1.0']\nextras_require={}\n\n# Notebook dependencies of IPython 3\nextras_require['notebook-dependencies'] = ['ipython', 'pyzmq', 'jinja2', 'tornado',\n 'jsonschema', 'notebook', 'pygments']\n# IPython Notebook + matplotlib + Lancet\nextras_require['recommended'] = (extras_require['notebook-dependencies']\n + ['matplotlib', 'lancet-ioam'])\n# Additional, useful third-party packages\nextras_require['extras'] = (['pandas', 'seaborn', 'mpld3', 'bokeh']\n + extras_require['recommended'])\n# Everything including cyordereddict (optimization) and nosetests\nextras_require['all'] = (extras_require['recommended']\n + extras_require['extras']\n + ['cyordereddict', 'nose'])\n\nsetup_args.update(dict(\n name='holoviews',\n version=\"1.4.3\",\n install_requires = install_requires,\n extras_require = extras_require,\n description='Stop plotting your data - annotate your data and let it visualize itself.',\n long_description=open('README.rst').read() if os.path.isfile('README.rst') else 'Consult README.rst',\n author= \"Jean-Luc Stevens and Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer= \"IOAM\",\n maintainer_email= \"[email protected]\",\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='http://ioam.github.com/holoviews/',\n packages = [\"holoviews\",\n \"holoviews.core\",\n \"holoviews.core.data\",\n \"holoviews.element\",\n \"holoviews.interface\",\n \"holoviews.ipython\",\n \"holoviews.operation\",\n \"holoviews.plotting\",\n 
\"holoviews.plotting.mpl\",\n \"holoviews.plotting.bokeh\",\n \"holoviews.plotting.widgets\"],\n package_data={'holoviews.ipython': ['*.html'],\n 'holoviews.plotting.mpl': ['*.mplstyle', '*.jinja', '*.js'],\n 'holoviews.plotting.bokeh': ['*.js', '*.css'],\n 'holoviews.plotting.widgets': ['*.jinja', '*.js', '*.css']},\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"]\n))\n\ndef check_pseudo_package(path):\n \"\"\"\n Verifies that a fake subpackage path for assets (notebooks, svgs,\n pngs etc) both exists and is populated with files.\n \"\"\"\n if not os.path.isdir(path):\n raise Exception(\"Please make sure pseudo-package %s exists.\" % path)\n else:\n assets = os.listdir(path)\n if len(assets) == 0:\n raise Exception(\"Please make sure pseudo-package %s is populated.\" % path)\n\n\nif __name__==\"__main__\":\n\n if 'HOLOVIEWS_RELEASE' in os.environ:\n # Make sure to create these directories and populate them before upload\n setup_args['packages'] += [\"holoviews.assets\", 'holoviews.notebooks']\n\n # Add unit tests\n setup_args['packages'].append('holoviews.tests')\n\n setup_args['package_data']['holoviews.assets'] = ['*.png', '*.svg', '*.rst']\n setup_args['package_data']['holoviews.notebooks'] = ['*.ipynb', '*.npy']\n\n if ('upload' in sys.argv) or ('sdist' in sys.argv):\n check_pseudo_package(os.path.join('.', 'holoviews', 'tests'))\n check_pseudo_package(os.path.join('.', 'holoviews', 'assets'))\n check_pseudo_package(os.path.join('.', 'holoviews', 'notebooks'))\n\n import holoviews\n holoviews.__version__.verify(setup_args['version'])\n\n\n if 'install' in sys.argv:\n header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n bars = \"=\"*len(header)\n\n extras = '\\n'.join('holoviews[%s]' % e for e in setup_args['extras_require'])\n\n print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n\n print(\"\\nHoloViews supports the following installation types:\\n\")\n print(\"%s\\n\" % extras)\n print(\"Users should consider using one of these options.\\n\")\n print(\"By default only a core installation is performed and \")\n print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n print(\"For more information please visit http://holoviews.org/install.html\\n\")\n print(bars+'\\n')\n\n\n setup(**setup_args)\n", "path": "setup.py"}]}
| 1,559 | 362 |
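The holoviews patch above corrects the notebook extras (replacing the duplicated `ipython` entry with `notebook`) and prints an installation banner listing the available extras. A rough sketch, using only the package lists already shown in the record, of what those extras groups expand to and which pip targets they correspond to:

```python
# Illustrative only: rebuilds the extras_require groups from the patched
# setup.py to show what each `pip install 'holoviews[<extra>]'` target covers.
extras_require = {
    "notebook-dependencies": ["ipython", "pyzmq", "jinja2", "tornado",
                              "jsonschema", "notebook", "pygments"],
}
extras_require["recommended"] = (extras_require["notebook-dependencies"]
                                 + ["matplotlib", "lancet-ioam"])
extras_require["extras"] = (["pandas", "seaborn", "mpld3", "bokeh"]
                            + extras_require["recommended"])
extras_require["all"] = (extras_require["recommended"]
                         + extras_require["extras"]
                         + ["cyordereddict", "nose"])

if __name__ == "__main__":
    header = "HOLOVIEWS INSTALLATION INFORMATION"
    bars = "=" * len(header)
    print("%s\n%s\n%s\n" % (bars, header, bars))
    for extra, packages in extras_require.items():
        print("pip install 'holoviews[%s]'  # %d packages" % (extra, len(packages)))
```

The base install still declares only `param` and `numpy`, so the banner is purely informational: users who want the notebook and plotting stack are expected to pick one of the extras, which is consistent with the issue report that several packages had to be installed by hand.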
gh_patches_debug_14663
|
rasdani/github-patches
|
git_diff
|
pytorch__text-1889
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error running unit tests when building with setup.py install
## 🐛 Bug
**Describe the bug** A clear and concise description of what the bug is.
When building with python setup.py install, running pytest from either the project root directory or the test/ directory causes the error `ImportError: torchtext C++ Extension is not found`. This can be worked-around by renaming the torchtext subdirectory, or by instead using python setup.py develop like the CI does (see .circleci/unittest/linux/scripts/install.sh#L36).
**To Reproduce** Steps to reproduce the behavior:
1. Follow the build steps like normal, running python setup.py install
2. Run pytest
3. Every test fails with the error `ImportError: torchtext C++ Extension is not found`.
**Expected behavior** A clear and concise description of what you expected to happen.
The tests should succeed even when installing with setup.py install, either running pytest from the project root or the test/ directory (this is the case in pytorch) without having to rename the torchtext subdirectory.
**Screenshots** If applicable, add screenshots to help explain your problem.
**Environment**
Please copy and paste the output from our
[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py) (or
fill out the checklist below manually).
You can get the script and run it with:
```
wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py
# For security purposes, please check the contents of collect_env.py before running it.
python collect_env.py
python -c "import torchtext; print(\"torchtext version is \", torchtext.__version__)"
```
- PyTorch Version (e.g., 1.0): 1.12
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, source): Compiled from source
- Build command you used (if compiling from source): python3 ./setup.py install
- Python version: 3.7.13
- CUDA/cuDNN version: ROCm version 5.2
- GPU models and configuration: N/A
- Any other relevant information:
**Additional context** Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/setup_helpers/extension.py`
Content:
```
1 import distutils.sysconfig
2 import os
3 import platform
4 import subprocess
5 from pathlib import Path
6
7 import torch
8 from setuptools import Extension
9 from setuptools.command.build_ext import build_ext
10
11
12 __all__ = [
13 "get_ext_modules",
14 "CMakeBuild",
15 ]
16
17
18 _LIBTORCHTEXT_NAME = "torchtext.lib.libtorchtext"
19 _EXT_NAME = "torchtext._torchtext"
20 _THIS_DIR = Path(__file__).parent.resolve()
21 _ROOT_DIR = _THIS_DIR.parent.parent.resolve()
22
23
24 def get_ext_modules():
25 modules = [
26 Extension(name=_LIBTORCHTEXT_NAME, sources=[]),
27 Extension(name=_EXT_NAME, sources=[]),
28 ]
29 return modules
30
31
32 # Based off of
33 # https://github.com/pybind/cmake_example/blob/580c5fd29d4651db99d8874714b07c0c49a53f8a/setup.py
34
35
36 class CMakeBuild(build_ext):
37 def run(self):
38 try:
39 subprocess.check_output(["cmake", "--version"])
40 except OSError:
41 raise RuntimeError("CMake is not available.") from None
42 super().run()
43
44 def build_extension(self, ext):
45 # Since two library files (libtorchaudio and _torchaudio) need to be
46 # recognized by setuptools, we instantiate `Extension` twice. (see `get_ext_modules`)
47 # This leads to the situation where this `build_extension` method is called twice.
48 # However, the following `cmake` command will build all of them at the same time,
49 # so, we do not need to perform `cmake` twice.
50 # Therefore we call `cmake` only for `torchaudio._torchaudio`.
51 if ext.name != "torchtext._torchtext":
52 return
53
54 extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
55
56 # required for auto-detection of auxiliary "native" libs
57 if not extdir.endswith(os.path.sep):
58 extdir += os.path.sep
59
60 cfg = "Debug" if self.debug else "Release"
61
62 cmake_args = [
63 f"-DCMAKE_BUILD_TYPE={cfg}",
64 f"-DCMAKE_PREFIX_PATH={torch.utils.cmake_prefix_path}",
65 f"-DCMAKE_INSTALL_PREFIX={extdir}",
66 "-DCMAKE_VERBOSE_MAKEFILE=ON",
67 f"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}",
68 f"-DTORCH_INSTALL_PREFIX:STRING={os.path.dirname(torch.__file__)}",
69 "-DBUILD_TORCHTEXT_PYTHON_EXTENSION:BOOL=ON",
70 "-DRE2_BUILD_TESTING:BOOL=OFF",
71 "-DBUILD_TESTING:BOOL=OFF",
72 "-DBUILD_SHARED_LIBS=OFF",
73 "-DCMAKE_POLICY_DEFAULT_CMP0063=NEW",
74 "-DSPM_ENABLE_SHARED=OFF",
75 ]
76 build_args = ["--target", "install"]
77
78 # Default to Ninja
79 if "CMAKE_GENERATOR" not in os.environ or platform.system() == "Windows":
80 cmake_args += ["-GNinja"]
81 if platform.system() == "Windows":
82 import sys
83
84 python_version = sys.version_info
85 cmake_args += [
86 "-DCMAKE_C_COMPILER=cl",
87 "-DCMAKE_CXX_COMPILER=cl",
88 f"-DPYTHON_VERSION={python_version.major}.{python_version.minor}",
89 ]
90
91 # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
92 # across all generators.
93 if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
94 # self.parallel is a Python 3 only way to set parallel jobs by hand
95 # using -j in the build_ext call, not supported by pip or PyPA-build.
96 if hasattr(self, "parallel") and self.parallel:
97 # CMake 3.12+ only.
98 build_args += ["-j{}".format(self.parallel)]
99
100 if not os.path.exists(self.build_temp):
101 os.makedirs(self.build_temp)
102
103 subprocess.check_call(["cmake", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)
104 subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)
105
106 def get_ext_filename(self, fullname):
107 ext_filename = super().get_ext_filename(fullname)
108 ext_filename_parts = ext_filename.split(".")
109 without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
110 ext_filename = ".".join(without_abi)
111 return ext_filename
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/setup_helpers/extension.py b/tools/setup_helpers/extension.py
--- a/tools/setup_helpers/extension.py
+++ b/tools/setup_helpers/extension.py
@@ -21,6 +21,10 @@
_ROOT_DIR = _THIS_DIR.parent.parent.resolve()
+def _get_cxx11_abi():
+ return "-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch.compiled_with_cxx11_abi()))
+
+
def get_ext_modules():
modules = [
Extension(name=_LIBTORCHTEXT_NAME, sources=[]),
@@ -72,6 +76,7 @@
"-DBUILD_SHARED_LIBS=OFF",
"-DCMAKE_POLICY_DEFAULT_CMP0063=NEW",
"-DSPM_ENABLE_SHARED=OFF",
+ f"-DTORCH_COMPILED_WITH_CXX_ABI={_get_cxx11_abi()}",
]
build_args = ["--target", "install"]
|
{"golden_diff": "diff --git a/tools/setup_helpers/extension.py b/tools/setup_helpers/extension.py\n--- a/tools/setup_helpers/extension.py\n+++ b/tools/setup_helpers/extension.py\n@@ -21,6 +21,10 @@\n _ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n \n \n+def _get_cxx11_abi():\n+ return \"-D_GLIBCXX_USE_CXX11_ABI=\" + str(int(torch.compiled_with_cxx11_abi()))\n+\n+\n def get_ext_modules():\n modules = [\n Extension(name=_LIBTORCHTEXT_NAME, sources=[]),\n@@ -72,6 +76,7 @@\n \"-DBUILD_SHARED_LIBS=OFF\",\n \"-DCMAKE_POLICY_DEFAULT_CMP0063=NEW\",\n \"-DSPM_ENABLE_SHARED=OFF\",\n+ f\"-DTORCH_COMPILED_WITH_CXX_ABI={_get_cxx11_abi()}\",\n ]\n build_args = [\"--target\", \"install\"]\n", "issue": "Error running unit tests when building with setup.py install\n## \ud83d\udc1b Bug\r\n\r\n**Describe the bug** A clear and concise description of what the bug is.\r\nWhen building with python setup.py install, running pytest from either the project root directory or the test/ directory causes the error `ImportError: torchtext C++ Extension is not found`. This can be worked-around by renaming the torchtext subdirectory, or by instead using python setup.py develop like the CI does (see .circleci/unittest/linux/scripts/install.sh#L36).\r\n\r\n**To Reproduce** Steps to reproduce the behavior:\r\n\r\n1. Follow the build steps like normal, running python setup.py install\r\n2. Run pytest\r\n3. Every test fails with the error `ImportError: torchtext C++ Extension is not found`.\r\n\r\n**Expected behavior** A clear and concise description of what you expected to happen.\r\n\r\nThe tests should succeed even when installing with setup.py install, either running pytest from the project root or the test/ directory (this is the case in pytorch) without having to rename the torchtext subdirectory.\r\n\r\n**Screenshots** If applicable, add screenshots to help explain your problem.\r\n\r\n**Environment**\r\n\r\nPlease copy and paste the output from our\r\n[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py) (or\r\nfill out the checklist below manually).\r\n\r\nYou can get the script and run it with:\r\n\r\n```\r\nwget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py\r\n# For security purposes, please check the contents of collect_env.py before running it.\r\npython collect_env.py\r\npython -c \"import torchtext; print(\\\"torchtext version is \\\", torchtext.__version__)\"\r\n```\r\n\r\n- PyTorch Version (e.g., 1.0): 1.12\r\n- OS (e.g., Linux): Linux\r\n- How you installed PyTorch (`conda`, `pip`, source): Compiled from source\r\n- Build command you used (if compiling from source): python3 ./setup.py install\r\n- Python version: 3.7.13\r\n- CUDA/cuDNN version: ROCm version 5.2\r\n- GPU models and configuration: N/A\r\n- Any other relevant information:\r\n\r\n**Additional context** Add any other context about the problem here.\r\n\n", "before_files": [{"content": "import distutils.sysconfig\nimport os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nimport torch\nfrom setuptools import Extension\nfrom setuptools.command.build_ext import build_ext\n\n\n__all__ = [\n \"get_ext_modules\",\n \"CMakeBuild\",\n]\n\n\n_LIBTORCHTEXT_NAME = \"torchtext.lib.libtorchtext\"\n_EXT_NAME = \"torchtext._torchtext\"\n_THIS_DIR = Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n\n\ndef get_ext_modules():\n modules = [\n Extension(name=_LIBTORCHTEXT_NAME, sources=[]),\n Extension(name=_EXT_NAME, 
sources=[]),\n ]\n return modules\n\n\n# Based off of\n# https://github.com/pybind/cmake_example/blob/580c5fd29d4651db99d8874714b07c0c49a53f8a/setup.py\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n subprocess.check_output([\"cmake\", \"--version\"])\n except OSError:\n raise RuntimeError(\"CMake is not available.\") from None\n super().run()\n\n def build_extension(self, ext):\n # Since two library files (libtorchaudio and _torchaudio) need to be\n # recognized by setuptools, we instantiate `Extension` twice. (see `get_ext_modules`)\n # This leads to the situation where this `build_extension` method is called twice.\n # However, the following `cmake` command will build all of them at the same time,\n # so, we do not need to perform `cmake` twice.\n # Therefore we call `cmake` only for `torchaudio._torchaudio`.\n if ext.name != \"torchtext._torchtext\":\n return\n\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n # required for auto-detection of auxiliary \"native\" libs\n if not extdir.endswith(os.path.sep):\n extdir += os.path.sep\n\n cfg = \"Debug\" if self.debug else \"Release\"\n\n cmake_args = [\n f\"-DCMAKE_BUILD_TYPE={cfg}\",\n f\"-DCMAKE_PREFIX_PATH={torch.utils.cmake_prefix_path}\",\n f\"-DCMAKE_INSTALL_PREFIX={extdir}\",\n \"-DCMAKE_VERBOSE_MAKEFILE=ON\",\n f\"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}\",\n f\"-DTORCH_INSTALL_PREFIX:STRING={os.path.dirname(torch.__file__)}\",\n \"-DBUILD_TORCHTEXT_PYTHON_EXTENSION:BOOL=ON\",\n \"-DRE2_BUILD_TESTING:BOOL=OFF\",\n \"-DBUILD_TESTING:BOOL=OFF\",\n \"-DBUILD_SHARED_LIBS=OFF\",\n \"-DCMAKE_POLICY_DEFAULT_CMP0063=NEW\",\n \"-DSPM_ENABLE_SHARED=OFF\",\n ]\n build_args = [\"--target\", \"install\"]\n\n # Default to Ninja\n if \"CMAKE_GENERATOR\" not in os.environ or platform.system() == \"Windows\":\n cmake_args += [\"-GNinja\"]\n if platform.system() == \"Windows\":\n import sys\n\n python_version = sys.version_info\n cmake_args += [\n \"-DCMAKE_C_COMPILER=cl\",\n \"-DCMAKE_CXX_COMPILER=cl\",\n f\"-DPYTHON_VERSION={python_version.major}.{python_version.minor}\",\n ]\n\n # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level\n # across all generators.\n if \"CMAKE_BUILD_PARALLEL_LEVEL\" not in os.environ:\n # self.parallel is a Python 3 only way to set parallel jobs by hand\n # using -j in the build_ext call, not supported by pip or PyPA-build.\n if hasattr(self, \"parallel\") and self.parallel:\n # CMake 3.12+ only.\n build_args += [\"-j{}\".format(self.parallel)]\n\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n\n subprocess.check_call([\"cmake\", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)\n subprocess.check_call([\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp)\n\n def get_ext_filename(self, fullname):\n ext_filename = super().get_ext_filename(fullname)\n ext_filename_parts = ext_filename.split(\".\")\n without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]\n ext_filename = \".\".join(without_abi)\n return ext_filename\n", "path": "tools/setup_helpers/extension.py"}], "after_files": [{"content": "import distutils.sysconfig\nimport os\nimport platform\nimport subprocess\nfrom pathlib import Path\n\nimport torch\nfrom setuptools import Extension\nfrom setuptools.command.build_ext import build_ext\n\n\n__all__ = [\n \"get_ext_modules\",\n \"CMakeBuild\",\n]\n\n\n_LIBTORCHTEXT_NAME = \"torchtext.lib.libtorchtext\"\n_EXT_NAME = \"torchtext._torchtext\"\n_THIS_DIR = 
Path(__file__).parent.resolve()\n_ROOT_DIR = _THIS_DIR.parent.parent.resolve()\n\n\ndef _get_cxx11_abi():\n return \"-D_GLIBCXX_USE_CXX11_ABI=\" + str(int(torch.compiled_with_cxx11_abi()))\n\n\ndef get_ext_modules():\n modules = [\n Extension(name=_LIBTORCHTEXT_NAME, sources=[]),\n Extension(name=_EXT_NAME, sources=[]),\n ]\n return modules\n\n\n# Based off of\n# https://github.com/pybind/cmake_example/blob/580c5fd29d4651db99d8874714b07c0c49a53f8a/setup.py\n\n\nclass CMakeBuild(build_ext):\n def run(self):\n try:\n subprocess.check_output([\"cmake\", \"--version\"])\n except OSError:\n raise RuntimeError(\"CMake is not available.\") from None\n super().run()\n\n def build_extension(self, ext):\n # Since two library files (libtorchaudio and _torchaudio) need to be\n # recognized by setuptools, we instantiate `Extension` twice. (see `get_ext_modules`)\n # This leads to the situation where this `build_extension` method is called twice.\n # However, the following `cmake` command will build all of them at the same time,\n # so, we do not need to perform `cmake` twice.\n # Therefore we call `cmake` only for `torchaudio._torchaudio`.\n if ext.name != \"torchtext._torchtext\":\n return\n\n extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n\n # required for auto-detection of auxiliary \"native\" libs\n if not extdir.endswith(os.path.sep):\n extdir += os.path.sep\n\n cfg = \"Debug\" if self.debug else \"Release\"\n\n cmake_args = [\n f\"-DCMAKE_BUILD_TYPE={cfg}\",\n f\"-DCMAKE_PREFIX_PATH={torch.utils.cmake_prefix_path}\",\n f\"-DCMAKE_INSTALL_PREFIX={extdir}\",\n \"-DCMAKE_VERBOSE_MAKEFILE=ON\",\n f\"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}\",\n f\"-DTORCH_INSTALL_PREFIX:STRING={os.path.dirname(torch.__file__)}\",\n \"-DBUILD_TORCHTEXT_PYTHON_EXTENSION:BOOL=ON\",\n \"-DRE2_BUILD_TESTING:BOOL=OFF\",\n \"-DBUILD_TESTING:BOOL=OFF\",\n \"-DBUILD_SHARED_LIBS=OFF\",\n \"-DCMAKE_POLICY_DEFAULT_CMP0063=NEW\",\n \"-DSPM_ENABLE_SHARED=OFF\",\n f\"-DTORCH_COMPILED_WITH_CXX_ABI={_get_cxx11_abi()}\",\n ]\n build_args = [\"--target\", \"install\"]\n\n # Default to Ninja\n if \"CMAKE_GENERATOR\" not in os.environ or platform.system() == \"Windows\":\n cmake_args += [\"-GNinja\"]\n if platform.system() == \"Windows\":\n import sys\n\n python_version = sys.version_info\n cmake_args += [\n \"-DCMAKE_C_COMPILER=cl\",\n \"-DCMAKE_CXX_COMPILER=cl\",\n f\"-DPYTHON_VERSION={python_version.major}.{python_version.minor}\",\n ]\n\n # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level\n # across all generators.\n if \"CMAKE_BUILD_PARALLEL_LEVEL\" not in os.environ:\n # self.parallel is a Python 3 only way to set parallel jobs by hand\n # using -j in the build_ext call, not supported by pip or PyPA-build.\n if hasattr(self, \"parallel\") and self.parallel:\n # CMake 3.12+ only.\n build_args += [\"-j{}\".format(self.parallel)]\n\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n\n subprocess.check_call([\"cmake\", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)\n subprocess.check_call([\"cmake\", \"--build\", \".\"] + build_args, cwd=self.build_temp)\n\n def get_ext_filename(self, fullname):\n ext_filename = super().get_ext_filename(fullname)\n ext_filename_parts = ext_filename.split(\".\")\n without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]\n ext_filename = \".\".join(without_abi)\n return ext_filename\n", "path": "tools/setup_helpers/extension.py"}]}
| 1,957 | 209 |
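The torchtext patch above forwards the C++11 ABI setting of the installed torch binary into the CMake configuration, with the intent that the extension built by `setup.py install` uses the same ABI torch was compiled with. A minimal sketch of the helper on its own, runnable wherever `torch` is importable; the rest of the CMake argument list is omitted:

```python
# Sketch of the ABI helper added by the patch; requires an installed torch.
import torch


def _get_cxx11_abi() -> str:
    # torch.compiled_with_cxx11_abi() reports whether the torch binary was
    # built with the new libstdc++ ABI; the torchtext extension is configured
    # with the same _GLIBCXX_USE_CXX11_ABI value so the two stay compatible.
    return "-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch.compiled_with_cxx11_abi()))


if __name__ == "__main__":
    # This string is appended to cmake_args in CMakeBuild.build_extension().
    print(f"-DTORCH_COMPILED_WITH_CXX_ABI={_get_cxx11_abi()}")
```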
gh_patches_debug_38918
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-1933
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scrapy 1.1.0 RC3 - exception thrown with invalid ssl certificate
Hello,
I am crawling sometimes websites with an invalid ssl certificate. For example, Scrapy 1.1.0 RC3 fails to open when I do:
> scrapy shell https://www.directoriosanitario.com/directorio
> or
> scrapy shell https://saobinv.5go.cc/top/
and throws the following exception:
> twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure service_identity.exceptions.VerificationError: VerificationError(errors=[DNSMismatch(mismatched_id=DNS_ID(hostname=b'www.directoriosanitario.com'))])>]
I tried it with Scrapy 1.0.5 on python 2.7 and the spider opens but warns with:
> AttributeError: 'NoneType' object has no attribute 'failVerification'
Is there a way to force the spider to open with Scrapy 1.1.0 RC3?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/core/downloader/tls.py`
Content:
```
1 from OpenSSL import SSL
2
3
4 METHOD_SSLv3 = 'SSLv3'
5 METHOD_TLS = 'TLS'
6 METHOD_TLSv10 = 'TLSv1.0'
7 METHOD_TLSv11 = 'TLSv1.1'
8 METHOD_TLSv12 = 'TLSv1.2'
9
10 openssl_methods = {
11 METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)
12 METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)
13 METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only
14 METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
15 METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
16 }
17
```
Path: `scrapy/core/downloader/contextfactory.py`
Content:
```
1 from OpenSSL import SSL
2 from twisted.internet.ssl import ClientContextFactory
3
4 try:
5
6 from zope.interface.declarations import implementer
7
8 # the following should be available from Twisted 14.0.0
9 from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust
10 from twisted.internet._sslverify import ClientTLSOptions
11 from twisted.web.client import BrowserLikePolicyForHTTPS
12 from twisted.web.iweb import IPolicyForHTTPS
13
14 @implementer(IPolicyForHTTPS)
15 class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
16 """
17 Non-peer-certificate verifying HTTPS context factory
18
19 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
20 which allows TLS protocol negotiation
21
22 'A TLS/SSL connection established with [this method] may
23 understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'
24 """
25
26 def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):
27 super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)
28 self._ssl_method = method
29
30 def getCertificateOptions(self):
31 # setting verify=True will require you to provide CAs
32 # to verify against; in other words: it's not that simple
33
34 # backward-compatible SSL/TLS method:
35 #
36 # * this will respect `method` attribute in often recommended
37 # `ScrapyClientContextFactory` subclass
38 # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)
39 #
40 # * getattr() for `_ssl_method` attribute for context factories
41 # not calling super(..., self).__init__
42 return CertificateOptions(verify=False,
43 method=getattr(self, 'method',
44 getattr(self, '_ssl_method', None)))
45
46 # kept for old-style HTTP/1.0 downloader context twisted calls,
47 # e.g. connectSSL()
48 def getContext(self, hostname=None, port=None):
49 return self.getCertificateOptions().getContext()
50
51 def creatorForNetloc(self, hostname, port):
52 return ClientTLSOptions(hostname.decode("ascii"), self.getContext())
53
54
55 @implementer(IPolicyForHTTPS)
56 class BrowserLikeContextFactory(ScrapyClientContextFactory):
57 """
58 Twisted-recommended context factory for web clients.
59
60 Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:
61 "The default is to use a BrowserLikePolicyForHTTPS,
62 so unless you have special requirements you can leave this as-is."
63
64 creatorForNetloc() is the same as BrowserLikePolicyForHTTPS
65 except this context factory allows setting the TLS/SSL method to use.
66
67 Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
68 which allows TLS protocol negotiation.
69 """
70 def creatorForNetloc(self, hostname, port):
71
72 # trustRoot set to platformTrust() will use the platform's root CAs.
73 #
74 # This means that a website like https://www.cacert.org will be rejected
75 # by default, since CAcert.org CA certificate is seldom shipped.
76 return optionsForClientTLS(hostname.decode("ascii"),
77 trustRoot=platformTrust(),
78 extraCertificateOptions={
79 'method': self._ssl_method,
80 })
81
82 except ImportError:
83
84 class ScrapyClientContextFactory(ClientContextFactory):
85 "A SSL context factory which is more permissive against SSL bugs."
86 # see https://github.com/scrapy/scrapy/issues/82
87 # and https://github.com/scrapy/scrapy/issues/26
88 # and https://github.com/scrapy/scrapy/issues/981
89
90 def __init__(self, method=SSL.SSLv23_METHOD):
91 self.method = method
92
93 def getContext(self, hostname=None, port=None):
94 ctx = ClientContextFactory.getContext(self)
95 # Enable all workarounds to SSL bugs as documented by
96 # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
97 ctx.set_options(SSL.OP_ALL)
98 return ctx
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py
--- a/scrapy/core/downloader/contextfactory.py
+++ b/scrapy/core/downloader/contextfactory.py
@@ -6,11 +6,16 @@
from zope.interface.declarations import implementer
# the following should be available from Twisted 14.0.0
- from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust
- from twisted.internet._sslverify import ClientTLSOptions
+ from twisted.internet.ssl import (optionsForClientTLS,
+ CertificateOptions,
+ platformTrust)
+
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
+ from scrapy.core.downloader.tls import ScrapyClientTLSOptions
+
+
@implementer(IPolicyForHTTPS)
class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
"""
@@ -49,7 +54,7 @@
return self.getCertificateOptions().getContext()
def creatorForNetloc(self, hostname, port):
- return ClientTLSOptions(hostname.decode("ascii"), self.getContext())
+ return ScrapyClientTLSOptions(hostname.decode("ascii"), self.getContext())
@implementer(IPolicyForHTTPS)
diff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py
--- a/scrapy/core/downloader/tls.py
+++ b/scrapy/core/downloader/tls.py
@@ -1,6 +1,9 @@
+import logging
from OpenSSL import SSL
+logger = logging.getLogger(__name__)
+
METHOD_SSLv3 = 'SSLv3'
METHOD_TLS = 'TLS'
METHOD_TLSv10 = 'TLSv1.0'
@@ -14,3 +17,36 @@
METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only
METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only
}
+
+# ClientTLSOptions requires a recent-enough version of Twisted
+try:
+
+ # taken from twisted/twisted/internet/_sslverify.py
+ try:
+ from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START
+ except ImportError:
+ SSL_CB_HANDSHAKE_START = 0x10
+ SSL_CB_HANDSHAKE_DONE = 0x20
+
+ from twisted.internet._sslverify import (ClientTLSOptions,
+ _maybeSetHostNameIndication,
+ verifyHostname,
+ VerificationError)
+
+ class ScrapyClientTLSOptions(ClientTLSOptions):
+ # same as Twisted's ClientTLSOptions,
+ # except that VerificationError is caught
+ # and doesn't close the connection
+ def _identityVerifyingInfoCallback(self, connection, where, ret):
+ if where & SSL_CB_HANDSHAKE_START:
+ _maybeSetHostNameIndication(connection, self._hostnameBytes)
+ elif where & SSL_CB_HANDSHAKE_DONE:
+ try:
+ verifyHostname(connection, self._hostnameASCII)
+ except VerificationError as e:
+ logger.warning(e)
+
+except ImportError:
+ # ImportError should not matter for older Twisted versions
+ # as the above is not used in the fallback ScrapyClientContextFactory
+ pass
|
{"golden_diff": "diff --git a/scrapy/core/downloader/contextfactory.py b/scrapy/core/downloader/contextfactory.py\n--- a/scrapy/core/downloader/contextfactory.py\n+++ b/scrapy/core/downloader/contextfactory.py\n@@ -6,11 +6,16 @@\n from zope.interface.declarations import implementer\n \n # the following should be available from Twisted 14.0.0\n- from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust\n- from twisted.internet._sslverify import ClientTLSOptions\n+ from twisted.internet.ssl import (optionsForClientTLS,\n+ CertificateOptions,\n+ platformTrust)\n+\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n \n+ from scrapy.core.downloader.tls import ScrapyClientTLSOptions\n+\n+\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n@@ -49,7 +54,7 @@\n return self.getCertificateOptions().getContext()\n \n def creatorForNetloc(self, hostname, port):\n- return ClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n+ return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n \n \n @implementer(IPolicyForHTTPS)\ndiff --git a/scrapy/core/downloader/tls.py b/scrapy/core/downloader/tls.py\n--- a/scrapy/core/downloader/tls.py\n+++ b/scrapy/core/downloader/tls.py\n@@ -1,6 +1,9 @@\n+import logging\n from OpenSSL import SSL\n \n \n+logger = logging.getLogger(__name__)\n+\n METHOD_SSLv3 = 'SSLv3'\n METHOD_TLS = 'TLS'\n METHOD_TLSv10 = 'TLSv1.0'\n@@ -14,3 +17,36 @@\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n }\n+\n+# ClientTLSOptions requires a recent-enough version of Twisted\n+try:\n+\n+ # taken from twisted/twisted/internet/_sslverify.py\n+ try:\n+ from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n+ except ImportError:\n+ SSL_CB_HANDSHAKE_START = 0x10\n+ SSL_CB_HANDSHAKE_DONE = 0x20\n+\n+ from twisted.internet._sslverify import (ClientTLSOptions,\n+ _maybeSetHostNameIndication,\n+ verifyHostname,\n+ VerificationError)\n+\n+ class ScrapyClientTLSOptions(ClientTLSOptions):\n+ # same as Twisted's ClientTLSOptions,\n+ # except that VerificationError is caught\n+ # and doesn't close the connection\n+ def _identityVerifyingInfoCallback(self, connection, where, ret):\n+ if where & SSL_CB_HANDSHAKE_START:\n+ _maybeSetHostNameIndication(connection, self._hostnameBytes)\n+ elif where & SSL_CB_HANDSHAKE_DONE:\n+ try:\n+ verifyHostname(connection, self._hostnameASCII)\n+ except VerificationError as e:\n+ logger.warning(e)\n+\n+except ImportError:\n+ # ImportError should not matter for older Twisted versions\n+ # as the above is not used in the fallback ScrapyClientContextFactory\n+ pass\n", "issue": "Scrapy 1.1.0 RC3 - exception thrown with invalid ssl certificate\nHello,\n\nI am crawling sometimes websites with an invalid ssl certificate. 
For example, Scrapy 1.1.0 RC3 fails to open when I do:\n\n> scrapy shell https://www.directoriosanitario.com/directorio\n> or\n> scrapy shell https://saobinv.5go.cc/top/\n\nand throws the following exception:\n\n> twisted.web._newclient.ResponseNeverReceived: [<twisted.python.failure.Failure service_identity.exceptions.VerificationError: VerificationError(errors=[DNSMismatch(mismatched_id=DNS_ID(hostname=b'www.directoriosanitario.com'))])>]\n\nI tried it with Scrapy 1.0.5 on python 2.7 and the spider opens but warns with: \n\n> AttributeError: 'NoneType' object has no attribute 'failVerification'\n\nIs there a way to force the spider to open with Scrapy 1.1.0 RC3?\n\n", "before_files": [{"content": "from OpenSSL import SSL\n\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import optionsForClientTLS, CertificateOptions, platformTrust\n from twisted.internet._sslverify import ClientTLSOptions\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)))\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. 
connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}], "after_files": [{"content": "import logging\nfrom OpenSSL import SSL\n\n\nlogger = logging.getLogger(__name__)\n\nMETHOD_SSLv3 = 'SSLv3'\nMETHOD_TLS = 'TLS'\nMETHOD_TLSv10 = 'TLSv1.0'\nMETHOD_TLSv11 = 'TLSv1.1'\nMETHOD_TLSv12 = 'TLSv1.2'\n\nopenssl_methods = {\n METHOD_TLS: SSL.SSLv23_METHOD, # protocol negotiation (recommended)\n METHOD_SSLv3: SSL.SSLv3_METHOD, # SSL 3 (NOT recommended)\n METHOD_TLSv10: SSL.TLSv1_METHOD, # TLS 1.0 only\n METHOD_TLSv11: getattr(SSL, 'TLSv1_1_METHOD', 5), # TLS 1.1 only\n METHOD_TLSv12: getattr(SSL, 'TLSv1_2_METHOD', 6), # TLS 1.2 only\n}\n\n# ClientTLSOptions requires a recent-enough version of Twisted\ntry:\n\n # taken from twisted/twisted/internet/_sslverify.py\n try:\n from OpenSSL.SSL import SSL_CB_HANDSHAKE_DONE, SSL_CB_HANDSHAKE_START\n except ImportError:\n SSL_CB_HANDSHAKE_START = 0x10\n SSL_CB_HANDSHAKE_DONE = 0x20\n\n from twisted.internet._sslverify import (ClientTLSOptions,\n _maybeSetHostNameIndication,\n verifyHostname,\n VerificationError)\n\n class ScrapyClientTLSOptions(ClientTLSOptions):\n # same as Twisted's ClientTLSOptions,\n # except that VerificationError is caught\n # and doesn't close the connection\n def _identityVerifyingInfoCallback(self, connection, where, ret):\n if where & SSL_CB_HANDSHAKE_START:\n _maybeSetHostNameIndication(connection, self._hostnameBytes)\n elif where & SSL_CB_HANDSHAKE_DONE:\n try:\n verifyHostname(connection, self._hostnameASCII)\n except VerificationError as e:\n logger.warning(e)\n\nexcept ImportError:\n # ImportError should not matter for older Twisted 
versions\n # as the above is not used in the fallback ScrapyClientContextFactory\n pass\n", "path": "scrapy/core/downloader/tls.py"}, {"content": "from OpenSSL import SSL\nfrom twisted.internet.ssl import ClientContextFactory\n\ntry:\n\n from zope.interface.declarations import implementer\n\n # the following should be available from Twisted 14.0.0\n from twisted.internet.ssl import (optionsForClientTLS,\n CertificateOptions,\n platformTrust)\n\n from twisted.web.client import BrowserLikePolicyForHTTPS\n from twisted.web.iweb import IPolicyForHTTPS\n\n from scrapy.core.downloader.tls import ScrapyClientTLSOptions\n\n\n @implementer(IPolicyForHTTPS)\n class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):\n \"\"\"\n Non-peer-certificate verifying HTTPS context factory\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation\n\n 'A TLS/SSL connection established with [this method] may\n understand the SSLv3, TLSv1, TLSv1.1 and TLSv1.2 protocols.'\n \"\"\"\n\n def __init__(self, method=SSL.SSLv23_METHOD, *args, **kwargs):\n super(ScrapyClientContextFactory, self).__init__(*args, **kwargs)\n self._ssl_method = method\n\n def getCertificateOptions(self):\n # setting verify=True will require you to provide CAs\n # to verify against; in other words: it's not that simple\n\n # backward-compatible SSL/TLS method:\n #\n # * this will respect `method` attribute in often recommended\n # `ScrapyClientContextFactory` subclass\n # (https://github.com/scrapy/scrapy/issues/1429#issuecomment-131782133)\n #\n # * getattr() for `_ssl_method` attribute for context factories\n # not calling super(..., self).__init__\n return CertificateOptions(verify=False,\n method=getattr(self, 'method',\n getattr(self, '_ssl_method', None)))\n\n # kept for old-style HTTP/1.0 downloader context twisted calls,\n # e.g. 
connectSSL()\n def getContext(self, hostname=None, port=None):\n return self.getCertificateOptions().getContext()\n\n def creatorForNetloc(self, hostname, port):\n return ScrapyClientTLSOptions(hostname.decode(\"ascii\"), self.getContext())\n\n\n @implementer(IPolicyForHTTPS)\n class BrowserLikeContextFactory(ScrapyClientContextFactory):\n \"\"\"\n Twisted-recommended context factory for web clients.\n\n Quoting http://twistedmatrix.com/documents/current/api/twisted.web.client.Agent.html:\n \"The default is to use a BrowserLikePolicyForHTTPS,\n so unless you have special requirements you can leave this as-is.\"\n\n creatorForNetloc() is the same as BrowserLikePolicyForHTTPS\n except this context factory allows setting the TLS/SSL method to use.\n\n Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)\n which allows TLS protocol negotiation.\n \"\"\"\n def creatorForNetloc(self, hostname, port):\n\n # trustRoot set to platformTrust() will use the platform's root CAs.\n #\n # This means that a website like https://www.cacert.org will be rejected\n # by default, since CAcert.org CA certificate is seldom shipped.\n return optionsForClientTLS(hostname.decode(\"ascii\"),\n trustRoot=platformTrust(),\n extraCertificateOptions={\n 'method': self._ssl_method,\n })\n\nexcept ImportError:\n\n class ScrapyClientContextFactory(ClientContextFactory):\n \"A SSL context factory which is more permissive against SSL bugs.\"\n # see https://github.com/scrapy/scrapy/issues/82\n # and https://github.com/scrapy/scrapy/issues/26\n # and https://github.com/scrapy/scrapy/issues/981\n\n def __init__(self, method=SSL.SSLv23_METHOD):\n self.method = method\n\n def getContext(self, hostname=None, port=None):\n ctx = ClientContextFactory.getContext(self)\n # Enable all workarounds to SSL bugs as documented by\n # http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html\n ctx.set_options(SSL.OP_ALL)\n return ctx\n", "path": "scrapy/core/downloader/contextfactory.py"}]}
| 1,799 | 758 |
gh_patches_debug_6961
|
rasdani/github-patches
|
git_diff
|
nextcloud__appstore-186
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Email confirmation email should be improved
I couldn't find the text for the email, so I suspect it's from some library.
Here is the content:
---
Hello from **apps.nextcloud.com**!
You're receiving this e-mail because user oparoz at apps.nextcloud.com has given **yours as an e-mail address to connect their account**.
To confirm this is correct, go to https://apps.nextcloud.com/confirm-email/Mzc:1bZksL:Y8YI3zMQ0fOllevi3VhZ-dmiSMU/
Thank you from **apps.nextcloud.com**!
**apps.nextcloud.com**
---
I've highlighted what should be altered.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/core/management/commands/setupsocial.py`
Content:
```
1 from allauth.socialaccount.models import SocialApp
2 from django.contrib.sites.models import Site
3 from django.core.management import BaseCommand
4
5
6 class Command(BaseCommand):
7 help = ('Updates the first site with the given domain and creates or '
8 'updates the GitHub social login application')
9
10 def add_arguments(self, parser):
11 social_meta = SocialApp._meta
12 parser.add_argument('--github-secret', required=True,
13 help=social_meta.get_field('secret').help_text)
14 parser.add_argument('--github-client-id', required=True,
15 help=social_meta.get_field('client_id').help_text)
16 site_meta = Site._meta
17 parser.add_argument('--domain', required=True,
18 help=site_meta.get_field('domain').help_text)
19
20 def handle(self, *args, **options):
21 # set up site which is required for social login
22 site = Site.objects.all()[0]
23 site.domain = options['domain']
24 site.name = options['domain']
25 site.save()
26 # set up github
27 app, created = SocialApp.objects.get_or_create(provider='github')
28 app.name = 'GitHub'
29 app.secret = options['github_secret']
30 app.client_id = options['github_client_id']
31 app.sites.add(site)
32 app.save()
33
34 msg = 'Successfully initialized social accounts'
35 self.stdout.write(self.style.SUCCESS(msg))
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nextcloudappstore/core/management/commands/setupsocial.py b/nextcloudappstore/core/management/commands/setupsocial.py
--- a/nextcloudappstore/core/management/commands/setupsocial.py
+++ b/nextcloudappstore/core/management/commands/setupsocial.py
@@ -21,7 +21,7 @@
# set up site which is required for social login
site = Site.objects.all()[0]
site.domain = options['domain']
- site.name = options['domain']
+ site.name = 'Nextcloud App Store'
site.save()
# set up github
app, created = SocialApp.objects.get_or_create(provider='github')
|
{"golden_diff": "diff --git a/nextcloudappstore/core/management/commands/setupsocial.py b/nextcloudappstore/core/management/commands/setupsocial.py\n--- a/nextcloudappstore/core/management/commands/setupsocial.py\n+++ b/nextcloudappstore/core/management/commands/setupsocial.py\n@@ -21,7 +21,7 @@\n # set up site which is required for social login\n site = Site.objects.all()[0]\n site.domain = options['domain']\n- site.name = options['domain']\n+ site.name = 'Nextcloud App Store'\n site.save()\n # set up github\n app, created = SocialApp.objects.get_or_create(provider='github')\n", "issue": "Email confirmation email should be improved\nI couldn't find the text for the email, so I suspect it's from some library.\n\nHere is the content:\n\n---\n\nHello from **apps.nextcloud.com**!\n\nYou're receiving this e-mail because user oparoz at apps.nextcloud.com has given **yours as an e-mail address to connect their account**.\n\nTo confirm this is correct, go to https://apps.nextcloud.com/confirm-email/Mzc:1bZksL:Y8YI3zMQ0fOllevi3VhZ-dmiSMU/\n\nThank you from **apps.nextcloud.com**!\n**apps.nextcloud.com**\n\n---\n\nI've highlighted what should be altered.\n\n", "before_files": [{"content": "from allauth.socialaccount.models import SocialApp\nfrom django.contrib.sites.models import Site\nfrom django.core.management import BaseCommand\n\n\nclass Command(BaseCommand):\n help = ('Updates the first site with the given domain and creates or '\n 'updates the GitHub social login application')\n\n def add_arguments(self, parser):\n social_meta = SocialApp._meta\n parser.add_argument('--github-secret', required=True,\n help=social_meta.get_field('secret').help_text)\n parser.add_argument('--github-client-id', required=True,\n help=social_meta.get_field('client_id').help_text)\n site_meta = Site._meta\n parser.add_argument('--domain', required=True,\n help=site_meta.get_field('domain').help_text)\n\n def handle(self, *args, **options):\n # set up site which is required for social login\n site = Site.objects.all()[0]\n site.domain = options['domain']\n site.name = options['domain']\n site.save()\n # set up github\n app, created = SocialApp.objects.get_or_create(provider='github')\n app.name = 'GitHub'\n app.secret = options['github_secret']\n app.client_id = options['github_client_id']\n app.sites.add(site)\n app.save()\n\n msg = 'Successfully initialized social accounts'\n self.stdout.write(self.style.SUCCESS(msg))\n", "path": "nextcloudappstore/core/management/commands/setupsocial.py"}], "after_files": [{"content": "from allauth.socialaccount.models import SocialApp\nfrom django.contrib.sites.models import Site\nfrom django.core.management import BaseCommand\n\n\nclass Command(BaseCommand):\n help = ('Updates the first site with the given domain and creates or '\n 'updates the GitHub social login application')\n\n def add_arguments(self, parser):\n social_meta = SocialApp._meta\n parser.add_argument('--github-secret', required=True,\n help=social_meta.get_field('secret').help_text)\n parser.add_argument('--github-client-id', required=True,\n help=social_meta.get_field('client_id').help_text)\n site_meta = Site._meta\n parser.add_argument('--domain', required=True,\n help=site_meta.get_field('domain').help_text)\n\n def handle(self, *args, **options):\n # set up site which is required for social login\n site = Site.objects.all()[0]\n site.domain = options['domain']\n site.name = 'Nextcloud App Store'\n site.save()\n # set up github\n app, created = SocialApp.objects.get_or_create(provider='github')\n app.name 
= 'GitHub'\n app.secret = options['github_secret']\n app.client_id = options['github_client_id']\n app.sites.add(site)\n app.save()\n\n msg = 'Successfully initialized social accounts'\n self.stdout.write(self.style.SUCCESS(msg))\n", "path": "nextcloudappstore/core/management/commands/setupsocial.py"}]}
| 764 | 154 |
gh_patches_debug_23246
|
rasdani/github-patches
|
git_diff
|
docarray__docarray-85
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix(hashing): ignore casting to float
Fixes #83
```python
>>> x = Document(text="float test 2.56")
>>> x.get_vocabulary()
Counter({'float': 1, 'test': 1, '2': 1, '56': 1})
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docarray/document/mixins/featurehash.py`
Content:
```
1 import hashlib
2 import json
3 from typing import Tuple, TYPE_CHECKING
4
5 import numpy as np
6
7 if TYPE_CHECKING:
8 from ...types import T
9
10
11 class FeatureHashMixin:
12 """Provide helper functions for feature hashing."""
13
14 def embed_feature_hashing(
15 self: 'T',
16 n_dim: int = 256,
17 sparse: bool = False,
18 fields: Tuple[str, ...] = ('text', 'tags'),
19 max_value: int = 1_000_000,
20 ) -> 'T':
21 """Convert an arbitrary set of attributes into a fixed-dimensional matrix using the hashing trick.
22
23 :param n_dim: the dimensionality of each document in the output embedding.
24 Small numbers of features are likely to cause hash collisions,
25 but large numbers will cause larger overall parameter dimensions.
26 :param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
27 Note that this feature requires ``scipy``
28 :param fields: which attributes to be considered as for feature hashing.
29 """
30 if sparse:
31 from scipy.sparse import csr_matrix
32
33 idxs, data = [], [] # sparse
34 table = np.zeros(n_dim) # dense
35
36 for f in fields:
37 if 'text' in fields:
38 all_tokens = self.get_vocabulary(('text',))
39 for f_id, val in all_tokens.items():
40 _hash_column(f_id, val, n_dim, max_value, idxs, data, table)
41
42 if 'tags' in fields:
43 for k, v in self.tags.items():
44 _hash_column(k, v, n_dim, max_value, idxs, data, table)
45
46 v = getattr(self, f, None)
47 if v:
48 _hash_column(f, v, n_dim, max_value, idxs, data, table)
49
50 if sparse:
51 self.embedding = csr_matrix((data, zip(*idxs)), shape=(1, n_dim))
52 else:
53 self.embedding = table
54 return self
55
56
57 def _hash_column(col_name, col_val, n_dim, max_value, idxs, data, table):
58 h = _any_hash(col_name)
59 col_val = _any_hash(col_val) % max_value
60 col = h % n_dim
61 idxs.append((0, col))
62 data.append(np.sign(h) * col_val)
63 table[col] += np.sign(h) * col_val
64
65
66 def _any_hash(v):
67 try:
68 return int(v) # parse int parameter
69 except ValueError:
70 try:
71 return float(v) # parse float parameter
72 except ValueError:
73 if not v:
74 # ignore it when the parameter is empty
75 return 0
76 if isinstance(v, str):
77 v = v.strip()
78 if v.lower() in {'true', 'yes'}: # parse boolean parameter
79 return 1
80 if v.lower() in {'false', 'no'}:
81 return 0
82 if isinstance(v, (tuple, dict, list)):
83 v = json.dumps(v, sort_keys=True)
84
85 return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docarray/document/mixins/featurehash.py b/docarray/document/mixins/featurehash.py
--- a/docarray/document/mixins/featurehash.py
+++ b/docarray/document/mixins/featurehash.py
@@ -64,22 +64,24 @@
def _any_hash(v):
- try:
- return int(v) # parse int parameter
- except ValueError:
+ if not v:
+ # ignore it when the parameter is empty
+ return 0
+ elif isinstance(v, (tuple, dict, list, str)):
+ if isinstance(v, str):
+ v = v.strip()
+ if v.lower() in {'true', 'yes'}: # parse boolean parameter
+ return 1
+ if v.lower() in {'false', 'no'}:
+ return 0
+ else:
+ v = json.dumps(v, sort_keys=True)
+ return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)
+ else:
try:
- return float(v) # parse float parameter
+ return int(v) # parse int parameter
except ValueError:
- if not v:
- # ignore it when the parameter is empty
- return 0
- if isinstance(v, str):
- v = v.strip()
- if v.lower() in {'true', 'yes'}: # parse boolean parameter
- return 1
- if v.lower() in {'false', 'no'}:
- return 0
- if isinstance(v, (tuple, dict, list)):
- v = json.dumps(v, sort_keys=True)
-
- return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)
+ try:
+ return float(v) # parse float parameter
+ except ValueError:
+ return 0 # unable to hash
|
{"golden_diff": "diff --git a/docarray/document/mixins/featurehash.py b/docarray/document/mixins/featurehash.py\n--- a/docarray/document/mixins/featurehash.py\n+++ b/docarray/document/mixins/featurehash.py\n@@ -64,22 +64,24 @@\n \n \n def _any_hash(v):\n- try:\n- return int(v) # parse int parameter\n- except ValueError:\n+ if not v:\n+ # ignore it when the parameter is empty\n+ return 0\n+ elif isinstance(v, (tuple, dict, list, str)):\n+ if isinstance(v, str):\n+ v = v.strip()\n+ if v.lower() in {'true', 'yes'}: # parse boolean parameter\n+ return 1\n+ if v.lower() in {'false', 'no'}:\n+ return 0\n+ else:\n+ v = json.dumps(v, sort_keys=True)\n+ return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)\n+ else:\n try:\n- return float(v) # parse float parameter\n+ return int(v) # parse int parameter\n except ValueError:\n- if not v:\n- # ignore it when the parameter is empty\n- return 0\n- if isinstance(v, str):\n- v = v.strip()\n- if v.lower() in {'true', 'yes'}: # parse boolean parameter\n- return 1\n- if v.lower() in {'false', 'no'}:\n- return 0\n- if isinstance(v, (tuple, dict, list)):\n- v = json.dumps(v, sort_keys=True)\n-\n- return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)\n+ try:\n+ return float(v) # parse float parameter\n+ except ValueError:\n+ return 0 # unable to hash\n", "issue": "fix(hashing): ignore casting to float\nFixes #83 \r\n\r\n```python\r\n>>> x = Document(text=\"float test 2.56\")\r\n>>> x.get_vocabulary()\r\nCounter({'float': 1, 'test': 1, '2': 1, '56': 1})\r\n```\n", "before_files": [{"content": "import hashlib\nimport json\nfrom typing import Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from ...types import T\n\n\nclass FeatureHashMixin:\n \"\"\"Provide helper functions for feature hashing.\"\"\"\n\n def embed_feature_hashing(\n self: 'T',\n n_dim: int = 256,\n sparse: bool = False,\n fields: Tuple[str, ...] 
= ('text', 'tags'),\n max_value: int = 1_000_000,\n ) -> 'T':\n \"\"\"Convert an arbitrary set of attributes into a fixed-dimensional matrix using the hashing trick.\n\n :param n_dim: the dimensionality of each document in the output embedding.\n Small numbers of features are likely to cause hash collisions,\n but large numbers will cause larger overall parameter dimensions.\n :param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.\n Note that this feature requires ``scipy``\n :param fields: which attributes to be considered as for feature hashing.\n \"\"\"\n if sparse:\n from scipy.sparse import csr_matrix\n\n idxs, data = [], [] # sparse\n table = np.zeros(n_dim) # dense\n\n for f in fields:\n if 'text' in fields:\n all_tokens = self.get_vocabulary(('text',))\n for f_id, val in all_tokens.items():\n _hash_column(f_id, val, n_dim, max_value, idxs, data, table)\n\n if 'tags' in fields:\n for k, v in self.tags.items():\n _hash_column(k, v, n_dim, max_value, idxs, data, table)\n\n v = getattr(self, f, None)\n if v:\n _hash_column(f, v, n_dim, max_value, idxs, data, table)\n\n if sparse:\n self.embedding = csr_matrix((data, zip(*idxs)), shape=(1, n_dim))\n else:\n self.embedding = table\n return self\n\n\ndef _hash_column(col_name, col_val, n_dim, max_value, idxs, data, table):\n h = _any_hash(col_name)\n col_val = _any_hash(col_val) % max_value\n col = h % n_dim\n idxs.append((0, col))\n data.append(np.sign(h) * col_val)\n table[col] += np.sign(h) * col_val\n\n\ndef _any_hash(v):\n try:\n return int(v) # parse int parameter\n except ValueError:\n try:\n return float(v) # parse float parameter\n except ValueError:\n if not v:\n # ignore it when the parameter is empty\n return 0\n if isinstance(v, str):\n v = v.strip()\n if v.lower() in {'true', 'yes'}: # parse boolean parameter\n return 1\n if v.lower() in {'false', 'no'}:\n return 0\n if isinstance(v, (tuple, dict, list)):\n v = json.dumps(v, sort_keys=True)\n\n return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)\n", "path": "docarray/document/mixins/featurehash.py"}], "after_files": [{"content": "import hashlib\nimport json\nfrom typing import Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nif TYPE_CHECKING:\n from ...types import T\n\n\nclass FeatureHashMixin:\n \"\"\"Provide helper functions for feature hashing.\"\"\"\n\n def embed_feature_hashing(\n self: 'T',\n n_dim: int = 256,\n sparse: bool = False,\n fields: Tuple[str, ...] 
= ('text', 'tags'),\n max_value: int = 1_000_000,\n ) -> 'T':\n \"\"\"Convert an arbitrary set of attributes into a fixed-dimensional matrix using the hashing trick.\n\n :param n_dim: the dimensionality of each document in the output embedding.\n Small numbers of features are likely to cause hash collisions,\n but large numbers will cause larger overall parameter dimensions.\n :param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.\n Note that this feature requires ``scipy``\n :param fields: which attributes to be considered as for feature hashing.\n \"\"\"\n if sparse:\n from scipy.sparse import csr_matrix\n\n idxs, data = [], [] # sparse\n table = np.zeros(n_dim) # dense\n\n for f in fields:\n if 'text' in fields:\n all_tokens = self.get_vocabulary(('text',))\n for f_id, val in all_tokens.items():\n _hash_column(f_id, val, n_dim, max_value, idxs, data, table)\n\n if 'tags' in fields:\n for k, v in self.tags.items():\n _hash_column(k, v, n_dim, max_value, idxs, data, table)\n\n v = getattr(self, f, None)\n if v:\n _hash_column(f, v, n_dim, max_value, idxs, data, table)\n\n if sparse:\n self.embedding = csr_matrix((data, zip(*idxs)), shape=(1, n_dim))\n else:\n self.embedding = table\n return self\n\n\ndef _hash_column(col_name, col_val, n_dim, max_value, idxs, data, table):\n h = _any_hash(col_name)\n col_val = _any_hash(col_val) % max_value\n col = h % n_dim\n idxs.append((0, col))\n data.append(np.sign(h) * col_val)\n table[col] += np.sign(h) * col_val\n\n\ndef _any_hash(v):\n if not v:\n # ignore it when the parameter is empty\n return 0\n elif isinstance(v, (tuple, dict, list, str)):\n if isinstance(v, str):\n v = v.strip()\n if v.lower() in {'true', 'yes'}: # parse boolean parameter\n return 1\n if v.lower() in {'false', 'no'}:\n return 0\n else:\n v = json.dumps(v, sort_keys=True)\n return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)\n else:\n try:\n return int(v) # parse int parameter\n except ValueError:\n try:\n return float(v) # parse float parameter\n except ValueError:\n return 0 # unable to hash\n", "path": "docarray/document/mixins/featurehash.py"}]}
| 1,186 | 434 |
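A simplified, standalone illustration of the reordering fix in the docarray row above: the original `_any_hash` attempted `int`/`float` parsing before string handling, so a text parameter such as `2.56` was converted to a number instead of being hashed like any other token. The sketch below is not the library's actual code; the helper names `buggy_hash` and `fixed_hash` are invented for this example, and the boolean/container handling from the real patch is omitted to keep the contrast small.

```python
import hashlib


def buggy_hash(v):
    # Original ordering: numeric parsing wins, so "2.56" is parsed as a float
    # and never reaches the md5 branch used for real text tokens.
    try:
        return int(v)
    except ValueError:
        try:
            return float(v)
        except ValueError:
            return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)


def fixed_hash(v):
    # Reordered check: strings are always md5-hashed; only non-strings fall
    # back to numeric parsing, mirroring the intent of the golden diff above.
    if isinstance(v, str):
        return int(hashlib.md5(v.strip().encode('utf-8')).hexdigest(), base=16)
    try:
        return int(v)
    except (TypeError, ValueError):
        return 0


print(buggy_hash('2.56'))  # 2.56, i.e. treated as a number
print(fixed_hash('2.56'))  # a large stable integer, i.e. treated as a token
```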
gh_patches_debug_29060
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1399
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tox (including Travis) not testing cythonized variants
As described in ``README.rst``, Falcon can be cythonized for ~20% performance gain (or actually even more). Installing Falcon from *sdist* into an environment with Cython does the trick:
```python
>>> import falcon
>>> falcon.api # As we can see, falcon.api is coming from the dynamically-linked (cythonized) library api.so
<module 'falcon.api' from '/home/vytas/.virtualenvs/fresh/local/lib/python2.7/site-packages/falcon/api.so'>
```
However, this does not hold under Tox ``py27_cython`` and ``py36_cython`` environments, including runs in Travis, as the properly cythonized Falcon is shadowed by the local source directory. This could potentially be worked around by changing dir in Tox, but apparently pytest is even more stubborn as it is correctly determining the root dir of tests, and changing to it.
See also discussions here:
* https://github.com/tox-dev/tox/issues/54
* https://github.com/tox-dev/tox/issues/514
The last comment on the latter also explains the possible patterns to work this around: https://github.com/tox-dev/tox/issues/514#issuecomment-327779367 (links to the useful https://docs.pytest.org/en/latest/goodpractices.html#choosing-a-test-layout-import-rules ).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/cmd/print_routes.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2013 by Rackspace Hosting, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """
16 Script that prints out the routes of an API instance.
17 """
18
19 from __future__ import print_function
20
21 from functools import partial
22 import inspect
23
24 import falcon
25
26
27 def print_routes(api, verbose=False): # pragma: no cover
28 """
29 Initial call.
30
31 :param api: The falcon.API or callable that returns an instance to look at.
32 :type api: falcon.API or callable
33 :param verbose: If the output should be verbose.
34 :type verbose: bool
35 """
36 traverse(api._router._roots, verbose=verbose)
37
38
39 def traverse(roots, parent='', verbose=False):
40 """
41 Recursive call which also handles printing output.
42
43 :param api: The falcon.API or callable that returns an instance to look at.
44 :type api: falcon.API or callable
45 :param parent: The parent uri path to the current iteration.
46 :type parent: str
47 :param verbose: If the output should be verbose.
48 :type verbose: bool
49 """
50 for root in roots:
51 if root.method_map:
52 print('->', parent + '/' + root.raw_segment)
53 if verbose:
54 for method, func in root.method_map.items():
55 if func.__name__ != 'method_not_allowed':
56 if isinstance(func, partial):
57 real_func = func.func
58 else:
59 real_func = func
60
61 source_file = inspect.getsourcefile(real_func)
62
63 print('-->{0} {1}:{2}'.format(
64 method,
65 source_file,
66 source_file[1]
67 ))
68
69 if root.children:
70 traverse(root.children, parent + '/' + root.raw_segment, verbose)
71
72
73 def main():
74 """
75 Main entrypoint.
76 """
77 import argparse
78
79 parser = argparse.ArgumentParser(
80 description='Example: print-api-routes myprogram:app')
81 parser.add_argument(
82 '-v', '--verbose', action='store_true',
83 help='Prints out information for each method.')
84 parser.add_argument(
85 'api_module',
86 help='The module and api to inspect. Example: myapp.somemodule:api',
87 )
88 args = parser.parse_args()
89
90 try:
91 module, instance = args.api_module.split(':', 1)
92 except ValueError:
93 parser.error(
94 'The api_module must include a colon between '
95 'the module and instnace')
96 api = getattr(__import__(module, fromlist=[True]), instance)
97 if not isinstance(api, falcon.API):
98 if callable(api):
99 api = api()
100 if not isinstance(api, falcon.API):
101 parser.error(
102 '{0} did not return a falcon.API instance'.format(
103 args.api_module))
104 else:
105 parser.error(
106 'The instance must be of falcon.API or be '
107 'a callable without args that returns falcon.API')
108 print_routes(api, verbose=args.verbose)
109
110
111 if __name__ == '__main__':
112 main()
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/cmd/print_routes.py b/falcon/cmd/print_routes.py
--- a/falcon/cmd/print_routes.py
+++ b/falcon/cmd/print_routes.py
@@ -58,13 +58,19 @@
else:
real_func = func
- source_file = inspect.getsourcefile(real_func)
-
- print('-->{0} {1}:{2}'.format(
- method,
- source_file,
- source_file[1]
- ))
+ try:
+ source_file = inspect.getsourcefile(real_func)
+ source_lines = inspect.getsourcelines(real_func)
+ source_info = '{}:{}'.format(source_file,
+ source_lines[1])
+ except TypeError:
+ # NOTE(vytas): If Falcon is cythonized, all default
+ # responders coming from cythonized modules will
+ # appear as built-in functions, and raise a
+ # TypeError when trying to locate the source file.
+ source_info = '[unknown file]'
+
+ print('-->' + method, source_info)
if root.children:
traverse(root.children, parent + '/' + root.raw_segment, verbose)
@@ -92,7 +98,7 @@
except ValueError:
parser.error(
'The api_module must include a colon between '
- 'the module and instnace')
+ 'the module and instance')
api = getattr(__import__(module, fromlist=[True]), instance)
if not isinstance(api, falcon.API):
if callable(api):
|
{"golden_diff": "diff --git a/falcon/cmd/print_routes.py b/falcon/cmd/print_routes.py\n--- a/falcon/cmd/print_routes.py\n+++ b/falcon/cmd/print_routes.py\n@@ -58,13 +58,19 @@\n else:\n real_func = func\n \n- source_file = inspect.getsourcefile(real_func)\n-\n- print('-->{0} {1}:{2}'.format(\n- method,\n- source_file,\n- source_file[1]\n- ))\n+ try:\n+ source_file = inspect.getsourcefile(real_func)\n+ source_lines = inspect.getsourcelines(real_func)\n+ source_info = '{}:{}'.format(source_file,\n+ source_lines[1])\n+ except TypeError:\n+ # NOTE(vytas): If Falcon is cythonized, all default\n+ # responders coming from cythonized modules will\n+ # appear as built-in functions, and raise a\n+ # TypeError when trying to locate the source file.\n+ source_info = '[unknown file]'\n+\n+ print('-->' + method, source_info)\n \n if root.children:\n traverse(root.children, parent + '/' + root.raw_segment, verbose)\n@@ -92,7 +98,7 @@\n except ValueError:\n parser.error(\n 'The api_module must include a colon between '\n- 'the module and instnace')\n+ 'the module and instance')\n api = getattr(__import__(module, fromlist=[True]), instance)\n if not isinstance(api, falcon.API):\n if callable(api):\n", "issue": "Tox (including Travis) not testing cythonized variants\nAs described in ``README.rst``, Falcon can be cythonized for ~20% performance gain (or actually even more). Installing Falcon from *sdist* into an environment with Cython does the trick:\r\n\r\n```python\r\n>>> import falcon\r\n>>> falcon.api # As we can see, falcon.api is coming from the dynamically-linked (cythonized) library api.so\r\n<module 'falcon.api' from '/home/vytas/.virtualenvs/fresh/local/lib/python2.7/site-packages/falcon/api.so'>\r\n```\r\n\r\nHowever, this does not hold under Tox ``py27_cython`` and ``py36_cython`` environments, including runs in Travis, as the properly cythonized Falcon is shadowed by the local source directory. 
This could potentially be worked around by changing dir in Tox, but apparently pytest is even more stubborn as it is correctly determining the root dir of tests, and changing to it.\r\n\r\nSee also discussions here:\r\n* https://github.com/tox-dev/tox/issues/54\r\n* https://github.com/tox-dev/tox/issues/514\r\n\r\nThe last comment on the latter also explains the possible patterns to work this around: https://github.com/tox-dev/tox/issues/514#issuecomment-327779367 (links to the useful https://docs.pytest.org/en/latest/goodpractices.html#choosing-a-test-layout-import-rules ).\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nScript that prints out the routes of an API instance.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom functools import partial\nimport inspect\n\nimport falcon\n\n\ndef print_routes(api, verbose=False): # pragma: no cover\n \"\"\"\n Initial call.\n\n :param api: The falcon.API or callable that returns an instance to look at.\n :type api: falcon.API or callable\n :param verbose: If the output should be verbose.\n :type verbose: bool\n \"\"\"\n traverse(api._router._roots, verbose=verbose)\n\n\ndef traverse(roots, parent='', verbose=False):\n \"\"\"\n Recursive call which also handles printing output.\n\n :param api: The falcon.API or callable that returns an instance to look at.\n :type api: falcon.API or callable\n :param parent: The parent uri path to the current iteration.\n :type parent: str\n :param verbose: If the output should be verbose.\n :type verbose: bool\n \"\"\"\n for root in roots:\n if root.method_map:\n print('->', parent + '/' + root.raw_segment)\n if verbose:\n for method, func in root.method_map.items():\n if func.__name__ != 'method_not_allowed':\n if isinstance(func, partial):\n real_func = func.func\n else:\n real_func = func\n\n source_file = inspect.getsourcefile(real_func)\n\n print('-->{0} {1}:{2}'.format(\n method,\n source_file,\n source_file[1]\n ))\n\n if root.children:\n traverse(root.children, parent + '/' + root.raw_segment, verbose)\n\n\ndef main():\n \"\"\"\n Main entrypoint.\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Example: print-api-routes myprogram:app')\n parser.add_argument(\n '-v', '--verbose', action='store_true',\n help='Prints out information for each method.')\n parser.add_argument(\n 'api_module',\n help='The module and api to inspect. 
Example: myapp.somemodule:api',\n )\n args = parser.parse_args()\n\n try:\n module, instance = args.api_module.split(':', 1)\n except ValueError:\n parser.error(\n 'The api_module must include a colon between '\n 'the module and instnace')\n api = getattr(__import__(module, fromlist=[True]), instance)\n if not isinstance(api, falcon.API):\n if callable(api):\n api = api()\n if not isinstance(api, falcon.API):\n parser.error(\n '{0} did not return a falcon.API instance'.format(\n args.api_module))\n else:\n parser.error(\n 'The instance must be of falcon.API or be '\n 'a callable without args that returns falcon.API')\n print_routes(api, verbose=args.verbose)\n\n\nif __name__ == '__main__':\n main()\n", "path": "falcon/cmd/print_routes.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nScript that prints out the routes of an API instance.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom functools import partial\nimport inspect\n\nimport falcon\n\n\ndef print_routes(api, verbose=False): # pragma: no cover\n \"\"\"\n Initial call.\n\n :param api: The falcon.API or callable that returns an instance to look at.\n :type api: falcon.API or callable\n :param verbose: If the output should be verbose.\n :type verbose: bool\n \"\"\"\n traverse(api._router._roots, verbose=verbose)\n\n\ndef traverse(roots, parent='', verbose=False):\n \"\"\"\n Recursive call which also handles printing output.\n\n :param api: The falcon.API or callable that returns an instance to look at.\n :type api: falcon.API or callable\n :param parent: The parent uri path to the current iteration.\n :type parent: str\n :param verbose: If the output should be verbose.\n :type verbose: bool\n \"\"\"\n for root in roots:\n if root.method_map:\n print('->', parent + '/' + root.raw_segment)\n if verbose:\n for method, func in root.method_map.items():\n if func.__name__ != 'method_not_allowed':\n if isinstance(func, partial):\n real_func = func.func\n else:\n real_func = func\n\n try:\n source_file = inspect.getsourcefile(real_func)\n source_lines = inspect.getsourcelines(real_func)\n source_info = '{}:{}'.format(source_file,\n source_lines[1])\n except TypeError:\n # NOTE(vytas): If Falcon is cythonized, all default\n # responders coming from cythonized modules will\n # appear as built-in functions, and raise a\n # TypeError when trying to locate the source file.\n source_info = '[unknown file]'\n\n print('-->' + method, source_info)\n\n if root.children:\n traverse(root.children, parent + '/' + root.raw_segment, verbose)\n\n\ndef main():\n \"\"\"\n Main entrypoint.\n \"\"\"\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Example: print-api-routes myprogram:app')\n parser.add_argument(\n '-v', '--verbose', action='store_true',\n help='Prints out information for each method.')\n parser.add_argument(\n 'api_module',\n help='The module and api to inspect. 
Example: myapp.somemodule:api',\n )\n args = parser.parse_args()\n\n try:\n module, instance = args.api_module.split(':', 1)\n except ValueError:\n parser.error(\n 'The api_module must include a colon between '\n 'the module and instance')\n api = getattr(__import__(module, fromlist=[True]), instance)\n if not isinstance(api, falcon.API):\n if callable(api):\n api = api()\n if not isinstance(api, falcon.API):\n parser.error(\n '{0} did not return a falcon.API instance'.format(\n args.api_module))\n else:\n parser.error(\n 'The instance must be of falcon.API or be '\n 'a callable without args that returns falcon.API')\n print_routes(api, verbose=args.verbose)\n\n\nif __name__ == '__main__':\n main()\n", "path": "falcon/cmd/print_routes.py"}]}
| 1,585 | 336 |
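A note on the falcon row above: `inspect.getsourcefile` raises `TypeError` when given a built-in function, which is how responders coming from cythonized modules appear at runtime, and that is what the `try`/`except TypeError` in the golden diff guards against. The snippet below is a minimal, Falcon-independent demonstration; the `describe` helper is invented for this example.

```python
import inspect


def describe(func):
    # Guarded lookup, mirroring the patch: fall back to a placeholder when the
    # object has no Python source (built-ins, C extensions, cythonized code).
    try:
        source_file = inspect.getsourcefile(func)
        _, line_no = inspect.getsourcelines(func)
        return '{0}:{1}'.format(source_file, line_no)
    except TypeError:
        return '[unknown file]'


print(describe(inspect.getsourcefile))  # pure-Python function: path and line number
print(describe(len))                    # built-in: '[unknown file]'
```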
gh_patches_debug_10000
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-5290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] KeyValueStore fails to check the cached data with new data
Typo bug.
```
def store_value_bytes(self, key: str, data: bytes) -> str:
...
if cache_value_file_path.exists():
old_data = cache_value_file_path.write_bytes()
...
```
should be:
```
def store_value_bytes(self, key: str, data: bytes) -> str:
...
if cache_value_file_path.exists():
old_data = cache_value_file_path.read_bytes()
...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/kfp/components/_key_value_store.py`
Content:
```
1 import hashlib
2 from pathlib import Path
3
4
5 class KeyValueStore:
6 KEY_FILE_SUFFIX = '.key'
7 VALUE_FILE_SUFFIX = '.value'
8
9 def __init__(
10 self,
11 cache_dir: str,
12 ):
13 cache_dir = Path(cache_dir)
14 hash_func = (lambda text: hashlib.sha256(text.encode('utf-8')).hexdigest())
15 self.cache_dir = cache_dir
16 self.hash_func = hash_func
17
18 def store_value_text(self, key: str, text: str) -> str:
19 return self.store_value_bytes(key, text.encode('utf-8'))
20
21 def store_value_bytes(self, key: str, data: bytes) -> str:
22 cache_id = self.hash_func(key)
23 self.cache_dir.mkdir(parents=True, exist_ok=True)
24 cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)
25 cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)
26 if cache_key_file_path.exists():
27 old_key = cache_key_file_path.read_text()
28 if key != old_key:
29 raise RuntimeError(
30 'Cache is corrupted: File "{}" contains existing key '
31 '"{}" != new key "{}"'.format(cache_key_file_path, old_key, key)
32 )
33 if cache_value_file_path.exists():
34 old_data = cache_value_file_path.write_bytes()
35 if data != old_data:
36 # TODO: Add options to raise error when overwriting the value.
37 pass
38 cache_value_file_path.write_bytes(data)
39 cache_key_file_path.write_text(key)
40 return cache_id
41
42 def try_get_value_text(self, key: str) -> str:
43 result = self.try_get_value_bytes(key)
44 if result is None:
45 return None
46 return result.decode('utf-8')
47
48 def try_get_value_bytes(self, key: str) -> bytes:
49 cache_id = self.hash_func(key)
50 cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)
51 if cache_value_file_path.exists():
52 return cache_value_file_path.read_bytes()
53 return None
54
55 def exists(self, key: str) -> bool:
56 cache_id = self.hash_func(key)
57 cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)
58 return cache_key_file_path.exists()
59
60 def keys(self):
61 for cache_key_file_path in self.cache_dir.glob('*' + KeyValueStore.KEY_FILE_SUFFIX):
62 yield Path(cache_key_file_path).read_text()
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/kfp/components/_key_value_store.py b/sdk/python/kfp/components/_key_value_store.py
--- a/sdk/python/kfp/components/_key_value_store.py
+++ b/sdk/python/kfp/components/_key_value_store.py
@@ -31,7 +31,7 @@
'"{}" != new key "{}"'.format(cache_key_file_path, old_key, key)
)
if cache_value_file_path.exists():
- old_data = cache_value_file_path.write_bytes()
+ old_data = cache_value_file_path.read_bytes()
if data != old_data:
# TODO: Add options to raise error when overwriting the value.
pass
|
{"golden_diff": "diff --git a/sdk/python/kfp/components/_key_value_store.py b/sdk/python/kfp/components/_key_value_store.py\n--- a/sdk/python/kfp/components/_key_value_store.py\n+++ b/sdk/python/kfp/components/_key_value_store.py\n@@ -31,7 +31,7 @@\n '\"{}\" != new key \"{}\"'.format(cache_key_file_path, old_key, key)\n )\n if cache_value_file_path.exists():\n- old_data = cache_value_file_path.write_bytes()\n+ old_data = cache_value_file_path.read_bytes()\n if data != old_data:\n # TODO: Add options to raise error when overwriting the value.\n pass\n", "issue": "[Bug] KeyValueStore fails to check the cached data with new data\nTypo bug.\r\n\r\n```\r\ndef store_value_bytes(self, key: str, data: bytes) -> str:\r\n ... \r\n if cache_value_file_path.exists():\r\n old_data = cache_value_file_path.write_bytes()\r\n ... \r\n```\r\nshould be:\r\n```\r\ndef store_value_bytes(self, key: str, data: bytes) -> str:\r\n ... \r\n if cache_value_file_path.exists():\r\n old_data = cache_value_file_path.read_bytes()\r\n ... \r\n```\n", "before_files": [{"content": "import hashlib\nfrom pathlib import Path\n\n\nclass KeyValueStore:\n KEY_FILE_SUFFIX = '.key'\n VALUE_FILE_SUFFIX = '.value'\n\n def __init__(\n self,\n cache_dir: str,\n ):\n cache_dir = Path(cache_dir)\n hash_func = (lambda text: hashlib.sha256(text.encode('utf-8')).hexdigest())\n self.cache_dir = cache_dir\n self.hash_func = hash_func\n\n def store_value_text(self, key: str, text: str) -> str:\n return self.store_value_bytes(key, text.encode('utf-8'))\n\n def store_value_bytes(self, key: str, data: bytes) -> str:\n cache_id = self.hash_func(key)\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)\n cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)\n if cache_key_file_path.exists():\n old_key = cache_key_file_path.read_text()\n if key != old_key:\n raise RuntimeError(\n 'Cache is corrupted: File \"{}\" contains existing key '\n '\"{}\" != new key \"{}\"'.format(cache_key_file_path, old_key, key)\n )\n if cache_value_file_path.exists():\n old_data = cache_value_file_path.write_bytes()\n if data != old_data:\n # TODO: Add options to raise error when overwriting the value.\n pass\n cache_value_file_path.write_bytes(data)\n cache_key_file_path.write_text(key)\n return cache_id\n\n def try_get_value_text(self, key: str) -> str:\n result = self.try_get_value_bytes(key)\n if result is None:\n return None\n return result.decode('utf-8')\n\n def try_get_value_bytes(self, key: str) -> bytes:\n cache_id = self.hash_func(key)\n cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)\n if cache_value_file_path.exists():\n return cache_value_file_path.read_bytes()\n return None\n\n def exists(self, key: str) -> bool:\n cache_id = self.hash_func(key)\n cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)\n return cache_key_file_path.exists()\n\n def keys(self):\n for cache_key_file_path in self.cache_dir.glob('*' + KeyValueStore.KEY_FILE_SUFFIX):\n yield Path(cache_key_file_path).read_text()\n", "path": "sdk/python/kfp/components/_key_value_store.py"}], "after_files": [{"content": "import hashlib\nfrom pathlib import Path\n\n\nclass KeyValueStore:\n KEY_FILE_SUFFIX = '.key'\n VALUE_FILE_SUFFIX = '.value'\n\n def __init__(\n self,\n cache_dir: str,\n ):\n cache_dir = Path(cache_dir)\n hash_func = (lambda text: hashlib.sha256(text.encode('utf-8')).hexdigest())\n self.cache_dir = 
cache_dir\n self.hash_func = hash_func\n\n def store_value_text(self, key: str, text: str) -> str:\n return self.store_value_bytes(key, text.encode('utf-8'))\n\n def store_value_bytes(self, key: str, data: bytes) -> str:\n cache_id = self.hash_func(key)\n self.cache_dir.mkdir(parents=True, exist_ok=True)\n cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)\n cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)\n if cache_key_file_path.exists():\n old_key = cache_key_file_path.read_text()\n if key != old_key:\n raise RuntimeError(\n 'Cache is corrupted: File \"{}\" contains existing key '\n '\"{}\" != new key \"{}\"'.format(cache_key_file_path, old_key, key)\n )\n if cache_value_file_path.exists():\n old_data = cache_value_file_path.read_bytes()\n if data != old_data:\n # TODO: Add options to raise error when overwriting the value.\n pass\n cache_value_file_path.write_bytes(data)\n cache_key_file_path.write_text(key)\n return cache_id\n\n def try_get_value_text(self, key: str) -> str:\n result = self.try_get_value_bytes(key)\n if result is None:\n return None\n return result.decode('utf-8')\n\n def try_get_value_bytes(self, key: str) -> bytes:\n cache_id = self.hash_func(key)\n cache_value_file_path = self.cache_dir / (cache_id + KeyValueStore.VALUE_FILE_SUFFIX)\n if cache_value_file_path.exists():\n return cache_value_file_path.read_bytes()\n return None\n\n def exists(self, key: str) -> bool:\n cache_id = self.hash_func(key)\n cache_key_file_path = self.cache_dir / (cache_id + KeyValueStore.KEY_FILE_SUFFIX)\n return cache_key_file_path.exists()\n\n def keys(self):\n for cache_key_file_path in self.cache_dir.glob('*' + KeyValueStore.KEY_FILE_SUFFIX):\n yield Path(cache_key_file_path).read_text()\n", "path": "sdk/python/kfp/components/_key_value_store.py"}]}
| 1,034 | 144 |
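A minimal sketch of the `pathlib` behaviour behind the KeyValueStore fix in the row above — the cache path is hypothetical and the snippet is illustrative only, not part of the dataset record:

```python
from pathlib import Path

cache_value_file_path = Path("/tmp/kvstore-demo.value")  # hypothetical cache file
cache_value_file_path.write_bytes(b"cached payload")     # returns 14 (bytes written), not the data

# Correct form used by the fix: read the stored payload back for comparison.
old_data = cache_value_file_path.read_bytes()
assert old_data == b"cached payload"

# The buggy form cannot work: write_bytes() requires a `data` argument,
# so calling it with no arguments raises TypeError instead of yielding bytes.
try:
    cache_value_file_path.write_bytes()
except TypeError as exc:
    print(exc)
```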
gh_patches_debug_56902
|
rasdani/github-patches
|
git_diff
|
NVIDIA__NVFlare-172
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in aggregating models
I'm using NVFlare 2.0.6 and torchvision ResNet-50 model for training. The FL system (consisting 4 clients) had completed the training for the first round but when aggregating into the global model I faced this error. Please help me resolve this problem, thank you.
```
2022-01-27 18:08:14,731 - InTimeAccumulateWeightedAggregator - INFO - [run=1, wf=scatter_gather_ctl]: aggregating 4 update(s) at round 0
Traceback (most recent call last):
File "/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/workflows/scatter_and_gather.py", line 202, in control_flow
self._global_weights = self.shareable_gen.shareable_to_learnable(aggr_result, fl_ctx)
File "/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py", line 60, in shareable_to_learnable
weights[v_name] += v_value
numpy.core._exceptions._UFuncOutputCastingError: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
2022-01-27 18:08:14,813 - ScatterAndGather - ERROR - [run=1, wf=scatter_gather_ctl]: Exception in ScatterAndGather control_flow: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
Traceback (most recent call last):
File "/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/workflows/scatter_and_gather.py", line 202, in control_flow
self._global_weights = self.shareable_gen.shareable_to_learnable(aggr_result, fl_ctx)
File "/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py", line 60, in shareable_to_learnable
weights[v_name] += v_value
numpy.core._exceptions._UFuncOutputCastingError: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
2022-01-27 18:08:14,813 - ServerRunner - ERROR - [run=1, wf=scatter_gather_ctl]: Aborting current RUN due to FATAL_SYSTEM_ERROR received: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'
2022-01-27 18:08:14,813 - ServerRunner - INFO - [run=1, wf=scatter_gather_ctl]: asked to abort - triggered abort_signal to stop the RUN
2022-01-27 18:08:14,813 - ServerRunner - INFO - [run=1, wf=scatter_gather_ctl]: Workflow: scatter_gather_ctl finalizing ...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/app_common/shareablegenerators/full_model_shareable_generator.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from nvflare.apis.dxo import DataKind, from_shareable
16 from nvflare.apis.fl_context import FLContext
17 from nvflare.apis.shareable import Shareable
18 from nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo
19 from nvflare.app_common.abstract.shareable_generator import ShareableGenerator
20 from nvflare.app_common.app_constant import AppConstants
21
22
23 class FullModelShareableGenerator(ShareableGenerator):
24 def learnable_to_shareable(self, ml: ModelLearnable, fl_ctx: FLContext) -> Shareable:
25 """Convert Learnable to Shareable
26
27 Args:
28 model (Learnable): model to be converted
29 fl_ctx (FLContext): FL context
30
31 Returns:
32 Shareable: a shareable containing a DXO object,
33 """
34 dxo = model_learnable_to_dxo(ml)
35 return dxo.to_shareable()
36
37 def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:
38 """Convert Shareable to Learnable
39
40 Supporting TYPE == TYPE_WEIGHT_DIFF or TYPE_WEIGHTS
41
42 Args:
43 shareable (Shareable): Shareable that contains a DXO object
44 fl_ctx (FLContext): FL context
45
46 Returns: a ModelLearnable object
47 """
48 base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)
49 if not base_model:
50 self.system_panic(reason="No global base model!", fl_ctx=fl_ctx)
51 return base_model
52
53 weights = base_model[ModelLearnableKey.WEIGHTS]
54 dxo = from_shareable(shareable)
55
56 if dxo.data_kind == DataKind.WEIGHT_DIFF:
57 if dxo.data is not None:
58 model_diff = dxo.data
59 for v_name, v_value in model_diff.items():
60 weights[v_name] += v_value
61 elif dxo.data_kind == DataKind.WEIGHTS:
62 weights = dxo.data
63 if not weights:
64 self.log_info(fl_ctx, "No model weights found. Model will not be updated.")
65 else:
66 base_model[ModelLearnableKey.WEIGHTS] = weights
67
68 base_model[ModelLearnableKey.META] = dxo.get_meta_props()
69 return base_model
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py b/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py
--- a/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py
+++ b/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py
@@ -57,7 +57,7 @@
if dxo.data is not None:
model_diff = dxo.data
for v_name, v_value in model_diff.items():
- weights[v_name] += v_value
+ weights[v_name] = weights[v_name] + v_value
elif dxo.data_kind == DataKind.WEIGHTS:
weights = dxo.data
if not weights:
|
{"golden_diff": "diff --git a/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py b/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py\n--- a/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py\n+++ b/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py\n@@ -57,7 +57,7 @@\n if dxo.data is not None:\n model_diff = dxo.data\n for v_name, v_value in model_diff.items():\n- weights[v_name] += v_value\n+ weights[v_name] = weights[v_name] + v_value\n elif dxo.data_kind == DataKind.WEIGHTS:\n weights = dxo.data\n if not weights:\n", "issue": "Error in aggregating models\nI'm using NVFlare 2.0.6 and torchvision ResNet-50 model for training. The FL system (consisting 4 clients) had completed the training for the first round but when aggregating into the global model I faced this error. Please help me resolve this problem, thank you. \r\n```\r\n2022-01-27 18:08:14,731 - InTimeAccumulateWeightedAggregator - INFO - [run=1, wf=scatter_gather_ctl]: aggregating 4 update(s) at round 0\r\nTraceback (most recent call last):\r\n File \"/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/workflows/scatter_and_gather.py\", line 202, in control_flow\r\n self._global_weights = self.shareable_gen.shareable_to_learnable(aggr_result, fl_ctx)\r\n File \"/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py\", line 60, in shareable_to_learnable\r\n weights[v_name] += v_value\r\nnumpy.core._exceptions._UFuncOutputCastingError: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'\r\n2022-01-27 18:08:14,813 - ScatterAndGather - ERROR - [run=1, wf=scatter_gather_ctl]: Exception in ScatterAndGather control_flow: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'\r\nTraceback (most recent call last):\r\n File \"/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/workflows/scatter_and_gather.py\", line 202, in control_flow\r\n self._global_weights = self.shareable_gen.shareable_to_learnable(aggr_result, fl_ctx)\r\n File \"/home/jupyter-test/.conda/envs/fl/lib/python3.8/site-packages/nvflare/app_common/shareablegenerators/full_model_shareable_generator.py\", line 60, in shareable_to_learnable\r\n weights[v_name] += v_value\r\nnumpy.core._exceptions._UFuncOutputCastingError: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'\r\n2022-01-27 18:08:14,813 - ServerRunner - ERROR - [run=1, wf=scatter_gather_ctl]: Aborting current RUN due to FATAL_SYSTEM_ERROR received: Cannot cast ufunc 'add' output from dtype('float64') to dtype('int64') with casting rule 'same_kind'\r\n2022-01-27 18:08:14,813 - ServerRunner - INFO - [run=1, wf=scatter_gather_ctl]: asked to abort - triggered abort_signal to stop the RUN\r\n2022-01-27 18:08:14,813 - ServerRunner - INFO - [run=1, wf=scatter_gather_ctl]: Workflow: scatter_gather_ctl finalizing ...\r\n```\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvflare.apis.dxo import DataKind, from_shareable\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\nfrom nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo\nfrom nvflare.app_common.abstract.shareable_generator import ShareableGenerator\nfrom nvflare.app_common.app_constant import AppConstants\n\n\nclass FullModelShareableGenerator(ShareableGenerator):\n def learnable_to_shareable(self, ml: ModelLearnable, fl_ctx: FLContext) -> Shareable:\n \"\"\"Convert Learnable to Shareable\n\n Args:\n model (Learnable): model to be converted\n fl_ctx (FLContext): FL context\n\n Returns:\n Shareable: a shareable containing a DXO object,\n \"\"\"\n dxo = model_learnable_to_dxo(ml)\n return dxo.to_shareable()\n\n def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:\n \"\"\"Convert Shareable to Learnable\n\n Supporting TYPE == TYPE_WEIGHT_DIFF or TYPE_WEIGHTS\n\n Args:\n shareable (Shareable): Shareable that contains a DXO object\n fl_ctx (FLContext): FL context\n\n Returns: a ModelLearnable object\n \"\"\"\n base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)\n if not base_model:\n self.system_panic(reason=\"No global base model!\", fl_ctx=fl_ctx)\n return base_model\n\n weights = base_model[ModelLearnableKey.WEIGHTS]\n dxo = from_shareable(shareable)\n\n if dxo.data_kind == DataKind.WEIGHT_DIFF:\n if dxo.data is not None:\n model_diff = dxo.data\n for v_name, v_value in model_diff.items():\n weights[v_name] += v_value\n elif dxo.data_kind == DataKind.WEIGHTS:\n weights = dxo.data\n if not weights:\n self.log_info(fl_ctx, \"No model weights found. Model will not be updated.\")\n else:\n base_model[ModelLearnableKey.WEIGHTS] = weights\n\n base_model[ModelLearnableKey.META] = dxo.get_meta_props()\n return base_model\n", "path": "nvflare/app_common/shareablegenerators/full_model_shareable_generator.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nvflare.apis.dxo import DataKind, from_shareable\nfrom nvflare.apis.fl_context import FLContext\nfrom nvflare.apis.shareable import Shareable\nfrom nvflare.app_common.abstract.model import ModelLearnable, ModelLearnableKey, model_learnable_to_dxo\nfrom nvflare.app_common.abstract.shareable_generator import ShareableGenerator\nfrom nvflare.app_common.app_constant import AppConstants\n\n\nclass FullModelShareableGenerator(ShareableGenerator):\n def learnable_to_shareable(self, ml: ModelLearnable, fl_ctx: FLContext) -> Shareable:\n \"\"\"Convert Learnable to Shareable\n\n Args:\n model (Learnable): model to be converted\n fl_ctx (FLContext): FL context\n\n Returns:\n Shareable: a shareable containing a DXO object,\n \"\"\"\n dxo = model_learnable_to_dxo(ml)\n return dxo.to_shareable()\n\n def shareable_to_learnable(self, shareable: Shareable, fl_ctx: FLContext) -> ModelLearnable:\n \"\"\"Convert Shareable to Learnable\n\n Supporting TYPE == TYPE_WEIGHT_DIFF or TYPE_WEIGHTS\n\n Args:\n shareable (Shareable): Shareable that contains a DXO object\n fl_ctx (FLContext): FL context\n\n Returns: a ModelLearnable object\n \"\"\"\n base_model = fl_ctx.get_prop(AppConstants.GLOBAL_MODEL)\n if not base_model:\n self.system_panic(reason=\"No global base model!\", fl_ctx=fl_ctx)\n return base_model\n\n weights = base_model[ModelLearnableKey.WEIGHTS]\n dxo = from_shareable(shareable)\n\n if dxo.data_kind == DataKind.WEIGHT_DIFF:\n if dxo.data is not None:\n model_diff = dxo.data\n for v_name, v_value in model_diff.items():\n weights[v_name] = weights[v_name] + v_value\n elif dxo.data_kind == DataKind.WEIGHTS:\n weights = dxo.data\n if not weights:\n self.log_info(fl_ctx, \"No model weights found. Model will not be updated.\")\n else:\n base_model[ModelLearnableKey.WEIGHTS] = weights\n\n base_model[ModelLearnableKey.META] = dxo.get_meta_props()\n return base_model\n", "path": "nvflare/app_common/shareablegenerators/full_model_shareable_generator.py"}]}
| 1,784 | 164 |
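A minimal sketch of the NumPy casting rule behind the NVFlare fix above, assuming an integer buffer such as BatchNorm's `num_batches_tracked` (the key name and values are illustrative):

```python
import numpy as np

weights = {"num_batches_tracked": np.array(7, dtype=np.int64)}        # int64 buffer in the global model
model_diff = {"num_batches_tracked": np.array(0.5, dtype=np.float64)}  # float64 weight diff

# In-place add must cast the float64 result back into the int64 array,
# which NumPy's default "same_kind" casting refuses.
try:
    weights["num_batches_tracked"] += model_diff["num_batches_tracked"]
except TypeError as exc:  # numpy.core._exceptions._UFuncOutputCastingError
    print(type(exc).__name__, exc)

# The patched form allocates a new array, so the result is promoted to float64.
weights["num_batches_tracked"] = weights["num_batches_tracked"] + model_diff["num_batches_tracked"]
print(weights["num_batches_tracked"].dtype)  # float64
```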
gh_patches_debug_29566
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2361
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Saving existing explorations is broken: UIQuery names need to be unique
## Description
https://github.com/centerofci/mathesar/pull/2315 modified query names to be unique per schema.
It does not ignore the current name of the query while checking the condition.
To reproduce: Try saving an existing query after making changes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/queries.py`
Content:
```
1 from django.core.exceptions import ValidationError
2 from django.urls import reverse
3 from rest_access_policy import PermittedPkRelatedField
4 from rest_framework import serializers
5
6 from mathesar.api.db.permissions.query_table import QueryTableAccessPolicy
7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
8 from mathesar.api.exceptions.validation_exceptions.exceptions import DuplicateUIQueryInSchemaAPIException
9 from mathesar.models.base import Table
10 from mathesar.models.query import UIQuery
11
12
13 class BaseQuerySerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
14 schema = serializers.SerializerMethodField('get_schema')
15 base_table = PermittedPkRelatedField(
16 access_policy=QueryTableAccessPolicy,
17 queryset=Table.current_objects.all()
18 )
19
20 class Meta:
21 model = UIQuery
22 fields = ['schema', 'initial_columns', 'transformations', 'base_table', 'display_names']
23
24 def get_schema(self, uiquery):
25 base_table = uiquery.base_table
26 if base_table:
27 return base_table.schema.id
28
29 def validate(self, attrs):
30 unexpected_fields = set(self.initial_data) - set(self.fields)
31 if unexpected_fields:
32 raise ValidationError(f"Unexpected field(s): {unexpected_fields}")
33 self._validate_uniqueness(attrs)
34 return attrs
35
36 def _validate_uniqueness(self, attrs):
37 """
38 Uniqueness is only defined when both name and base_table are defined.
39
40 Would be nice to define this in terms of Django's UniqueConstraint, but that doesn't seem
41 possible, due to schema being a child property of base_table.
42 """
43 name = attrs.get('name')
44 if name:
45 base_table = attrs.get('base_table')
46 if base_table:
47 schema = base_table.schema
48 queries_with_same_name = UIQuery.objects.filter(name=name)
49 duplicate_in_schema_exists = \
50 queries_with_same_name\
51 .filter(base_table__schema=schema)\
52 .exists()
53 if duplicate_in_schema_exists:
54 raise DuplicateUIQueryInSchemaAPIException(field='name')
55
56
57 class QuerySerializer(BaseQuerySerializer):
58 results_url = serializers.SerializerMethodField('get_results_url')
59 records_url = serializers.SerializerMethodField('get_records_url')
60 columns_url = serializers.SerializerMethodField('get_columns_url')
61
62 class Meta:
63 model = UIQuery
64 fields = '__all__'
65
66 def get_records_url(self, obj):
67 if isinstance(obj, UIQuery) and obj.pk is not None:
68 # Only get records_url if we are serializing an existing persisted UIQuery
69 request = self.context['request']
70 return request.build_absolute_uri(reverse('query-records', kwargs={'pk': obj.pk}))
71 else:
72 return None
73
74 def get_columns_url(self, obj):
75 if isinstance(obj, UIQuery) and obj.pk is not None:
76 # Only get columns_url if we are serializing an existing persisted UIQuery
77 request = self.context['request']
78 return request.build_absolute_uri(reverse('query-columns', kwargs={'pk': obj.pk}))
79 else:
80 return None
81
82 def get_results_url(self, obj):
83 if isinstance(obj, UIQuery) and obj.pk is not None:
84 # Only get records_url if we are serializing an existing persisted UIQuery
85 request = self.context['request']
86 return request.build_absolute_uri(reverse('query-results', kwargs={'pk': obj.pk}))
87 else:
88 return None
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/api/serializers/queries.py b/mathesar/api/serializers/queries.py
--- a/mathesar/api/serializers/queries.py
+++ b/mathesar/api/serializers/queries.py
@@ -1,5 +1,7 @@
from django.core.exceptions import ValidationError
from django.urls import reverse
+from django.db.models import Q
+
from rest_access_policy import PermittedPkRelatedField
from rest_framework import serializers
@@ -45,14 +47,23 @@
base_table = attrs.get('base_table')
if base_table:
schema = base_table.schema
- queries_with_same_name = UIQuery.objects.filter(name=name)
- duplicate_in_schema_exists = \
- queries_with_same_name\
- .filter(base_table__schema=schema)\
- .exists()
- if duplicate_in_schema_exists:
+ is_duplicate_q = self._get_is_duplicate_q(name, schema)
+ duplicates = UIQuery.objects.filter(is_duplicate_q)
+ if duplicates.exists():
raise DuplicateUIQueryInSchemaAPIException(field='name')
+ def _get_is_duplicate_q(self, name, schema):
+ has_same_name_q = Q(name=name)
+ has_same_schema_q = Q(base_table__schema=schema)
+ is_duplicate_q = has_same_name_q & has_same_schema_q
+ is_update = self.instance is not None
+ if is_update:
+ # If this is an update, filter self out of found duplicates
+ id = self.instance.id
+ is_not_this_instance_q = ~Q(id=id)
+ is_duplicate_q = is_duplicate_q & is_not_this_instance_q
+ return is_duplicate_q
+
class QuerySerializer(BaseQuerySerializer):
results_url = serializers.SerializerMethodField('get_results_url')
|
{"golden_diff": "diff --git a/mathesar/api/serializers/queries.py b/mathesar/api/serializers/queries.py\n--- a/mathesar/api/serializers/queries.py\n+++ b/mathesar/api/serializers/queries.py\n@@ -1,5 +1,7 @@\n from django.core.exceptions import ValidationError\n from django.urls import reverse\n+from django.db.models import Q\n+\n from rest_access_policy import PermittedPkRelatedField\n from rest_framework import serializers\n \n@@ -45,14 +47,23 @@\n base_table = attrs.get('base_table')\n if base_table:\n schema = base_table.schema\n- queries_with_same_name = UIQuery.objects.filter(name=name)\n- duplicate_in_schema_exists = \\\n- queries_with_same_name\\\n- .filter(base_table__schema=schema)\\\n- .exists()\n- if duplicate_in_schema_exists:\n+ is_duplicate_q = self._get_is_duplicate_q(name, schema)\n+ duplicates = UIQuery.objects.filter(is_duplicate_q)\n+ if duplicates.exists():\n raise DuplicateUIQueryInSchemaAPIException(field='name')\n \n+ def _get_is_duplicate_q(self, name, schema):\n+ has_same_name_q = Q(name=name)\n+ has_same_schema_q = Q(base_table__schema=schema)\n+ is_duplicate_q = has_same_name_q & has_same_schema_q\n+ is_update = self.instance is not None\n+ if is_update:\n+ # If this is an update, filter self out of found duplicates\n+ id = self.instance.id\n+ is_not_this_instance_q = ~Q(id=id)\n+ is_duplicate_q = is_duplicate_q & is_not_this_instance_q\n+ return is_duplicate_q\n+\n \n class QuerySerializer(BaseQuerySerializer):\n results_url = serializers.SerializerMethodField('get_results_url')\n", "issue": "Saving existing explorations is broken: UIQuery names need to be unique\n## Description\r\nhttps://github.com/centerofci/mathesar/pull/2315 modified query names to be unique per schema.\r\nIt does not ignore the current name of the query while checking the condition.\r\n\r\nTo reproduce: Try saving an existing query after making changes.\r\n\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.urls import reverse\nfrom rest_access_policy import PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.query_table import QueryTableAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import DuplicateUIQueryInSchemaAPIException\nfrom mathesar.models.base import Table\nfrom mathesar.models.query import UIQuery\n\n\nclass BaseQuerySerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n schema = serializers.SerializerMethodField('get_schema')\n base_table = PermittedPkRelatedField(\n access_policy=QueryTableAccessPolicy,\n queryset=Table.current_objects.all()\n )\n\n class Meta:\n model = UIQuery\n fields = ['schema', 'initial_columns', 'transformations', 'base_table', 'display_names']\n\n def get_schema(self, uiquery):\n base_table = uiquery.base_table\n if base_table:\n return base_table.schema.id\n\n def validate(self, attrs):\n unexpected_fields = set(self.initial_data) - set(self.fields)\n if unexpected_fields:\n raise ValidationError(f\"Unexpected field(s): {unexpected_fields}\")\n self._validate_uniqueness(attrs)\n return attrs\n\n def _validate_uniqueness(self, attrs):\n \"\"\"\n Uniqueness is only defined when both name and base_table are defined.\n\n Would be nice to define this in terms of Django's UniqueConstraint, but that doesn't seem\n possible, due to schema being a child property of base_table.\n \"\"\"\n name = attrs.get('name')\n if name:\n base_table = 
attrs.get('base_table')\n if base_table:\n schema = base_table.schema\n queries_with_same_name = UIQuery.objects.filter(name=name)\n duplicate_in_schema_exists = \\\n queries_with_same_name\\\n .filter(base_table__schema=schema)\\\n .exists()\n if duplicate_in_schema_exists:\n raise DuplicateUIQueryInSchemaAPIException(field='name')\n\n\nclass QuerySerializer(BaseQuerySerializer):\n results_url = serializers.SerializerMethodField('get_results_url')\n records_url = serializers.SerializerMethodField('get_records_url')\n columns_url = serializers.SerializerMethodField('get_columns_url')\n\n class Meta:\n model = UIQuery\n fields = '__all__'\n\n def get_records_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get records_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-records', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get columns_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-columns', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_results_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get records_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-results', kwargs={'pk': obj.pk}))\n else:\n return None\n", "path": "mathesar/api/serializers/queries.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.urls import reverse\nfrom django.db.models import Q\n\nfrom rest_access_policy import PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.query_table import QueryTableAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import DuplicateUIQueryInSchemaAPIException\nfrom mathesar.models.base import Table\nfrom mathesar.models.query import UIQuery\n\n\nclass BaseQuerySerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n schema = serializers.SerializerMethodField('get_schema')\n base_table = PermittedPkRelatedField(\n access_policy=QueryTableAccessPolicy,\n queryset=Table.current_objects.all()\n )\n\n class Meta:\n model = UIQuery\n fields = ['schema', 'initial_columns', 'transformations', 'base_table', 'display_names']\n\n def get_schema(self, uiquery):\n base_table = uiquery.base_table\n if base_table:\n return base_table.schema.id\n\n def validate(self, attrs):\n unexpected_fields = set(self.initial_data) - set(self.fields)\n if unexpected_fields:\n raise ValidationError(f\"Unexpected field(s): {unexpected_fields}\")\n self._validate_uniqueness(attrs)\n return attrs\n\n def _validate_uniqueness(self, attrs):\n \"\"\"\n Uniqueness is only defined when both name and base_table are defined.\n\n Would be nice to define this in terms of Django's UniqueConstraint, but that doesn't seem\n possible, due to schema being a child property of base_table.\n \"\"\"\n name = attrs.get('name')\n if name:\n base_table = attrs.get('base_table')\n if base_table:\n schema = base_table.schema\n is_duplicate_q = self._get_is_duplicate_q(name, schema)\n duplicates = UIQuery.objects.filter(is_duplicate_q)\n if duplicates.exists():\n raise 
DuplicateUIQueryInSchemaAPIException(field='name')\n\n def _get_is_duplicate_q(self, name, schema):\n has_same_name_q = Q(name=name)\n has_same_schema_q = Q(base_table__schema=schema)\n is_duplicate_q = has_same_name_q & has_same_schema_q\n is_update = self.instance is not None\n if is_update:\n # If this is an update, filter self out of found duplicates\n id = self.instance.id\n is_not_this_instance_q = ~Q(id=id)\n is_duplicate_q = is_duplicate_q & is_not_this_instance_q\n return is_duplicate_q\n\n\nclass QuerySerializer(BaseQuerySerializer):\n results_url = serializers.SerializerMethodField('get_results_url')\n records_url = serializers.SerializerMethodField('get_records_url')\n columns_url = serializers.SerializerMethodField('get_columns_url')\n\n class Meta:\n model = UIQuery\n fields = '__all__'\n\n def get_records_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get records_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-records', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get columns_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-columns', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_results_url(self, obj):\n if isinstance(obj, UIQuery) and obj.pk is not None:\n # Only get records_url if we are serializing an existing persisted UIQuery\n request = self.context['request']\n return request.build_absolute_uri(reverse('query-results', kwargs={'pk': obj.pk}))\n else:\n return None\n", "path": "mathesar/api/serializers/queries.py"}]}
| 1,222 | 389 |
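A plain-Python reduction of the rule the Mathesar patch above encodes with Django `Q` objects — the dataclass and query names are invented for illustration:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class SavedQuery:
    id: int
    name: str
    schema: str

existing: List[SavedQuery] = [
    SavedQuery(1, "monthly sales", "public"),
    SavedQuery(2, "inventory", "public"),
]

def is_duplicate(name: str, schema: str, current_id: Optional[int] = None) -> bool:
    # On update, the record being edited must be excluded from the duplicate
    # check (the ~Q(id=id) clause in the patch); otherwise saving an existing
    # exploration always collides with itself.
    return any(
        q.name == name and q.schema == schema and q.id != current_id
        for q in existing
    )

assert is_duplicate("monthly sales", "public")                    # create: real conflict
assert not is_duplicate("monthly sales", "public", current_id=1)  # update: allowed
```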
gh_patches_debug_4230
|
rasdani/github-patches
|
git_diff
|
numba__numba-873
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PR #856 introduced regression in macro expansion of more than one block
PR #856 caused macro expansion to effectively cease after performing macro expansion in one block, due to the logic in `numba/macro.py`:
``` python
for blk in blocks.values():
module_getattr_folding(constants, blk)
expanded = expanded or expand_macros_in_block(constants, blk)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numba/macro.py`
Content:
```
1 """
2 Macro handling passes
3
4 Macros are expanded on block-by-block
5 """
6 from __future__ import absolute_import, print_function, division
7 from numba import ir
8
9
10 class MacroError(Exception):
11 '''
12 An exception thrown during macro expansion
13 '''
14 pass
15
16
17 def expand_macros(blocks):
18 '''
19 Performs macro expansion on blocks
20
21 Args
22 ----
23 blocks: list
24 the blocks to macro-expand
25 return: bool
26 True if any macros were expanded
27 '''
28 constants = {}
29 expanded = False
30 for blk in blocks.values():
31 module_getattr_folding(constants, blk)
32 expanded = expanded or expand_macros_in_block(constants, blk)
33 return expanded
34
35 def module_getattr_folding(constants, block):
36 '''
37 Performs constant-folding of getattr instructions within a block. Any
38 constants defined within the block are also added to the constant pool.
39
40 Args
41 ----
42 constants: dict
43 The pool of constants to use, which will be updated with any new
44 constants in this block
45 block: ir.Block
46 The block to perform constant folding on
47 '''
48 for inst in block.body:
49 if isinstance(inst, ir.Assign):
50 rhs = inst.value
51
52 if isinstance(rhs, ir.Global):
53 constants[inst.target.name] = rhs.value
54
55 elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
56 if rhs.value.name in constants:
57 base = constants[rhs.value.name]
58 constants[inst.target.name] = getattr(base, rhs.attr)
59
60 elif isinstance(rhs, ir.Const):
61 constants[inst.target.name] = rhs.value
62
63 elif isinstance(rhs, ir.Var) and rhs.name in constants:
64 constants[inst.target.name] = constants[rhs.name]
65
66 elif isinstance(rhs, ir.FreeVar):
67 constants[inst.target.name] = rhs.value
68
69 def expand_macros_in_block(constants, block):
70 '''
71 Performs macro expansion on a block.
72
73 Args
74 ----
75 constants: dict
76 The pool of constants which contains the values which contains mappings
77 from variable names to callee names
78 block: ir.Block
79 The block to perform macro expansion on
80 return: bool
81 True if any macros were expanded
82 '''
83 expanded = False
84 for inst in block.body:
85 if isinstance(inst, ir.Assign):
86 rhs = inst.value
87 if isinstance(rhs, ir.Expr) and rhs.op == 'call':
88 callee = rhs.func
89 macro = constants.get(callee.name)
90 if isinstance(macro, Macro):
91 # Rewrite calling macro
92 assert macro.callable
93 args = [constants[arg.name] for arg in rhs.args]
94 kws = dict((k, constants[v.name]) for k, v in rhs.kws)
95 try:
96 result = macro.func(*args, **kws)
97 except BaseException as e:
98 msg = str(e)
99 headfmt = "Macro expansion failed at {line}"
100 head = headfmt.format(line=inst.loc)
101 newmsg = "{0}:\n{1}".format(head, msg)
102 raise MacroError(newmsg)
103 if result:
104 # Insert a new function
105 result.loc = rhs.loc
106 inst.value = ir.Expr.call(func=result, args=rhs.args,
107 kws=rhs.kws, loc=rhs.loc)
108 expanded = True
109 elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
110 # Rewrite get attribute to macro call
111 # Non-calling macro must be triggered by get attribute
112 base = constants.get(rhs.value.name)
113 if base is not None:
114 value = getattr(base, rhs.attr)
115 if isinstance(value, Macro):
116 macro = value
117 if not macro.callable:
118 intr = ir.Intrinsic(macro.name, macro.func, args=())
119 inst.value = ir.Expr.call(func=intr, args=(),
120 kws=(), loc=rhs.loc)
121 expanded = True
122 return expanded
123
124
125 class Macro(object):
126 '''
127 A macro object is expanded to a function call
128
129 Args
130 ----
131 name: str
132 Name of this Macro
133 func: function
134 Function that evaluates the macro expansion
135 callable: bool
136 True if the Macro represents a callable function.
137 False if it is represents some other type.
138 argnames: list
139 If ``callable`` is True, this holds a list of the names of arguments
140 to the function.
141 '''
142
143 __slots__ = 'name', 'func', 'callable', 'argnames'
144
145 def __init__(self, name, func, callable=False, argnames=None):
146 self.name = name
147 self.func = func
148 self.callable = callable
149 self.argnames = argnames
150
151 def __repr__(self):
152 return '<macro %s -> %s>' % (self.name, self.func)
153
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numba/macro.py b/numba/macro.py
--- a/numba/macro.py
+++ b/numba/macro.py
@@ -29,7 +29,8 @@
expanded = False
for blk in blocks.values():
module_getattr_folding(constants, blk)
- expanded = expanded or expand_macros_in_block(constants, blk)
+ block_expanded = expand_macros_in_block(constants, blk)
+ expanded = expanded or block_expanded
return expanded
def module_getattr_folding(constants, block):
|
{"golden_diff": "diff --git a/numba/macro.py b/numba/macro.py\n--- a/numba/macro.py\n+++ b/numba/macro.py\n@@ -29,7 +29,8 @@\n expanded = False\n for blk in blocks.values():\n module_getattr_folding(constants, blk)\n- expanded = expanded or expand_macros_in_block(constants, blk)\n+ block_expanded = expand_macros_in_block(constants, blk)\n+ expanded = expanded or block_expanded\n return expanded\n \n def module_getattr_folding(constants, block):\n", "issue": "PR #856 introduced regression in macro expansion of more than one block\nPR #856 caused macro expansion to effectively cease after performing macro expansion in one block, due to the logic in `numba/macro.py`:\n\n``` python\nfor blk in blocks.values():\n module_getattr_folding(constants, blk)\n expanded = expanded or expand_macros_in_block(constants, blk)\n```\n\n", "before_files": [{"content": "\"\"\"\nMacro handling passes\n\nMacros are expanded on block-by-block\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\nfrom numba import ir\n\n\nclass MacroError(Exception):\n '''\n An exception thrown during macro expansion\n '''\n pass\n\n\ndef expand_macros(blocks):\n '''\n Performs macro expansion on blocks\n\n Args\n ----\n blocks: list\n the blocks to macro-expand\n return: bool\n True if any macros were expanded\n '''\n constants = {}\n expanded = False\n for blk in blocks.values():\n module_getattr_folding(constants, blk)\n expanded = expanded or expand_macros_in_block(constants, blk)\n return expanded\n\ndef module_getattr_folding(constants, block):\n '''\n Performs constant-folding of getattr instructions within a block. Any\n constants defined within the block are also added to the constant pool.\n\n Args\n ----\n constants: dict\n The pool of constants to use, which will be updated with any new\n constants in this block\n block: ir.Block\n The block to perform constant folding on\n '''\n for inst in block.body:\n if isinstance(inst, ir.Assign):\n rhs = inst.value\n\n if isinstance(rhs, ir.Global):\n constants[inst.target.name] = rhs.value\n\n elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':\n if rhs.value.name in constants:\n base = constants[rhs.value.name]\n constants[inst.target.name] = getattr(base, rhs.attr)\n\n elif isinstance(rhs, ir.Const):\n constants[inst.target.name] = rhs.value\n\n elif isinstance(rhs, ir.Var) and rhs.name in constants:\n constants[inst.target.name] = constants[rhs.name]\n\n elif isinstance(rhs, ir.FreeVar):\n constants[inst.target.name] = rhs.value\n\ndef expand_macros_in_block(constants, block):\n '''\n Performs macro expansion on a block.\n\n Args\n ----\n constants: dict\n The pool of constants which contains the values which contains mappings\n from variable names to callee names\n block: ir.Block\n The block to perform macro expansion on\n return: bool\n True if any macros were expanded\n '''\n expanded = False\n for inst in block.body:\n if isinstance(inst, ir.Assign):\n rhs = inst.value\n if isinstance(rhs, ir.Expr) and rhs.op == 'call':\n callee = rhs.func\n macro = constants.get(callee.name)\n if isinstance(macro, Macro):\n # Rewrite calling macro\n assert macro.callable\n args = [constants[arg.name] for arg in rhs.args]\n kws = dict((k, constants[v.name]) for k, v in rhs.kws)\n try:\n result = macro.func(*args, **kws)\n except BaseException as e:\n msg = str(e)\n headfmt = \"Macro expansion failed at {line}\"\n head = headfmt.format(line=inst.loc)\n newmsg = \"{0}:\\n{1}\".format(head, msg)\n raise MacroError(newmsg)\n if result:\n # Insert a new function\n 
result.loc = rhs.loc\n inst.value = ir.Expr.call(func=result, args=rhs.args,\n kws=rhs.kws, loc=rhs.loc)\n expanded = True\n elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':\n # Rewrite get attribute to macro call\n # Non-calling macro must be triggered by get attribute\n base = constants.get(rhs.value.name)\n if base is not None:\n value = getattr(base, rhs.attr)\n if isinstance(value, Macro):\n macro = value\n if not macro.callable:\n intr = ir.Intrinsic(macro.name, macro.func, args=())\n inst.value = ir.Expr.call(func=intr, args=(),\n kws=(), loc=rhs.loc)\n expanded = True\n return expanded\n\n\nclass Macro(object):\n '''\n A macro object is expanded to a function call\n\n Args\n ----\n name: str\n Name of this Macro\n func: function\n Function that evaluates the macro expansion\n callable: bool\n True if the Macro represents a callable function.\n False if it is represents some other type.\n argnames: list\n If ``callable`` is True, this holds a list of the names of arguments\n to the function.\n '''\n\n __slots__ = 'name', 'func', 'callable', 'argnames'\n\n def __init__(self, name, func, callable=False, argnames=None):\n self.name = name\n self.func = func\n self.callable = callable\n self.argnames = argnames\n\n def __repr__(self):\n return '<macro %s -> %s>' % (self.name, self.func)\n\n", "path": "numba/macro.py"}], "after_files": [{"content": "\"\"\"\nMacro handling passes\n\nMacros are expanded on block-by-block\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\nfrom numba import ir\n\n\nclass MacroError(Exception):\n '''\n An exception thrown during macro expansion\n '''\n pass\n\n\ndef expand_macros(blocks):\n '''\n Performs macro expansion on blocks\n\n Args\n ----\n blocks: list\n the blocks to macro-expand\n return: bool\n True if any macros were expanded\n '''\n constants = {}\n expanded = False\n for blk in blocks.values():\n module_getattr_folding(constants, blk)\n block_expanded = expand_macros_in_block(constants, blk)\n expanded = expanded or block_expanded\n return expanded\n\ndef module_getattr_folding(constants, block):\n '''\n Performs constant-folding of getattr instructions within a block. 
Any\n constants defined within the block are also added to the constant pool.\n\n Args\n ----\n constants: dict\n The pool of constants to use, which will be updated with any new\n constants in this block\n block: ir.Block\n The block to perform constant folding on\n '''\n for inst in block.body:\n if isinstance(inst, ir.Assign):\n rhs = inst.value\n\n if isinstance(rhs, ir.Global):\n constants[inst.target.name] = rhs.value\n\n elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':\n if rhs.value.name in constants:\n base = constants[rhs.value.name]\n constants[inst.target.name] = getattr(base, rhs.attr)\n\n elif isinstance(rhs, ir.Const):\n constants[inst.target.name] = rhs.value\n\n elif isinstance(rhs, ir.Var) and rhs.name in constants:\n constants[inst.target.name] = constants[rhs.name]\n\n elif isinstance(rhs, ir.FreeVar):\n constants[inst.target.name] = rhs.value\n\ndef expand_macros_in_block(constants, block):\n '''\n Performs macro expansion on a block.\n\n Args\n ----\n constants: dict\n The pool of constants which contains the values which contains mappings\n from variable names to callee names\n block: ir.Block\n The block to perform macro expansion on\n return: bool\n True if any macros were expanded\n '''\n expanded = False\n for inst in block.body:\n if isinstance(inst, ir.Assign):\n rhs = inst.value\n if isinstance(rhs, ir.Expr) and rhs.op == 'call':\n callee = rhs.func\n macro = constants.get(callee.name)\n if isinstance(macro, Macro):\n # Rewrite calling macro\n assert macro.callable\n args = [constants[arg.name] for arg in rhs.args]\n kws = dict((k, constants[v.name]) for k, v in rhs.kws)\n try:\n result = macro.func(*args, **kws)\n except BaseException as e:\n msg = str(e)\n headfmt = \"Macro expansion failed at {line}\"\n head = headfmt.format(line=inst.loc)\n newmsg = \"{0}:\\n{1}\".format(head, msg)\n raise MacroError(newmsg)\n if result:\n # Insert a new function\n result.loc = rhs.loc\n inst.value = ir.Expr.call(func=result, args=rhs.args,\n kws=rhs.kws, loc=rhs.loc)\n expanded = True\n elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':\n # Rewrite get attribute to macro call\n # Non-calling macro must be triggered by get attribute\n base = constants.get(rhs.value.name)\n if base is not None:\n value = getattr(base, rhs.attr)\n if isinstance(value, Macro):\n macro = value\n if not macro.callable:\n intr = ir.Intrinsic(macro.name, macro.func, args=())\n inst.value = ir.Expr.call(func=intr, args=(),\n kws=(), loc=rhs.loc)\n expanded = True\n return expanded\n\n\nclass Macro(object):\n '''\n A macro object is expanded to a function call\n\n Args\n ----\n name: str\n Name of this Macro\n func: function\n Function that evaluates the macro expansion\n callable: bool\n True if the Macro represents a callable function.\n False if it is represents some other type.\n argnames: list\n If ``callable`` is True, this holds a list of the names of arguments\n to the function.\n '''\n\n __slots__ = 'name', 'func', 'callable', 'argnames'\n\n def __init__(self, name, func, callable=False, argnames=None):\n self.name = name\n self.func = func\n self.callable = callable\n self.argnames = argnames\n\n def __repr__(self):\n return '<macro %s -> %s>' % (self.name, self.func)\n\n", "path": "numba/macro.py"}]}
| 1,742 | 122 |
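A minimal sketch of the short-circuit behaviour behind the Numba regression above; the block names are placeholders:

```python
calls = []

def expand_macros_in_block(blk):
    calls.append(blk)
    return True  # pretend every block contains a macro

# Buggy accumulation: `or` short-circuits, so once `expanded` is True the
# right-hand call is skipped and later blocks are never expanded.
expanded = False
for blk in ["block0", "block1", "block2"]:
    expanded = expanded or expand_macros_in_block(blk)
print(calls)  # ['block0']

# Patched accumulation: evaluate the block first, then fold into the flag.
calls.clear()
expanded = False
for blk in ["block0", "block1", "block2"]:
    block_expanded = expand_macros_in_block(blk)
    expanded = expanded or block_expanded
print(calls)  # ['block0', 'block1', 'block2']
```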
gh_patches_debug_7749
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-1696
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
implement AWS::Synthetics changes from May 14, 2020 update
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/synthetics.py`
Content:
```
1 # Copyright (c) 2020, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7 from .validators import (integer, boolean, canary_runtime_version)
8
9
10 class VPCConfig(AWSProperty):
11 props = {
12 'SecurityGroupIds': ([basestring], True),
13 'SubnetIds': ([basestring], True),
14 'VpcId': (basestring, False),
15 }
16
17
18 class Schedule(AWSProperty):
19 props = {
20 'DurationInSeconds': (basestring, True),
21 'Expression': (basestring, True),
22 }
23
24
25 class RunConfig(AWSProperty):
26 props = {
27 'TimeoutInSeconds': (integer, True),
28 }
29
30
31 class Code(AWSProperty):
32 props = {
33 'Handler': (basestring, False),
34 'S3Bucket': (basestring, False),
35 'S3Key': (basestring, False),
36 'S3ObjectVersion': (basestring, False),
37 'Script': (basestring, False),
38 }
39
40
41 class Canary(AWSObject):
42 resource_type = "AWS::Synthetics::Canary"
43
44 props = {
45 'ArtifactS3Location': (basestring, True),
46 'Code': (Code, True),
47 'ExecutionRoleArn': (basestring, True),
48 'FailureRetentionPeriod': (integer, False),
49 'Name': (basestring, True),
50 'RunConfig': (RunConfig, False),
51 'RuntimeVersion': (canary_runtime_version, True),
52 'Schedule': (Schedule, True),
53 'StartCanaryAfterCreation': (boolean, True),
54 'SuccessRetentionPeriod': (integer, False),
55 'Tags': (Tags, False),
56 'VPCConfig': (VPCConfig, False),
57 }
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/synthetics.py b/troposphere/synthetics.py
--- a/troposphere/synthetics.py
+++ b/troposphere/synthetics.py
@@ -47,7 +47,7 @@
'ExecutionRoleArn': (basestring, True),
'FailureRetentionPeriod': (integer, False),
'Name': (basestring, True),
- 'RunConfig': (RunConfig, False),
+ 'RunConfig': (RunConfig, True),
'RuntimeVersion': (canary_runtime_version, True),
'Schedule': (Schedule, True),
'StartCanaryAfterCreation': (boolean, True),
|
{"golden_diff": "diff --git a/troposphere/synthetics.py b/troposphere/synthetics.py\n--- a/troposphere/synthetics.py\n+++ b/troposphere/synthetics.py\n@@ -47,7 +47,7 @@\n 'ExecutionRoleArn': (basestring, True),\n 'FailureRetentionPeriod': (integer, False),\n 'Name': (basestring, True),\n- 'RunConfig': (RunConfig, False),\n+ 'RunConfig': (RunConfig, True),\n 'RuntimeVersion': (canary_runtime_version, True),\n 'Schedule': (Schedule, True),\n 'StartCanaryAfterCreation': (boolean, True),\n", "issue": "implement AWS::Synthetics changes from May 14, 2020 update\n\n", "before_files": [{"content": "# Copyright (c) 2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import (integer, boolean, canary_runtime_version)\n\n\nclass VPCConfig(AWSProperty):\n props = {\n 'SecurityGroupIds': ([basestring], True),\n 'SubnetIds': ([basestring], True),\n 'VpcId': (basestring, False),\n }\n\n\nclass Schedule(AWSProperty):\n props = {\n 'DurationInSeconds': (basestring, True),\n 'Expression': (basestring, True),\n }\n\n\nclass RunConfig(AWSProperty):\n props = {\n 'TimeoutInSeconds': (integer, True),\n }\n\n\nclass Code(AWSProperty):\n props = {\n 'Handler': (basestring, False),\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'Script': (basestring, False),\n }\n\n\nclass Canary(AWSObject):\n resource_type = \"AWS::Synthetics::Canary\"\n\n props = {\n 'ArtifactS3Location': (basestring, True),\n 'Code': (Code, True),\n 'ExecutionRoleArn': (basestring, True),\n 'FailureRetentionPeriod': (integer, False),\n 'Name': (basestring, True),\n 'RunConfig': (RunConfig, False),\n 'RuntimeVersion': (canary_runtime_version, True),\n 'Schedule': (Schedule, True),\n 'StartCanaryAfterCreation': (boolean, True),\n 'SuccessRetentionPeriod': (integer, False),\n 'Tags': (Tags, False),\n 'VPCConfig': (VPCConfig, False),\n }\n", "path": "troposphere/synthetics.py"}], "after_files": [{"content": "# Copyright (c) 2020, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import (integer, boolean, canary_runtime_version)\n\n\nclass VPCConfig(AWSProperty):\n props = {\n 'SecurityGroupIds': ([basestring], True),\n 'SubnetIds': ([basestring], True),\n 'VpcId': (basestring, False),\n }\n\n\nclass Schedule(AWSProperty):\n props = {\n 'DurationInSeconds': (basestring, True),\n 'Expression': (basestring, True),\n }\n\n\nclass RunConfig(AWSProperty):\n props = {\n 'TimeoutInSeconds': (integer, True),\n }\n\n\nclass Code(AWSProperty):\n props = {\n 'Handler': (basestring, False),\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'Script': (basestring, False),\n }\n\n\nclass Canary(AWSObject):\n resource_type = \"AWS::Synthetics::Canary\"\n\n props = {\n 'ArtifactS3Location': (basestring, True),\n 'Code': (Code, True),\n 'ExecutionRoleArn': (basestring, True),\n 'FailureRetentionPeriod': (integer, False),\n 'Name': (basestring, True),\n 'RunConfig': (RunConfig, True),\n 'RuntimeVersion': (canary_runtime_version, True),\n 'Schedule': (Schedule, True),\n 'StartCanaryAfterCreation': (boolean, True),\n 'SuccessRetentionPeriod': (integer, False),\n 'Tags': (Tags, False),\n 'VPCConfig': (VPCConfig, False),\n }\n", "path": "troposphere/synthetics.py"}]}
| 793 | 144 |
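A sketch of what the troposphere change above means for template authors, assuming troposphere is installed; the resource values are placeholders and may not satisfy every validator:

```python
from troposphere import Template
from troposphere.synthetics import Canary, Code, RunConfig, Schedule

canary = Canary(
    "DemoCanary",
    ArtifactS3Location="s3://example-bucket/canary-artifacts",
    Code=Code(Handler="pageLoadBlueprint.handler", Script="..."),
    ExecutionRoleArn="arn:aws:iam::123456789012:role/demo-canary-role",
    Name="demo-canary",
    RuntimeVersion="syn-1.0",
    Schedule=Schedule(Expression="rate(5 minutes)", DurationInSeconds="3600"),
    StartCanaryAfterCreation=True,
    # With RunConfig now marked required, omitting it makes to_dict()/to_json()
    # raise a ValueError for the missing property instead of silently emitting
    # an invalid template.
    RunConfig=RunConfig(TimeoutInSeconds=60),
)

template = Template()
template.add_resource(canary)
print(template.to_json())
```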
gh_patches_debug_38141
|
rasdani/github-patches
|
git_diff
|
apache__tvm-2921
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TEST][TENSORFLOW] Cache the Downloaded File
So far the model files used in the TF end to end tests re-download the file in each test run.
This causes the test execution to be slow. Eventually, the test server can be blocked by the place that hosts the data-source.
We need to change the implementation to cache to local and only re-download the file if necessary.
cc @srkreddy1238 @icemelon9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/tvm/contrib/download.py`
Content:
```
1 """Helper utility for downloading"""
2 from __future__ import print_function
3 from __future__ import absolute_import as _abs
4
5 import os
6 import sys
7 import time
8
9 def download(url, path, overwrite=False, size_compare=False, verbose=1):
10 """Downloads the file from the internet.
11 Set the input options correctly to overwrite or do the size comparison
12
13 Parameters
14 ----------
15 url : str
16 Download url.
17
18 path : str
19 Local file path to save downloaded file
20
21 overwrite : bool, optional
22 Whether to overwrite existing file
23
24 size_compare : bool, optional
25 Whether to do size compare to check downloaded file.
26
27 verbose: int, optional
28 Verbose level
29 """
30 if sys.version_info >= (3,):
31 import urllib.request as urllib2
32 else:
33 import urllib2
34
35 if os.path.isfile(path) and not overwrite:
36 if size_compare:
37 import requests
38 file_size = os.path.getsize(path)
39 res_head = requests.head(url)
40 res_get = requests.get(url, stream=True)
41 if 'Content-Length' not in res_head.headers:
42 res_get = urllib2.urlopen(url)
43 url_file_size = int(res_get.headers['Content-Length'])
44 if url_file_size != file_size:
45 print("exist file got corrupted, downloading %s file freshly..." % path)
46 download(url, path, True, False)
47 return
48 print('File {} exists, skip.'.format(path))
49 return
50
51 if verbose >= 1:
52 print('Downloading from url {} to {}'.format(url, path))
53
54 # Stateful start time
55 start_time = time.time()
56
57 def _download_progress(count, block_size, total_size):
58 #pylint: disable=unused-argument
59 """Show the download progress.
60 """
61 if count == 0:
62 return
63 duration = time.time() - start_time
64 progress_size = int(count * block_size)
65 speed = int(progress_size / (1024 * duration))
66 percent = min(int(count * block_size * 100 / total_size), 100)
67 sys.stdout.write("\r...%d%%, %.2f MB, %d KB/s, %d seconds passed" %
68 (percent, progress_size / (1024.0 * 1024), speed, duration))
69 sys.stdout.flush()
70
71 if sys.version_info >= (3,):
72 urllib2.urlretrieve(url, path, reporthook=_download_progress)
73 print("")
74 else:
75 f = urllib2.urlopen(url)
76 data = f.read()
77 with open(path, "wb") as code:
78 code.write(data)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/tvm/contrib/download.py b/python/tvm/contrib/download.py
--- a/python/tvm/contrib/download.py
+++ b/python/tvm/contrib/download.py
@@ -5,8 +5,10 @@
import os
import sys
import time
+import uuid
+import shutil
-def download(url, path, overwrite=False, size_compare=False, verbose=1):
+def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=3):
"""Downloads the file from the internet.
Set the input options correctly to overwrite or do the size comparison
@@ -53,6 +55,11 @@
# Stateful start time
start_time = time.time()
+ dirpath = os.path.dirname(path)
+ if not os.path.isdir(dirpath):
+ os.makedirs(dirpath)
+ random_uuid = str(uuid.uuid4())
+ tempfile = os.path.join(dirpath, random_uuid)
def _download_progress(count, block_size, total_size):
#pylint: disable=unused-argument
@@ -68,11 +75,62 @@
(percent, progress_size / (1024.0 * 1024), speed, duration))
sys.stdout.flush()
- if sys.version_info >= (3,):
- urllib2.urlretrieve(url, path, reporthook=_download_progress)
- print("")
+ while retries >= 0:
+ # Disable pyling too broad Exception
+ # pylint: disable=W0703
+ try:
+ if sys.version_info >= (3,):
+ urllib2.urlretrieve(url, tempfile, reporthook=_download_progress)
+ print("")
+ else:
+ f = urllib2.urlopen(url)
+ data = f.read()
+ with open(tempfile, "wb") as code:
+ code.write(data)
+ shutil.move(tempfile, path)
+ break
+ except Exception as err:
+ retries -= 1
+ if retries == 0:
+ os.remove(tempfile)
+ raise err
+ else:
+ print("download failed due to {}, retrying, {} attempt{} left"
+ .format(repr(err), retries, 's' if retries > 1 else ''))
+
+
+TEST_DATA_ROOT_PATH = os.path.join(os.path.expanduser('~'), '.tvm_test_data')
+if not os.path.exists(TEST_DATA_ROOT_PATH):
+ os.mkdir(TEST_DATA_ROOT_PATH)
+
+def download_testdata(url, relpath, module=None):
+ """Downloads the test data from the internet.
+
+ Parameters
+ ----------
+ url : str
+ Download url.
+
+ relpath : str
+ Relative file path.
+
+ module : Union[str, list, tuple], optional
+ Subdirectory paths under test data folder.
+
+ Returns
+ -------
+ abspath : str
+ Absolute file path of downloaded file
+ """
+ global TEST_DATA_ROOT_PATH
+ if module is None:
+ module_path = ''
+ elif isinstance(module, str):
+ module_path = module
+ elif isinstance(module, (list, tuple)):
+ module_path = os.path.join(*module)
else:
- f = urllib2.urlopen(url)
- data = f.read()
- with open(path, "wb") as code:
- code.write(data)
+ raise ValueError("Unsupported module: " + module)
+ abspath = os.path.join(TEST_DATA_ROOT_PATH, module_path, relpath)
+ download(url, abspath, overwrite=False, size_compare=True)
+ return abspath
|
{"golden_diff": "diff --git a/python/tvm/contrib/download.py b/python/tvm/contrib/download.py\n--- a/python/tvm/contrib/download.py\n+++ b/python/tvm/contrib/download.py\n@@ -5,8 +5,10 @@\n import os\n import sys\n import time\n+import uuid\n+import shutil\n \n-def download(url, path, overwrite=False, size_compare=False, verbose=1):\n+def download(url, path, overwrite=False, size_compare=False, verbose=1, retries=3):\n \"\"\"Downloads the file from the internet.\n Set the input options correctly to overwrite or do the size comparison\n \n@@ -53,6 +55,11 @@\n \n # Stateful start time\n start_time = time.time()\n+ dirpath = os.path.dirname(path)\n+ if not os.path.isdir(dirpath):\n+ os.makedirs(dirpath)\n+ random_uuid = str(uuid.uuid4())\n+ tempfile = os.path.join(dirpath, random_uuid)\n \n def _download_progress(count, block_size, total_size):\n #pylint: disable=unused-argument\n@@ -68,11 +75,62 @@\n (percent, progress_size / (1024.0 * 1024), speed, duration))\n sys.stdout.flush()\n \n- if sys.version_info >= (3,):\n- urllib2.urlretrieve(url, path, reporthook=_download_progress)\n- print(\"\")\n+ while retries >= 0:\n+ # Disable pyling too broad Exception\n+ # pylint: disable=W0703\n+ try:\n+ if sys.version_info >= (3,):\n+ urllib2.urlretrieve(url, tempfile, reporthook=_download_progress)\n+ print(\"\")\n+ else:\n+ f = urllib2.urlopen(url)\n+ data = f.read()\n+ with open(tempfile, \"wb\") as code:\n+ code.write(data)\n+ shutil.move(tempfile, path)\n+ break\n+ except Exception as err:\n+ retries -= 1\n+ if retries == 0:\n+ os.remove(tempfile)\n+ raise err\n+ else:\n+ print(\"download failed due to {}, retrying, {} attempt{} left\"\n+ .format(repr(err), retries, 's' if retries > 1 else ''))\n+\n+\n+TEST_DATA_ROOT_PATH = os.path.join(os.path.expanduser('~'), '.tvm_test_data')\n+if not os.path.exists(TEST_DATA_ROOT_PATH):\n+ os.mkdir(TEST_DATA_ROOT_PATH)\n+\n+def download_testdata(url, relpath, module=None):\n+ \"\"\"Downloads the test data from the internet.\n+\n+ Parameters\n+ ----------\n+ url : str\n+ Download url.\n+\n+ relpath : str\n+ Relative file path.\n+\n+ module : Union[str, list, tuple], optional\n+ Subdirectory paths under test data folder.\n+\n+ Returns\n+ -------\n+ abspath : str\n+ Absolute file path of downloaded file\n+ \"\"\"\n+ global TEST_DATA_ROOT_PATH\n+ if module is None:\n+ module_path = ''\n+ elif isinstance(module, str):\n+ module_path = module\n+ elif isinstance(module, (list, tuple)):\n+ module_path = os.path.join(*module)\n else:\n- f = urllib2.urlopen(url)\n- data = f.read()\n- with open(path, \"wb\") as code:\n- code.write(data)\n+ raise ValueError(\"Unsupported module: \" + module)\n+ abspath = os.path.join(TEST_DATA_ROOT_PATH, module_path, relpath)\n+ download(url, abspath, overwrite=False, size_compare=True)\n+ return abspath\n", "issue": "[TEST][TENSORFLOW] Cache the Downloaded File\nSo far the model files used in the TF end to end tests re-download the file in each test run. \r\nThis causes the test execution to be slow. 
Eventually, the test server can be blocked by the place that hosts the data-source.\r\n\r\nWe need to change the implementation to cache to local and only re-download the file if necessary.\r\n\r\ncc @srkreddy1238 @icemelon9 \n", "before_files": [{"content": "\"\"\"Helper utility for downloading\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import as _abs\n\nimport os\nimport sys\nimport time\n\ndef download(url, path, overwrite=False, size_compare=False, verbose=1):\n \"\"\"Downloads the file from the internet.\n Set the input options correctly to overwrite or do the size comparison\n\n Parameters\n ----------\n url : str\n Download url.\n\n path : str\n Local file path to save downloaded file\n\n overwrite : bool, optional\n Whether to overwrite existing file\n\n size_compare : bool, optional\n Whether to do size compare to check downloaded file.\n\n verbose: int, optional\n Verbose level\n \"\"\"\n if sys.version_info >= (3,):\n import urllib.request as urllib2\n else:\n import urllib2\n\n if os.path.isfile(path) and not overwrite:\n if size_compare:\n import requests\n file_size = os.path.getsize(path)\n res_head = requests.head(url)\n res_get = requests.get(url, stream=True)\n if 'Content-Length' not in res_head.headers:\n res_get = urllib2.urlopen(url)\n url_file_size = int(res_get.headers['Content-Length'])\n if url_file_size != file_size:\n print(\"exist file got corrupted, downloading %s file freshly...\" % path)\n download(url, path, True, False)\n return\n print('File {} exists, skip.'.format(path))\n return\n\n if verbose >= 1:\n print('Downloading from url {} to {}'.format(url, path))\n\n # Stateful start time\n start_time = time.time()\n\n def _download_progress(count, block_size, total_size):\n #pylint: disable=unused-argument\n \"\"\"Show the download progress.\n \"\"\"\n if count == 0:\n return\n duration = time.time() - start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / (1024 * duration))\n percent = min(int(count * block_size * 100 / total_size), 100)\n sys.stdout.write(\"\\r...%d%%, %.2f MB, %d KB/s, %d seconds passed\" %\n (percent, progress_size / (1024.0 * 1024), speed, duration))\n sys.stdout.flush()\n\n if sys.version_info >= (3,):\n urllib2.urlretrieve(url, path, reporthook=_download_progress)\n print(\"\")\n else:\n f = urllib2.urlopen(url)\n data = f.read()\n with open(path, \"wb\") as code:\n code.write(data)\n", "path": "python/tvm/contrib/download.py"}], "after_files": [{"content": "\"\"\"Helper utility for downloading\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import as _abs\n\nimport os\nimport sys\nimport time\nimport uuid\nimport shutil\n\ndef download(url, path, overwrite=False, size_compare=False, verbose=1, retries=3):\n \"\"\"Downloads the file from the internet.\n Set the input options correctly to overwrite or do the size comparison\n\n Parameters\n ----------\n url : str\n Download url.\n\n path : str\n Local file path to save downloaded file\n\n overwrite : bool, optional\n Whether to overwrite existing file\n\n size_compare : bool, optional\n Whether to do size compare to check downloaded file.\n\n verbose: int, optional\n Verbose level\n \"\"\"\n if sys.version_info >= (3,):\n import urllib.request as urllib2\n else:\n import urllib2\n\n if os.path.isfile(path) and not overwrite:\n if size_compare:\n import requests\n file_size = os.path.getsize(path)\n res_head = requests.head(url)\n res_get = requests.get(url, stream=True)\n if 'Content-Length' 
not in res_head.headers:\n res_get = urllib2.urlopen(url)\n url_file_size = int(res_get.headers['Content-Length'])\n if url_file_size != file_size:\n print(\"exist file got corrupted, downloading %s file freshly...\" % path)\n download(url, path, True, False)\n return\n print('File {} exists, skip.'.format(path))\n return\n\n if verbose >= 1:\n print('Downloading from url {} to {}'.format(url, path))\n\n # Stateful start time\n start_time = time.time()\n dirpath = os.path.dirname(path)\n if not os.path.isdir(dirpath):\n os.makedirs(dirpath)\n random_uuid = str(uuid.uuid4())\n tempfile = os.path.join(dirpath, random_uuid)\n\n def _download_progress(count, block_size, total_size):\n #pylint: disable=unused-argument\n \"\"\"Show the download progress.\n \"\"\"\n if count == 0:\n return\n duration = time.time() - start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / (1024 * duration))\n percent = min(int(count * block_size * 100 / total_size), 100)\n sys.stdout.write(\"\\r...%d%%, %.2f MB, %d KB/s, %d seconds passed\" %\n (percent, progress_size / (1024.0 * 1024), speed, duration))\n sys.stdout.flush()\n\n while retries >= 0:\n # Disable pyling too broad Exception\n # pylint: disable=W0703\n try:\n if sys.version_info >= (3,):\n urllib2.urlretrieve(url, tempfile, reporthook=_download_progress)\n print(\"\")\n else:\n f = urllib2.urlopen(url)\n data = f.read()\n with open(tempfile, \"wb\") as code:\n code.write(data)\n shutil.move(tempfile, path)\n break\n except Exception as err:\n retries -= 1\n if retries == 0:\n os.remove(tempfile)\n raise err\n else:\n print(\"download failed due to {}, retrying, {} attempt{} left\"\n .format(repr(err), retries, 's' if retries > 1 else ''))\n\n\nTEST_DATA_ROOT_PATH = os.path.join(os.path.expanduser('~'), '.tvm_test_data')\nif not os.path.exists(TEST_DATA_ROOT_PATH):\n os.mkdir(TEST_DATA_ROOT_PATH)\n\ndef download_testdata(url, relpath, module=None):\n \"\"\"Downloads the test data from the internet.\n\n Parameters\n ----------\n url : str\n Download url.\n\n relpath : str\n Relative file path.\n\n module : Union[str, list, tuple], optional\n Subdirectory paths under test data folder.\n\n Returns\n -------\n abspath : str\n Absolute file path of downloaded file\n \"\"\"\n global TEST_DATA_ROOT_PATH\n if module is None:\n module_path = ''\n elif isinstance(module, str):\n module_path = module\n elif isinstance(module, (list, tuple)):\n module_path = os.path.join(*module)\n else:\n raise ValueError(\"Unsupported module: \" + module)\n abspath = os.path.join(TEST_DATA_ROOT_PATH, module_path, relpath)\n download(url, abspath, overwrite=False, size_compare=True)\n return abspath\n", "path": "python/tvm/contrib/download.py"}]}
| 1,086 | 803 |
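Editorial note on the entry above: the golden diff makes the TVM download helper restartable and caches test files under `~/.tvm_test_data`. The minimal sketch below illustrates only the download-to-temp-file-then-move-with-retries idea from that patch; the function name, the fixed retry count, and the use of `urllib.request` are illustrative assumptions rather than TVM's actual API.

```python
# Hedged sketch: fetch into a uniquely named temporary file next to the target,
# then publish it with a move so a half-downloaded file never sits at the final
# path; retry a bounded number of times before giving up.
import os
import shutil
import uuid
import urllib.request


def download_with_retries(url, path, retries=3):
    dirpath = os.path.dirname(os.path.abspath(path))
    os.makedirs(dirpath, exist_ok=True)
    tmp = os.path.join(dirpath, str(uuid.uuid4()))
    while True:
        try:
            urllib.request.urlretrieve(url, tmp)  # download to the temp name
            shutil.move(tmp, path)                # publish the finished file
            return path
        except Exception:
            retries -= 1
            if retries <= 0:
                if os.path.exists(tmp):
                    os.remove(tmp)
                raise
```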
gh_patches_debug_1585
|
rasdani/github-patches
|
git_diff
|
scipy__scipy-10447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build adds the user folder inside the code base
After building the SciPy on Windows 10, the base folder has a copy of the following folder
```
C:\Users\<user>\Documents\GitHub\scipy\Users\<user>\AppData\Local\Temp\tmpuvtg6i4i\main.obj
```
From the look of the structure, it seems like a relative folder is used instead of an absolute one hence it recreates the temp folder within the codebase.
<strike>I think this might be related to the pocketfft development as I recently started to see it but might also be another C++ source change.</strike> Happens at the `cluster._optimal_leaf_ordering` compilation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/fft/_pocketfft/setup.py`
Content:
```
1
2 def try_compile(compiler, code=None, flags=[], ext='.cpp'):
3 """Returns True if the compiler is able to compile the given code"""
4 import tempfile
5 from distutils.errors import CompileError
6 import os
7
8 code = code or 'int main (int argc, char **argv) { return 0; }'
9
10 with tempfile.TemporaryDirectory() as temp_dir:
11 fname = os.path.join(temp_dir, 'main'+ext)
12 with open(fname, 'w') as f:
13 f.write(code)
14
15 try:
16 compiler.compile([fname], extra_postargs=flags)
17 except CompileError:
18 return False
19 return True
20
21
22 def has_flag(compiler, flag):
23 return try_compile(compiler, flags=[flag])
24
25
26 def get_std_flag(compiler):
27 # Test the compiler for the highest available c++ standard flag
28 gnu_flags = ['--std=c++14', '--std=c++11']
29 flags_by_cc = {
30 'msvc': ['/std:c++14', None],
31 'intelw': ['/Qstd=c++14', '/Qstd=c++11']
32 }
33 flags = flags_by_cc.get(compiler.compiler_type, gnu_flags)
34
35 for flag in flags:
36 if flag is None:
37 return None
38
39 if has_flag(compiler, flag):
40 return flag
41
42 from numpy.distutils import log
43 log.warn('Could not detect c++ standard flag')
44 return None
45
46
47 def try_add_flag(args, compiler, flag):
48 """Appends flag to the list of arguments if supported by the compiler"""
49 if try_compile(compiler, flags=args+[flag]):
50 args.append(flag)
51
52
53 def pre_build_hook(build_ext, ext):
54 cc = build_ext._cxx_compiler
55 args = ext.extra_compile_args
56
57 std_flag = get_std_flag(build_ext._cxx_compiler)
58 if std_flag is not None:
59 args.append(std_flag)
60
61 if cc.compiler_type == 'msvc':
62 args.append('/EHsc')
63 else:
64 try_add_flag(args, cc, '-fvisibility=hidden')
65
66 import sys
67 if sys.platform == 'darwin':
68 args.append('-mmacosx-version-min=10.7')
69 try_add_flag(args, cc, '-stdlib=libc++')
70
71
72 def configuration(parent_package='', top_path=None):
73 from numpy.distutils.misc_util import Configuration
74 import pybind11
75 include_dirs = [pybind11.get_include(True), pybind11.get_include(False)]
76
77 config = Configuration('_pocketfft', parent_package, top_path)
78 ext = config.add_extension('pypocketfft',
79 sources=['pypocketfft.cxx'],
80 depends=['pocketfft_hdronly.h'],
81 include_dirs=include_dirs,
82 language='c++')
83 ext._pre_build_hook = pre_build_hook
84
85 config.add_data_files('LICENSE.md')
86 config.add_data_dir('tests')
87 return config
88
89
90 if __name__ == '__main__':
91 from numpy.distutils.core import setup
92 setup(**configuration(top_path='').todict())
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scipy/fft/_pocketfft/setup.py b/scipy/fft/_pocketfft/setup.py
--- a/scipy/fft/_pocketfft/setup.py
+++ b/scipy/fft/_pocketfft/setup.py
@@ -13,7 +13,7 @@
f.write(code)
try:
- compiler.compile([fname], extra_postargs=flags)
+ compiler.compile([fname], output_dir=temp_dir, extra_postargs=flags)
except CompileError:
return False
return True
|
{"golden_diff": "diff --git a/scipy/fft/_pocketfft/setup.py b/scipy/fft/_pocketfft/setup.py\n--- a/scipy/fft/_pocketfft/setup.py\n+++ b/scipy/fft/_pocketfft/setup.py\n@@ -13,7 +13,7 @@\n f.write(code)\n \n try:\n- compiler.compile([fname], extra_postargs=flags)\n+ compiler.compile([fname], output_dir=temp_dir, extra_postargs=flags)\n except CompileError:\n return False\n return True\n", "issue": "Build adds the user folder inside the code base\nAfter building the SciPy on Windows 10, the base folder has a copy of the following folder\r\n\r\n```\r\nC:\\Users\\<user>\\Documents\\GitHub\\scipy\\Users\\<user>\\AppData\\Local\\Temp\\tmpuvtg6i4i\\main.obj\r\n```\r\nFrom the look of the structure, it seems like a relative folder is used instead of an absolute one hence it recreates the temp folder within the codebase. \r\n\r\n<strike>I think this might be related to the pocketfft development as I recently started to see it but might also be another C++ source change.</strike> Happens at the `cluster._optimal_leaf_ordering` compilation\r\n\r\n\n", "before_files": [{"content": "\ndef try_compile(compiler, code=None, flags=[], ext='.cpp'):\n \"\"\"Returns True if the compiler is able to compile the given code\"\"\"\n import tempfile\n from distutils.errors import CompileError\n import os\n\n code = code or 'int main (int argc, char **argv) { return 0; }'\n\n with tempfile.TemporaryDirectory() as temp_dir:\n fname = os.path.join(temp_dir, 'main'+ext)\n with open(fname, 'w') as f:\n f.write(code)\n\n try:\n compiler.compile([fname], extra_postargs=flags)\n except CompileError:\n return False\n return True\n\n\ndef has_flag(compiler, flag):\n return try_compile(compiler, flags=[flag])\n\n\ndef get_std_flag(compiler):\n # Test the compiler for the highest available c++ standard flag\n gnu_flags = ['--std=c++14', '--std=c++11']\n flags_by_cc = {\n 'msvc': ['/std:c++14', None],\n 'intelw': ['/Qstd=c++14', '/Qstd=c++11']\n }\n flags = flags_by_cc.get(compiler.compiler_type, gnu_flags)\n\n for flag in flags:\n if flag is None:\n return None\n\n if has_flag(compiler, flag):\n return flag\n\n from numpy.distutils import log\n log.warn('Could not detect c++ standard flag')\n return None\n\n\ndef try_add_flag(args, compiler, flag):\n \"\"\"Appends flag to the list of arguments if supported by the compiler\"\"\"\n if try_compile(compiler, flags=args+[flag]):\n args.append(flag)\n\n\ndef pre_build_hook(build_ext, ext):\n cc = build_ext._cxx_compiler\n args = ext.extra_compile_args\n\n std_flag = get_std_flag(build_ext._cxx_compiler)\n if std_flag is not None:\n args.append(std_flag)\n\n if cc.compiler_type == 'msvc':\n args.append('/EHsc')\n else:\n try_add_flag(args, cc, '-fvisibility=hidden')\n\n import sys\n if sys.platform == 'darwin':\n args.append('-mmacosx-version-min=10.7')\n try_add_flag(args, cc, '-stdlib=libc++')\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n import pybind11\n include_dirs = [pybind11.get_include(True), pybind11.get_include(False)]\n\n config = Configuration('_pocketfft', parent_package, top_path)\n ext = config.add_extension('pypocketfft',\n sources=['pypocketfft.cxx'],\n depends=['pocketfft_hdronly.h'],\n include_dirs=include_dirs,\n language='c++')\n ext._pre_build_hook = pre_build_hook\n\n config.add_data_files('LICENSE.md')\n config.add_data_dir('tests')\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": 
"scipy/fft/_pocketfft/setup.py"}], "after_files": [{"content": "\ndef try_compile(compiler, code=None, flags=[], ext='.cpp'):\n \"\"\"Returns True if the compiler is able to compile the given code\"\"\"\n import tempfile\n from distutils.errors import CompileError\n import os\n\n code = code or 'int main (int argc, char **argv) { return 0; }'\n\n with tempfile.TemporaryDirectory() as temp_dir:\n fname = os.path.join(temp_dir, 'main'+ext)\n with open(fname, 'w') as f:\n f.write(code)\n\n try:\n compiler.compile([fname], output_dir=temp_dir, extra_postargs=flags)\n except CompileError:\n return False\n return True\n\n\ndef has_flag(compiler, flag):\n return try_compile(compiler, flags=[flag])\n\n\ndef get_std_flag(compiler):\n # Test the compiler for the highest available c++ standard flag\n gnu_flags = ['--std=c++14', '--std=c++11']\n flags_by_cc = {\n 'msvc': ['/std:c++14', None],\n 'intelw': ['/Qstd=c++14', '/Qstd=c++11']\n }\n flags = flags_by_cc.get(compiler.compiler_type, gnu_flags)\n\n for flag in flags:\n if flag is None:\n return None\n\n if has_flag(compiler, flag):\n return flag\n\n from numpy.distutils import log\n log.warn('Could not detect c++ standard flag')\n return None\n\n\ndef try_add_flag(args, compiler, flag):\n \"\"\"Appends flag to the list of arguments if supported by the compiler\"\"\"\n if try_compile(compiler, flags=args+[flag]):\n args.append(flag)\n\n\ndef pre_build_hook(build_ext, ext):\n cc = build_ext._cxx_compiler\n args = ext.extra_compile_args\n\n std_flag = get_std_flag(build_ext._cxx_compiler)\n if std_flag is not None:\n args.append(std_flag)\n\n if cc.compiler_type == 'msvc':\n args.append('/EHsc')\n else:\n try_add_flag(args, cc, '-fvisibility=hidden')\n\n import sys\n if sys.platform == 'darwin':\n args.append('-mmacosx-version-min=10.7')\n try_add_flag(args, cc, '-stdlib=libc++')\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration\n import pybind11\n include_dirs = [pybind11.get_include(True), pybind11.get_include(False)]\n\n config = Configuration('_pocketfft', parent_package, top_path)\n ext = config.add_extension('pypocketfft',\n sources=['pypocketfft.cxx'],\n depends=['pocketfft_hdronly.h'],\n include_dirs=include_dirs,\n language='c++')\n ext._pre_build_hook = pre_build_hook\n\n config.add_data_files('LICENSE.md')\n config.add_data_dir('tests')\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/fft/_pocketfft/setup.py"}]}
| 1,269 | 117 |
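Editorial note on the entry above: the one-line fix works because distutils' `CCompiler.compile()` without `output_dir` writes object files relative to the current working directory, mirroring the absolute source path — exactly the stray `Users\<user>\AppData\...` tree reported in the issue. The probe below is a hedged, standalone illustration (it assumes a working C compiler on PATH and reuses the same trivial C snippet); it is not SciPy's build code.

```python
# Sketch: compile a throwaway C file from a temporary directory and keep the
# object file inside that directory by passing output_dir explicitly.
import os
import tempfile
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler

code = 'int main (int argc, char **argv) { return 0; }'
compiler = new_compiler()
customize_compiler(compiler)

with tempfile.TemporaryDirectory() as temp_dir:
    fname = os.path.join(temp_dir, 'main.c')
    with open(fname, 'w') as f:
        f.write(code)
    # Without output_dir the object file is created under the *current*
    # directory, recreating fname's absolute path; with it, everything stays
    # in temp_dir and disappears when the context manager exits.
    compiler.compile([fname], output_dir=temp_dir)
```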
gh_patches_debug_15992
|
rasdani/github-patches
|
git_diff
|
pymodbus-dev__pymodbus-1339
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't run pymodbus.simulator --help
<!--
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/pymodbus-dev/pymodbus/issues
* check the discussions forum https://github.com/pymodbus-dev/pymodbus/discussions
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
Before opening a new issue, make sure you do the following
-->
### Versions
* Python: 3.10.6
* OS: Linux
* Pymodbus: 3.1.3
* Modbus Hardware (if used):
### Description
Trying to run `pymodbus.simulator --help` fails:
```
<coroutine object main at 0x7efcc073cf90>
sys:1: RuntimeWarning: coroutine 'main' was never awaited
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
```
The `main` function used as entry point for the simulator is an async function: https://github.com/pymodbus-dev/pymodbus/blob/12859d0b82cc215a18ac757fe9319cdf1f9ec890/pymodbus/server/simulator/main.py#L113
It can't be used directly as an entry point. The entry point should be a function using `asyncio.run`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymodbus/server/simulator/main.py`
Content:
```
1 #!/usr/bin/env python3
2 """HTTP server for modbus simulator.
3
4 The modbus simulator contain 3 distint parts:
5
6 - Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)
7 - Modbus server: (server)(./http_server.py)
8 - HTTP server with REST API and web pages providing an online console in your browser
9
10 Multiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)
11
12 The command line parameters are kept to a minimum:
13
14 usage: main.py [-h] [--modbus_server MODBUS_SERVER]
15 [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]
16 [--http_port HTTP_PORT]
17 [--log {critical,error,warning,info,debug}]
18 [--json_file JSON_FILE]
19 [--custom_actions_module CUSTOM_ACTIONS_MODULE]
20
21 Modbus server with REST-API and web server
22
23 options:
24 -h, --help show this help message and exit
25 --modbus_server MODBUS_SERVER
26 use <modbus_server> from server_list in json file
27 --modbus_device MODBUS_DEVICE
28 use <modbus_device> from device_list in json file
29 --http_host HTTP_HOST
30 use <http_host> as host to bind http listen
31 --http_port HTTP_PORT
32 use <http_port> as port to bind http listen
33 --log {critical,error,warning,info,debug}
34 set log level, default is info
35 --log_file LOG_FILE
36 name of server log file, default is "server.log"
37 --json_file JSON_FILE
38 name of json_file, default is "setup.json"
39 --custom_actions_module CUSTOM_ACTIONS_MODULE
40 python file with custom actions, default is none
41 """
42 import argparse
43 import asyncio
44
45 from pymodbus import pymodbus_apply_logging_config
46 from pymodbus.logging import Log
47 from pymodbus.server.simulator.http_server import ModbusSimulatorServer
48
49
50 async def run():
51 """Run simulator."""
52
53
54 def get_commandline():
55 """Get command line arguments."""
56 parser = argparse.ArgumentParser(
57 description="Modbus server with REST-API and web server"
58 )
59 parser.add_argument(
60 "--modbus_server",
61 help="use <modbus_server> from server_list in json file",
62 type=str,
63 )
64 parser.add_argument(
65 "--modbus_device",
66 help="use <modbus_device> from device_list in json file",
67 type=str,
68 )
69 parser.add_argument(
70 "--http_host",
71 help="use <http_host> as host to bind http listen",
72 type=str,
73 )
74 parser.add_argument(
75 "--http_port",
76 help="use <http_port> as port to bind http listen",
77 type=str,
78 )
79 parser.add_argument(
80 "--log",
81 choices=["critical", "error", "warning", "info", "debug"],
82 help="set log level, default is info",
83 default="info",
84 type=str,
85 )
86 parser.add_argument(
87 "--json_file",
88 help='name of json file, default is "setup.json"',
89 type=str,
90 )
91 parser.add_argument(
92 "--log_file",
93 help='name of server log file, default is "server.log"',
94 type=str,
95 )
96 parser.add_argument(
97 "--custom_actions_module",
98 help="python file with custom actions, default is none",
99 type=str,
100 )
101 args = parser.parse_args()
102 pymodbus_apply_logging_config(args.log.upper())
103 Log.info("Start simulator")
104 cmd_args = {}
105 for argument in args.__dict__:
106 if argument == "log":
107 continue
108 if args.__dict__[argument] is not None:
109 cmd_args[argument] = args.__dict__[argument]
110 return cmd_args
111
112
113 async def main():
114 """Run server."""
115 cmd_args = get_commandline()
116 task = ModbusSimulatorServer(**cmd_args)
117
118 await task.run_forever()
119
120
121 if __name__ == "__main__":
122 asyncio.run(main(), debug=True)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py
--- a/pymodbus/server/simulator/main.py
+++ b/pymodbus/server/simulator/main.py
@@ -47,10 +47,6 @@
from pymodbus.server.simulator.http_server import ModbusSimulatorServer
-async def run():
- """Run simulator."""
-
-
def get_commandline():
"""Get command line arguments."""
parser = argparse.ArgumentParser(
@@ -110,13 +106,12 @@
return cmd_args
-async def main():
+def main():
"""Run server."""
cmd_args = get_commandline()
task = ModbusSimulatorServer(**cmd_args)
-
- await task.run_forever()
+ asyncio.run(task.run_forever(), debug=True)
if __name__ == "__main__":
- asyncio.run(main(), debug=True)
+ main()
|
{"golden_diff": "diff --git a/pymodbus/server/simulator/main.py b/pymodbus/server/simulator/main.py\n--- a/pymodbus/server/simulator/main.py\n+++ b/pymodbus/server/simulator/main.py\n@@ -47,10 +47,6 @@\n from pymodbus.server.simulator.http_server import ModbusSimulatorServer\n \n \n-async def run():\n- \"\"\"Run simulator.\"\"\"\n-\n-\n def get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n@@ -110,13 +106,12 @@\n return cmd_args\n \n \n-async def main():\n+def main():\n \"\"\"Run server.\"\"\"\n cmd_args = get_commandline()\n task = ModbusSimulatorServer(**cmd_args)\n-\n- await task.run_forever()\n+ asyncio.run(task.run_forever(), debug=True)\n \n \n if __name__ == \"__main__\":\n- asyncio.run(main(), debug=True)\n+ main()\n", "issue": "Can't run pymodbus.simulator --help\n<!--\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/pymodbus-dev/pymodbus/issues\r\n * check the discussions forum https://github.com/pymodbus-dev/pymodbus/discussions\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n\r\n Before opening a new issue, make sure you do the following\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.10.6\r\n* OS: Linux\r\n* Pymodbus: 3.1.3\r\n* Modbus Hardware (if used):\r\n\r\n### Description\r\n\r\nTrying to run `pymodbus.simulator --help` fails:\r\n\r\n```\r\n<coroutine object main at 0x7efcc073cf90>\r\nsys:1: RuntimeWarning: coroutine 'main' was never awaited\r\nRuntimeWarning: Enable tracemalloc to get the object allocation traceback\r\n```\r\n\r\nThe `main` function used as entry point for the simulator is an async function: https://github.com/pymodbus-dev/pymodbus/blob/12859d0b82cc215a18ac757fe9319cdf1f9ec890/pymodbus/server/simulator/main.py#L113\r\n\r\nIt can't be used directly as an entry point. 
The entry point should be a function using `asyncio.run`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"HTTP server for modbus simulator.\n\nThe modbus simulator contain 3 distint parts:\n\n- Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)\n- Modbus server: (server)(./http_server.py)\n- HTTP server with REST API and web pages providing an online console in your browser\n\nMultiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)\n\nThe command line parameters are kept to a minimum:\n\nusage: main.py [-h] [--modbus_server MODBUS_SERVER]\n [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]\n [--http_port HTTP_PORT]\n [--log {critical,error,warning,info,debug}]\n [--json_file JSON_FILE]\n [--custom_actions_module CUSTOM_ACTIONS_MODULE]\n\nModbus server with REST-API and web server\n\noptions:\n -h, --help show this help message and exit\n --modbus_server MODBUS_SERVER\n use <modbus_server> from server_list in json file\n --modbus_device MODBUS_DEVICE\n use <modbus_device> from device_list in json file\n --http_host HTTP_HOST\n use <http_host> as host to bind http listen\n --http_port HTTP_PORT\n use <http_port> as port to bind http listen\n --log {critical,error,warning,info,debug}\n set log level, default is info\n --log_file LOG_FILE\n name of server log file, default is \"server.log\"\n --json_file JSON_FILE\n name of json_file, default is \"setup.json\"\n --custom_actions_module CUSTOM_ACTIONS_MODULE\n python file with custom actions, default is none\n\"\"\"\nimport argparse\nimport asyncio\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.logging import Log\nfrom pymodbus.server.simulator.http_server import ModbusSimulatorServer\n\n\nasync def run():\n \"\"\"Run simulator.\"\"\"\n\n\ndef get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Modbus server with REST-API and web server\"\n )\n parser.add_argument(\n \"--modbus_server\",\n help=\"use <modbus_server> from server_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--modbus_device\",\n help=\"use <modbus_device> from device_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--http_port\",\n help=\"use <http_port> as port to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--log\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\"],\n help=\"set log level, default is info\",\n default=\"info\",\n type=str,\n )\n parser.add_argument(\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n )\n parser.add_argument(\n \"--log_file\",\n help='name of server log file, default is \"server.log\"',\n type=str,\n )\n parser.add_argument(\n \"--custom_actions_module\",\n help=\"python file with custom actions, default is none\",\n type=str,\n )\n args = parser.parse_args()\n pymodbus_apply_logging_config(args.log.upper())\n Log.info(\"Start simulator\")\n cmd_args = {}\n for argument in args.__dict__:\n if argument == \"log\":\n continue\n if args.__dict__[argument] is not None:\n cmd_args[argument] = args.__dict__[argument]\n return cmd_args\n\n\nasync def main():\n \"\"\"Run server.\"\"\"\n cmd_args = get_commandline()\n task = 
ModbusSimulatorServer(**cmd_args)\n\n await task.run_forever()\n\n\nif __name__ == \"__main__\":\n asyncio.run(main(), debug=True)\n", "path": "pymodbus/server/simulator/main.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"HTTP server for modbus simulator.\n\nThe modbus simulator contain 3 distint parts:\n\n- Datastore simulator, to define registers and their behaviour including actions: (simulator)(../../datastore/simulator.py)\n- Modbus server: (server)(./http_server.py)\n- HTTP server with REST API and web pages providing an online console in your browser\n\nMultiple setups for different server types and/or devices are prepared in a (json file)(./setup.json), the detailed configuration is explained in (doc)(README.md)\n\nThe command line parameters are kept to a minimum:\n\nusage: main.py [-h] [--modbus_server MODBUS_SERVER]\n [--modbus_device MODBUS_DEVICE] [--http_host HTTP_HOST]\n [--http_port HTTP_PORT]\n [--log {critical,error,warning,info,debug}]\n [--json_file JSON_FILE]\n [--custom_actions_module CUSTOM_ACTIONS_MODULE]\n\nModbus server with REST-API and web server\n\noptions:\n -h, --help show this help message and exit\n --modbus_server MODBUS_SERVER\n use <modbus_server> from server_list in json file\n --modbus_device MODBUS_DEVICE\n use <modbus_device> from device_list in json file\n --http_host HTTP_HOST\n use <http_host> as host to bind http listen\n --http_port HTTP_PORT\n use <http_port> as port to bind http listen\n --log {critical,error,warning,info,debug}\n set log level, default is info\n --log_file LOG_FILE\n name of server log file, default is \"server.log\"\n --json_file JSON_FILE\n name of json_file, default is \"setup.json\"\n --custom_actions_module CUSTOM_ACTIONS_MODULE\n python file with custom actions, default is none\n\"\"\"\nimport argparse\nimport asyncio\n\nfrom pymodbus import pymodbus_apply_logging_config\nfrom pymodbus.logging import Log\nfrom pymodbus.server.simulator.http_server import ModbusSimulatorServer\n\n\ndef get_commandline():\n \"\"\"Get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Modbus server with REST-API and web server\"\n )\n parser.add_argument(\n \"--modbus_server\",\n help=\"use <modbus_server> from server_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--modbus_device\",\n help=\"use <modbus_device> from device_list in json file\",\n type=str,\n )\n parser.add_argument(\n \"--http_host\",\n help=\"use <http_host> as host to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--http_port\",\n help=\"use <http_port> as port to bind http listen\",\n type=str,\n )\n parser.add_argument(\n \"--log\",\n choices=[\"critical\", \"error\", \"warning\", \"info\", \"debug\"],\n help=\"set log level, default is info\",\n default=\"info\",\n type=str,\n )\n parser.add_argument(\n \"--json_file\",\n help='name of json file, default is \"setup.json\"',\n type=str,\n )\n parser.add_argument(\n \"--log_file\",\n help='name of server log file, default is \"server.log\"',\n type=str,\n )\n parser.add_argument(\n \"--custom_actions_module\",\n help=\"python file with custom actions, default is none\",\n type=str,\n )\n args = parser.parse_args()\n pymodbus_apply_logging_config(args.log.upper())\n Log.info(\"Start simulator\")\n cmd_args = {}\n for argument in args.__dict__:\n if argument == \"log\":\n continue\n if args.__dict__[argument] is not None:\n cmd_args[argument] = args.__dict__[argument]\n return cmd_args\n\n\ndef main():\n \"\"\"Run server.\"\"\"\n cmd_args = 
get_commandline()\n task = ModbusSimulatorServer(**cmd_args)\n asyncio.run(task.run_forever(), debug=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "pymodbus/server/simulator/main.py"}]}
| 1,738 | 208 |
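Editorial note on the entry above: the patch works because `console_scripts` entry points are invoked as plain callables — calling an `async def` only builds a coroutine object, which is what produced the "coroutine 'main' was never awaited" warning. The generic sketch below shows the wrapper pattern; `serve_forever()` is a stand-in for the simulator's `run_forever()`, not pymodbus' real API.

```python
# Sketch: keep the awaitable work in a coroutine and expose a synchronous
# main() that drives it with asyncio.run(); the entry point targets main().
import asyncio


async def serve_forever():
    # Placeholder for the long-running server loop.
    await asyncio.sleep(0)


def main():
    """Synchronous wrapper suitable as a console_scripts entry point."""
    asyncio.run(serve_forever(), debug=True)


if __name__ == "__main__":
    main()
```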
gh_patches_debug_31074
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1957
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ERROR: Cannot locate specified Dockerfile
I'm not sure if this is a Docker Compose bug or docker-py bug, but this used to work:
`docker-compose.yml`:
```yaml
version: '3.5'
services:
php:
build:
context: .
dockerfile: ./docker/php.Dockerfile
```
but now the `./` prefix is causing:
```
ERROR: Cannot locate specified Dockerfile: ./docker/php.Dockerfile
```
I have to change it to `dockerfile: docker/php.Dockerfile` to get it to work.
--
docker-py version: 3.1.1
Python 3.6.4
`docker version`:
```
Client:
Version: 18.02.0-ce
API version: 1.36
Go version: go1.9.4
Git commit: fc4de447b5
Built: Tue Feb 13 15:28:01 2018
OS/Arch: linux/amd64
Experimental: false
Orchestrator: swarm
Server:
Engine:
Version: 18.02.0-ce
API version: 1.36 (minimum version 1.12)
Go version: go1.9.4
Git commit: fc4de447b5
Built: Tue Feb 13 15:28:34 2018
OS/Arch: linux/amd64
Experimental: false
```
OS: Manjaro Linux 17.1.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/utils/build.py`
Content:
```
1 import os
2 import re
3
4 from ..constants import IS_WINDOWS_PLATFORM
5 from fnmatch import fnmatch
6 from itertools import chain
7 from .utils import create_archive
8
9
10 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
11 root = os.path.abspath(path)
12 exclude = exclude or []
13 return create_archive(
14 files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
15 root=root, fileobj=fileobj, gzip=gzip
16 )
17
18
19 _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
20
21
22 def exclude_paths(root, patterns, dockerfile=None):
23 """
24 Given a root directory path and a list of .dockerignore patterns, return
25 an iterator of all paths (both regular files and directories) in the root
26 directory that do *not* match any of the patterns.
27
28 All paths returned are relative to the root.
29 """
30
31 if dockerfile is None:
32 dockerfile = 'Dockerfile'
33
34 def normalize(p):
35 # Leading and trailing slashes are not relevant. Yes,
36 # "foo.py/" must exclude the "foo.py" regular file. "."
37 # components are not relevant either, even if the whole
38 # pattern is only ".", as the Docker reference states: "For
39 # historical reasons, the pattern . is ignored."
40 split = [pt for pt in re.split(_SEP, p) if pt and pt != '.']
41 # ".." component must be cleared with the potential previous
42 # component, regardless of whether it exists: "A preprocessing
43 # step [...] eliminates . and .. elements using Go's
44 # filepath.".
45 i = 0
46 while i < len(split):
47 if split[i] == '..':
48 del split[i]
49 if i > 0:
50 del split[i - 1]
51 i -= 1
52 else:
53 i += 1
54 return split
55
56 patterns = (
57 (True, normalize(p[1:]))
58 if p.startswith('!') else
59 (False, normalize(p))
60 for p in patterns)
61 patterns = list(reversed(list(chain(
62 # Exclude empty patterns such as "." or the empty string.
63 filter(lambda p: p[1], patterns),
64 # Always include the Dockerfile and .dockerignore
65 [(True, dockerfile.split('/')), (True, ['.dockerignore'])]))))
66 return set(walk(root, patterns))
67
68
69 def walk(root, patterns, default=True):
70 """
71 A collection of file lying below root that should be included according to
72 patterns.
73 """
74
75 def match(p):
76 if p[1][0] == '**':
77 rec = (p[0], p[1][1:])
78 return [p] + (match(rec) if rec[1] else [rec])
79 elif fnmatch(f, p[1][0]):
80 return [(p[0], p[1][1:])]
81 else:
82 return []
83
84 for f in os.listdir(root):
85 cur = os.path.join(root, f)
86 # The patterns if recursing in that directory.
87 sub = list(chain(*(match(p) for p in patterns)))
88 # Whether this file is explicitely included / excluded.
89 hit = next((p[0] for p in sub if not p[1]), None)
90 # Whether this file is implicitely included / excluded.
91 matched = default if hit is None else hit
92 sub = list(filter(lambda p: p[1], sub))
93 if os.path.isdir(cur):
94 # Entirely skip directories if there are no chance any subfile will
95 # be included.
96 if all(not p[0] for p in sub) and not matched:
97 continue
98 # I think this would greatly speed up dockerignore handling by not
99 # recursing into directories we are sure would be entirely
100 # included, and only yielding the directory itself, which will be
101 # recursively archived anyway. However the current unit test expect
102 # the full list of subfiles and I'm not 100% sure it would make no
103 # difference yet.
104 # if all(p[0] for p in sub) and matched:
105 # yield f
106 # continue
107 children = False
108 for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
109 yield r
110 children = True
111 # The current unit tests expect directories only under those
112 # conditions. It might be simplifiable though.
113 if (not sub or not children) and hit or hit is None and default:
114 yield f
115 elif matched:
116 yield f
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/utils/build.py b/docker/utils/build.py
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -31,18 +31,21 @@
if dockerfile is None:
dockerfile = 'Dockerfile'
+ def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
def normalize(p):
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole
# pattern is only ".", as the Docker reference states: "For
# historical reasons, the pattern . is ignored."
- split = [pt for pt in re.split(_SEP, p) if pt and pt != '.']
# ".." component must be cleared with the potential previous
# component, regardless of whether it exists: "A preprocessing
# step [...] eliminates . and .. elements using Go's
# filepath.".
i = 0
+ split = split_path(p)
while i < len(split):
if split[i] == '..':
del split[i]
@@ -62,7 +65,7 @@
# Exclude empty patterns such as "." or the empty string.
filter(lambda p: p[1], patterns),
# Always include the Dockerfile and .dockerignore
- [(True, dockerfile.split('/')), (True, ['.dockerignore'])]))))
+ [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
return set(walk(root, patterns))
|
{"golden_diff": "diff --git a/docker/utils/build.py b/docker/utils/build.py\n--- a/docker/utils/build.py\n+++ b/docker/utils/build.py\n@@ -31,18 +31,21 @@\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n \n+ def split_path(p):\n+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n+\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. \".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n- split = [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. elements using Go's\n # filepath.\".\n i = 0\n+ split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n@@ -62,7 +65,7 @@\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n- [(True, dockerfile.split('/')), (True, ['.dockerignore'])]))))\n+ [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n", "issue": "ERROR: Cannot locate specified Dockerfile\nI'm not sure if this is a Docker Compose bug or docker-py bug, but this used to work:\r\n\r\n`docker-compose.yml`:\r\n```yaml\r\nversion: '3.5'\r\n\r\nservices:\r\n php:\r\n build:\r\n context: .\r\n dockerfile: ./docker/php.Dockerfile\r\n```\r\n\r\nbut now the `./` prefix is causing:\r\n```\r\nERROR: Cannot locate specified Dockerfile: ./docker/php.Dockerfile\r\n```\r\n\r\nI have to change it to `dockerfile: docker/php.Dockerfile` to get it to work.\r\n\r\n--\r\n\r\ndocker-py version: 3.1.1\r\n\r\nPython 3.6.4\r\n\r\n`docker version`:\r\n```\r\nClient:\r\n Version:\t18.02.0-ce\r\n API version:\t1.36\r\n Go version:\tgo1.9.4\r\n Git commit:\tfc4de447b5\r\n Built:\tTue Feb 13 15:28:01 2018\r\n OS/Arch:\tlinux/amd64\r\n Experimental:\tfalse\r\n Orchestrator:\tswarm\r\n\r\nServer:\r\n Engine:\r\n Version:\t18.02.0-ce\r\n API version:\t1.36 (minimum version 1.12)\r\n Go version:\tgo1.9.4\r\n Git commit:\tfc4de447b5\r\n Built:\tTue Feb 13 15:28:34 2018\r\n OS/Arch:\tlinux/amd64\r\n Experimental:\tfalse\r\n```\r\n\r\nOS: Manjaro Linux 17.1.6\n", "before_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. 
\".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n split = [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. elements using Go's\n # filepath.\".\n i = 0\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, dockerfile.split('/')), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. 
It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def split_path(p):\n return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. \".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. elements using Go's\n # filepath.\".\n i = 0\n split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. 
However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}]}
| 1,867 | 362 |
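Editorial note on the entry above: the fix normalizes the Dockerfile path with the same splitting rule used for .dockerignore patterns, so a leading `./` no longer defeats the "always include the Dockerfile" entry. The condensed sketch below shows just that normalization; the separator regex is simplified to accept both `/` and `\` unconditionally, whereas docker-py only does so on Windows.

```python
# Sketch: split on either path separator and drop '.' components so that
# './docker/php.Dockerfile' and 'docker/php.Dockerfile' normalize identically.
import re

_SEP = re.compile(r'/|\\')


def split_path(p):
    return [pt for pt in re.split(_SEP, p) if pt and pt != '.']


assert split_path('./docker/php.Dockerfile') == ['docker', 'php.Dockerfile']
assert split_path('docker/php.Dockerfile') == ['docker', 'php.Dockerfile']
assert split_path('Dockerfile') == ['Dockerfile']
```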
gh_patches_debug_650
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1942
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.109
On the docket:
+ [x] pex does not support musllinux wheels #1933
+ [x] Empty string PEX_PATH="" env var causes CWD (.) to be added bootstrapped pex_path #1936
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.108"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.108"
+__version__ = "2.1.109"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.108\"\n+__version__ = \"2.1.109\"\n", "issue": "Release 2.1.109\nOn the docket:\r\n+ [x] pex does not support musllinux wheels #1933\r\n+ [x] Empty string PEX_PATH=\"\" env var causes CWD (.) to be added bootstrapped pex_path #1936\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.108\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.109\"\n", "path": "pex/version.py"}]}
| 374 | 98 |
gh_patches_debug_25401
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-1307
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Infrastructure list is filtered on "Ouvrage" by default
J'ai créé des points d'aménagements (8 au total), cependant la plupart ne s'affichent pas (ni dans la liste, ni sur la carte)...

Lorsque je rentre dans une fiche aménagement et que je reclique sur le bouton liste, là ils apparaissent tous mais seulement sur la carte.

Par contre, si je touche au zoom, ils disparaissent et je n'ai plus que les trois du début.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/infrastructure/filters.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3 from geotrek.common.filters import StructureRelatedFilterSet, YearFilter
4 from geotrek.maintenance.filters import InterventionYearSelect
5
6 from .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage
7
8
9 class InfrastructureYearSelect(InterventionYearSelect):
10 label = _(u"Intervention year")
11
12
13 class InfrastructureFilterSet(StructureRelatedFilterSet):
14 intervention_year = YearFilter(name='interventions_set__date',
15 widget=InfrastructureYearSelect,
16 label=_(u"Intervention year"))
17
18 def __init__(self, *args, **kwargs):
19 super(InfrastructureFilterSet, self).__init__(*args, **kwargs)
20 field = self.form.fields['type']
21 field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)
22
23 class Meta(StructureRelatedFilterSet.Meta):
24 model = Infrastructure
25 fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']
26
27
28 class SignageFilterSet(StructureRelatedFilterSet):
29 intervention_year = YearFilter(name='interventions_set__date',
30 widget=InfrastructureYearSelect)
31
32 def __init__(self, *args, **kwargs):
33 super(SignageFilterSet, self).__init__(*args, **kwargs)
34 field = self.form.fields['type']
35 field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)
36
37 class Meta(StructureRelatedFilterSet.Meta):
38 model = Signage
39 fields = StructureRelatedFilterSet.Meta.fields + ['type']
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py
--- a/geotrek/infrastructure/filters.py
+++ b/geotrek/infrastructure/filters.py
@@ -20,6 +20,11 @@
field = self.form.fields['type']
field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)
+ field = self.form.fields['type__type']
+ all_choices = field.widget.choices
+ all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]
+ field.widget.choices = [('', _(u"Category"))] + all_choices
+
class Meta(StructureRelatedFilterSet.Meta):
model = Infrastructure
fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']
@@ -29,11 +34,6 @@
intervention_year = YearFilter(name='interventions_set__date',
widget=InfrastructureYearSelect)
- def __init__(self, *args, **kwargs):
- super(SignageFilterSet, self).__init__(*args, **kwargs)
- field = self.form.fields['type']
- field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)
-
class Meta(StructureRelatedFilterSet.Meta):
model = Signage
- fields = StructureRelatedFilterSet.Meta.fields + ['type']
+ fields = StructureRelatedFilterSet.Meta.fields
|
{"golden_diff": "diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py\n--- a/geotrek/infrastructure/filters.py\n+++ b/geotrek/infrastructure/filters.py\n@@ -20,6 +20,11 @@\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n \n+ field = self.form.fields['type__type']\n+ all_choices = field.widget.choices\n+ all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]\n+ field.widget.choices = [('', _(u\"Category\"))] + all_choices\n+\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n@@ -29,11 +34,6 @@\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n \n- def __init__(self, *args, **kwargs):\n- super(SignageFilterSet, self).__init__(*args, **kwargs)\n- field = self.form.fields['type']\n- field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n-\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n- fields = StructureRelatedFilterSet.Meta.fields + ['type']\n+ fields = StructureRelatedFilterSet.Meta.fields\n", "issue": "Infrastructure list is filtered on \"Ouvrage\" by default\nJ'ai cr\u00e9\u00e9 des points d'am\u00e9nagements (8 au total), cependant la plupart ne s'affichent pas (ni dans la liste, ni sur la carte)...\n\nLorsque je rentre dans une fiche am\u00e9nagement et que je reclique sur le bouton liste, l\u00e0 ils apparaissent tous mais seulement sur la carte.\n\nPar contre, si je touche au zoom, ils disparaissent et je n'ai plus que les trois du d\u00e9but.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.common.filters import StructureRelatedFilterSet, YearFilter\nfrom geotrek.maintenance.filters import InterventionYearSelect\n\nfrom .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage\n\n\nclass InfrastructureYearSelect(InterventionYearSelect):\n label = _(u\"Intervention year\")\n\n\nclass InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect,\n label=_(u\"Intervention year\"))\n\n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n\n\nclass SignageFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n def __init__(self, *args, **kwargs):\n super(SignageFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n fields = StructureRelatedFilterSet.Meta.fields + ['type']\n", "path": "geotrek/infrastructure/filters.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.common.filters import StructureRelatedFilterSet, YearFilter\nfrom geotrek.maintenance.filters import InterventionYearSelect\n\nfrom .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage\n\n\nclass InfrastructureYearSelect(InterventionYearSelect):\n label = 
_(u\"Intervention year\")\n\n\nclass InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect,\n label=_(u\"Intervention year\"))\n\n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n field = self.form.fields['type__type']\n all_choices = field.widget.choices\n all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]\n field.widget.choices = [('', _(u\"Category\"))] + all_choices\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n\n\nclass SignageFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n fields = StructureRelatedFilterSet.Meta.fields\n", "path": "geotrek/infrastructure/filters.py"}]}
| 905 | 332 |
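As a brief aside to the Geotrek record above: the golden diff boils down to rebuilding a widget's choice list without the signage category and prepending an empty "Category" entry. The framework-free sketch below imitates that step with plain tuples; the choice values are invented stand-ins for `INFRASTRUCTURE_TYPES`, not the project's real constants.

```python
# Invented stand-ins for the INFRASTRUCTURE_TYPES constants.
SIGNAGE = "S"
all_choices = [("B", "Building"), ("F", "Facility"), (SIGNAGE, "Signage")]

# Rebuild the category choices without the signage entry and put an empty
# "Category" choice first, as the patched InfrastructureFilterSet does.
choices = [("", "Category")] + [c for c in all_choices if c[0] != SIGNAGE]

print(choices)
```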
gh_patches_debug_11644
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-954
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CI-fail] GLScatterPlotItem failing on Windows Builds
```
Traceback (most recent call last):
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\latebind.py", line 41, in __call__
return self._finalCall( *args, **named )
TypeError: 'NoneType' object is not callable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\a\1\s\pyqtgraph\opengl\GLViewWidget.py", line 60, in addItem
item.initializeGL()
File "D:\a\1\s\pyqtgraph\opengl\items\GLScatterPlotItem.py", line 70, in initializeGL
self.pointTexture = glGenTextures(1)
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\latebind.py", line 61, in __call__
return self.wrapperFunction( self.baseFunction, *args, **named )
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\GL\exceptional.py", line 178, in glGenTextures
baseFunction( count, textures)
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\latebind.py", line 45, in __call__
return self._finalCall( *args, **named )
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\wrapper.py", line 664, in wrapperCall
raise err
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\wrapper.py", line 657, in wrapperCall
result = wrappedOperation( *cArguments )
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\platform\baseplatform.py", line 402, in __call__
return self( *args, **named )
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\error.py", line 232, in glCheckError
baseOperation = baseOperation,
OpenGL.error.GLError: GLError(
err = 1282,
description = b'invalid operation',
baseOperation = glGenTextures,
pyArgs = (1, c_ulong(0)),
cArgs = (1, <cparam 'P' (00000158BE5A9310)>),
cArguments = (1, <cparam 'P' (00000158BE5A9310)>)
)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 7, in <module>
File "D:\a\1\s\examples\GLScatterPlotItem.py", line 46, in <module>
w.addItem(sp1)
File "D:\a\1\s\pyqtgraph\opengl\GLViewWidget.py", line 62, in addItem
self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(item))
File "D:\a\1\s\pyqtgraph\opengl\GLViewWidget.py", line 429, in checkOpenGLVersion
ver = glGetString(GL_VERSION).split()[0]
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\platform\baseplatform.py", line 402, in __call__
return self( *args, **named )
File "c:\hostedtoolcache\windows\python\3.7.3\x64\lib\site-packages\OpenGL\error.py", line 232, in glCheckError
baseOperation = baseOperation,
OpenGL.error.GLError: GLError(
err = 1282,
description = b'invalid operation',
baseOperation = glGetString,
cArguments = (GL_VERSION,)
)
Failed Scatter Plot Example Test Located in GLScatterPlotItem.py
```
Likely related to #928
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/opengl/glInfo.py`
Content:
```
1 from ..Qt import QtCore, QtGui, QtOpenGL
2 from OpenGL.GL import *
3 app = QtGui.QApplication([])
4
5 class GLTest(QtOpenGL.QGLWidget):
6 def __init__(self):
7 QtOpenGL.QGLWidget.__init__(self)
8 self.makeCurrent()
9 print("GL version:" + glGetString(GL_VERSION))
10 print("MAX_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))
11 print("MAX_3D_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))
12 print("Extensions: " + glGetString(GL_EXTENSIONS))
13
14 GLTest()
15
16
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/opengl/glInfo.py b/pyqtgraph/opengl/glInfo.py
--- a/pyqtgraph/opengl/glInfo.py
+++ b/pyqtgraph/opengl/glInfo.py
@@ -6,10 +6,10 @@
def __init__(self):
QtOpenGL.QGLWidget.__init__(self)
self.makeCurrent()
- print("GL version:" + glGetString(GL_VERSION))
+ print("GL version:" + glGetString(GL_VERSION).decode("utf-8"))
print("MAX_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))
print("MAX_3D_TEXTURE_SIZE: %d" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))
- print("Extensions: " + glGetString(GL_EXTENSIONS))
+ print("Extensions: " + glGetString(GL_EXTENSIONS).decode("utf-8").replace(" ", "\n"))
GLTest()
|
{"golden_diff": "diff --git a/pyqtgraph/opengl/glInfo.py b/pyqtgraph/opengl/glInfo.py\n--- a/pyqtgraph/opengl/glInfo.py\n+++ b/pyqtgraph/opengl/glInfo.py\n@@ -6,10 +6,10 @@\n def __init__(self):\n QtOpenGL.QGLWidget.__init__(self)\n self.makeCurrent()\n- print(\"GL version:\" + glGetString(GL_VERSION))\n+ print(\"GL version:\" + glGetString(GL_VERSION).decode(\"utf-8\"))\n print(\"MAX_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))\n print(\"MAX_3D_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))\n- print(\"Extensions: \" + glGetString(GL_EXTENSIONS))\n+ print(\"Extensions: \" + glGetString(GL_EXTENSIONS).decode(\"utf-8\").replace(\" \", \"\\n\"))\n \n GLTest()\n", "issue": "[CI-fail] GLScatterPlotItem failing on Windows Builds\n```\r\nTraceback (most recent call last):\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\latebind.py\", line 41, in __call__\r\n\r\n return self._finalCall( *args, **named )\r\n\r\nTypeError: 'NoneType' object is not callable\r\n\r\n\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"D:\\a\\1\\s\\pyqtgraph\\opengl\\GLViewWidget.py\", line 60, in addItem\r\n\r\n item.initializeGL()\r\n\r\n File \"D:\\a\\1\\s\\pyqtgraph\\opengl\\items\\GLScatterPlotItem.py\", line 70, in initializeGL\r\n\r\n self.pointTexture = glGenTextures(1)\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\latebind.py\", line 61, in __call__\r\n\r\n return self.wrapperFunction( self.baseFunction, *args, **named )\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\GL\\exceptional.py\", line 178, in glGenTextures\r\n\r\n baseFunction( count, textures)\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\latebind.py\", line 45, in __call__\r\n\r\n return self._finalCall( *args, **named )\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\wrapper.py\", line 664, in wrapperCall\r\n\r\n raise err\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\wrapper.py\", line 657, in wrapperCall\r\n\r\n result = wrappedOperation( *cArguments )\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\platform\\baseplatform.py\", line 402, in __call__\r\n\r\n return self( *args, **named )\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\error.py\", line 232, in glCheckError\r\n\r\n baseOperation = baseOperation,\r\n\r\nOpenGL.error.GLError: GLError(\r\n\r\n\terr = 1282,\r\n\r\n\tdescription = b'invalid operation',\r\n\r\n\tbaseOperation = glGenTextures,\r\n\r\n\tpyArgs = (1, c_ulong(0)),\r\n\r\n\tcArgs = (1, <cparam 'P' (00000158BE5A9310)>),\r\n\r\n\tcArguments = (1, <cparam 'P' (00000158BE5A9310)>)\r\n\r\n)\r\n\r\n\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"<stdin>\", line 7, in <module>\r\n\r\n File \"D:\\a\\1\\s\\examples\\GLScatterPlotItem.py\", line 46, in <module>\r\n\r\n w.addItem(sp1)\r\n\r\n File \"D:\\a\\1\\s\\pyqtgraph\\opengl\\GLViewWidget.py\", line 62, in addItem\r\n\r\n self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' 
% str(item))\r\n\r\n File \"D:\\a\\1\\s\\pyqtgraph\\opengl\\GLViewWidget.py\", line 429, in checkOpenGLVersion\r\n\r\n ver = glGetString(GL_VERSION).split()[0]\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\platform\\baseplatform.py\", line 402, in __call__\r\n\r\n return self( *args, **named )\r\n\r\n File \"c:\\hostedtoolcache\\windows\\python\\3.7.3\\x64\\lib\\site-packages\\OpenGL\\error.py\", line 232, in glCheckError\r\n\r\n baseOperation = baseOperation,\r\n\r\nOpenGL.error.GLError: GLError(\r\n\r\n\terr = 1282,\r\n\r\n\tdescription = b'invalid operation',\r\n\r\n\tbaseOperation = glGetString,\r\n\r\n\tcArguments = (GL_VERSION,)\r\n\r\n)\r\n\r\n\r\nFailed Scatter Plot Example Test Located in GLScatterPlotItem.py \r\n```\r\n\r\nLikely related to #928 \n", "before_files": [{"content": "from ..Qt import QtCore, QtGui, QtOpenGL\nfrom OpenGL.GL import *\napp = QtGui.QApplication([])\n\nclass GLTest(QtOpenGL.QGLWidget):\n def __init__(self):\n QtOpenGL.QGLWidget.__init__(self)\n self.makeCurrent()\n print(\"GL version:\" + glGetString(GL_VERSION))\n print(\"MAX_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))\n print(\"MAX_3D_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))\n print(\"Extensions: \" + glGetString(GL_EXTENSIONS))\n\nGLTest()\n\n\n", "path": "pyqtgraph/opengl/glInfo.py"}], "after_files": [{"content": "from ..Qt import QtCore, QtGui, QtOpenGL\nfrom OpenGL.GL import *\napp = QtGui.QApplication([])\n\nclass GLTest(QtOpenGL.QGLWidget):\n def __init__(self):\n QtOpenGL.QGLWidget.__init__(self)\n self.makeCurrent()\n print(\"GL version:\" + glGetString(GL_VERSION).decode(\"utf-8\"))\n print(\"MAX_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_TEXTURE_SIZE))\n print(\"MAX_3D_TEXTURE_SIZE: %d\" % glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE))\n print(\"Extensions: \" + glGetString(GL_EXTENSIONS).decode(\"utf-8\").replace(\" \", \"\\n\"))\n\nGLTest()\n\n\n", "path": "pyqtgraph/opengl/glInfo.py"}]}
| 1,431 | 199 |
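As an aside to the pyqtgraph record above: under Python 3, `glGetString` returns `bytes`, and concatenating bytes with `str` raises `TypeError`, which is what the patched `glInfo.py` avoids by decoding. The sketch below reproduces just that string-handling detail with an invented byte string; it does not touch OpenGL at all.

```python
# Invented stand-in for the bytes object glGetString(GL_VERSION) returns
# under Python 3; no OpenGL context is needed for the illustration.
version = b"4.6.0 Example Renderer"

try:
    print("GL version:" + version)  # str + bytes raises TypeError
except TypeError as err:
    print("TypeError:", err)

# Decoding first, as the patched glInfo.py does, works as expected.
print("GL version:" + version.decode("utf-8"))
```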
gh_patches_debug_27636
|
rasdani/github-patches
|
git_diff
|
graspologic-org__graspologic-829
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Inaccurracy in how to use autokmeans
## Expected Behavior
https://github.com/microsoft/graspologic/blob/10de2bf17b972decbab318568154af226dcd71fa/graspologic/cluster/kclust.py#L16
This line is false; higher silhouette score is better, to my knowledge? https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html
## Actual Behavior
Documentation correctly reports how to use the package.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspologic/cluster/kclust.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from typing import Optional, Union
5
6 import numpy as np
7 from sklearn.cluster import KMeans
8 from sklearn.metrics import adjusted_rand_score, silhouette_score
9
10 from graspologic.types import List
11
12 from .base import BaseCluster
13
14
15 class KMeansCluster(BaseCluster):
16 ari_: Optional[List[float]]
17
18 """
19 KMeans Cluster.
20
21 It computes all possible models from one component to
22 ``max_clusters``. The best model is given by the lowest silhouette score.
23
24 Parameters
25 ----------
26 max_clusters : int, defaults to 1.
27 The maximum number of mixture components to consider.
28
29 random_state : int, RandomState instance or None, optional (default=None)
30 If int, ``random_state`` is the seed used by the random number generator;
31 If RandomState instance, ``random_state`` is the random number generator;
32 If None, the random number generator is the RandomState instance used
33 by ``np.random``.
34
35 Attributes
36 ----------
37 n_clusters_ : int
38 Optimal number of components. If y is given, it is based on largest
39 ARI. Otherwise, it is based on smallest loss.
40
41 model_ : KMeans object
42 Fitted KMeans object fitted with optimal n_components.
43
44 silhouette_ : list
45 List of silhouette scores computed for all possible number
46 of clusters given by ``range(2, max_clusters)``.
47
48 ari_ : list
49 Only computed when y is given. List of ARI values computed for
50 all possible number of clusters given by ``range(2, max_clusters)``.
51 """
52
53 def __init__(
54 self,
55 max_clusters: int = 2,
56 random_state: Optional[Union[int, np.random.RandomState]] = None,
57 ):
58 if isinstance(max_clusters, int):
59 if max_clusters <= 1:
60 msg = "n_components must be >= 2 or None."
61 raise ValueError(msg)
62 else:
63 self.max_clusters = max_clusters
64 else:
65 msg = "max_clusters must be an integer, not {}.".format(type(max_clusters))
66 raise TypeError(msg)
67 self.random_state = random_state
68
69 def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> "KMeansCluster":
70 """
71 Fits kmeans model to the data.
72
73 Parameters
74 ----------
75 X : array-like, shape (n_samples, n_features)
76 List of n_features-dimensional data points. Each row
77 corresponds to a single data point.
78
79 y : array-like, shape (n_samples,), optional (default=None)
80 List of labels for `X` if available. Used to compute ARI scores.
81
82 Returns
83 -------
84 self
85 """
86 # Deal with number of clusters
87 if self.max_clusters > X.shape[0]:
88 msg = "n_components must be >= n_samples, but got \
89 n_components = {}, n_samples = {}".format(
90 self.max_clusters, X.shape[0]
91 )
92 raise ValueError(msg)
93 else:
94 max_clusters = self.max_clusters
95
96 # Get parameters
97 random_state = self.random_state
98
99 # Compute all models
100 models = []
101 silhouettes = []
102 aris = []
103 for n in range(2, max_clusters + 1):
104 model = KMeans(n_clusters=n, random_state=random_state)
105
106 # Fit and compute values
107 predictions = model.fit_predict(X)
108 models.append(model)
109 silhouettes.append(silhouette_score(X, predictions))
110 if y is not None:
111 aris.append(adjusted_rand_score(y, predictions))
112
113 if y is not None:
114 self.ari_ = aris
115 self.silhouette_ = silhouettes
116 self.n_clusters_ = np.argmax(aris) + 1
117 self.model_ = models[np.argmax(aris)]
118 else:
119 self.ari_ = None
120 self.silhouette_ = silhouettes
121 self.n_clusters_ = np.argmax(silhouettes) + 1
122 self.model_ = models[np.argmax(silhouettes)]
123
124 return self
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/graspologic/cluster/kclust.py b/graspologic/cluster/kclust.py
--- a/graspologic/cluster/kclust.py
+++ b/graspologic/cluster/kclust.py
@@ -18,13 +18,15 @@
"""
KMeans Cluster.
- It computes all possible models from one component to
- ``max_clusters``. The best model is given by the lowest silhouette score.
+ It computes all possible models from one component to ``max_clusters``.
+ When the true labels are known, the best model is given by the model with highest
+ adjusted Rand index (ARI).
+ Otherwise, the best model is given by the model with highest silhouette score.
Parameters
----------
- max_clusters : int, defaults to 1.
- The maximum number of mixture components to consider.
+ max_clusters : int, default=2.
+ The maximum number of clusters to consider. Must be ``>=2``.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number generator;
@@ -35,11 +37,11 @@
Attributes
----------
n_clusters_ : int
- Optimal number of components. If y is given, it is based on largest
- ARI. Otherwise, it is based on smallest loss.
+ Optimal number of clusters. If y is given, it is based on largest
+ ARI. Otherwise, it is based on highest silhouette score.
model_ : KMeans object
- Fitted KMeans object fitted with optimal n_components.
+ Fitted KMeans object fitted with ``n_clusters_``.
silhouette_ : list
List of silhouette scores computed for all possible number
|
{"golden_diff": "diff --git a/graspologic/cluster/kclust.py b/graspologic/cluster/kclust.py\n--- a/graspologic/cluster/kclust.py\n+++ b/graspologic/cluster/kclust.py\n@@ -18,13 +18,15 @@\n \"\"\"\n KMeans Cluster.\n \n- It computes all possible models from one component to\n- ``max_clusters``. The best model is given by the lowest silhouette score.\n+ It computes all possible models from one component to ``max_clusters``.\n+ When the true labels are known, the best model is given by the model with highest\n+ adjusted Rand index (ARI).\n+ Otherwise, the best model is given by the model with highest silhouette score.\n \n Parameters\n ----------\n- max_clusters : int, defaults to 1.\n- The maximum number of mixture components to consider.\n+ max_clusters : int, default=2.\n+ The maximum number of clusters to consider. Must be ``>=2``.\n \n random_state : int, RandomState instance or None, optional (default=None)\n If int, ``random_state`` is the seed used by the random number generator;\n@@ -35,11 +37,11 @@\n Attributes\n ----------\n n_clusters_ : int\n- Optimal number of components. If y is given, it is based on largest\n- ARI. Otherwise, it is based on smallest loss.\n+ Optimal number of clusters. If y is given, it is based on largest\n+ ARI. Otherwise, it is based on highest silhouette score.\n \n model_ : KMeans object\n- Fitted KMeans object fitted with optimal n_components.\n+ Fitted KMeans object fitted with ``n_clusters_``.\n \n silhouette_ : list\n List of silhouette scores computed for all possible number\n", "issue": "[BUG] Inaccurracy in how to use autokmeans\n## Expected Behavior\r\n\r\nhttps://github.com/microsoft/graspologic/blob/10de2bf17b972decbab318568154af226dcd71fa/graspologic/cluster/kclust.py#L16\r\n\r\nThis line is false; higher silhouette score is better, to my knowledge? https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html\r\n\r\n## Actual Behavior\r\n\r\nDocumentation correctly reports how to use the package.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom typing import Optional, Union\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import adjusted_rand_score, silhouette_score\n\nfrom graspologic.types import List\n\nfrom .base import BaseCluster\n\n\nclass KMeansCluster(BaseCluster):\n ari_: Optional[List[float]]\n\n \"\"\"\n KMeans Cluster.\n\n It computes all possible models from one component to\n ``max_clusters``. The best model is given by the lowest silhouette score.\n\n Parameters\n ----------\n max_clusters : int, defaults to 1.\n The maximum number of mixture components to consider.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, ``random_state`` is the seed used by the random number generator;\n If RandomState instance, ``random_state`` is the random number generator;\n If None, the random number generator is the RandomState instance used\n by ``np.random``.\n\n Attributes\n ----------\n n_clusters_ : int\n Optimal number of components. If y is given, it is based on largest\n ARI. Otherwise, it is based on smallest loss.\n\n model_ : KMeans object\n Fitted KMeans object fitted with optimal n_components.\n\n silhouette_ : list\n List of silhouette scores computed for all possible number\n of clusters given by ``range(2, max_clusters)``.\n\n ari_ : list\n Only computed when y is given. 
List of ARI values computed for\n all possible number of clusters given by ``range(2, max_clusters)``.\n \"\"\"\n\n def __init__(\n self,\n max_clusters: int = 2,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n ):\n if isinstance(max_clusters, int):\n if max_clusters <= 1:\n msg = \"n_components must be >= 2 or None.\"\n raise ValueError(msg)\n else:\n self.max_clusters = max_clusters\n else:\n msg = \"max_clusters must be an integer, not {}.\".format(type(max_clusters))\n raise TypeError(msg)\n self.random_state = random_state\n\n def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> \"KMeansCluster\":\n \"\"\"\n Fits kmeans model to the data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for `X` if available. Used to compute ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n # Deal with number of clusters\n if self.max_clusters > X.shape[0]:\n msg = \"n_components must be >= n_samples, but got \\\n n_components = {}, n_samples = {}\".format(\n self.max_clusters, X.shape[0]\n )\n raise ValueError(msg)\n else:\n max_clusters = self.max_clusters\n\n # Get parameters\n random_state = self.random_state\n\n # Compute all models\n models = []\n silhouettes = []\n aris = []\n for n in range(2, max_clusters + 1):\n model = KMeans(n_clusters=n, random_state=random_state)\n\n # Fit and compute values\n predictions = model.fit_predict(X)\n models.append(model)\n silhouettes.append(silhouette_score(X, predictions))\n if y is not None:\n aris.append(adjusted_rand_score(y, predictions))\n\n if y is not None:\n self.ari_ = aris\n self.silhouette_ = silhouettes\n self.n_clusters_ = np.argmax(aris) + 1\n self.model_ = models[np.argmax(aris)]\n else:\n self.ari_ = None\n self.silhouette_ = silhouettes\n self.n_clusters_ = np.argmax(silhouettes) + 1\n self.model_ = models[np.argmax(silhouettes)]\n\n return self\n", "path": "graspologic/cluster/kclust.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom typing import Optional, Union\n\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import adjusted_rand_score, silhouette_score\n\nfrom graspologic.types import List\n\nfrom .base import BaseCluster\n\n\nclass KMeansCluster(BaseCluster):\n ari_: Optional[List[float]]\n\n \"\"\"\n KMeans Cluster.\n\n It computes all possible models from one component to ``max_clusters``.\n When the true labels are known, the best model is given by the model with highest\n adjusted Rand index (ARI).\n Otherwise, the best model is given by the model with highest silhouette score.\n\n Parameters\n ----------\n max_clusters : int, default=2.\n The maximum number of clusters to consider. Must be ``>=2``.\n\n random_state : int, RandomState instance or None, optional (default=None)\n If int, ``random_state`` is the seed used by the random number generator;\n If RandomState instance, ``random_state`` is the random number generator;\n If None, the random number generator is the RandomState instance used\n by ``np.random``.\n\n Attributes\n ----------\n n_clusters_ : int\n Optimal number of clusters. If y is given, it is based on largest\n ARI. 
Otherwise, it is based on highest silhouette score.\n\n model_ : KMeans object\n Fitted KMeans object fitted with ``n_clusters_``.\n\n silhouette_ : list\n List of silhouette scores computed for all possible number\n of clusters given by ``range(2, max_clusters)``.\n\n ari_ : list\n Only computed when y is given. List of ARI values computed for\n all possible number of clusters given by ``range(2, max_clusters)``.\n \"\"\"\n\n def __init__(\n self,\n max_clusters: int = 2,\n random_state: Optional[Union[int, np.random.RandomState]] = None,\n ):\n if isinstance(max_clusters, int):\n if max_clusters <= 1:\n msg = \"n_components must be >= 2 or None.\"\n raise ValueError(msg)\n else:\n self.max_clusters = max_clusters\n else:\n msg = \"max_clusters must be an integer, not {}.\".format(type(max_clusters))\n raise TypeError(msg)\n self.random_state = random_state\n\n def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> \"KMeansCluster\":\n \"\"\"\n Fits kmeans model to the data.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n List of n_features-dimensional data points. Each row\n corresponds to a single data point.\n\n y : array-like, shape (n_samples,), optional (default=None)\n List of labels for `X` if available. Used to compute ARI scores.\n\n Returns\n -------\n self\n \"\"\"\n # Deal with number of clusters\n if self.max_clusters > X.shape[0]:\n msg = \"n_components must be >= n_samples, but got \\\n n_components = {}, n_samples = {}\".format(\n self.max_clusters, X.shape[0]\n )\n raise ValueError(msg)\n else:\n max_clusters = self.max_clusters\n\n # Get parameters\n random_state = self.random_state\n\n # Compute all models\n models = []\n silhouettes = []\n aris = []\n for n in range(2, max_clusters + 1):\n model = KMeans(n_clusters=n, random_state=random_state)\n\n # Fit and compute values\n predictions = model.fit_predict(X)\n models.append(model)\n silhouettes.append(silhouette_score(X, predictions))\n if y is not None:\n aris.append(adjusted_rand_score(y, predictions))\n\n if y is not None:\n self.ari_ = aris\n self.silhouette_ = silhouettes\n self.n_clusters_ = np.argmax(aris) + 1\n self.model_ = models[np.argmax(aris)]\n else:\n self.ari_ = None\n self.silhouette_ = silhouettes\n self.n_clusters_ = np.argmax(silhouettes) + 1\n self.model_ = models[np.argmax(silhouettes)]\n\n return self\n", "path": "graspologic/cluster/kclust.py"}]}
| 1,561 | 402 |
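As an aside to the graspologic record above: the issue's point is that a higher silhouette score is better, and `KMeansCluster.fit` indeed keeps the `argmax`; only the docstring was wrong. The sketch below shows that model-selection step in isolation; it assumes scikit-learn is installed and uses invented toy data.

```python
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

# Toy data with three well-separated clusters, invented for the example.
X, _ = make_blobs(n_samples=150, centers=3, random_state=0)

scores = {}
for n in range(2, 6):
    labels = KMeans(n_clusters=n, n_init=10, random_state=0).fit_predict(X)
    scores[n] = silhouette_score(X, labels)

# Higher silhouette is better, so the best model is the argmax of the
# scores -- the behaviour the corrected docstring now describes.
best_n = max(scores, key=scores.get)
print(scores, "-> best n_clusters:", best_n)
```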
gh_patches_debug_3066
|
rasdani/github-patches
|
git_diff
|
searx__searx-200
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bing_news can't parse other languages date
When searching for french article, the time is noted as "Il y a 5 minutes", and so, doesn't match for the regex `"^[0-9]+ minute(s|) ago$"`.
Do you see a way to internationalize this detection ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/bing_news.py`
Content:
```
1 ## Bing (News)
2 #
3 # @website https://www.bing.com/news
4 # @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
5 # max. 5000 query/month
6 #
7 # @using-api no (because of query limit)
8 # @results HTML (using search portal)
9 # @stable no (HTML can change)
10 # @parse url, title, content, publishedDate
11
12 from urllib import urlencode
13 from cgi import escape
14 from lxml import html
15 from datetime import datetime, timedelta
16 from dateutil import parser
17 import re
18
19 # engine dependent config
20 categories = ['news']
21 paging = True
22 language_support = True
23
24 # search-url
25 base_url = 'https://www.bing.com/'
26 search_string = 'news/search?{query}&first={offset}'
27
28
29 # do search-request
30 def request(query, params):
31 offset = (params['pageno'] - 1) * 10 + 1
32
33 if params['language'] == 'all':
34 language = 'en-US'
35 else:
36 language = params['language'].replace('_', '-')
37
38 search_path = search_string.format(
39 query=urlencode({'q': query, 'setmkt': language}),
40 offset=offset)
41
42 params['cookies']['SRCHHPGUSR'] = \
43 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
44
45 params['url'] = base_url + search_path
46 return params
47
48
49 # get response from search-request
50 def response(resp):
51 results = []
52
53 dom = html.fromstring(resp.content)
54
55 # parse results
56 for result in dom.xpath('//div[@class="sn_r"]'):
57 link = result.xpath('.//div[@class="newstitle"]/a')[0]
58 url = link.attrib.get('href')
59 title = ' '.join(link.xpath('.//text()'))
60 contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
61 '//span[@class="sn_snip"]//text()')
62 if contentXPath is not None:
63 content = escape(' '.join(contentXPath))
64
65 # parse publishedDate
66 publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
67 '//span[contains(@class,"sn_ST")]'
68 '//span[contains(@class,"sn_tm")]'
69 '//text()')
70 if publishedDateXPath is not None:
71 publishedDate = escape(' '.join(publishedDateXPath))
72
73 if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
74 timeNumbers = re.findall(r'\d+', publishedDate)
75 publishedDate = datetime.now()\
76 - timedelta(minutes=int(timeNumbers[0]))
77 elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
78 timeNumbers = re.findall(r'\d+', publishedDate)
79 publishedDate = datetime.now()\
80 - timedelta(hours=int(timeNumbers[0]))
81 elif re.match("^[0-9]+ hour(s|),"
82 " [0-9]+ minute(s|) ago$", publishedDate):
83 timeNumbers = re.findall(r'\d+', publishedDate)
84 publishedDate = datetime.now()\
85 - timedelta(hours=int(timeNumbers[0]))\
86 - timedelta(minutes=int(timeNumbers[1]))
87 elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
88 timeNumbers = re.findall(r'\d+', publishedDate)
89 publishedDate = datetime.now()\
90 - timedelta(days=int(timeNumbers[0]))
91 else:
92 try:
93 # FIXME use params['language'] to parse either mm/dd or dd/mm
94 publishedDate = parser.parse(publishedDate, dayfirst=False)
95 except TypeError:
96 # FIXME
97 publishedDate = datetime.now()
98
99 # append result
100 results.append({'url': url,
101 'title': title,
102 'publishedDate': publishedDate,
103 'content': content})
104
105 # return results
106 return results
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -39,8 +39,7 @@
query=urlencode({'q': query, 'setmkt': language}),
offset=offset)
- params['cookies']['SRCHHPGUSR'] = \
- 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
+ params['cookies']['_FP'] = "ui=en-US"
params['url'] = base_url + search_path
return params
|
{"golden_diff": "diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py\n--- a/searx/engines/bing_news.py\n+++ b/searx/engines/bing_news.py\n@@ -39,8 +39,7 @@\n query=urlencode({'q': query, 'setmkt': language}),\n offset=offset)\n \n- params['cookies']['SRCHHPGUSR'] = \\\n- 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]\n+ params['cookies']['_FP'] = \"ui=en-US\"\n \n params['url'] = base_url + search_path\n return params\n", "issue": "bing_news can't parse other languages date\nWhen searching for french article, the time is noted as \"Il y a 5 minutes\", and so, doesn't match for the regex `\"^[0-9]+ minute(s|) ago$\"`.\n\nDo you see a way to internationalize this detection ?\n\n", "before_files": [{"content": "## Bing (News)\n#\n# @website https://www.bing.com/news\n# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),\n# max. 5000 query/month\n#\n# @using-api no (because of query limit)\n# @results HTML (using search portal)\n# @stable no (HTML can change)\n# @parse url, title, content, publishedDate\n\nfrom urllib import urlencode\nfrom cgi import escape\nfrom lxml import html\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\nimport re\n\n# engine dependent config\ncategories = ['news']\npaging = True\nlanguage_support = True\n\n# search-url\nbase_url = 'https://www.bing.com/'\nsearch_string = 'news/search?{query}&first={offset}'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n if params['language'] == 'all':\n language = 'en-US'\n else:\n language = params['language'].replace('_', '-')\n\n search_path = search_string.format(\n query=urlencode({'q': query, 'setmkt': language}),\n offset=offset)\n\n params['cookies']['SRCHHPGUSR'] = \\\n 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]\n\n params['url'] = base_url + search_path\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.content)\n\n # parse results\n for result in dom.xpath('//div[@class=\"sn_r\"]'):\n link = result.xpath('.//div[@class=\"newstitle\"]/a')[0]\n url = link.attrib.get('href')\n title = ' '.join(link.xpath('.//text()'))\n contentXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[@class=\"sn_snip\"]//text()')\n if contentXPath is not None:\n content = escape(' '.join(contentXPath))\n\n # parse publishedDate\n publishedDateXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[contains(@class,\"sn_ST\")]'\n '//span[contains(@class,\"sn_tm\")]'\n '//text()')\n if publishedDateXPath is not None:\n publishedDate = escape(' '.join(publishedDateXPath))\n\n if re.match(\"^[0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(minutes=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|),\"\n \" [0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\\\n - timedelta(minutes=int(timeNumbers[1]))\n elif re.match(\"^[0-9]+ day(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(days=int(timeNumbers[0]))\n else:\n try:\n # FIXME use params['language'] to parse either 
mm/dd or dd/mm\n publishedDate = parser.parse(publishedDate, dayfirst=False)\n except TypeError:\n # FIXME\n publishedDate = datetime.now()\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'publishedDate': publishedDate,\n 'content': content})\n\n # return results\n return results\n", "path": "searx/engines/bing_news.py"}], "after_files": [{"content": "## Bing (News)\n#\n# @website https://www.bing.com/news\n# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),\n# max. 5000 query/month\n#\n# @using-api no (because of query limit)\n# @results HTML (using search portal)\n# @stable no (HTML can change)\n# @parse url, title, content, publishedDate\n\nfrom urllib import urlencode\nfrom cgi import escape\nfrom lxml import html\nfrom datetime import datetime, timedelta\nfrom dateutil import parser\nimport re\n\n# engine dependent config\ncategories = ['news']\npaging = True\nlanguage_support = True\n\n# search-url\nbase_url = 'https://www.bing.com/'\nsearch_string = 'news/search?{query}&first={offset}'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * 10 + 1\n\n if params['language'] == 'all':\n language = 'en-US'\n else:\n language = params['language'].replace('_', '-')\n\n search_path = search_string.format(\n query=urlencode({'q': query, 'setmkt': language}),\n offset=offset)\n\n params['cookies']['_FP'] = \"ui=en-US\"\n\n params['url'] = base_url + search_path\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.content)\n\n # parse results\n for result in dom.xpath('//div[@class=\"sn_r\"]'):\n link = result.xpath('.//div[@class=\"newstitle\"]/a')[0]\n url = link.attrib.get('href')\n title = ' '.join(link.xpath('.//text()'))\n contentXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[@class=\"sn_snip\"]//text()')\n if contentXPath is not None:\n content = escape(' '.join(contentXPath))\n\n # parse publishedDate\n publishedDateXPath = result.xpath('.//div[@class=\"sn_txt\"]/div'\n '//span[contains(@class,\"sn_ST\")]'\n '//span[contains(@class,\"sn_tm\")]'\n '//text()')\n if publishedDateXPath is not None:\n publishedDate = escape(' '.join(publishedDateXPath))\n\n if re.match(\"^[0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(minutes=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\n elif re.match(\"^[0-9]+ hour(s|),\"\n \" [0-9]+ minute(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(hours=int(timeNumbers[0]))\\\n - timedelta(minutes=int(timeNumbers[1]))\n elif re.match(\"^[0-9]+ day(s|) ago$\", publishedDate):\n timeNumbers = re.findall(r'\\d+', publishedDate)\n publishedDate = datetime.now()\\\n - timedelta(days=int(timeNumbers[0]))\n else:\n try:\n # FIXME use params['language'] to parse either mm/dd or dd/mm\n publishedDate = parser.parse(publishedDate, dayfirst=False)\n except TypeError:\n # FIXME\n publishedDate = datetime.now()\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'publishedDate': publishedDate,\n 'content': content})\n\n # return results\n return results\n", "path": "searx/engines/bing_news.py"}]}
| 1,400 | 158 |
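As an aside to the searx record above: the English-only regexes are why localized strings such as "Il y a 5 minutes" were never converted to a timestamp, and the golden diff sidesteps localization entirely by forcing Bing's UI to en-US via the `_FP` cookie. The sketch below only illustrates the relative-date parsing itself; the sample strings are invented.

```python
import re
from datetime import datetime, timedelta

def parse_relative(text, now=None):
    # English-only parsing, mirroring the regexes in bing_news.py; the
    # golden diff forces en-US output from Bing so these patterns apply.
    now = now or datetime.now()
    m = re.match(r"^(\d+) minute(s|) ago$", text)
    if m:
        return now - timedelta(minutes=int(m.group(1)))
    m = re.match(r"^(\d+) hour(s|) ago$", text)
    if m:
        return now - timedelta(hours=int(m.group(1)))
    return None  # localized strings such as "Il y a 5 minutes" land here

print(parse_relative("5 minutes ago"))
print(parse_relative("Il y a 5 minutes"))
```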
gh_patches_debug_3814
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-573
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with --checkout reclone
The message should ask me about recloning `/Users/audreyr/.cookiecutters/cookiecutter-pypackage`, not `/Users/audreyr/.cookiecutters`.
```
$ cookiecutter https://github.com/eliasdorneles/cookiecutter-pypackage/ -c adding-travis-setup-for-pypi-deployment
You've cloned /Users/audreyr/.cookiecutters before. Is it okay to delete and re-clone it? [yes]:
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/vcs.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.vcs
6 ----------------
7
8 Helper functions for working with version control systems.
9 """
10
11 from __future__ import unicode_literals
12 import logging
13 import os
14 import subprocess
15 import sys
16
17 from whichcraft import which
18
19 from .exceptions import UnknownRepoType, VCSNotInstalled
20 from .prompt import read_user_yes_no
21 from .utils import make_sure_path_exists, rmtree
22
23
24 def prompt_and_delete_repo(repo_dir, no_input=False):
25 """
26 Asks the user whether it's okay to delete the previously-cloned repo.
27 If yes, deletes it. Otherwise, Cookiecutter exits.
28
29 :param repo_dir: Directory of previously-cloned repo.
30 :param no_input: Suppress prompt to delete repo and just delete it.
31 """
32
33 # Suppress prompt if called via API
34 if no_input:
35 ok_to_delete = True
36 else:
37 question = (
38 "You've cloned {0} before. "
39 'Is it okay to delete and re-clone it?'
40 ).format(repo_dir)
41
42 ok_to_delete = read_user_yes_no(question, 'yes')
43
44 if ok_to_delete:
45 rmtree(repo_dir)
46 else:
47 sys.exit()
48
49
50 def identify_repo(repo_url):
51 """
52 Determines if `repo_url` should be treated as a URL to a git or hg repo.
53 Repos can be identified prepeding "hg+" or "git+" to repo URL.
54
55 :param repo_url: Repo URL of unknown type.
56 :returns: ("git", repo_url), ("hg", repo_url), or None.
57 """
58 repo_url_values = repo_url.split('+')
59 if len(repo_url_values) == 2:
60 repo_type = repo_url_values[0]
61 if repo_type in ["git", "hg"]:
62 return repo_type, repo_url_values[1]
63 else:
64 raise UnknownRepoType
65 else:
66 if "git" in repo_url:
67 return "git", repo_url
68 elif "bitbucket" in repo_url:
69 return "hg", repo_url
70 else:
71 raise UnknownRepoType
72
73
74 def is_vcs_installed(repo_type):
75 """
76 Check if the version control system for a repo type is installed.
77
78 :param repo_type:
79 """
80 return bool(which(repo_type))
81
82
83 def clone(repo_url, checkout=None, clone_to_dir=".", no_input=False):
84 """
85 Clone a repo to the current directory.
86
87 :param repo_url: Repo URL of unknown type.
88 :param checkout: The branch, tag or commit ID to checkout after clone.
89 :param clone_to_dir: The directory to clone to.
90 Defaults to the current directory.
91 :param no_input: Suppress all user prompts when calling via API.
92 """
93
94 # Ensure that clone_to_dir exists
95 clone_to_dir = os.path.expanduser(clone_to_dir)
96 make_sure_path_exists(clone_to_dir)
97
98 # identify the repo_type
99 repo_type, repo_url = identify_repo(repo_url)
100
101 # check that the appropriate VCS for the repo_type is installed
102 if not is_vcs_installed(repo_type):
103 msg = "'{0}' is not installed.".format(repo_type)
104 raise VCSNotInstalled(msg)
105
106 tail = os.path.split(repo_url)[1]
107 if repo_type == 'git':
108 repo_dir = os.path.normpath(os.path.join(clone_to_dir,
109 tail.rsplit('.git')[0]))
110 elif repo_type == 'hg':
111 repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail))
112 logging.debug('repo_dir is {0}'.format(repo_dir))
113
114 if os.path.isdir(repo_dir):
115 prompt_and_delete_repo(repo_dir, no_input=no_input)
116
117 if repo_type in ['git', 'hg']:
118 subprocess.check_call([repo_type, 'clone', repo_url], cwd=clone_to_dir)
119 if checkout is not None:
120 subprocess.check_call([repo_type, 'checkout', checkout],
121 cwd=repo_dir)
122
123 return repo_dir
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cookiecutter/vcs.py b/cookiecutter/vcs.py
--- a/cookiecutter/vcs.py
+++ b/cookiecutter/vcs.py
@@ -103,6 +103,7 @@
msg = "'{0}' is not installed.".format(repo_type)
raise VCSNotInstalled(msg)
+ repo_url = repo_url.rstrip('/')
tail = os.path.split(repo_url)[1]
if repo_type == 'git':
repo_dir = os.path.normpath(os.path.join(clone_to_dir,
|
{"golden_diff": "diff --git a/cookiecutter/vcs.py b/cookiecutter/vcs.py\n--- a/cookiecutter/vcs.py\n+++ b/cookiecutter/vcs.py\n@@ -103,6 +103,7 @@\n msg = \"'{0}' is not installed.\".format(repo_type)\n raise VCSNotInstalled(msg)\n \n+ repo_url = repo_url.rstrip('/')\n tail = os.path.split(repo_url)[1]\n if repo_type == 'git':\n repo_dir = os.path.normpath(os.path.join(clone_to_dir,\n", "issue": "Problem with --checkout reclone\nThe message should ask me about recloning `/Users/audreyr/.cookiecutters/cookiecutter-pypackage`, not `/Users/audreyr/.cookiecutters`.\n\n```\n$ cookiecutter https://github.com/eliasdorneles/cookiecutter-pypackage/ -c adding-travis-setup-for-pypi-deployment\nYou've cloned /Users/audreyr/.cookiecutters before. Is it okay to delete and re-clone it? [yes]: \n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.vcs\n----------------\n\nHelper functions for working with version control systems.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom whichcraft import which\n\nfrom .exceptions import UnknownRepoType, VCSNotInstalled\nfrom .prompt import read_user_yes_no\nfrom .utils import make_sure_path_exists, rmtree\n\n\ndef prompt_and_delete_repo(repo_dir, no_input=False):\n \"\"\"\n Asks the user whether it's okay to delete the previously-cloned repo.\n If yes, deletes it. Otherwise, Cookiecutter exits.\n\n :param repo_dir: Directory of previously-cloned repo.\n :param no_input: Suppress prompt to delete repo and just delete it.\n \"\"\"\n\n # Suppress prompt if called via API\n if no_input:\n ok_to_delete = True\n else:\n question = (\n \"You've cloned {0} before. \"\n 'Is it okay to delete and re-clone it?'\n ).format(repo_dir)\n\n ok_to_delete = read_user_yes_no(question, 'yes')\n\n if ok_to_delete:\n rmtree(repo_dir)\n else:\n sys.exit()\n\n\ndef identify_repo(repo_url):\n \"\"\"\n Determines if `repo_url` should be treated as a URL to a git or hg repo.\n Repos can be identified prepeding \"hg+\" or \"git+\" to repo URL.\n\n :param repo_url: Repo URL of unknown type.\n :returns: (\"git\", repo_url), (\"hg\", repo_url), or None.\n \"\"\"\n repo_url_values = repo_url.split('+')\n if len(repo_url_values) == 2:\n repo_type = repo_url_values[0]\n if repo_type in [\"git\", \"hg\"]:\n return repo_type, repo_url_values[1]\n else:\n raise UnknownRepoType\n else:\n if \"git\" in repo_url:\n return \"git\", repo_url\n elif \"bitbucket\" in repo_url:\n return \"hg\", repo_url\n else:\n raise UnknownRepoType\n\n\ndef is_vcs_installed(repo_type):\n \"\"\"\n Check if the version control system for a repo type is installed.\n\n :param repo_type:\n \"\"\"\n return bool(which(repo_type))\n\n\ndef clone(repo_url, checkout=None, clone_to_dir=\".\", no_input=False):\n \"\"\"\n Clone a repo to the current directory.\n\n :param repo_url: Repo URL of unknown type.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param clone_to_dir: The directory to clone to.\n Defaults to the current directory.\n :param no_input: Suppress all user prompts when calling via API.\n \"\"\"\n\n # Ensure that clone_to_dir exists\n clone_to_dir = os.path.expanduser(clone_to_dir)\n make_sure_path_exists(clone_to_dir)\n\n # identify the repo_type\n repo_type, repo_url = identify_repo(repo_url)\n\n # check that the appropriate VCS for the repo_type is installed\n if not is_vcs_installed(repo_type):\n msg = \"'{0}' is not installed.\".format(repo_type)\n raise 
VCSNotInstalled(msg)\n\n tail = os.path.split(repo_url)[1]\n if repo_type == 'git':\n repo_dir = os.path.normpath(os.path.join(clone_to_dir,\n tail.rsplit('.git')[0]))\n elif repo_type == 'hg':\n repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail))\n logging.debug('repo_dir is {0}'.format(repo_dir))\n\n if os.path.isdir(repo_dir):\n prompt_and_delete_repo(repo_dir, no_input=no_input)\n\n if repo_type in ['git', 'hg']:\n subprocess.check_call([repo_type, 'clone', repo_url], cwd=clone_to_dir)\n if checkout is not None:\n subprocess.check_call([repo_type, 'checkout', checkout],\n cwd=repo_dir)\n\n return repo_dir\n", "path": "cookiecutter/vcs.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.vcs\n----------------\n\nHelper functions for working with version control systems.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom whichcraft import which\n\nfrom .exceptions import UnknownRepoType, VCSNotInstalled\nfrom .prompt import read_user_yes_no\nfrom .utils import make_sure_path_exists, rmtree\n\n\ndef prompt_and_delete_repo(repo_dir, no_input=False):\n \"\"\"\n Asks the user whether it's okay to delete the previously-cloned repo.\n If yes, deletes it. Otherwise, Cookiecutter exits.\n\n :param repo_dir: Directory of previously-cloned repo.\n :param no_input: Suppress prompt to delete repo and just delete it.\n \"\"\"\n\n # Suppress prompt if called via API\n if no_input:\n ok_to_delete = True\n else:\n question = (\n \"You've cloned {0} before. \"\n 'Is it okay to delete and re-clone it?'\n ).format(repo_dir)\n\n ok_to_delete = read_user_yes_no(question, 'yes')\n\n if ok_to_delete:\n rmtree(repo_dir)\n else:\n sys.exit()\n\n\ndef identify_repo(repo_url):\n \"\"\"\n Determines if `repo_url` should be treated as a URL to a git or hg repo.\n Repos can be identified prepeding \"hg+\" or \"git+\" to repo URL.\n\n :param repo_url: Repo URL of unknown type.\n :returns: (\"git\", repo_url), (\"hg\", repo_url), or None.\n \"\"\"\n repo_url_values = repo_url.split('+')\n if len(repo_url_values) == 2:\n repo_type = repo_url_values[0]\n if repo_type in [\"git\", \"hg\"]:\n return repo_type, repo_url_values[1]\n else:\n raise UnknownRepoType\n else:\n if \"git\" in repo_url:\n return \"git\", repo_url\n elif \"bitbucket\" in repo_url:\n return \"hg\", repo_url\n else:\n raise UnknownRepoType\n\n\ndef is_vcs_installed(repo_type):\n \"\"\"\n Check if the version control system for a repo type is installed.\n\n :param repo_type:\n \"\"\"\n return bool(which(repo_type))\n\n\ndef clone(repo_url, checkout=None, clone_to_dir=\".\", no_input=False):\n \"\"\"\n Clone a repo to the current directory.\n\n :param repo_url: Repo URL of unknown type.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param clone_to_dir: The directory to clone to.\n Defaults to the current directory.\n :param no_input: Suppress all user prompts when calling via API.\n \"\"\"\n\n # Ensure that clone_to_dir exists\n clone_to_dir = os.path.expanduser(clone_to_dir)\n make_sure_path_exists(clone_to_dir)\n\n # identify the repo_type\n repo_type, repo_url = identify_repo(repo_url)\n\n # check that the appropriate VCS for the repo_type is installed\n if not is_vcs_installed(repo_type):\n msg = \"'{0}' is not installed.\".format(repo_type)\n raise VCSNotInstalled(msg)\n\n repo_url = repo_url.rstrip('/')\n tail = os.path.split(repo_url)[1]\n if repo_type == 'git':\n repo_dir = 
os.path.normpath(os.path.join(clone_to_dir,\n tail.rsplit('.git')[0]))\n elif repo_type == 'hg':\n repo_dir = os.path.normpath(os.path.join(clone_to_dir, tail))\n logging.debug('repo_dir is {0}'.format(repo_dir))\n\n if os.path.isdir(repo_dir):\n prompt_and_delete_repo(repo_dir, no_input=no_input)\n\n if repo_type in ['git', 'hg']:\n subprocess.check_call([repo_type, 'clone', repo_url], cwd=clone_to_dir)\n if checkout is not None:\n subprocess.check_call([repo_type, 'checkout', checkout],\n cwd=repo_dir)\n\n return repo_dir\n", "path": "cookiecutter/vcs.py"}]}
| 1,512 | 121 |
gh_patches_debug_22116
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-1381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
uplea plugin (still) broken
Hi again,
sorry, but in spite of #1369 and #1375, uplea is still not working; now it's back to downloading the HTML download page...
24 26.04.2015 23:29:20 INFO Download finished: *****
23 26.04.2015 23:29:02 INFO Download starts: ****
The resulting file has the correct name but is 14KB big; the expected size is around 350MB
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/UpleaCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import re
4
5 from urlparse import urljoin
6
7 from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
8
9
10 class UpleaCom(XFSHoster):
11 __name__ = "UpleaCom"
12 __type__ = "hoster"
13 __version__ = "0.08"
14
15 __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
16
17 __description__ = """Uplea.com hoster plugin"""
18 __license__ = "GPLv3"
19 __authors__ = [("Redleon", None),
20 ("GammaC0de", None)]
21
22
23 NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
24 SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
25 SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]
26
27 OFFLINE_PATTERN = r'>You followed an invalid or expired link'
28 PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
29
30 LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
31 HOSTER_DOMAIN = "uplea.com"
32
33 WAIT_PATTERN = r'timeText: ?([\d.]+),'
34 STEP_PATTERN = r'<a href="(/step/.+)">'
35
36
37 def setup(self):
38 self.multiDL = False
39 self.chunkLimit = 1
40 self.resumeDownload = True
41
42
43 def handleFree(self, pyfile):
44 m = re.search(self.STEP_PATTERN, self.html)
45 if m is None:
46 self.error(_("STEP_PATTERN not found"))
47
48 self.html = self.load(urljoin("http://uplea.com/", m.group(1)))
49
50 m = re.search(self.WAIT_PATTERN, self.html)
51 if m:
52 self.logDebug(_("Waiting %s seconds") % m.group(1))
53 self.wait(m.group(1), True)
54 self.retry()
55
56 m = re.search(self.PREMIUM_PATTERN, self.html)
57 if m:
58 self.error(_("This URL requires a premium account"))
59
60 m = re.search(self.LINK_PATTERN, self.html)
61 if m is None:
62 self.error(_("LINK_PATTERN not found"))
63
64 self.link = m.group(1)
65 self.wait(15)
66
67
68 getInfo = create_getInfo(UpleaCom)
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py
--- a/module/plugins/hoster/UpleaCom.py
+++ b/module/plugins/hoster/UpleaCom.py
@@ -10,7 +10,7 @@
class UpleaCom(XFSHoster):
__name__ = "UpleaCom"
__type__ = "hoster"
- __version__ = "0.08"
+ __version__ = "0.10"
__pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
@@ -20,9 +20,11 @@
("GammaC0de", None)]
- NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
- SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
- SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]
+ DISPOSITION = False #@TODO: Remove in 0.4.10
+
+ NAME_PATTERN = r'<span class="gold-text">(?P<N>.+?)</span>'
+ SIZE_PATTERN = r'<span class="label label-info agmd">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
+ SIZE_REPLACEMENTS = [('ko','KB'), ('mo','MB'), ('go','GB'), ('Ko','KB'), ('Mo','MB'), ('Go','GB')]
OFFLINE_PATTERN = r'>You followed an invalid or expired link'
PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
|
{"golden_diff": "diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py\n--- a/module/plugins/hoster/UpleaCom.py\n+++ b/module/plugins/hoster/UpleaCom.py\n@@ -10,7 +10,7 @@\n class UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.08\"\n+ __version__ = \"0.10\"\n \n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n \n@@ -20,9 +20,11 @@\n (\"GammaC0de\", None)]\n \n \n- NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n- SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n- SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n+ DISPOSITION = False #@TODO: Remove in 0.4.10\n+\n+ NAME_PATTERN = r'<span class=\"gold-text\">(?P<N>.+?)</span>'\n+ SIZE_PATTERN = r'<span class=\"label label-info agmd\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n+ SIZE_REPLACEMENTS = [('ko','KB'), ('mo','MB'), ('go','GB'), ('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n \n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n", "issue": "uplea plugin (still) broken\nHi again,\n\nsorry but inspite of #1369 and #1375, uplea is still not working; now it's back with downloading the HTML download page...\n24 26.04.2015 23:29:20 INFO Download finished: *****\n23 26.04.2015 23:29:02 INFO Download starts: ****\n\nThe resulting file has correct name but is 14KB big; expected size if around 350MB\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.08\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None),\n (\"GammaC0de\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n\n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n HOSTER_DOMAIN = \"uplea.com\"\n\n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.PREMIUM_PATTERN, self.html)\n if m:\n self.error(_(\"This URL requires a premium account\"))\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = 
\"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.10\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None),\n (\"GammaC0de\", None)]\n\n\n DISPOSITION = False #@TODO: Remove in 0.4.10\n\n NAME_PATTERN = r'<span class=\"gold-text\">(?P<N>.+?)</span>'\n SIZE_PATTERN = r'<span class=\"label label-info agmd\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n SIZE_REPLACEMENTS = [('ko','KB'), ('mo','MB'), ('go','GB'), ('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n\n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n HOSTER_DOMAIN = \"uplea.com\"\n\n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.PREMIUM_PATTERN, self.html)\n if m:\n self.error(_(\"This URL requires a premium account\"))\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}]}
| 1,086 | 402 |
gh_patches_debug_42531
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-1763
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSS bundles generation breaks background images relative urls
This is a bug related to PR #1300.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/resources/browser/combine.py`
Content:
```
1 from zExceptions import NotFound
2 from Acquisition import aq_base
3 from datetime import datetime
4 from plone.registry.interfaces import IRegistry
5 from plone.resource.file import FilesystemFile
6 from plone.resource.interfaces import IResourceDirectory
7 from Products.CMFPlone.interfaces import IBundleRegistry
8 from Products.CMFPlone.interfaces.resources import (
9 OVERRIDE_RESOURCE_DIRECTORY_NAME,
10 )
11 from StringIO import StringIO
12 from zope.component import getUtility
13 from zope.component import queryUtility
14
15 PRODUCTION_RESOURCE_DIRECTORY = "production"
16
17
18 def get_production_resource_directory():
19 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
20 if persistent_directory is None:
21 return ''
22 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
23 try:
24 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
25 except NotFound:
26 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY
27 timestamp = production_folder.readFile('timestamp.txt')
28 return "%s/++unique++%s" % (
29 PRODUCTION_RESOURCE_DIRECTORY, timestamp)
30
31
32 def get_resource(context, path):
33 if path.startswith('++plone++'):
34 # ++plone++ resources can be customized, we return their override
35 # value if any
36 overrides = get_override_directory(context)
37 filepath = path[9:]
38 if overrides.isFile(filepath):
39 return overrides.readFile(filepath)
40
41 resource = context.unrestrictedTraverse(path)
42 if isinstance(resource, FilesystemFile):
43 (directory, sep, filename) = path.rpartition('/')
44 return context.unrestrictedTraverse(directory).readFile(filename)
45 else:
46 if hasattr(aq_base(resource), 'GET'):
47 # for FileResource
48 return resource.GET()
49 else:
50 # any BrowserView
51 return resource()
52
53
54 def write_js(context, folder, meta_bundle):
55 registry = getUtility(IRegistry)
56 resources = []
57
58 # default resources
59 if meta_bundle == 'default' and registry.records.get(
60 'plone.resources/jquery.js'
61 ):
62 resources.append(get_resource(context,
63 registry.records['plone.resources/jquery.js'].value))
64 resources.append(get_resource(context,
65 registry.records['plone.resources.requirejs'].value))
66 resources.append(get_resource(context,
67 registry.records['plone.resources.configjs'].value))
68
69 # bundles
70 bundles = registry.collectionOfInterface(
71 IBundleRegistry, prefix="plone.bundles", check=False)
72 for bundle in bundles.values():
73 if bundle.merge_with == meta_bundle and bundle.jscompilation:
74 resources.append(get_resource(context, bundle.jscompilation))
75
76 fi = StringIO()
77 for script in resources:
78 fi.write(script + '\n')
79 folder.writeFile(meta_bundle + ".js", fi)
80
81
82 def write_css(context, folder, meta_bundle):
83 registry = getUtility(IRegistry)
84 resources = []
85
86 bundles = registry.collectionOfInterface(
87 IBundleRegistry, prefix="plone.bundles", check=False)
88 for bundle in bundles.values():
89 if bundle.merge_with == meta_bundle and bundle.csscompilation:
90 resources.append(get_resource(context, bundle.csscompilation))
91
92 fi = StringIO()
93 for script in resources:
94 fi.write(script + '\n')
95 folder.writeFile(meta_bundle + ".css", fi)
96
97
98 def get_override_directory(context):
99 persistent_directory = queryUtility(IResourceDirectory, name="persistent")
100 if persistent_directory is None:
101 return
102 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:
103 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)
104 return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]
105
106
107 def combine_bundles(context):
108 container = get_override_directory(context)
109 if PRODUCTION_RESOURCE_DIRECTORY not in container:
110 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)
111 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
112
113 # store timestamp
114 fi = StringIO()
115 fi.write(datetime.now().isoformat())
116 production_folder.writeFile("timestamp.txt", fi)
117
118 # generate new combined bundles
119 write_js(context, production_folder, 'default')
120 write_js(context, production_folder, 'logged-in')
121 write_css(context, production_folder, 'default')
122 write_css(context, production_folder, 'logged-in')
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py
--- a/Products/CMFPlone/resources/browser/combine.py
+++ b/Products/CMFPlone/resources/browser/combine.py
@@ -1,18 +1,20 @@
-from zExceptions import NotFound
from Acquisition import aq_base
from datetime import datetime
from plone.registry.interfaces import IRegistry
from plone.resource.file import FilesystemFile
from plone.resource.interfaces import IResourceDirectory
from Products.CMFPlone.interfaces import IBundleRegistry
-from Products.CMFPlone.interfaces.resources import (
- OVERRIDE_RESOURCE_DIRECTORY_NAME,
-)
+from Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa
from StringIO import StringIO
+from zExceptions import NotFound
from zope.component import getUtility
from zope.component import queryUtility
+import logging
+import re
+
PRODUCTION_RESOURCE_DIRECTORY = "production"
+logger = logging.getLogger(__name__)
def get_production_resource_directory():
@@ -24,6 +26,8 @@
production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]
except NotFound:
return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY
+ if 'timestamp.txt' not in production_folder:
+ return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY
timestamp = production_folder.readFile('timestamp.txt')
return "%s/++unique++%s" % (
PRODUCTION_RESOURCE_DIRECTORY, timestamp)
@@ -38,7 +42,12 @@
if overrides.isFile(filepath):
return overrides.readFile(filepath)
- resource = context.unrestrictedTraverse(path)
+ try:
+ resource = context.unrestrictedTraverse(path)
+ except NotFound:
+ logger.warn(u"Could not find resource {0}. You may have to create it first.".format(path)) # noqa
+ return
+
if isinstance(resource, FilesystemFile):
(directory, sep, filename) = path.rpartition('/')
return context.unrestrictedTraverse(directory).readFile(filename)
@@ -71,7 +80,10 @@
IBundleRegistry, prefix="plone.bundles", check=False)
for bundle in bundles.values():
if bundle.merge_with == meta_bundle and bundle.jscompilation:
- resources.append(get_resource(context, bundle.jscompilation))
+ resource = get_resource(context, bundle.jscompilation)
+ if not resource:
+ continue
+ resources.append(resource)
fi = StringIO()
for script in resources:
@@ -87,7 +99,18 @@
IBundleRegistry, prefix="plone.bundles", check=False)
for bundle in bundles.values():
if bundle.merge_with == meta_bundle and bundle.csscompilation:
- resources.append(get_resource(context, bundle.csscompilation))
+ css = get_resource(context, bundle.csscompilation)
+ if not css:
+ continue
+ (path, sep, filename) = bundle.csscompilation.rpartition('/')
+ # Process relative urls:
+ # we prefix with current resource path any url not starting with
+ # '/' or http: or data:
+ css = re.sub(
+ r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""",
+ r'\1%s/' % path,
+ css)
+ resources.append(css)
fi = StringIO()
for script in resources:
|
{"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -1,18 +1,20 @@\n-from zExceptions import NotFound\n from Acquisition import aq_base\n from datetime import datetime\n from plone.registry.interfaces import IRegistry\n from plone.resource.file import FilesystemFile\n from plone.resource.interfaces import IResourceDirectory\n from Products.CMFPlone.interfaces import IBundleRegistry\n-from Products.CMFPlone.interfaces.resources import (\n- OVERRIDE_RESOURCE_DIRECTORY_NAME,\n-)\n+from Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\n from StringIO import StringIO\n+from zExceptions import NotFound\n from zope.component import getUtility\n from zope.component import queryUtility\n \n+import logging\n+import re\n+\n PRODUCTION_RESOURCE_DIRECTORY = \"production\"\n+logger = logging.getLogger(__name__)\n \n \n def get_production_resource_directory():\n@@ -24,6 +26,8 @@\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n+ if 'timestamp.txt' not in production_folder:\n+ return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n@@ -38,7 +42,12 @@\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n \n- resource = context.unrestrictedTraverse(path)\n+ try:\n+ resource = context.unrestrictedTraverse(path)\n+ except NotFound:\n+ logger.warn(u\"Could not find resource {0}. You may have to create it first.\".format(path)) # noqa\n+ return\n+\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n@@ -71,7 +80,10 @@\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.jscompilation:\n- resources.append(get_resource(context, bundle.jscompilation))\n+ resource = get_resource(context, bundle.jscompilation)\n+ if not resource:\n+ continue\n+ resources.append(resource)\n \n fi = StringIO()\n for script in resources:\n@@ -87,7 +99,18 @@\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n- resources.append(get_resource(context, bundle.csscompilation))\n+ css = get_resource(context, bundle.csscompilation)\n+ if not css:\n+ continue\n+ (path, sep, filename) = bundle.csscompilation.rpartition('/')\n+ # Process relative urls:\n+ # we prefix with current resource path any url not starting with\n+ # '/' or http: or data:\n+ css = re.sub(\n+ r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n+ r'\\1%s/' % path,\n+ css)\n+ resources.append(css)\n \n fi = StringIO()\n for script in resources:\n", "issue": "CSS bundles generation breaks background images relative urls\nThis is a bug related to PR #1300.\n\n", "before_files": [{"content": "from zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom 
Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.jscompilation:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n resources.append(get_resource(context, bundle.csscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n 
production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}], "after_files": [{"content": "from Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom StringIO import StringIO\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nimport logging\nimport re\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\nlogger = logging.getLogger(__name__)\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n if 'timestamp.txt' not in production_folder:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n try:\n resource = context.unrestrictedTraverse(path)\n except NotFound:\n logger.warn(u\"Could not find resource {0}. 
You may have to create it first.\".format(path)) # noqa\n return\n\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.jscompilation:\n resource = get_resource(context, bundle.jscompilation)\n if not resource:\n continue\n resources.append(resource)\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle and bundle.csscompilation:\n css = get_resource(context, bundle.csscompilation)\n if not css:\n continue\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n r'\\1%s/' % path,\n css)\n resources.append(css)\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]}
| 1,424 | 755 |
gh_patches_debug_658
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-2258
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.148
On the docket:
+ [x] The Pex CLI should warn when it creates a PEX zip that requires zip64. #2247
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.147"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.147"
+__version__ = "2.1.148"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.147\"\n+__version__ = \"2.1.148\"\n", "issue": "Release 2.1.148\nOn the docket:\r\n+ [x] The Pex CLI should warn when it creates a PEX zip that requires zip64. #2247\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.147\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.148\"\n", "path": "pex/version.py"}]}
| 352 | 98 |
gh_patches_debug_2942
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-3004
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
installing latest 5.0.3 on windows machines is still using pywin32==227 but not pywin32==301
[Bump pywin32 from 227 to 301 ]( https://github.com/docker/docker-py/commit/e0d186d754693feb7d27c2352e455c5febb4a5cd) was already merged to bump pywin32 from 227 to 301. But installing the latest 5.0.3 on Windows machines still results in an install of pywin32==227
Most likely extras_require needs to be updated
https://github.com/docker/docker-py/blob/a48a5a9647761406d66e8271f19fab7fa0c5f582/setup.py#L19
Pywin32 upgrade
Fix issue #2902
@aiordache @ulyssessouza, please, accept this PR to fix this annoying bug
Don't pin to pywin32 227
The hard pin to 227 is keeping us from using docker with other projects that depend on a newer version of pywin32.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import codecs
4 import os
5
6 from setuptools import find_packages
7 from setuptools import setup
8
9 ROOT_DIR = os.path.dirname(__file__)
10 SOURCE_DIR = os.path.join(ROOT_DIR)
11
12 requirements = [
13 'websocket-client >= 0.32.0',
14 'requests >= 2.14.2, != 2.18.0',
15 ]
16
17 extras_require = {
18 # win32 APIs if on Windows (required for npipe support)
19 ':sys_platform == "win32"': 'pywin32==227',
20
21 # If using docker-py over TLS, highly recommend this option is
22 # pip-installed or pinned.
23
24 # TODO: if pip installing both "requests" and "requests[security]", the
25 # extra package from the "security" option are not installed (see
26 # https://github.com/pypa/pip/issues/4391). Once that's fixed, instead of
27 # installing the extra dependencies, install the following instead:
28 # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'
29 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],
30
31 # Only required when connecting using the ssh:// protocol
32 'ssh': ['paramiko>=2.4.3'],
33
34 }
35
36 version = None
37 exec(open('docker/version.py').read())
38
39 with open('./test-requirements.txt') as test_reqs_txt:
40 test_requirements = [line for line in test_reqs_txt]
41
42
43 long_description = ''
44 with codecs.open('./README.md', encoding='utf-8') as readme_md:
45 long_description = readme_md.read()
46
47 setup(
48 name="docker",
49 version=version,
50 description="A Python library for the Docker Engine API.",
51 long_description=long_description,
52 long_description_content_type='text/markdown',
53 url='https://github.com/docker/docker-py',
54 project_urls={
55 'Documentation': 'https://docker-py.readthedocs.io',
56 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501
57 'Source': 'https://github.com/docker/docker-py',
58 'Tracker': 'https://github.com/docker/docker-py/issues',
59 },
60 packages=find_packages(exclude=["tests.*", "tests"]),
61 install_requires=requirements,
62 tests_require=test_requirements,
63 extras_require=extras_require,
64 python_requires='>=3.6',
65 zip_safe=False,
66 test_suite='tests',
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Environment :: Other Environment',
70 'Intended Audience :: Developers',
71 'Operating System :: OS Independent',
72 'Programming Language :: Python',
73 'Programming Language :: Python :: 3',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Programming Language :: Python :: 3.8',
77 'Programming Language :: Python :: 3.9',
78 'Programming Language :: Python :: 3.10',
79 'Topic :: Software Development',
80 'Topic :: Utilities',
81 'License :: OSI Approved :: Apache Software License',
82 ],
83 maintainer='Ulysses Souza',
84 maintainer_email='[email protected]',
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
extras_require = {
# win32 APIs if on Windows (required for npipe support)
- ':sys_platform == "win32"': 'pywin32==227',
+ ':sys_platform == "win32"': 'pywin32>=304',
# If using docker-py over TLS, highly recommend this option is
# pip-installed or pinned.
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n \n extras_require = {\n # win32 APIs if on Windows (required for npipe support)\n- ':sys_platform == \"win32\"': 'pywin32==227',\n+ ':sys_platform == \"win32\"': 'pywin32>=304',\n \n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n", "issue": "installing latest 5.0.3 on windows machines is still using pywin32==227 but not pywin32==301\n[Bump pywin32 from 227 to 301 ]( https://github.com/docker/docker-py/commit/e0d186d754693feb7d27c2352e455c5febb4a5cd) was already merged in to bump pywin32 from 227 to 301. But, when installing latest 5.0.3 on windows machines is resulting in install of pywin32==227\r\n\r\nMost likely extras_require needs updated\r\nhttps://github.com/docker/docker-py/blob/a48a5a9647761406d66e8271f19fab7fa0c5f582/setup.py#L19\r\n\r\n\r\n\r\n\nPywin32 upgrade\nFix issue #2902\r\n\r\n@aiordache @ulyssessouza, please, accept this PR to fix this annoying bug\r\n\nDon't pin to pywin32 227\nThe hard pin to 227 is keeping us from using docker with other projects that depend on a newer version of pywin32.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32==227',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). 
Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.3'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Ulysses Souza',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport codecs\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'websocket-client >= 0.32.0',\n 'requests >= 2.14.2, != 2.18.0',\n]\n\nextras_require = {\n # win32 APIs if on Windows (required for npipe support)\n ':sys_platform == \"win32\"': 'pywin32>=304',\n\n # If using docker-py over TLS, highly recommend this option is\n # pip-installed or pinned.\n\n # TODO: if pip installing both \"requests\" and \"requests[security]\", the\n # extra package from the \"security\" option are not installed (see\n # https://github.com/pypa/pip/issues/4391). 
Once that's fixed, instead of\n # installing the extra dependencies, install the following instead:\n # 'requests[security] >= 2.5.2, != 2.11.0, != 2.12.2'\n 'tls': ['pyOpenSSL>=17.5.0', 'cryptography>=3.4.7', 'idna>=2.0.0'],\n\n # Only required when connecting using the ssh:// protocol\n 'ssh': ['paramiko>=2.4.3'],\n\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nlong_description = ''\nwith codecs.open('./README.md', encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nsetup(\n name=\"docker\",\n version=version,\n description=\"A Python library for the Docker Engine API.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/docker/docker-py',\n project_urls={\n 'Documentation': 'https://docker-py.readthedocs.io',\n 'Changelog': 'https://docker-py.readthedocs.io/en/stable/change-log.html', # noqa: E501\n 'Source': 'https://github.com/docker/docker-py',\n 'Tracker': 'https://github.com/docker/docker-py/issues',\n },\n packages=find_packages(exclude=[\"tests.*\", \"tests\"]),\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n python_requires='>=3.6',\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n maintainer='Ulysses Souza',\n maintainer_email='[email protected]',\n)\n", "path": "setup.py"}]}
| 1,459 | 122 |
gh_patches_debug_12597
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1110
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double requests for documentHighlight
I've noticed that setting a cursor on some symbol makes the `documentHighlight` underline blink once.
Checked logs and saw the request being made twice on each cursor movement:
```
:: --> pyls textDocument/documentHighlight(12): {'textDocument': {'uri': 'file:////LSP/plugin/highlights.py'}, 'position': {'character': 8, 'line': 38}}
:: --> pyls textDocument/documentHighlight(13): {'textDocument': {'uri': 'file:////LSP/plugin/highlights.py'}, 'position': {'character': 8, 'line': 38}}
```
Then added log in `DocumentHighlightListener` class, inside `on_selection_modified_async` method and that listener seems to be triggered twice on cursor movement. Tested with `print(self.view.file_name())`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/highlights.py`
Content:
```
1 import sublime
2 from .core.protocol import Request, Range, DocumentHighlightKind
3 from .core.registry import LSPViewEventListener
4 from .core.settings import settings
5 from .core.typing import List, Dict, Optional
6 from .core.views import range_to_region, text_document_position_params
7 from .core.windows import debounced
8
9 SUBLIME_WORD_MASK = 515
10 NO_HIGHLIGHT_SCOPES = 'comment, string'
11
12 _kind2name = {
13 DocumentHighlightKind.Unknown: "unknown",
14 DocumentHighlightKind.Text: "text",
15 DocumentHighlightKind.Read: "read",
16 DocumentHighlightKind.Write: "write"
17 }
18
19
20 def remove_highlights(view: sublime.View) -> None:
21 for kind in settings.document_highlight_scopes.keys():
22 view.erase_regions("lsp_highlight_{}".format(kind))
23
24
25 class DocumentHighlightListener(LSPViewEventListener):
26 def __init__(self, view: sublime.View) -> None:
27 super().__init__(view)
28 self._initialized = False
29 self._enabled = False
30 self._stored_point = -1
31
32 @classmethod
33 def is_applicable(cls, view_settings: dict) -> bool:
34 if 'documentHighlight' in settings.disabled_capabilities:
35 return False
36 return cls.has_supported_syntax(view_settings)
37
38 def on_selection_modified_async(self) -> None:
39 if not self._initialized:
40 self._initialize()
41 if self._enabled and settings.document_highlight_style:
42 try:
43 current_point = self.view.sel()[0].begin()
44 except IndexError:
45 return
46 self._stored_point = current_point
47 self._clear_regions()
48 debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point, async_thread=True)
49
50 def _initialize(self) -> None:
51 self._initialized = True
52 session = self.session("documentHighlightProvider")
53 if session:
54 self._enabled = True
55
56 def _clear_regions(self) -> None:
57 for kind in settings.document_highlight_scopes.keys():
58 self.view.erase_regions("lsp_highlight_{}".format(kind))
59
60 def _on_document_highlight(self) -> None:
61 self._clear_regions()
62 if len(self.view.sel()) != 1:
63 return
64 point = self.view.sel()[0].begin()
65 word_at_sel = self.view.classify(point)
66 if word_at_sel & SUBLIME_WORD_MASK:
67 if self.view.match_selector(point, NO_HIGHLIGHT_SCOPES):
68 return
69 session = self.session("documentHighlightProvider", point)
70 if session:
71 params = text_document_position_params(self.view, point)
72 request = Request.documentHighlight(params)
73 session.send_request(request, self._handle_response)
74
75 def _handle_response(self, response: Optional[List]) -> None:
76 if not response:
77 return
78 kind2regions = {} # type: Dict[str, List[sublime.Region]]
79 for kind in range(0, 4):
80 kind2regions[_kind2name[kind]] = []
81 for highlight in response:
82 r = range_to_region(Range.from_lsp(highlight["range"]), self.view)
83 kind = highlight.get("kind", DocumentHighlightKind.Unknown)
84 if kind is not None:
85 kind2regions[_kind2name[kind]].append(r)
86 if settings.document_highlight_style == "fill":
87 flags = 0
88 elif settings.document_highlight_style == "box":
89 flags = sublime.DRAW_NO_FILL
90 else:
91 flags = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
92 if settings.document_highlight_style == "underline":
93 flags |= sublime.DRAW_SOLID_UNDERLINE
94 elif settings.document_highlight_style == "stippled":
95 flags |= sublime.DRAW_STIPPLED_UNDERLINE
96 elif settings.document_highlight_style == "squiggly":
97 flags |= sublime.DRAW_SQUIGGLY_UNDERLINE
98
99 self._clear_regions()
100 for kind_str, regions in kind2regions.items():
101 if regions:
102 scope = settings.document_highlight_scopes.get(kind_str, None)
103 if scope:
104 self.view.add_regions("lsp_highlight_{}".format(kind_str),
105 regions, scope=scope, flags=flags)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/highlights.py b/plugin/highlights.py
--- a/plugin/highlights.py
+++ b/plugin/highlights.py
@@ -43,9 +43,11 @@
current_point = self.view.sel()[0].begin()
except IndexError:
return
- self._stored_point = current_point
self._clear_regions()
- debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point, async_thread=True)
+ if self._stored_point != current_point:
+ self._stored_point = current_point
+ debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point,
+ async_thread=True)
def _initialize(self) -> None:
self._initialized = True
|
{"golden_diff": "diff --git a/plugin/highlights.py b/plugin/highlights.py\n--- a/plugin/highlights.py\n+++ b/plugin/highlights.py\n@@ -43,9 +43,11 @@\n current_point = self.view.sel()[0].begin()\n except IndexError:\n return\n- self._stored_point = current_point\n self._clear_regions()\n- debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point, async_thread=True)\n+ if self._stored_point != current_point:\n+ self._stored_point = current_point\n+ debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point,\n+ async_thread=True)\n \n def _initialize(self) -> None:\n self._initialized = True\n", "issue": "Double requests for documentHighlight\nI've noticed that setting a cursor on some symbol makes the `documentHighlight` underline blink once.\r\n\r\nChecked logs and saw the request being made twice on each cursor movement:\r\n```\r\n:: --> pyls textDocument/documentHighlight(12): {'textDocument': {'uri': 'file:////LSP/plugin/highlights.py'}, 'position': {'character': 8, 'line': 38}}\r\n:: --> pyls textDocument/documentHighlight(13): {'textDocument': {'uri': 'file:////LSP/plugin/highlights.py'}, 'position': {'character': 8, 'line': 38}}\r\n```\r\n\r\nThen added log in `DocumentHighlightListener` class, inside `on_selection_modified_async` method and that listener seems to be triggered twice on cursor movement. Tested with `print(self.view.file_name())`.\n", "before_files": [{"content": "import sublime\nfrom .core.protocol import Request, Range, DocumentHighlightKind\nfrom .core.registry import LSPViewEventListener\nfrom .core.settings import settings\nfrom .core.typing import List, Dict, Optional\nfrom .core.views import range_to_region, text_document_position_params\nfrom .core.windows import debounced\n\nSUBLIME_WORD_MASK = 515\nNO_HIGHLIGHT_SCOPES = 'comment, string'\n\n_kind2name = {\n DocumentHighlightKind.Unknown: \"unknown\",\n DocumentHighlightKind.Text: \"text\",\n DocumentHighlightKind.Read: \"read\",\n DocumentHighlightKind.Write: \"write\"\n}\n\n\ndef remove_highlights(view: sublime.View) -> None:\n for kind in settings.document_highlight_scopes.keys():\n view.erase_regions(\"lsp_highlight_{}\".format(kind))\n\n\nclass DocumentHighlightListener(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._initialized = False\n self._enabled = False\n self._stored_point = -1\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'documentHighlight' in settings.disabled_capabilities:\n return False\n return cls.has_supported_syntax(view_settings)\n\n def on_selection_modified_async(self) -> None:\n if not self._initialized:\n self._initialize()\n if self._enabled and settings.document_highlight_style:\n try:\n current_point = self.view.sel()[0].begin()\n except IndexError:\n return\n self._stored_point = current_point\n self._clear_regions()\n debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point, async_thread=True)\n\n def _initialize(self) -> None:\n self._initialized = True\n session = self.session(\"documentHighlightProvider\")\n if session:\n self._enabled = True\n\n def _clear_regions(self) -> None:\n for kind in settings.document_highlight_scopes.keys():\n self.view.erase_regions(\"lsp_highlight_{}\".format(kind))\n\n def _on_document_highlight(self) -> None:\n self._clear_regions()\n if len(self.view.sel()) != 1:\n return\n point = self.view.sel()[0].begin()\n word_at_sel = self.view.classify(point)\n if word_at_sel & 
SUBLIME_WORD_MASK:\n if self.view.match_selector(point, NO_HIGHLIGHT_SCOPES):\n return\n session = self.session(\"documentHighlightProvider\", point)\n if session:\n params = text_document_position_params(self.view, point)\n request = Request.documentHighlight(params)\n session.send_request(request, self._handle_response)\n\n def _handle_response(self, response: Optional[List]) -> None:\n if not response:\n return\n kind2regions = {} # type: Dict[str, List[sublime.Region]]\n for kind in range(0, 4):\n kind2regions[_kind2name[kind]] = []\n for highlight in response:\n r = range_to_region(Range.from_lsp(highlight[\"range\"]), self.view)\n kind = highlight.get(\"kind\", DocumentHighlightKind.Unknown)\n if kind is not None:\n kind2regions[_kind2name[kind]].append(r)\n if settings.document_highlight_style == \"fill\":\n flags = 0\n elif settings.document_highlight_style == \"box\":\n flags = sublime.DRAW_NO_FILL\n else:\n flags = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE\n if settings.document_highlight_style == \"underline\":\n flags |= sublime.DRAW_SOLID_UNDERLINE\n elif settings.document_highlight_style == \"stippled\":\n flags |= sublime.DRAW_STIPPLED_UNDERLINE\n elif settings.document_highlight_style == \"squiggly\":\n flags |= sublime.DRAW_SQUIGGLY_UNDERLINE\n\n self._clear_regions()\n for kind_str, regions in kind2regions.items():\n if regions:\n scope = settings.document_highlight_scopes.get(kind_str, None)\n if scope:\n self.view.add_regions(\"lsp_highlight_{}\".format(kind_str),\n regions, scope=scope, flags=flags)\n", "path": "plugin/highlights.py"}], "after_files": [{"content": "import sublime\nfrom .core.protocol import Request, Range, DocumentHighlightKind\nfrom .core.registry import LSPViewEventListener\nfrom .core.settings import settings\nfrom .core.typing import List, Dict, Optional\nfrom .core.views import range_to_region, text_document_position_params\nfrom .core.windows import debounced\n\nSUBLIME_WORD_MASK = 515\nNO_HIGHLIGHT_SCOPES = 'comment, string'\n\n_kind2name = {\n DocumentHighlightKind.Unknown: \"unknown\",\n DocumentHighlightKind.Text: \"text\",\n DocumentHighlightKind.Read: \"read\",\n DocumentHighlightKind.Write: \"write\"\n}\n\n\ndef remove_highlights(view: sublime.View) -> None:\n for kind in settings.document_highlight_scopes.keys():\n view.erase_regions(\"lsp_highlight_{}\".format(kind))\n\n\nclass DocumentHighlightListener(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._initialized = False\n self._enabled = False\n self._stored_point = -1\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'documentHighlight' in settings.disabled_capabilities:\n return False\n return cls.has_supported_syntax(view_settings)\n\n def on_selection_modified_async(self) -> None:\n if not self._initialized:\n self._initialize()\n if self._enabled and settings.document_highlight_style:\n try:\n current_point = self.view.sel()[0].begin()\n except IndexError:\n return\n self._clear_regions()\n if self._stored_point != current_point:\n self._stored_point = current_point\n debounced(self._on_document_highlight, 500, lambda: self._stored_point == current_point,\n async_thread=True)\n\n def _initialize(self) -> None:\n self._initialized = True\n session = self.session(\"documentHighlightProvider\")\n if session:\n self._enabled = True\n\n def _clear_regions(self) -> None:\n for kind in settings.document_highlight_scopes.keys():\n self.view.erase_regions(\"lsp_highlight_{}\".format(kind))\n\n def 
_on_document_highlight(self) -> None:\n self._clear_regions()\n if len(self.view.sel()) != 1:\n return\n point = self.view.sel()[0].begin()\n word_at_sel = self.view.classify(point)\n if word_at_sel & SUBLIME_WORD_MASK:\n if self.view.match_selector(point, NO_HIGHLIGHT_SCOPES):\n return\n session = self.session(\"documentHighlightProvider\", point)\n if session:\n params = text_document_position_params(self.view, point)\n request = Request.documentHighlight(params)\n session.send_request(request, self._handle_response)\n\n def _handle_response(self, response: Optional[List]) -> None:\n if not response:\n return\n kind2regions = {} # type: Dict[str, List[sublime.Region]]\n for kind in range(0, 4):\n kind2regions[_kind2name[kind]] = []\n for highlight in response:\n r = range_to_region(Range.from_lsp(highlight[\"range\"]), self.view)\n kind = highlight.get(\"kind\", DocumentHighlightKind.Unknown)\n if kind is not None:\n kind2regions[_kind2name[kind]].append(r)\n if settings.document_highlight_style == \"fill\":\n flags = 0\n elif settings.document_highlight_style == \"box\":\n flags = sublime.DRAW_NO_FILL\n else:\n flags = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE\n if settings.document_highlight_style == \"underline\":\n flags |= sublime.DRAW_SOLID_UNDERLINE\n elif settings.document_highlight_style == \"stippled\":\n flags |= sublime.DRAW_STIPPLED_UNDERLINE\n elif settings.document_highlight_style == \"squiggly\":\n flags |= sublime.DRAW_SQUIGGLY_UNDERLINE\n\n self._clear_regions()\n for kind_str, regions in kind2regions.items():\n if regions:\n scope = settings.document_highlight_scopes.get(kind_str, None)\n if scope:\n self.view.add_regions(\"lsp_highlight_{}\".format(kind_str),\n regions, scope=scope, flags=flags)\n", "path": "plugin/highlights.py"}]}
| 1,543 | 174 |
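The fix in the row above boils down to one idea: only re-arm the debounce when the caret actually moved, so a burst of selection events at the same position cannot queue a second `textDocument/documentHighlight` request. A minimal standalone sketch of that guard follows; the `debounced` helper here is a stand-in written with `threading.Timer`, not the plugin's own implementation.

```python
import threading

def debounced(func, delay_ms, condition):
    """Run func after delay_ms milliseconds, but only if condition() still holds."""
    def run():
        if condition():
            func()
    threading.Timer(delay_ms / 1000.0, run).start()

class HighlightScheduler:
    def __init__(self):
        self._stored_point = -1

    def on_selection_modified(self, current_point, send_request):
        # Re-arm the debounce only when the caret actually moved; repeated
        # selection events at the same position no longer queue a second request.
        if self._stored_point != current_point:
            self._stored_point = current_point
            debounced(send_request, 500,
                      lambda: self._stored_point == current_point)

scheduler = HighlightScheduler()
scheduler.on_selection_modified(42, lambda: print("documentHighlight sent once"))
scheduler.on_selection_modified(42, lambda: print("this one is skipped"))
```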
gh_patches_debug_15724
|
rasdani/github-patches
|
git_diff
|
pyscript__pyscript-1902
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
js_modules not behaving like in Polyscript
### Checklist
- [X] I added a descriptive title
- [X] I searched for other issues and couldn't find a solution or duplication
- [X] I already searched in Google and didn't find any good information or help
### What happened?
Apparently `from pyscript.js_modules import Thing` doesn't work in *PyScript* the same way it does on *Polyscript*.
The main difference is that in *PyScript* it is exported within the Python code, as opposed to being registered as a JS module like it is in *Polyscript*, where *js_modules* uses the `registerJSModule` utility instead.
### What browsers are you seeing the problem on? (if applicable)
_No response_
### Console info
_No response_
### Additional Context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyscript.core/src/stdlib/pyscript/magic_js.py`
Content:
```
1 import js as globalThis
2 from polyscript import js_modules
3 from pyscript.util import NotSupported
4
5 RUNNING_IN_WORKER = not hasattr(globalThis, "document")
6
7 if RUNNING_IN_WORKER:
8 import js
9 import polyscript
10
11 PyWorker = NotSupported(
12 "pyscript.PyWorker",
13 "pyscript.PyWorker works only when running in the main thread",
14 )
15 window = polyscript.xworker.window
16 document = window.document
17 js.document = document
18 sync = polyscript.xworker.sync
19
20 # in workers the display does not have a default ID
21 # but there is a sync utility from xworker
22 def current_target():
23 return polyscript.target
24
25 else:
26 import _pyscript
27 from _pyscript import PyWorker
28
29 window = globalThis
30 document = globalThis.document
31 sync = NotSupported(
32 "pyscript.sync", "pyscript.sync works only when running in a worker"
33 )
34
35 # in MAIN the current element target exist, just use it
36 def current_target():
37 return _pyscript.target
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyscript.core/src/stdlib/pyscript/magic_js.py b/pyscript.core/src/stdlib/pyscript/magic_js.py
--- a/pyscript.core/src/stdlib/pyscript/magic_js.py
+++ b/pyscript.core/src/stdlib/pyscript/magic_js.py
@@ -1,9 +1,28 @@
+import sys
+
import js as globalThis
from polyscript import js_modules
from pyscript.util import NotSupported
RUNNING_IN_WORKER = not hasattr(globalThis, "document")
+
+# allow `from pyscript.js_modules.xxx import yyy`
+class JSModule(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __getattr__(self, field):
+ # avoid pyodide looking for non existent fields
+ if not field.startswith("_"):
+ return getattr(getattr(js_modules, self.name), field)
+
+
+# generate N modules in the system that will proxy the real value
+for name in globalThis.Reflect.ownKeys(js_modules):
+ sys.modules[f"pyscript.js_modules.{name}"] = JSModule(name)
+sys.modules["pyscript.js_modules"] = js_modules
+
if RUNNING_IN_WORKER:
import js
import polyscript
|
{"golden_diff": "diff --git a/pyscript.core/src/stdlib/pyscript/magic_js.py b/pyscript.core/src/stdlib/pyscript/magic_js.py\n--- a/pyscript.core/src/stdlib/pyscript/magic_js.py\n+++ b/pyscript.core/src/stdlib/pyscript/magic_js.py\n@@ -1,9 +1,28 @@\n+import sys\n+\n import js as globalThis\n from polyscript import js_modules\n from pyscript.util import NotSupported\n \n RUNNING_IN_WORKER = not hasattr(globalThis, \"document\")\n \n+\n+# allow `from pyscript.js_modules.xxx import yyy`\n+class JSModule(object):\n+ def __init__(self, name):\n+ self.name = name\n+\n+ def __getattr__(self, field):\n+ # avoid pyodide looking for non existent fields\n+ if not field.startswith(\"_\"):\n+ return getattr(getattr(js_modules, self.name), field)\n+\n+\n+# generate N modules in the system that will proxy the real value\n+for name in globalThis.Reflect.ownKeys(js_modules):\n+ sys.modules[f\"pyscript.js_modules.{name}\"] = JSModule(name)\n+sys.modules[\"pyscript.js_modules\"] = js_modules\n+\n if RUNNING_IN_WORKER:\n import js\n import polyscript\n", "issue": "js_modules not behaving like in Polyscript\n### Checklist\n\n- [X] I added a descriptive title\n- [X] I searched for other issues and couldn't find a solution or duplication\n- [X] I already searched in Google and didn't find any good information or help\n\n### What happened?\n\nApparently `from pyscript.js_modules import Thing` doesn't work in *PyScript* the same way it does on *Polyscript*.\r\n\r\nThe main difference is that in *PyScript* that's exported within the Python code, as opposite of being registered as JS module like it is for *Polyscript* where *js_modules* use `registerJSModule` utility instead.\n\n### What browsers are you seeing the problem on? (if applicable)\n\n_No response_\n\n### Console info\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "import js as globalThis\nfrom polyscript import js_modules\nfrom pyscript.util import NotSupported\n\nRUNNING_IN_WORKER = not hasattr(globalThis, \"document\")\n\nif RUNNING_IN_WORKER:\n import js\n import polyscript\n\n PyWorker = NotSupported(\n \"pyscript.PyWorker\",\n \"pyscript.PyWorker works only when running in the main thread\",\n )\n window = polyscript.xworker.window\n document = window.document\n js.document = document\n sync = polyscript.xworker.sync\n\n # in workers the display does not have a default ID\n # but there is a sync utility from xworker\n def current_target():\n return polyscript.target\n\nelse:\n import _pyscript\n from _pyscript import PyWorker\n\n window = globalThis\n document = globalThis.document\n sync = NotSupported(\n \"pyscript.sync\", \"pyscript.sync works only when running in a worker\"\n )\n\n # in MAIN the current element target exist, just use it\n def current_target():\n return _pyscript.target\n", "path": "pyscript.core/src/stdlib/pyscript/magic_js.py"}], "after_files": [{"content": "import sys\n\nimport js as globalThis\nfrom polyscript import js_modules\nfrom pyscript.util import NotSupported\n\nRUNNING_IN_WORKER = not hasattr(globalThis, \"document\")\n\n\n# allow `from pyscript.js_modules.xxx import yyy`\nclass JSModule(object):\n def __init__(self, name):\n self.name = name\n\n def __getattr__(self, field):\n # avoid pyodide looking for non existent fields\n if not field.startswith(\"_\"):\n return getattr(getattr(js_modules, self.name), field)\n\n\n# generate N modules in the system that will proxy the real value\nfor name in globalThis.Reflect.ownKeys(js_modules):\n 
sys.modules[f\"pyscript.js_modules.{name}\"] = JSModule(name)\nsys.modules[\"pyscript.js_modules\"] = js_modules\n\nif RUNNING_IN_WORKER:\n import js\n import polyscript\n\n PyWorker = NotSupported(\n \"pyscript.PyWorker\",\n \"pyscript.PyWorker works only when running in the main thread\",\n )\n window = polyscript.xworker.window\n document = window.document\n js.document = document\n sync = polyscript.xworker.sync\n\n # in workers the display does not have a default ID\n # but there is a sync utility from xworker\n def current_target():\n return polyscript.target\n\nelse:\n import _pyscript\n from _pyscript import PyWorker\n\n window = globalThis\n document = globalThis.document\n sync = NotSupported(\n \"pyscript.sync\", \"pyscript.sync works only when running in a worker\"\n )\n\n # in MAIN the current element target exist, just use it\n def current_target():\n return _pyscript.target\n", "path": "pyscript.core/src/stdlib/pyscript/magic_js.py"}]}
| 753 | 283 |
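The patch above makes `from pyscript.js_modules import Thing` resolve by planting proxy entries in `sys.modules`, mirroring what Polyscript's `registerJSModule` exposes. A small plain-Python sketch of that trick follows; the `greeter` registry below is an invented stand-in, since the real object comes from Polyscript.

```python
import sys
import types

# Invented stand-in for the registry Polyscript normally provides.
fake_js_modules = types.SimpleNamespace(
    greeter=types.SimpleNamespace(hello=lambda name: f"hello {name}")
)

class JSModuleProxy:
    """Forward attribute lookups to one named entry of the registry."""
    def __init__(self, name):
        self.name = name

    def __getattr__(self, field):
        if not field.startswith("_"):
            return getattr(getattr(fake_js_modules, self.name), field)
        raise AttributeError(field)

# Register the package and one proxy per JS module so ordinary imports resolve.
sys.modules["pyscript"] = types.ModuleType("pyscript")
sys.modules["pyscript.js_modules"] = fake_js_modules
sys.modules["pyscript.js_modules.greeter"] = JSModuleProxy("greeter")

from pyscript.js_modules.greeter import hello
print(hello("world"))  # -> hello world
```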
gh_patches_debug_5283
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-469
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move to using master in azavea/models
We've been using a branch of azavea/models, which can cause confusion.
Before release, merge the `upgrade-sept-2018` branch into the main branch and update our install_deps script accordingly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/runner/command_dag.py`
Content:
```
1 import networkx as nx
2
3 import rastervision as rv
4 from rastervision.utils.files import file_exists
5
6 import click
7
8
9 class CommandDAG:
10 """ A directed acyclic graph of command definitions.
11 """
12
13 def __init__(self,
14 command_definitions,
15 rerun_commands=False,
16 skip_file_check=False):
17 """Generates a CommandDAG from a list of CommandDefinitions
18
19 This logic checks if there are any non-exsiting URIs that are
20 not produced as outputs by some command in the set. If so,
21 it raises a ConfigError stating the missing files.
22 """
23 # Create a set of edges, from input_uri to command_config and
24 # from command_config to output_uri. Nodes for commands are their
25 # index into command_definitions.
26
27 uri_dag = nx.DiGraph()
28
29 for idx, command_def in enumerate(command_definitions):
30 uri_dag.add_node(idx)
31 for input_uri in command_def.io_def.input_uris:
32 uri_dag.add_edge(input_uri, idx)
33
34 for output_uri in command_def.io_def.output_uris:
35 uri_dag.add_edge(idx, output_uri)
36
37 # Find all source input_uris, and ensure they exist.
38 if not skip_file_check:
39 unsolved_sources = [
40 uri for uri in uri_dag.nodes
41 if (type(uri) == str and len(uri_dag.in_edges(uri)) == 0)
42 ]
43
44 missing_files = []
45
46 with click.progressbar(
47 unsolved_sources,
48 label='Ensuring input files exists ') as uris:
49 for uri in uris:
50 if not file_exists(uri):
51 missing_files.append(uri)
52
53 if any(missing_files):
54 raise rv.ConfigError(
55 'Files do not exist and are not supplied by commands:\n'
56 '\t{}\n'.format(',\b\t'.join(missing_files)))
57
58 # If we are not rerunning, remove commands that have existing outputs.
59 self.skipped_commands = []
60 if not rerun_commands:
61 commands_to_outputs = [(idx, edge[1]) for idx in uri_dag.nodes
62 if type(idx) == int
63 for edge in uri_dag.out_edges(idx)]
64 with click.progressbar(
65 commands_to_outputs,
66 label='Checking for existing output') as lst:
67 for idx, output_uri in lst:
68 if file_exists(output_uri):
69 uri_dag.remove_edge(idx, output_uri)
70
71 for idx in set(map(lambda x: x[0], commands_to_outputs)):
72 if len(uri_dag.out_edges(idx)) == 0:
73 self.skipped_commands.append(command_definitions[idx])
74 uri_dag.remove_node(idx)
75
76 # Collapse the graph to create edges from command to command.
77 command_id_dag = nx.DiGraph()
78
79 for idx in [idx for idx in uri_dag.nodes if (type(idx) == int)]:
80 command_id_dag.add_node(idx)
81 for upstream_idx in [
82 edge2[0] for edge1 in uri_dag.in_edges(idx)
83 for edge2 in uri_dag.in_edges(edge1[0])
84 ]:
85 command_id_dag.add_edge(upstream_idx, idx)
86
87 # Feed this digraph of commands to the child runner.
88 self.command_definitions = command_definitions
89 self.command_id_dag = command_id_dag
90
91 def get_sorted_commands(self):
92 """Return a topologically sorted list of commands configurations.
93
94 Returns a list of command configurations that are sorted such that every
95 command that depends on some other parent command appears later
96 than that parent command.
97 """
98 return [
99 self.command_definitions[idx].command_config
100 for idx in self.get_sorted_command_ids()
101 ]
102
103 def get_sorted_command_ids(self):
104 """Return a topologically sorted list of commands ids.
105
106 Returns a list of command IDs that can be used to retrieve
107 specific commands out of this DAG. These are sorted such that every
108 command that depends on some other parent command appears later
109 than that parent command.
110 """
111 return [idx for idx in nx.topological_sort(self.command_id_dag)]
112
113 def get_command(self, command_id):
114 """Retrieves a command configuration for the given ID"""
115 return self.get_command_definition(command_id).command_config
116
117 def get_command_definition(self, command_id):
118 """Retrieves a command definition for the given ID"""
119 return self.command_definitions[command_id]
120
121 def get_upstream_command_ids(self, command_id):
122 """Returns the command ids for upstream commands for the command
123 with the given id.
124 """
125 return list(
126 map(lambda x: x[0], self.command_id_dag.in_edges(command_id)))
127
128 def get_command_definitions(self):
129 """Returns the command definitions that will be run in this DAG."""
130 return [
131 self.command_definitions[idx] for idx in self.command_id_dag.nodes
132 ]
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rastervision/runner/command_dag.py b/rastervision/runner/command_dag.py
--- a/rastervision/runner/command_dag.py
+++ b/rastervision/runner/command_dag.py
@@ -45,7 +45,7 @@
with click.progressbar(
unsolved_sources,
- label='Ensuring input files exists ') as uris:
+ label='Ensuring input files exist ') as uris:
for uri in uris:
if not file_exists(uri):
missing_files.append(uri)
|
{"golden_diff": "diff --git a/rastervision/runner/command_dag.py b/rastervision/runner/command_dag.py\n--- a/rastervision/runner/command_dag.py\n+++ b/rastervision/runner/command_dag.py\n@@ -45,7 +45,7 @@\n \n with click.progressbar(\n unsolved_sources,\n- label='Ensuring input files exists ') as uris:\n+ label='Ensuring input files exist ') as uris:\n for uri in uris:\n if not file_exists(uri):\n missing_files.append(uri)\n", "issue": "Move to using master in azavea/models\nWe've been using a branch of azavea/models, which can cause confusion.\r\n\r\nBefore release, merge the `upgrade-sept-2018` branch into the main branch and update our install_deps script accordingly.\n", "before_files": [{"content": "import networkx as nx\n\nimport rastervision as rv\nfrom rastervision.utils.files import file_exists\n\nimport click\n\n\nclass CommandDAG:\n \"\"\" A directed acyclic graph of command definitions.\n \"\"\"\n\n def __init__(self,\n command_definitions,\n rerun_commands=False,\n skip_file_check=False):\n \"\"\"Generates a CommandDAG from a list of CommandDefinitions\n\n This logic checks if there are any non-exsiting URIs that are\n not produced as outputs by some command in the set. If so,\n it raises a ConfigError stating the missing files.\n \"\"\"\n # Create a set of edges, from input_uri to command_config and\n # from command_config to output_uri. Nodes for commands are their\n # index into command_definitions.\n\n uri_dag = nx.DiGraph()\n\n for idx, command_def in enumerate(command_definitions):\n uri_dag.add_node(idx)\n for input_uri in command_def.io_def.input_uris:\n uri_dag.add_edge(input_uri, idx)\n\n for output_uri in command_def.io_def.output_uris:\n uri_dag.add_edge(idx, output_uri)\n\n # Find all source input_uris, and ensure they exist.\n if not skip_file_check:\n unsolved_sources = [\n uri for uri in uri_dag.nodes\n if (type(uri) == str and len(uri_dag.in_edges(uri)) == 0)\n ]\n\n missing_files = []\n\n with click.progressbar(\n unsolved_sources,\n label='Ensuring input files exists ') as uris:\n for uri in uris:\n if not file_exists(uri):\n missing_files.append(uri)\n\n if any(missing_files):\n raise rv.ConfigError(\n 'Files do not exist and are not supplied by commands:\\n'\n '\\t{}\\n'.format(',\\b\\t'.join(missing_files)))\n\n # If we are not rerunning, remove commands that have existing outputs.\n self.skipped_commands = []\n if not rerun_commands:\n commands_to_outputs = [(idx, edge[1]) for idx in uri_dag.nodes\n if type(idx) == int\n for edge in uri_dag.out_edges(idx)]\n with click.progressbar(\n commands_to_outputs,\n label='Checking for existing output') as lst:\n for idx, output_uri in lst:\n if file_exists(output_uri):\n uri_dag.remove_edge(idx, output_uri)\n\n for idx in set(map(lambda x: x[0], commands_to_outputs)):\n if len(uri_dag.out_edges(idx)) == 0:\n self.skipped_commands.append(command_definitions[idx])\n uri_dag.remove_node(idx)\n\n # Collapse the graph to create edges from command to command.\n command_id_dag = nx.DiGraph()\n\n for idx in [idx for idx in uri_dag.nodes if (type(idx) == int)]:\n command_id_dag.add_node(idx)\n for upstream_idx in [\n edge2[0] for edge1 in uri_dag.in_edges(idx)\n for edge2 in uri_dag.in_edges(edge1[0])\n ]:\n command_id_dag.add_edge(upstream_idx, idx)\n\n # Feed this digraph of commands to the child runner.\n self.command_definitions = command_definitions\n self.command_id_dag = command_id_dag\n\n def get_sorted_commands(self):\n \"\"\"Return a topologically sorted list of commands configurations.\n\n Returns a list of 
command configurations that are sorted such that every\n command that depends on some other parent command appears later\n than that parent command.\n \"\"\"\n return [\n self.command_definitions[idx].command_config\n for idx in self.get_sorted_command_ids()\n ]\n\n def get_sorted_command_ids(self):\n \"\"\"Return a topologically sorted list of commands ids.\n\n Returns a list of command IDs that can be used to retrieve\n specific commands out of this DAG. These are sorted such that every\n command that depends on some other parent command appears later\n than that parent command.\n \"\"\"\n return [idx for idx in nx.topological_sort(self.command_id_dag)]\n\n def get_command(self, command_id):\n \"\"\"Retrieves a command configuration for the given ID\"\"\"\n return self.get_command_definition(command_id).command_config\n\n def get_command_definition(self, command_id):\n \"\"\"Retrieves a command definition for the given ID\"\"\"\n return self.command_definitions[command_id]\n\n def get_upstream_command_ids(self, command_id):\n \"\"\"Returns the command ids for upstream commands for the command\n with the given id.\n \"\"\"\n return list(\n map(lambda x: x[0], self.command_id_dag.in_edges(command_id)))\n\n def get_command_definitions(self):\n \"\"\"Returns the command definitions that will be run in this DAG.\"\"\"\n return [\n self.command_definitions[idx] for idx in self.command_id_dag.nodes\n ]\n", "path": "rastervision/runner/command_dag.py"}], "after_files": [{"content": "import networkx as nx\n\nimport rastervision as rv\nfrom rastervision.utils.files import file_exists\n\nimport click\n\n\nclass CommandDAG:\n \"\"\" A directed acyclic graph of command definitions.\n \"\"\"\n\n def __init__(self,\n command_definitions,\n rerun_commands=False,\n skip_file_check=False):\n \"\"\"Generates a CommandDAG from a list of CommandDefinitions\n\n This logic checks if there are any non-exsiting URIs that are\n not produced as outputs by some command in the set. If so,\n it raises a ConfigError stating the missing files.\n \"\"\"\n # Create a set of edges, from input_uri to command_config and\n # from command_config to output_uri. 
Nodes for commands are their\n # index into command_definitions.\n\n uri_dag = nx.DiGraph()\n\n for idx, command_def in enumerate(command_definitions):\n uri_dag.add_node(idx)\n for input_uri in command_def.io_def.input_uris:\n uri_dag.add_edge(input_uri, idx)\n\n for output_uri in command_def.io_def.output_uris:\n uri_dag.add_edge(idx, output_uri)\n\n # Find all source input_uris, and ensure they exist.\n if not skip_file_check:\n unsolved_sources = [\n uri for uri in uri_dag.nodes\n if (type(uri) == str and len(uri_dag.in_edges(uri)) == 0)\n ]\n\n missing_files = []\n\n with click.progressbar(\n unsolved_sources,\n label='Ensuring input files exist ') as uris:\n for uri in uris:\n if not file_exists(uri):\n missing_files.append(uri)\n\n if any(missing_files):\n raise rv.ConfigError(\n 'Files do not exist and are not supplied by commands:\\n'\n '\\t{}\\n'.format(',\\b\\t'.join(missing_files)))\n\n # If we are not rerunning, remove commands that have existing outputs.\n self.skipped_commands = []\n if not rerun_commands:\n commands_to_outputs = [(idx, edge[1]) for idx in uri_dag.nodes\n if type(idx) == int\n for edge in uri_dag.out_edges(idx)]\n with click.progressbar(\n commands_to_outputs,\n label='Checking for existing output') as lst:\n for idx, output_uri in lst:\n if file_exists(output_uri):\n uri_dag.remove_edge(idx, output_uri)\n\n for idx in set(map(lambda x: x[0], commands_to_outputs)):\n if len(uri_dag.out_edges(idx)) == 0:\n self.skipped_commands.append(command_definitions[idx])\n uri_dag.remove_node(idx)\n\n # Collapse the graph to create edges from command to command.\n command_id_dag = nx.DiGraph()\n\n for idx in [idx for idx in uri_dag.nodes if (type(idx) == int)]:\n command_id_dag.add_node(idx)\n for upstream_idx in [\n edge2[0] for edge1 in uri_dag.in_edges(idx)\n for edge2 in uri_dag.in_edges(edge1[0])\n ]:\n command_id_dag.add_edge(upstream_idx, idx)\n\n # Feed this digraph of commands to the child runner.\n self.command_definitions = command_definitions\n self.command_id_dag = command_id_dag\n\n def get_sorted_commands(self):\n \"\"\"Return a topologically sorted list of commands configurations.\n\n Returns a list of command configurations that are sorted such that every\n command that depends on some other parent command appears later\n than that parent command.\n \"\"\"\n return [\n self.command_definitions[idx].command_config\n for idx in self.get_sorted_command_ids()\n ]\n\n def get_sorted_command_ids(self):\n \"\"\"Return a topologically sorted list of commands ids.\n\n Returns a list of command IDs that can be used to retrieve\n specific commands out of this DAG. 
These are sorted such that every\n command that depends on some other parent command appears later\n than that parent command.\n \"\"\"\n return [idx for idx in nx.topological_sort(self.command_id_dag)]\n\n def get_command(self, command_id):\n \"\"\"Retrieves a command configuration for the given ID\"\"\"\n return self.get_command_definition(command_id).command_config\n\n def get_command_definition(self, command_id):\n \"\"\"Retrieves a command definition for the given ID\"\"\"\n return self.command_definitions[command_id]\n\n def get_upstream_command_ids(self, command_id):\n \"\"\"Returns the command ids for upstream commands for the command\n with the given id.\n \"\"\"\n return list(\n map(lambda x: x[0], self.command_id_dag.in_edges(command_id)))\n\n def get_command_definitions(self):\n \"\"\"Returns the command definitions that will be run in this DAG.\"\"\"\n return [\n self.command_definitions[idx] for idx in self.command_id_dag.nodes\n ]\n", "path": "rastervision/runner/command_dag.py"}]}
| 1,670 | 125 |
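Aside from the one-word label fix, the `CommandDAG` class in this row leans on `networkx` for its ordering guarantee: commands are sorted topologically so each one runs after everything it depends on. A tiny illustration of that idea, with placeholder command names rather than a real Raster Vision pipeline:

```python
import networkx as nx

dag = nx.DiGraph()
# Edges run from a producer command to the command that consumes its output.
dag.add_edge("analyze", "chip")
dag.add_edge("chip", "train")
dag.add_edge("train", "predict")
dag.add_edge("predict", "eval")

# Every command appears after all commands it depends on.
print(list(nx.topological_sort(dag)))
# ['analyze', 'chip', 'train', 'predict', 'eval']
```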
gh_patches_debug_1363
|
rasdani/github-patches
|
git_diff
|
ManageIQ__integration_tests-7728
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cfme.log only showing on first test in a run.
The cfme.log link only appears on the first test from a selection, but it shows the logs from all tests in that run. Each test is expected to have its own log link specific to that test. See attached

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `artifactor/plugins/logger.py`
Content:
```
1 """ Logger plugin for Artifactor
2
3 Add a stanza to the artifactor config like this,
4 artifactor:
5 log_dir: /home/username/outdir
6 per_run: test #test, run, None
7 overwrite: True
8 plugins:
9 logger:
10 enabled: True
11 plugin: logger
12 level: DEBUG
13 """
14 import os
15 from logging import makeLogRecord
16 from artifactor import ArtifactorBasePlugin
17 from cfme.utils.log import make_file_handler
18
19
20 class Logger(ArtifactorBasePlugin):
21
22 class Test(object):
23 def __init__(self, ident):
24 self.ident = ident
25 self.in_progress = False
26 self.handler = None
27
28 def close(self):
29 if self.handle is not None:
30 self.handler.close()
31 self.handler = None
32
33 def plugin_initialize(self):
34 self.register_plugin_hook('start_test', self.start_test)
35 self.register_plugin_hook('finish_test', self.finish_test)
36 self.register_plugin_hook('log_message', self.log_message)
37
38 def configure(self):
39 self.configured = True
40 self.level = self.data.get('level', 'DEBUG')
41
42 @ArtifactorBasePlugin.check_configured
43 def start_test(self, artifact_path, test_name, test_location, slaveid):
44 if not slaveid:
45 slaveid = "Master"
46 test_ident = "{}/{}".format(test_location, test_name)
47 if slaveid in self.store:
48 if self.store[slaveid].in_progress:
49 print("Test already running, can't start another, logger")
50 return None
51 self.store[slaveid].close()
52 self.store[slaveid] = self.Test(test_ident)
53 self.store[slaveid].in_progress = True
54 filename = "{ident}-cfme.log".format(ident=self.ident)
55 self.store[slaveid].handler = make_file_handler(
56 filename,
57 root=artifact_path,
58 # we overwrite
59 mode='w',
60 level=self.level)
61
62 self.fire_hook('filedump', test_location=test_location, test_name=test_name,
63 description="cfme.log", slaveid=slaveid, contents="", file_type="log",
64 display_glyph="align-justify", dont_write=True,
65 os_filename=os.path.join(artifact_path, filename),
66 group_id="pytest-logfile")
67
68 @ArtifactorBasePlugin.check_configured
69 def finish_test(self, artifact_path, test_name, test_location, slaveid):
70 if not slaveid:
71 slaveid = "Master"
72 self.store[slaveid].in_progress = False
73 self.store[slaveid].close()
74
75 @ArtifactorBasePlugin.check_configured
76 def log_message(self, log_record, slaveid):
77 # json transport fallout: args must be a dict or a tuple, json makes a tuple into a list
78 args = log_record['args']
79 log_record['args'] = tuple(args) if isinstance(args, list) else args
80 record = makeLogRecord(log_record)
81 if not slaveid:
82 slaveid = "Master"
83 if slaveid in self.store:
84 handler = self.store[slaveid].handler
85 if handler and record.levelno >= handler.level:
86 handler.handle(record)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/artifactor/plugins/logger.py b/artifactor/plugins/logger.py
--- a/artifactor/plugins/logger.py
+++ b/artifactor/plugins/logger.py
@@ -26,7 +26,7 @@
self.handler = None
def close(self):
- if self.handle is not None:
+ if self.handler is not None:
self.handler.close()
self.handler = None
|
{"golden_diff": "diff --git a/artifactor/plugins/logger.py b/artifactor/plugins/logger.py\n--- a/artifactor/plugins/logger.py\n+++ b/artifactor/plugins/logger.py\n@@ -26,7 +26,7 @@\n self.handler = None\n \n def close(self):\n- if self.handle is not None:\n+ if self.handler is not None:\n self.handler.close()\n self.handler = None\n", "issue": "cfme.log only showing on first test in a run.\ncfme.log link only appears on the first test from a selection but shows all logs from all tests in that run. Expected to have a separate log link for each test specific to that test. See attached\r\n\r\n\n", "before_files": [{"content": "\"\"\" Logger plugin for Artifactor\n\nAdd a stanza to the artifactor config like this,\nartifactor:\n log_dir: /home/username/outdir\n per_run: test #test, run, None\n overwrite: True\n plugins:\n logger:\n enabled: True\n plugin: logger\n level: DEBUG\n\"\"\"\nimport os\nfrom logging import makeLogRecord\nfrom artifactor import ArtifactorBasePlugin\nfrom cfme.utils.log import make_file_handler\n\n\nclass Logger(ArtifactorBasePlugin):\n\n class Test(object):\n def __init__(self, ident):\n self.ident = ident\n self.in_progress = False\n self.handler = None\n\n def close(self):\n if self.handle is not None:\n self.handler.close()\n self.handler = None\n\n def plugin_initialize(self):\n self.register_plugin_hook('start_test', self.start_test)\n self.register_plugin_hook('finish_test', self.finish_test)\n self.register_plugin_hook('log_message', self.log_message)\n\n def configure(self):\n self.configured = True\n self.level = self.data.get('level', 'DEBUG')\n\n @ArtifactorBasePlugin.check_configured\n def start_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n test_ident = \"{}/{}\".format(test_location, test_name)\n if slaveid in self.store:\n if self.store[slaveid].in_progress:\n print(\"Test already running, can't start another, logger\")\n return None\n self.store[slaveid].close()\n self.store[slaveid] = self.Test(test_ident)\n self.store[slaveid].in_progress = True\n filename = \"{ident}-cfme.log\".format(ident=self.ident)\n self.store[slaveid].handler = make_file_handler(\n filename,\n root=artifact_path,\n # we overwrite\n mode='w',\n level=self.level)\n\n self.fire_hook('filedump', test_location=test_location, test_name=test_name,\n description=\"cfme.log\", slaveid=slaveid, contents=\"\", file_type=\"log\",\n display_glyph=\"align-justify\", dont_write=True,\n os_filename=os.path.join(artifact_path, filename),\n group_id=\"pytest-logfile\")\n\n @ArtifactorBasePlugin.check_configured\n def finish_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n self.store[slaveid].in_progress = False\n self.store[slaveid].close()\n\n @ArtifactorBasePlugin.check_configured\n def log_message(self, log_record, slaveid):\n # json transport fallout: args must be a dict or a tuple, json makes a tuple into a list\n args = log_record['args']\n log_record['args'] = tuple(args) if isinstance(args, list) else args\n record = makeLogRecord(log_record)\n if not slaveid:\n slaveid = \"Master\"\n if slaveid in self.store:\n handler = self.store[slaveid].handler\n if handler and record.levelno >= handler.level:\n handler.handle(record)\n", "path": "artifactor/plugins/logger.py"}], "after_files": [{"content": "\"\"\" Logger plugin for Artifactor\n\nAdd a stanza to the artifactor config like this,\nartifactor:\n log_dir: /home/username/outdir\n per_run: test #test, run, None\n overwrite: 
True\n plugins:\n logger:\n enabled: True\n plugin: logger\n level: DEBUG\n\"\"\"\nimport os\nfrom logging import makeLogRecord\nfrom artifactor import ArtifactorBasePlugin\nfrom cfme.utils.log import make_file_handler\n\n\nclass Logger(ArtifactorBasePlugin):\n\n class Test(object):\n def __init__(self, ident):\n self.ident = ident\n self.in_progress = False\n self.handler = None\n\n def close(self):\n if self.handler is not None:\n self.handler.close()\n self.handler = None\n\n def plugin_initialize(self):\n self.register_plugin_hook('start_test', self.start_test)\n self.register_plugin_hook('finish_test', self.finish_test)\n self.register_plugin_hook('log_message', self.log_message)\n\n def configure(self):\n self.configured = True\n self.level = self.data.get('level', 'DEBUG')\n\n @ArtifactorBasePlugin.check_configured\n def start_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n test_ident = \"{}/{}\".format(test_location, test_name)\n if slaveid in self.store:\n if self.store[slaveid].in_progress:\n print(\"Test already running, can't start another, logger\")\n return None\n self.store[slaveid].close()\n self.store[slaveid] = self.Test(test_ident)\n self.store[slaveid].in_progress = True\n filename = \"{ident}-cfme.log\".format(ident=self.ident)\n self.store[slaveid].handler = make_file_handler(\n filename,\n root=artifact_path,\n # we overwrite\n mode='w',\n level=self.level)\n\n self.fire_hook('filedump', test_location=test_location, test_name=test_name,\n description=\"cfme.log\", slaveid=slaveid, contents=\"\", file_type=\"log\",\n display_glyph=\"align-justify\", dont_write=True,\n os_filename=os.path.join(artifact_path, filename),\n group_id=\"pytest-logfile\")\n\n @ArtifactorBasePlugin.check_configured\n def finish_test(self, artifact_path, test_name, test_location, slaveid):\n if not slaveid:\n slaveid = \"Master\"\n self.store[slaveid].in_progress = False\n self.store[slaveid].close()\n\n @ArtifactorBasePlugin.check_configured\n def log_message(self, log_record, slaveid):\n # json transport fallout: args must be a dict or a tuple, json makes a tuple into a list\n args = log_record['args']\n log_record['args'] = tuple(args) if isinstance(args, list) else args\n record = makeLogRecord(log_record)\n if not slaveid:\n slaveid = \"Master\"\n if slaveid in self.store:\n handler = self.store[slaveid].handler\n if handler and record.levelno >= handler.level:\n handler.handle(record)\n", "path": "artifactor/plugins/logger.py"}]}
| 1,257 | 88 |
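The one-character fix above (`self.handle` → `self.handler`) matters because the misspelled attribute raises `AttributeError` the moment a second test tries to rotate the log handler, so later tests keep writing into the first test's file. A stripped-down reproduction of that failure mode, with `io.StringIO` standing in for the real file handler:

```python
import io

class TestRecord:
    def __init__(self):
        self.handler = io.StringIO()  # stand-in for the per-test log handler

    def close_buggy(self):
        if self.handle is not None:   # typo: the attribute is named 'handler'
            self.handler.close()
            self.handler = None

    def close_fixed(self):
        if self.handler is not None:
            self.handler.close()
            self.handler = None

record = TestRecord()
try:
    record.close_buggy()
except AttributeError as exc:
    # In the plugin this abort means the next test never gets its own handler.
    print("rotation failed:", exc)
record.close_fixed()  # the corrected check closes and clears the handler
```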
gh_patches_debug_613
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1314
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.38
On the docket:
+ [ ] PEX direct requirement metadata for resolves via Pip is incorrect. #1311
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.37"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.37"
+__version__ = "2.1.38"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.37\"\n+__version__ = \"2.1.38\"\n", "issue": "Release 2.1.38\nOn the docket:\r\n+ [ ] PEX direct requirement metadata for resolves via Pip is incorrect. #1311\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.37\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.38\"\n", "path": "pex/version.py"}]}
| 342 | 96 |
gh_patches_debug_8086
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-1904
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Higher resolution icons are still saved in 32x32 directory
Despite Lutris bumping its icon size to 128x128 (currently it's still 64x64, as the bump to 128x128 hasn't been deployed yet), it still saves the icons into `icons/hicolor/32x32`.
It should probably not do that and should save them in the proper 128x128 location instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/settings.py`
Content:
```
1 """Internal settings."""
2 import os
3 from gi.repository import GLib
4 from lutris.util.settings import SettingsIO
5 from lutris import __version__
6
7 PROJECT = "Lutris"
8 VERSION = __version__
9 COPYRIGHT = "(c) 2010-2019 Lutris Gaming Platform"
10 AUTHORS = [
11 "The Lutris team"
12 ]
13
14 # Paths
15 CONFIG_DIR = os.path.join(GLib.get_user_config_dir(), "lutris")
16 CONFIG_FILE = os.path.join(CONFIG_DIR, "lutris.conf")
17 DATA_DIR = os.path.join(GLib.get_user_data_dir(), "lutris")
18 RUNNER_DIR = os.path.join(DATA_DIR, "runners")
19 RUNTIME_DIR = os.path.join(DATA_DIR, "runtime")
20 CACHE_DIR = os.path.join(GLib.get_user_cache_dir(), "lutris")
21 GAME_CONFIG_DIR = os.path.join(CONFIG_DIR, "games")
22
23 TMP_PATH = os.path.join(CACHE_DIR, "tmp")
24 BANNER_PATH = os.path.join(DATA_DIR, "banners")
25 COVERART_PATH = os.path.join(DATA_DIR, "coverart")
26 ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "32x32", "apps")
27
28 sio = SettingsIO(CONFIG_FILE)
29 PGA_DB = sio.read_setting("pga_path") or os.path.join(DATA_DIR, "pga.db")
30 SITE_URL = sio.read_setting("website") or "https://lutris.net"
31
32 INSTALLER_URL = SITE_URL + "/api/installers/%s"
33 # XXX change this, should query on the installer, not the game.
34 INSTALLER_REVISION_URL = SITE_URL + "/api/installers/games/%s/revisions/%s"
35 GAME_URL = SITE_URL + "/games/%s/"
36 ICON_URL = SITE_URL + "/games/icon/%s.png"
37 BANNER_URL = SITE_URL + "/games/banner/%s.jpg"
38 RUNTIME_URL = "https://lutris.net/api/runtime"
39
40 read_setting = sio.read_setting
41 write_setting = sio.write_setting
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/settings.py b/lutris/settings.py
--- a/lutris/settings.py
+++ b/lutris/settings.py
@@ -23,7 +23,7 @@
TMP_PATH = os.path.join(CACHE_DIR, "tmp")
BANNER_PATH = os.path.join(DATA_DIR, "banners")
COVERART_PATH = os.path.join(DATA_DIR, "coverart")
-ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "32x32", "apps")
+ICON_PATH = os.path.join(GLib.get_user_data_dir(), "icons", "hicolor", "128x128", "apps")
sio = SettingsIO(CONFIG_FILE)
PGA_DB = sio.read_setting("pga_path") or os.path.join(DATA_DIR, "pga.db")
|
{"golden_diff": "diff --git a/lutris/settings.py b/lutris/settings.py\n--- a/lutris/settings.py\n+++ b/lutris/settings.py\n@@ -23,7 +23,7 @@\n TMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\n BANNER_PATH = os.path.join(DATA_DIR, \"banners\")\n COVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\n-ICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"32x32\", \"apps\")\n+ICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"128x128\", \"apps\")\n \n sio = SettingsIO(CONFIG_FILE)\n PGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\n", "issue": "Higher resolution icons are still saved in 32x32 directory\nDespite Lutris bumping its icon size to 128x128 (currently it's still 64x64 as bump to 128x128 hasn't been deployed yet), it still saves the icons into `icons/hicolor/32x32`.\r\nIt should probably not do that and save it in proper 128x128 location instead.\n", "before_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nfrom gi.repository import GLib\nfrom lutris.util.settings import SettingsIO\nfrom lutris import __version__\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = \"(c) 2010-2019 Lutris Gaming Platform\"\nAUTHORS = [\n \"The Lutris team\"\n]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(DATA_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", \"hicolor\", \"32x32\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nPGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nICON_URL = SITE_URL + \"/games/icon/%s.png\"\nBANNER_URL = SITE_URL + \"/games/banner/%s.jpg\"\nRUNTIME_URL = \"https://lutris.net/api/runtime\"\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}], "after_files": [{"content": "\"\"\"Internal settings.\"\"\"\nimport os\nfrom gi.repository import GLib\nfrom lutris.util.settings import SettingsIO\nfrom lutris import __version__\n\nPROJECT = \"Lutris\"\nVERSION = __version__\nCOPYRIGHT = \"(c) 2010-2019 Lutris Gaming Platform\"\nAUTHORS = [\n \"The Lutris team\"\n]\n\n# Paths\nCONFIG_DIR = os.path.join(GLib.get_user_config_dir(), \"lutris\")\nCONFIG_FILE = os.path.join(CONFIG_DIR, \"lutris.conf\")\nDATA_DIR = os.path.join(GLib.get_user_data_dir(), \"lutris\")\nRUNNER_DIR = os.path.join(DATA_DIR, \"runners\")\nRUNTIME_DIR = os.path.join(DATA_DIR, \"runtime\")\nCACHE_DIR = os.path.join(GLib.get_user_cache_dir(), \"lutris\")\nGAME_CONFIG_DIR = os.path.join(CONFIG_DIR, \"games\")\n\nTMP_PATH = os.path.join(CACHE_DIR, \"tmp\")\nBANNER_PATH = os.path.join(DATA_DIR, \"banners\")\nCOVERART_PATH = os.path.join(DATA_DIR, \"coverart\")\nICON_PATH = os.path.join(GLib.get_user_data_dir(), \"icons\", 
\"hicolor\", \"128x128\", \"apps\")\n\nsio = SettingsIO(CONFIG_FILE)\nPGA_DB = sio.read_setting(\"pga_path\") or os.path.join(DATA_DIR, \"pga.db\")\nSITE_URL = sio.read_setting(\"website\") or \"https://lutris.net\"\n\nINSTALLER_URL = SITE_URL + \"/api/installers/%s\"\n# XXX change this, should query on the installer, not the game.\nINSTALLER_REVISION_URL = SITE_URL + \"/api/installers/games/%s/revisions/%s\"\nGAME_URL = SITE_URL + \"/games/%s/\"\nICON_URL = SITE_URL + \"/games/icon/%s.png\"\nBANNER_URL = SITE_URL + \"/games/banner/%s.jpg\"\nRUNTIME_URL = \"https://lutris.net/api/runtime\"\n\nread_setting = sio.read_setting\nwrite_setting = sio.write_setting\n", "path": "lutris/settings.py"}]}
| 859 | 180 |
gh_patches_debug_3319
|
rasdani/github-patches
|
git_diff
|
spack__spack-3825
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`scorep` does not work on Darwin
The Score-P package requires a case-sensitive file system. This is described in the install notes, and I confirmed with the developers. I suggest to disable Score-P on Darwin to avoid others having to track down this problem in the same way I had to. Alternatively, we can add an install-time test whether the build or install directories are on a case-insensitive file system.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/scorep/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Scorep(AutotoolsPackage):
29 """The Score-P measurement infrastructure is a highly scalable and
30 easy-to-use tool suite for profiling, event tracing, and online analysis
31 of HPC applications.
32 """
33
34 homepage = "http://www.vi-hps.org/projects/score-p"
35 url = "http://www.vi-hps.org/upload/packages/scorep/scorep-2.0.2.tar.gz"
36
37 version('3.0', '44da8beaa3f71436a5f6fe51938aab2f')
38 version('2.0.2', '8f00e79e1b5b96e511c5ebecd10b2888')
39 version('1.4.2', '3b9a042b13bdd5836452354e6567f71e')
40 version('1.3', '9db6f957b7f51fa01377a9537867a55c')
41
42 ##########
43 # Dependencies for SCORE-P are quite tight. See the homepage for more
44 # information.
45 # SCOREP 3
46 depends_on('otf2@2:', when='@3:')
47 depends_on('opari2@2:', when='@3:')
48 depends_on('[email protected]:', when='@3:')
49 # SCOREP 2.0.2
50 depends_on('[email protected]', when='@2.0.2')
51 depends_on('[email protected]', when='@2.0.2')
52 depends_on('[email protected]:4.4', when='@2.0.2')
53 # SCOREP 1.4.2
54 depends_on('[email protected]:1.6', when='@1.4.2')
55 depends_on('[email protected]', when='@1.4.2')
56 depends_on('[email protected]:4.4', when='@1.4.2')
57 # SCOREP 1.3
58 depends_on("[email protected]", when='@1.3')
59 depends_on("[email protected]", when='@1.3')
60 depends_on("[email protected]", when='@1.3')
61 ##########
62
63 depends_on("mpi")
64 depends_on("papi")
65
66 variant('shmem', default=False, description='Enable shmem tracing')
67
68 def configure_args(self):
69 spec = self.spec
70
71 config_args = [
72 "--with-otf2=%s" % spec['otf2'].prefix.bin,
73 "--with-opari2=%s" % spec['opari2'].prefix.bin,
74 "--with-cube=%s" % spec['cube'].prefix.bin,
75 "--with-papi-header=%s" % spec['papi'].prefix.include,
76 "--with-papi-lib=%s" % spec['papi'].prefix.lib,
77 "--enable-shared",
78 ]
79
80 if '~shmem' in spec:
81 config_args.append("--without-shmem")
82
83 config_args.extend(["CFLAGS=-fPIC", "CXXFLAGS=-fPIC"])
84 return config_args
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/scorep/package.py b/var/spack/repos/builtin/packages/scorep/package.py
--- a/var/spack/repos/builtin/packages/scorep/package.py
+++ b/var/spack/repos/builtin/packages/scorep/package.py
@@ -65,6 +65,11 @@
variant('shmem', default=False, description='Enable shmem tracing')
+ # Score-P requires a case-sensitive file system, and therefore
+ # does not work on macOS
+ # https://github.com/LLNL/spack/issues/1609
+ conflicts('platform=darwin')
+
def configure_args(self):
spec = self.spec
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/scorep/package.py b/var/spack/repos/builtin/packages/scorep/package.py\n--- a/var/spack/repos/builtin/packages/scorep/package.py\n+++ b/var/spack/repos/builtin/packages/scorep/package.py\n@@ -65,6 +65,11 @@\n \n variant('shmem', default=False, description='Enable shmem tracing')\n \n+ # Score-P requires a case-sensitive file system, and therefore\n+ # does not work on macOS\n+ # https://github.com/LLNL/spack/issues/1609\n+ conflicts('platform=darwin')\n+\n def configure_args(self):\n spec = self.spec\n", "issue": "`scorep` does not work on Darwin\nThe Score-P package requires a case-sensitive file system. This is described in the install notes, and I confirmed with the developers. I suggest to disable Score-P on Darwin to avoid others having to track down this problem in the same way I had to. Alternatively, we can add an install-time test whether the build or install directories are on a case-insensitive file system.\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Scorep(AutotoolsPackage):\n \"\"\"The Score-P measurement infrastructure is a highly scalable and\n easy-to-use tool suite for profiling, event tracing, and online analysis\n of HPC applications.\n \"\"\"\n\n homepage = \"http://www.vi-hps.org/projects/score-p\"\n url = \"http://www.vi-hps.org/upload/packages/scorep/scorep-2.0.2.tar.gz\"\n\n version('3.0', '44da8beaa3f71436a5f6fe51938aab2f')\n version('2.0.2', '8f00e79e1b5b96e511c5ebecd10b2888')\n version('1.4.2', '3b9a042b13bdd5836452354e6567f71e')\n version('1.3', '9db6f957b7f51fa01377a9537867a55c')\n\n ##########\n # Dependencies for SCORE-P are quite tight. 
See the homepage for more\n # information.\n # SCOREP 3\n depends_on('otf2@2:', when='@3:')\n depends_on('opari2@2:', when='@3:')\n depends_on('[email protected]:', when='@3:')\n # SCOREP 2.0.2\n depends_on('[email protected]', when='@2.0.2')\n depends_on('[email protected]', when='@2.0.2')\n depends_on('[email protected]:4.4', when='@2.0.2')\n # SCOREP 1.4.2\n depends_on('[email protected]:1.6', when='@1.4.2')\n depends_on('[email protected]', when='@1.4.2')\n depends_on('[email protected]:4.4', when='@1.4.2')\n # SCOREP 1.3\n depends_on(\"[email protected]\", when='@1.3')\n depends_on(\"[email protected]\", when='@1.3')\n depends_on(\"[email protected]\", when='@1.3')\n ##########\n\n depends_on(\"mpi\")\n depends_on(\"papi\")\n\n variant('shmem', default=False, description='Enable shmem tracing')\n\n def configure_args(self):\n spec = self.spec\n\n config_args = [\n \"--with-otf2=%s\" % spec['otf2'].prefix.bin,\n \"--with-opari2=%s\" % spec['opari2'].prefix.bin,\n \"--with-cube=%s\" % spec['cube'].prefix.bin,\n \"--with-papi-header=%s\" % spec['papi'].prefix.include,\n \"--with-papi-lib=%s\" % spec['papi'].prefix.lib,\n \"--enable-shared\",\n ]\n\n if '~shmem' in spec:\n config_args.append(\"--without-shmem\")\n\n config_args.extend([\"CFLAGS=-fPIC\", \"CXXFLAGS=-fPIC\"])\n return config_args\n", "path": "var/spack/repos/builtin/packages/scorep/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Scorep(AutotoolsPackage):\n \"\"\"The Score-P measurement infrastructure is a highly scalable and\n easy-to-use tool suite for profiling, event tracing, and online analysis\n of HPC applications.\n \"\"\"\n\n homepage = \"http://www.vi-hps.org/projects/score-p\"\n url = \"http://www.vi-hps.org/upload/packages/scorep/scorep-2.0.2.tar.gz\"\n\n version('3.0', '44da8beaa3f71436a5f6fe51938aab2f')\n version('2.0.2', '8f00e79e1b5b96e511c5ebecd10b2888')\n version('1.4.2', '3b9a042b13bdd5836452354e6567f71e')\n version('1.3', '9db6f957b7f51fa01377a9537867a55c')\n\n ##########\n # Dependencies for SCORE-P are quite tight. 
See the homepage for more\n # information.\n # SCOREP 3\n depends_on('otf2@2:', when='@3:')\n depends_on('opari2@2:', when='@3:')\n depends_on('[email protected]:', when='@3:')\n # SCOREP 2.0.2\n depends_on('[email protected]', when='@2.0.2')\n depends_on('[email protected]', when='@2.0.2')\n depends_on('[email protected]:4.4', when='@2.0.2')\n # SCOREP 1.4.2\n depends_on('[email protected]:1.6', when='@1.4.2')\n depends_on('[email protected]', when='@1.4.2')\n depends_on('[email protected]:4.4', when='@1.4.2')\n # SCOREP 1.3\n depends_on(\"[email protected]\", when='@1.3')\n depends_on(\"[email protected]\", when='@1.3')\n depends_on(\"[email protected]\", when='@1.3')\n ##########\n\n depends_on(\"mpi\")\n depends_on(\"papi\")\n\n variant('shmem', default=False, description='Enable shmem tracing')\n\n # Score-P requires a case-sensitive file system, and therefore\n # does not work on macOS\n # https://github.com/LLNL/spack/issues/1609\n conflicts('platform=darwin')\n\n def configure_args(self):\n spec = self.spec\n\n config_args = [\n \"--with-otf2=%s\" % spec['otf2'].prefix.bin,\n \"--with-opari2=%s\" % spec['opari2'].prefix.bin,\n \"--with-cube=%s\" % spec['cube'].prefix.bin,\n \"--with-papi-header=%s\" % spec['papi'].prefix.include,\n \"--with-papi-lib=%s\" % spec['papi'].prefix.lib,\n \"--enable-shared\",\n ]\n\n if '~shmem' in spec:\n config_args.append(\"--without-shmem\")\n\n config_args.extend([\"CFLAGS=-fPIC\", \"CXXFLAGS=-fPIC\"])\n return config_args\n", "path": "var/spack/repos/builtin/packages/scorep/package.py"}]}
| 1,552 | 156 |
gh_patches_debug_29093
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-1093
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
decimal_to_binary() should return identical values as bin()
https://github.com/TheAlgorithms/Python/blob/7b267e5e4f8ccb72dd58fcf0057642fd62a36bdf/conversions/decimal_to_binary.py#L4
Please change __decimal_to_binary()__ to return identical values as the Python builtin [__bin()__](https://docs.python.org/3/library/functions.html#bin). With doctests to prove it please.
@PatOnTheBack @Corruption13
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conversions/decimal_to_binary.py`
Content:
```
1 """Convert a Decimal Number to a Binary Number."""
2
3
4 def decimal_to_binary(num):
5 """Convert a Decimal Number to a Binary Number."""
6 binary = []
7 while num > 0:
8 binary.insert(0, num % 2)
9 num >>= 1
10 return "".join(str(e) for e in binary)
11
12
13 def main():
14 """Print binary equivelents of decimal numbers."""
15 print("\n2 in binary is:")
16 print(decimal_to_binary(2)) # = 10
17 print("\n7 in binary is:")
18 print(decimal_to_binary(7)) # = 111
19 print("\n35 in binary is:")
20 print(decimal_to_binary(35)) # = 100011
21 print("\n")
22
23
24 if __name__ == '__main__':
25 main()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py
--- a/conversions/decimal_to_binary.py
+++ b/conversions/decimal_to_binary.py
@@ -2,24 +2,57 @@
def decimal_to_binary(num):
- """Convert a Decimal Number to a Binary Number."""
+
+ """
+ Convert a Integer Decimal Number to a Binary Number as str.
+ >>> decimal_to_binary(0)
+ '0b0'
+ >>> decimal_to_binary(2)
+ '0b10'
+ >>> decimal_to_binary(7)
+ '0b111'
+ >>> decimal_to_binary(35)
+ '0b100011'
+ >>> # negatives work too
+ >>> decimal_to_binary(-2)
+ '-0b10'
+ >>> # other floats will error
+ >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> # strings will error as well
+ >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS
+ Traceback (most recent call last):
+ ...
+ TypeError: 'str' object cannot be interpreted as an integer
+ """
+
+ if type(num) == float:
+ raise TypeError("'float' object cannot be interpreted as an integer")
+ if type(num) == str:
+ raise TypeError("'str' object cannot be interpreted as an integer")
+
+ if num == 0:
+ return "0b0"
+
+ negative = False
+
+ if num < 0:
+ negative = True
+ num = -num
+
binary = []
while num > 0:
binary.insert(0, num % 2)
num >>= 1
- return "".join(str(e) for e in binary)
+ if negative:
+ return "-0b" + "".join(str(e) for e in binary)
-def main():
- """Print binary equivelents of decimal numbers."""
- print("\n2 in binary is:")
- print(decimal_to_binary(2)) # = 10
- print("\n7 in binary is:")
- print(decimal_to_binary(7)) # = 111
- print("\n35 in binary is:")
- print(decimal_to_binary(35)) # = 100011
- print("\n")
+ return "0b" + "".join(str(e) for e in binary)
-if __name__ == '__main__':
- main()
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
|
{"golden_diff": "diff --git a/conversions/decimal_to_binary.py b/conversions/decimal_to_binary.py\n--- a/conversions/decimal_to_binary.py\n+++ b/conversions/decimal_to_binary.py\n@@ -2,24 +2,57 @@\n \n \n def decimal_to_binary(num):\n- \"\"\"Convert a Decimal Number to a Binary Number.\"\"\"\n+\n+ \"\"\"\n+ Convert a Integer Decimal Number to a Binary Number as str.\n+ >>> decimal_to_binary(0)\n+ '0b0'\n+ >>> decimal_to_binary(2)\n+ '0b10'\n+ >>> decimal_to_binary(7)\n+ '0b111'\n+ >>> decimal_to_binary(35)\n+ '0b100011'\n+ >>> # negatives work too\n+ >>> decimal_to_binary(-2)\n+ '-0b10'\n+ >>> # other floats will error\n+ >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'float' object cannot be interpreted as an integer\n+ >>> # strings will error as well\n+ >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'str' object cannot be interpreted as an integer\n+ \"\"\"\n+\n+ if type(num) == float:\n+ raise TypeError(\"'float' object cannot be interpreted as an integer\")\n+ if type(num) == str:\n+ raise TypeError(\"'str' object cannot be interpreted as an integer\")\n+\n+ if num == 0:\n+ return \"0b0\"\n+\n+ negative = False\n+\n+ if num < 0:\n+ negative = True\n+ num = -num\n+\n binary = []\n while num > 0:\n binary.insert(0, num % 2)\n num >>= 1\n- return \"\".join(str(e) for e in binary)\n \n+ if negative:\n+ return \"-0b\" + \"\".join(str(e) for e in binary)\n \n-def main():\n- \"\"\"Print binary equivelents of decimal numbers.\"\"\"\n- print(\"\\n2 in binary is:\")\n- print(decimal_to_binary(2)) # = 10\n- print(\"\\n7 in binary is:\")\n- print(decimal_to_binary(7)) # = 111\n- print(\"\\n35 in binary is:\")\n- print(decimal_to_binary(35)) # = 100011\n- print(\"\\n\")\n+ return \"0b\" + \"\".join(str(e) for e in binary)\n \n \n-if __name__ == '__main__':\n- main()\n+if __name__ == \"__main__\":\n+ import doctest\n+ doctest.testmod()\n", "issue": "decimal_to_binary() should return identical values as bin()\nhttps://github.com/TheAlgorithms/Python/blob/7b267e5e4f8ccb72dd58fcf0057642fd62a36bdf/conversions/decimal_to_binary.py#L4\r\n\r\nPlease change __decimal_to_binary()__ to return identical values as the Python builtin [__bin()__](https://docs.python.org/3/library/functions.html#bin). 
With doctests to prove it please.\r\n\r\n@PatOnTheBack @Corruption13\n", "before_files": [{"content": "\"\"\"Convert a Decimal Number to a Binary Number.\"\"\"\n\n\ndef decimal_to_binary(num):\n \"\"\"Convert a Decimal Number to a Binary Number.\"\"\"\n binary = []\n while num > 0:\n binary.insert(0, num % 2)\n num >>= 1\n return \"\".join(str(e) for e in binary)\n\n\ndef main():\n \"\"\"Print binary equivelents of decimal numbers.\"\"\"\n print(\"\\n2 in binary is:\")\n print(decimal_to_binary(2)) # = 10\n print(\"\\n7 in binary is:\")\n print(decimal_to_binary(7)) # = 111\n print(\"\\n35 in binary is:\")\n print(decimal_to_binary(35)) # = 100011\n print(\"\\n\")\n\n\nif __name__ == '__main__':\n main()\n", "path": "conversions/decimal_to_binary.py"}], "after_files": [{"content": "\"\"\"Convert a Decimal Number to a Binary Number.\"\"\"\n\n\ndef decimal_to_binary(num):\n\n \"\"\"\n Convert a Integer Decimal Number to a Binary Number as str.\n >>> decimal_to_binary(0)\n '0b0'\n >>> decimal_to_binary(2)\n '0b10'\n >>> decimal_to_binary(7)\n '0b111'\n >>> decimal_to_binary(35)\n '0b100011'\n >>> # negatives work too\n >>> decimal_to_binary(-2)\n '-0b10'\n >>> # other floats will error\n >>> decimal_to_binary(16.16) # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: 'float' object cannot be interpreted as an integer\n >>> # strings will error as well\n >>> decimal_to_binary('0xfffff') # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: 'str' object cannot be interpreted as an integer\n \"\"\"\n\n if type(num) == float:\n raise TypeError(\"'float' object cannot be interpreted as an integer\")\n if type(num) == str:\n raise TypeError(\"'str' object cannot be interpreted as an integer\")\n\n if num == 0:\n return \"0b0\"\n\n negative = False\n\n if num < 0:\n negative = True\n num = -num\n\n binary = []\n while num > 0:\n binary.insert(0, num % 2)\n num >>= 1\n\n if negative:\n return \"-0b\" + \"\".join(str(e) for e in binary)\n\n return \"0b\" + \"\".join(str(e) for e in binary)\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "conversions/decimal_to_binary.py"}]}
| 609 | 628 |
gh_patches_debug_1651
|
rasdani/github-patches
|
git_diff
|
deeppavlov__DeepPavlov-76
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What is "'Chainer' object has no attribute 'infer'
2018-03-04 14:09:23,638 (util.py:64 WorkerThread2) ERROR - TeleBot: "AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",)
Traceback (most recent call last):
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run
task(*args, **kwargs)
File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference
pred = model.infer(context)
AttributeError: 'Chainer' object has no attribute 'infer'
"
2018-03-04 14:09:23.638 ERROR in 'TeleBot'['util'] at line 64: AttributeError occurred, args=("'Chainer' object has no attribute 'infer'",)
Traceback (most recent call last):
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run
task(*args, **kwargs)
File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference
pred = model.infer(context)
AttributeError: 'Chainer' object has no attribute 'infer'
Traceback (most recent call last):
File "deep.py", line 60, in <module>
main()
File "deep.py", line 56, in main
interact_model_by_telegram(pipeline_config_path, token)
File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 58, in interact_model_by_telegram
init_bot_for_model(token, model)
File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 52, in init_bot_for_model
bot.polling()
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 264, in polling
self.__threaded_polling(none_stop, interval, timeout)
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py", line 288, in __threaded_polling
self.worker_pool.raise_exceptions()
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 107, in raise_exceptions
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/six.py", line 693, in reraise
raise value
File "/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py", line 58, in run
task(*args, **kwargs)
File "/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py", line 48, in handle_inference
pred = model.infer(context)
AttributeError: 'Chainer' object has no attribute 'infer'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram_utils/telegram_ui.py`
Content:
```
1 """
2 Copyright 2017 Neural Networks and Deep Learning lab, MIPT
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 import telebot
17
18 from deeppavlov.core.common.file import read_json
19 from deeppavlov.core.commands.infer import build_model_from_config
20
21
22 def init_bot_for_model(token, model):
23 bot = telebot.TeleBot(token)
24
25 model_name = type(model).__name__
26 models_info = read_json('../telegram_utils/models_info.json')
27 model_info = models_info[model_name] if model_name in models_info else models_info['@default']
28
29 @bot.message_handler(commands=['start'])
30 def send_start_message(message):
31 chat_id = message.chat.id
32 out_message = model_info['start_message']
33 if hasattr(model, 'reset'):
34 model.reset()
35 bot.send_message(chat_id, out_message)
36
37 @bot.message_handler(commands=['help'])
38 def send_help_message(message):
39 chat_id = message.chat.id
40 out_message = model_info['help_message']
41 bot.send_message(chat_id, out_message)
42
43 @bot.message_handler()
44 def handle_inference(message):
45 chat_id = message.chat.id
46 context = message.text
47
48 pred = model.infer(context)
49 reply_message = str(pred)
50 bot.send_message(chat_id, reply_message)
51
52 bot.polling()
53
54
55 def interact_model_by_telegram(config_path, token):
56 config = read_json(config_path)
57 model = build_model_from_config(config)
58 init_bot_for_model(token, model)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py
--- a/telegram_utils/telegram_ui.py
+++ b/telegram_utils/telegram_ui.py
@@ -45,7 +45,7 @@
chat_id = message.chat.id
context = message.text
- pred = model.infer(context)
+ pred = model(context)
reply_message = str(pred)
bot.send_message(chat_id, reply_message)
|
{"golden_diff": "diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py\n--- a/telegram_utils/telegram_ui.py\n+++ b/telegram_utils/telegram_ui.py\n@@ -45,7 +45,7 @@\n chat_id = message.chat.id\n context = message.text\n \n- pred = model.infer(context)\n+ pred = model(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n", "issue": "What is \"'Chainer' object has no attribute 'infer'\n2018-03-04 14:09:23,638 (util.py:64 WorkerThread2) ERROR - TeleBot: \"AttributeError occurred, args=(\"'Chainer' object has no attribute 'infer'\",)\r\nTraceback (most recent call last):\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py\", line 58, in run\r\n task(*args, **kwargs)\r\n File \"/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py\", line 48, in handle_inference\r\n pred = model.infer(context)\r\nAttributeError: 'Chainer' object has no attribute 'infer'\r\n\"\r\n2018-03-04 14:09:23.638 ERROR in 'TeleBot'['util'] at line 64: AttributeError occurred, args=(\"'Chainer' object has no attribute 'infer'\",)\r\nTraceback (most recent call last):\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py\", line 58, in run\r\n task(*args, **kwargs)\r\n File \"/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py\", line 48, in handle_inference\r\n pred = model.infer(context)\r\nAttributeError: 'Chainer' object has no attribute 'infer'\r\n\r\nTraceback (most recent call last):\r\n File \"deep.py\", line 60, in <module>\r\n main()\r\n File \"deep.py\", line 56, in main\r\n interact_model_by_telegram(pipeline_config_path, token)\r\n File \"/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py\", line 58, in interact_model_by_telegram\r\n init_bot_for_model(token, model)\r\n File \"/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py\", line 52, in init_bot_for_model\r\n bot.polling()\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py\", line 264, in polling\r\n self.__threaded_polling(none_stop, interval, timeout)\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/__init__.py\", line 288, in __threaded_polling\r\n self.worker_pool.raise_exceptions()\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py\", line 107, in raise_exceptions\r\n six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/six.py\", line 693, in reraise\r\n raise value\r\n File \"/Users/developer/DeepPavlov/lib/python3.6/site-packages/telebot/util.py\", line 58, in run\r\n task(*args, **kwargs)\r\n File \"/Users/developer/Project/DeepPavlov/telegram_utils/telegram_ui.py\", line 48, in handle_inference\r\n pred = model.infer(context)\r\nAttributeError: 'Chainer' object has no attribute 'infer'\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the 
License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model.infer(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model(context)\n reply_message = str(pred)\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}]}
| 1,541 | 99 |
gh_patches_debug_14730
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4775
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document link extractor usage outside CrawlSpider rules
https://docs.scrapy.org/en/latest/topics/link-extractors.html mentions that link extractors may be used outside `CrawlSpider`, but it does not go into detail on how to do that.
Also, there are broken references to `scrapy.link.Link`, we should provide reference documentation for that class.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/link.py`
Content:
```
1 """
2 This module defines the Link object used in Link extractors.
3
4 For actual link extractors implementation see scrapy.linkextractors, or
5 its documentation in: docs/topics/link-extractors.rst
6 """
7
8
9 class Link:
10 """Link objects represent an extracted link by the LinkExtractor."""
11
12 __slots__ = ['url', 'text', 'fragment', 'nofollow']
13
14 def __init__(self, url, text='', fragment='', nofollow=False):
15 if not isinstance(url, str):
16 got = url.__class__.__name__
17 raise TypeError(f"Link urls must be str objects, got {got}")
18 self.url = url
19 self.text = text
20 self.fragment = fragment
21 self.nofollow = nofollow
22
23 def __eq__(self, other):
24 return (
25 self.url == other.url
26 and self.text == other.text
27 and self.fragment == other.fragment
28 and self.nofollow == other.nofollow
29 )
30
31 def __hash__(self):
32 return hash(self.url) ^ hash(self.text) ^ hash(self.fragment) ^ hash(self.nofollow)
33
34 def __repr__(self):
35 return (
36 f'Link(url={self.url!r}, text={self.text!r}, '
37 f'fragment={self.fragment!r}, nofollow={self.nofollow!r})'
38 )
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/link.py b/scrapy/link.py
--- a/scrapy/link.py
+++ b/scrapy/link.py
@@ -7,7 +7,22 @@
class Link:
- """Link objects represent an extracted link by the LinkExtractor."""
+ """Link objects represent an extracted link by the LinkExtractor.
+
+ Using the anchor tag sample below to illustrate the parameters::
+
+ <a href="https://example.com/nofollow.html#foo" rel="nofollow">Dont follow this one</a>
+
+ :param url: the absolute url being linked to in the anchor tag.
+ From the sample, this is ``https://example.com/nofollow.html``.
+
+ :param text: the text in the anchor tag. From the sample, this is ``Dont follow this one``.
+
+ :param fragment: the part of the url after the hash symbol. From the sample, this is ``foo``.
+
+ :param nofollow: an indication of the presence or absence of a nofollow value in the ``rel`` attribute
+ of the anchor tag.
+ """
__slots__ = ['url', 'text', 'fragment', 'nofollow']
|
{"golden_diff": "diff --git a/scrapy/link.py b/scrapy/link.py\n--- a/scrapy/link.py\n+++ b/scrapy/link.py\n@@ -7,7 +7,22 @@\n \n \n class Link:\n- \"\"\"Link objects represent an extracted link by the LinkExtractor.\"\"\"\n+ \"\"\"Link objects represent an extracted link by the LinkExtractor.\n+\n+ Using the anchor tag sample below to illustrate the parameters::\n+\n+ <a href=\"https://example.com/nofollow.html#foo\" rel=\"nofollow\">Dont follow this one</a>\n+\n+ :param url: the absolute url being linked to in the anchor tag.\n+ From the sample, this is ``https://example.com/nofollow.html``.\n+\n+ :param text: the text in the anchor tag. From the sample, this is ``Dont follow this one``.\n+\n+ :param fragment: the part of the url after the hash symbol. From the sample, this is ``foo``.\n+\n+ :param nofollow: an indication of the presence or absence of a nofollow value in the ``rel`` attribute\n+ of the anchor tag.\n+ \"\"\"\n \n __slots__ = ['url', 'text', 'fragment', 'nofollow']\n", "issue": "Document link extractor usage outside CrawlSpider rules\nhttps://docs.scrapy.org/en/latest/topics/link-extractors.html mentions that link extractors may be used outside `CrawlSpider`, but it does not go into detail on how to do that.\r\n\r\nAlso, there are broken references to `scrapy.link.Link`, we should provide reference documentation for that class.\n", "before_files": [{"content": "\"\"\"\nThis module defines the Link object used in Link extractors.\n\nFor actual link extractors implementation see scrapy.linkextractors, or\nits documentation in: docs/topics/link-extractors.rst\n\"\"\"\n\n\nclass Link:\n \"\"\"Link objects represent an extracted link by the LinkExtractor.\"\"\"\n\n __slots__ = ['url', 'text', 'fragment', 'nofollow']\n\n def __init__(self, url, text='', fragment='', nofollow=False):\n if not isinstance(url, str):\n got = url.__class__.__name__\n raise TypeError(f\"Link urls must be str objects, got {got}\")\n self.url = url\n self.text = text\n self.fragment = fragment\n self.nofollow = nofollow\n\n def __eq__(self, other):\n return (\n self.url == other.url\n and self.text == other.text\n and self.fragment == other.fragment\n and self.nofollow == other.nofollow\n )\n\n def __hash__(self):\n return hash(self.url) ^ hash(self.text) ^ hash(self.fragment) ^ hash(self.nofollow)\n\n def __repr__(self):\n return (\n f'Link(url={self.url!r}, text={self.text!r}, '\n f'fragment={self.fragment!r}, nofollow={self.nofollow!r})'\n )\n", "path": "scrapy/link.py"}], "after_files": [{"content": "\"\"\"\nThis module defines the Link object used in Link extractors.\n\nFor actual link extractors implementation see scrapy.linkextractors, or\nits documentation in: docs/topics/link-extractors.rst\n\"\"\"\n\n\nclass Link:\n \"\"\"Link objects represent an extracted link by the LinkExtractor.\n\n Using the anchor tag sample below to illustrate the parameters::\n\n <a href=\"https://example.com/nofollow.html#foo\" rel=\"nofollow\">Dont follow this one</a>\n\n :param url: the absolute url being linked to in the anchor tag.\n From the sample, this is ``https://example.com/nofollow.html``.\n\n :param text: the text in the anchor tag. From the sample, this is ``Dont follow this one``.\n\n :param fragment: the part of the url after the hash symbol. 
From the sample, this is ``foo``.\n\n :param nofollow: an indication of the presence or absence of a nofollow value in the ``rel`` attribute\n of the anchor tag.\n \"\"\"\n\n __slots__ = ['url', 'text', 'fragment', 'nofollow']\n\n def __init__(self, url, text='', fragment='', nofollow=False):\n if not isinstance(url, str):\n got = url.__class__.__name__\n raise TypeError(\"Link urls must be str objects, got %s\" % got)\n self.url = url\n self.text = text\n self.fragment = fragment\n self.nofollow = nofollow\n\n def __eq__(self, other):\n return (\n self.url == other.url\n and self.text == other.text\n and self.fragment == other.fragment\n and self.nofollow == other.nofollow\n )\n\n def __hash__(self):\n return hash(self.url) ^ hash(self.text) ^ hash(self.fragment) ^ hash(self.nofollow)\n\n def __repr__(self):\n return (\n 'Link(url=%r, text=%r, fragment=%r, nofollow=%r)'\n % (self.url, self.text, self.fragment, self.nofollow)\n )\n", "path": "scrapy/link.py"}]}
| 690 | 262 |
gh_patches_debug_38824
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-399
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Generic Relation everywhere where generic fks are used
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/documents/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from .models import Document, Paragraph
4
5
6 class ParagraphSerializer(serializers.Serializer):
7 id = serializers.IntegerField(required=False)
8 name = serializers.CharField(
9 required=False,
10 max_length=Paragraph._meta.get_field('name').max_length
11 )
12 weight = serializers.IntegerField()
13 text = serializers.CharField()
14
15
16 class DocumentSerializer(serializers.ModelSerializer):
17 paragraphs = ParagraphSerializer(many=True, partial=True)
18
19 class Meta:
20 model = Document
21 exclude = ('creator',)
22
23 def create(self, validated_data):
24 paragraphs = validated_data.pop('paragraphs')
25 user = self.context['request'].user
26 document = Document.objects.create(creator=user, **validated_data)
27
28 for paragraph in paragraphs:
29 Paragraph.objects.create(document=document, **paragraph)
30
31 return document
32
33 def update(self, instance, validated_data):
34 instance.name = validated_data['name']
35 instance.save()
36 paragraphs = validated_data.pop('paragraphs')
37
38 paragraph_ids = [item['id'] for item in paragraphs if 'id' in item]
39 instance.paragraphs.exclude(id__in=paragraph_ids).delete()
40
41 for paragraph in paragraphs:
42 paragraph['document'] = instance
43 if 'id' in paragraph:
44 instance.paragraphs.filter(id=paragraph['id'])\
45 .update(**paragraph)
46 else:
47 instance.paragraphs.create(**paragraph)
48
49 return instance
50
```
Path: `euth/documents/models.py`
Content:
```
1 from ckeditor.fields import RichTextField
2 from django.contrib.contenttypes.models import ContentType
3 from django.core.exceptions import ObjectDoesNotExist, ValidationError
4 from django.db import models
5 from django.utils.functional import cached_property
6 from django.utils.translation import ugettext_lazy as _
7
8 from contrib.transforms import html_transforms
9 from euth.comments import models as comment_models
10 from euth.contrib import base_models
11 from euth.modules import models as module_models
12
13
14 class Document(module_models.Item):
15 name = models.CharField(max_length=120)
16
17 def __str__(self):
18 return "{}_document_{}".format(str(self.module), self.pk)
19
20 def clean(self, *args, **kwargs):
21 if not self.pk:
22 try:
23 Document.objects.get(module=self.module)
24 raise ValidationError(
25 _('Document for that module already exists'))
26 except ObjectDoesNotExist:
27 super().clean(*args, **kwargs)
28 super().clean(*args, **kwargs)
29
30 @cached_property
31 def paragraphs_sorted(self):
32 return self.paragraphs.all().order_by('weight')
33
34 @cached_property
35 def comments(self):
36 contenttype = ContentType.objects.get_for_model(self)
37 pk = self.id
38 comments = comment_models.Comment.objects.all().filter(
39 content_type=contenttype, object_pk=pk)
40 return comments
41
42
43 class Paragraph(base_models.TimeStampedModel):
44 name = models.CharField(max_length=120, blank=True)
45 text = RichTextField()
46 weight = models.PositiveIntegerField()
47 document = models.ForeignKey(Document,
48 on_delete=models.CASCADE,
49 related_name='paragraphs')
50
51 class Meta:
52 ordering = ('weight',)
53
54 def __str__(self):
55 return "{}_paragraph_{}".format(str(self.document), self.weight)
56
57 def save(self, *args, **kwargs):
58 self.text = html_transforms.clean_html_field(
59 self.text)
60 super().save(*args, **kwargs)
61
62 @cached_property
63 def comments(self):
64 contenttype = ContentType.objects.get_for_model(self)
65 pk = self.id
66 comments = comment_models.Comment.objects.all().filter(
67 content_type=contenttype, object_pk=pk)
68 return comments
69
70 @cached_property
71 def project(self):
72 return self.document.project
73
```
Path: `euth/documents/phases.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3 from euth import phases
4
5 from . import apps, models, views
6
7
8 class CreateDocumentPhase(phases.PhaseContent):
9 """
10 Allows no interaction for participants, only
11 creation for moderators.
12 """
13 app = apps.DocumentConfig.label
14 phase = 'create_document'
15 view = views.DocumentCreateView
16 weight = 30
17
18 name = _('Create document phase')
19 module_name = _('commenting text')
20 description = _('Create text for the project.')
21
22 features = {}
23
24
25 phases.content.register(CreateDocumentPhase())
26
27
28 class CommentPhase(phases.PhaseContent):
29 """
30 Allows only commenting of paragraphs.
31 """
32 app = apps.DocumentConfig.label
33 phase = 'comment'
34 view = views.DocumentDetailView
35 weight = 40
36
37 name = _('Comment phase')
38 module_name = _('commenting text')
39 description = _('Collect comments for the text.')
40
41 features = {
42 'comment': (models.Paragraph,),
43 }
44
45
46 phases.content.register(CommentPhase())
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/documents/models.py b/euth/documents/models.py
--- a/euth/documents/models.py
+++ b/euth/documents/models.py
@@ -1,5 +1,5 @@
from ckeditor.fields import RichTextField
-from django.contrib.contenttypes.models import ContentType
+from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
from django.utils.functional import cached_property
@@ -27,18 +27,6 @@
super().clean(*args, **kwargs)
super().clean(*args, **kwargs)
- @cached_property
- def paragraphs_sorted(self):
- return self.paragraphs.all().order_by('weight')
-
- @cached_property
- def comments(self):
- contenttype = ContentType.objects.get_for_model(self)
- pk = self.id
- comments = comment_models.Comment.objects.all().filter(
- content_type=contenttype, object_pk=pk)
- return comments
-
class Paragraph(base_models.TimeStampedModel):
name = models.CharField(max_length=120, blank=True)
@@ -47,6 +35,9 @@
document = models.ForeignKey(Document,
on_delete=models.CASCADE,
related_name='paragraphs')
+ comments = GenericRelation(comment_models.Comment,
+ related_query_name='paragraph',
+ object_id_field='object_pk')
class Meta:
ordering = ('weight',)
@@ -59,14 +50,6 @@
self.text)
super().save(*args, **kwargs)
- @cached_property
- def comments(self):
- contenttype = ContentType.objects.get_for_model(self)
- pk = self.id
- comments = comment_models.Comment.objects.all().filter(
- content_type=contenttype, object_pk=pk)
- return comments
-
@cached_property
def project(self):
return self.document.project
diff --git a/euth/documents/phases.py b/euth/documents/phases.py
--- a/euth/documents/phases.py
+++ b/euth/documents/phases.py
@@ -39,7 +39,7 @@
description = _('Collect comments for the text.')
features = {
- 'comment': (models.Paragraph,),
+ 'comment': (models.Paragraph, models.Document),
}
diff --git a/euth/documents/serializers.py b/euth/documents/serializers.py
--- a/euth/documents/serializers.py
+++ b/euth/documents/serializers.py
@@ -7,6 +7,7 @@
id = serializers.IntegerField(required=False)
name = serializers.CharField(
required=False,
+ allow_blank=True,
max_length=Paragraph._meta.get_field('name').max_length
)
weight = serializers.IntegerField()
|
{"golden_diff": "diff --git a/euth/documents/models.py b/euth/documents/models.py\n--- a/euth/documents/models.py\n+++ b/euth/documents/models.py\n@@ -1,5 +1,5 @@\n from ckeditor.fields import RichTextField\n-from django.contrib.contenttypes.models import ContentType\n+from django.contrib.contenttypes.fields import GenericRelation\n from django.core.exceptions import ObjectDoesNotExist, ValidationError\n from django.db import models\n from django.utils.functional import cached_property\n@@ -27,18 +27,6 @@\n super().clean(*args, **kwargs)\n super().clean(*args, **kwargs)\n \n- @cached_property\n- def paragraphs_sorted(self):\n- return self.paragraphs.all().order_by('weight')\n-\n- @cached_property\n- def comments(self):\n- contenttype = ContentType.objects.get_for_model(self)\n- pk = self.id\n- comments = comment_models.Comment.objects.all().filter(\n- content_type=contenttype, object_pk=pk)\n- return comments\n-\n \n class Paragraph(base_models.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n@@ -47,6 +35,9 @@\n document = models.ForeignKey(Document,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n+ comments = GenericRelation(comment_models.Comment,\n+ related_query_name='paragraph',\n+ object_id_field='object_pk')\n \n class Meta:\n ordering = ('weight',)\n@@ -59,14 +50,6 @@\n self.text)\n super().save(*args, **kwargs)\n \n- @cached_property\n- def comments(self):\n- contenttype = ContentType.objects.get_for_model(self)\n- pk = self.id\n- comments = comment_models.Comment.objects.all().filter(\n- content_type=contenttype, object_pk=pk)\n- return comments\n-\n @cached_property\n def project(self):\n return self.document.project\ndiff --git a/euth/documents/phases.py b/euth/documents/phases.py\n--- a/euth/documents/phases.py\n+++ b/euth/documents/phases.py\n@@ -39,7 +39,7 @@\n description = _('Collect comments for the text.')\n \n features = {\n- 'comment': (models.Paragraph,),\n+ 'comment': (models.Paragraph, models.Document),\n }\n \n \ndiff --git a/euth/documents/serializers.py b/euth/documents/serializers.py\n--- a/euth/documents/serializers.py\n+++ b/euth/documents/serializers.py\n@@ -7,6 +7,7 @@\n id = serializers.IntegerField(required=False)\n name = serializers.CharField(\n required=False,\n+ allow_blank=True,\n max_length=Paragraph._meta.get_field('name').max_length\n )\n weight = serializers.IntegerField()\n", "issue": "Use Generic Relation everywhere where generic fks are used\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import Document, Paragraph\n\n\nclass ParagraphSerializer(serializers.Serializer):\n id = serializers.IntegerField(required=False)\n name = serializers.CharField(\n required=False,\n max_length=Paragraph._meta.get_field('name').max_length\n )\n weight = serializers.IntegerField()\n text = serializers.CharField()\n\n\nclass DocumentSerializer(serializers.ModelSerializer):\n paragraphs = ParagraphSerializer(many=True, partial=True)\n\n class Meta:\n model = Document\n exclude = ('creator',)\n\n def create(self, validated_data):\n paragraphs = validated_data.pop('paragraphs')\n user = self.context['request'].user\n document = Document.objects.create(creator=user, **validated_data)\n\n for paragraph in paragraphs:\n Paragraph.objects.create(document=document, **paragraph)\n\n return document\n\n def update(self, instance, validated_data):\n instance.name = validated_data['name']\n instance.save()\n paragraphs = validated_data.pop('paragraphs')\n\n paragraph_ids = [item['id'] for item in 
paragraphs if 'id' in item]\n instance.paragraphs.exclude(id__in=paragraph_ids).delete()\n\n for paragraph in paragraphs:\n paragraph['document'] = instance\n if 'id' in paragraph:\n instance.paragraphs.filter(id=paragraph['id'])\\\n .update(**paragraph)\n else:\n instance.paragraphs.create(**paragraph)\n\n return instance\n", "path": "euth/documents/serializers.py"}, {"content": "from ckeditor.fields import RichTextField\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom contrib.transforms import html_transforms\nfrom euth.comments import models as comment_models\nfrom euth.contrib import base_models\nfrom euth.modules import models as module_models\n\n\nclass Document(module_models.Item):\n name = models.CharField(max_length=120)\n\n def __str__(self):\n return \"{}_document_{}\".format(str(self.module), self.pk)\n\n def clean(self, *args, **kwargs):\n if not self.pk:\n try:\n Document.objects.get(module=self.module)\n raise ValidationError(\n _('Document for that module already exists'))\n except ObjectDoesNotExist:\n super().clean(*args, **kwargs)\n super().clean(*args, **kwargs)\n\n @cached_property\n def paragraphs_sorted(self):\n return self.paragraphs.all().order_by('weight')\n\n @cached_property\n def comments(self):\n contenttype = ContentType.objects.get_for_model(self)\n pk = self.id\n comments = comment_models.Comment.objects.all().filter(\n content_type=contenttype, object_pk=pk)\n return comments\n\n\nclass Paragraph(base_models.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextField()\n weight = models.PositiveIntegerField()\n document = models.ForeignKey(Document,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_paragraph_{}\".format(str(self.document), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = html_transforms.clean_html_field(\n self.text)\n super().save(*args, **kwargs)\n\n @cached_property\n def comments(self):\n contenttype = ContentType.objects.get_for_model(self)\n pk = self.id\n comments = comment_models.Comment.objects.all().filter(\n content_type=contenttype, object_pk=pk)\n return comments\n\n @cached_property\n def project(self):\n return self.document.project\n", "path": "euth/documents/models.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom euth import phases\n\nfrom . 
import apps, models, views\n\n\nclass CreateDocumentPhase(phases.PhaseContent):\n \"\"\"\n Allows no interaction for participants, only\n creation for moderators.\n \"\"\"\n app = apps.DocumentConfig.label\n phase = 'create_document'\n view = views.DocumentCreateView\n weight = 30\n\n name = _('Create document phase')\n module_name = _('commenting text')\n description = _('Create text for the project.')\n\n features = {}\n\n\nphases.content.register(CreateDocumentPhase())\n\n\nclass CommentPhase(phases.PhaseContent):\n \"\"\"\n Allows only commenting of paragraphs.\n \"\"\"\n app = apps.DocumentConfig.label\n phase = 'comment'\n view = views.DocumentDetailView\n weight = 40\n\n name = _('Comment phase')\n module_name = _('commenting text')\n description = _('Collect comments for the text.')\n\n features = {\n 'comment': (models.Paragraph,),\n }\n\n\nphases.content.register(CommentPhase())\n", "path": "euth/documents/phases.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom .models import Document, Paragraph\n\n\nclass ParagraphSerializer(serializers.Serializer):\n id = serializers.IntegerField(required=False)\n name = serializers.CharField(\n required=False,\n allow_blank=True,\n max_length=Paragraph._meta.get_field('name').max_length\n )\n weight = serializers.IntegerField()\n text = serializers.CharField()\n\n\nclass DocumentSerializer(serializers.ModelSerializer):\n paragraphs = ParagraphSerializer(many=True, partial=True)\n\n class Meta:\n model = Document\n exclude = ('creator',)\n\n def create(self, validated_data):\n paragraphs = validated_data.pop('paragraphs')\n user = self.context['request'].user\n document = Document.objects.create(creator=user, **validated_data)\n\n for paragraph in paragraphs:\n Paragraph.objects.create(document=document, **paragraph)\n\n return document\n\n def update(self, instance, validated_data):\n instance.name = validated_data['name']\n instance.save()\n paragraphs = validated_data.pop('paragraphs')\n\n paragraph_ids = [item['id'] for item in paragraphs if 'id' in item]\n instance.paragraphs.exclude(id__in=paragraph_ids).delete()\n\n for paragraph in paragraphs:\n paragraph['document'] = instance\n if 'id' in paragraph:\n instance.paragraphs.filter(id=paragraph['id'])\\\n .update(**paragraph)\n else:\n instance.paragraphs.create(**paragraph)\n\n return instance\n", "path": "euth/documents/serializers.py"}, {"content": "from ckeditor.fields import RichTextField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom contrib.transforms import html_transforms\nfrom euth.comments import models as comment_models\nfrom euth.contrib import base_models\nfrom euth.modules import models as module_models\n\n\nclass Document(module_models.Item):\n name = models.CharField(max_length=120)\n\n def __str__(self):\n return \"{}_document_{}\".format(str(self.module), self.pk)\n\n def clean(self, *args, **kwargs):\n if not self.pk:\n try:\n Document.objects.get(module=self.module)\n raise ValidationError(\n _('Document for that module already exists'))\n except ObjectDoesNotExist:\n super().clean(*args, **kwargs)\n super().clean(*args, **kwargs)\n\n\nclass Paragraph(base_models.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextField()\n weight = models.PositiveIntegerField()\n 
document = models.ForeignKey(Document,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='paragraph',\n object_id_field='object_pk')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_paragraph_{}\".format(str(self.document), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = html_transforms.clean_html_field(\n self.text)\n super().save(*args, **kwargs)\n\n @cached_property\n def project(self):\n return self.document.project\n", "path": "euth/documents/models.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom euth import phases\n\nfrom . import apps, models, views\n\n\nclass CreateDocumentPhase(phases.PhaseContent):\n \"\"\"\n Allows no interaction for participants, only\n creation for moderators.\n \"\"\"\n app = apps.DocumentConfig.label\n phase = 'create_document'\n view = views.DocumentCreateView\n weight = 30\n\n name = _('Create document phase')\n module_name = _('commenting text')\n description = _('Create text for the project.')\n\n features = {}\n\n\nphases.content.register(CreateDocumentPhase())\n\n\nclass CommentPhase(phases.PhaseContent):\n \"\"\"\n Allows only commenting of paragraphs.\n \"\"\"\n app = apps.DocumentConfig.label\n phase = 'comment'\n view = views.DocumentDetailView\n weight = 40\n\n name = _('Comment phase')\n module_name = _('commenting text')\n description = _('Collect comments for the text.')\n\n features = {\n 'comment': (models.Paragraph, models.Document),\n }\n\n\nphases.content.register(CommentPhase())\n", "path": "euth/documents/phases.py"}]}
| 1,624 | 603 |
gh_patches_debug_22070
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invite existing users gives an IntegrityError
See http://sentry.support.akvo-ops.org/rsr/live/group/797/.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/employment.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from django.contrib.auth.models import Group
9 from rest_framework.decorators import api_view, permission_classes
10 from rest_framework.exceptions import PermissionDenied
11 from rest_framework.permissions import IsAuthenticated
12 from rest_framework.response import Response
13 from akvo.rsr.models import Employment
14 from ..serializers import EmploymentSerializer
15 from ..viewsets import BaseRSRViewSet
16
17
18 class EmploymentViewSet(BaseRSRViewSet):
19
20 """Employment resource."""
21
22 queryset = Employment.objects.select_related('organisation')
23 serializer_class = EmploymentSerializer
24
25
26 @api_view(['POST'])
27 @permission_classes((IsAuthenticated, ))
28 def approve_employment(request, pk=None):
29 employment = Employment.objects.get(pk=pk)
30 user = request.user
31
32 if not user.has_perm('rsr.change_employment', employment):
33 raise PermissionDenied
34
35 employment.approve(user)
36
37 return Response({'status': 'employment approved'})
38
39
40 @api_view(['POST'])
41 @permission_classes((IsAuthenticated, ))
42 def set_group(request, pk=None, group_id=None):
43 employment = Employment.objects.get(pk=pk)
44 group = Group.objects.get(pk=group_id)
45 user = request.user
46
47 if not user.has_perm('rsr.change_employment', employment):
48 raise PermissionDenied
49
50 employment.group = group
51 employment.save()
52
53 return Response({'status': 'group set'})
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/views/employment.py b/akvo/rest/views/employment.py
--- a/akvo/rest/views/employment.py
+++ b/akvo/rest/views/employment.py
@@ -6,10 +6,12 @@
"""
from django.contrib.auth.models import Group
+from django.db import IntegrityError
from rest_framework.decorators import api_view, permission_classes
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
+from rest_framework import status
from akvo.rsr.models import Employment
from ..serializers import EmploymentSerializer
from ..viewsets import BaseRSRViewSet
@@ -48,6 +50,10 @@
raise PermissionDenied
employment.group = group
- employment.save()
+ try:
+ employment.save()
+ except IntegrityError:
+ return Response({'status': 'group not set', 'error': 'Employment already exists.'},
+ status=status.HTTP_400_BAD_REQUEST)
return Response({'status': 'group set'})
|
{"golden_diff": "diff --git a/akvo/rest/views/employment.py b/akvo/rest/views/employment.py\n--- a/akvo/rest/views/employment.py\n+++ b/akvo/rest/views/employment.py\n@@ -6,10 +6,12 @@\n \"\"\"\n \n from django.contrib.auth.models import Group\n+from django.db import IntegrityError\n from rest_framework.decorators import api_view, permission_classes\n from rest_framework.exceptions import PermissionDenied\n from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n+from rest_framework import status\n from akvo.rsr.models import Employment\n from ..serializers import EmploymentSerializer\n from ..viewsets import BaseRSRViewSet\n@@ -48,6 +50,10 @@\n raise PermissionDenied\n \n employment.group = group\n- employment.save()\n+ try:\n+ employment.save()\n+ except IntegrityError:\n+ return Response({'status': 'group not set', 'error': 'Employment already exists.'},\n+ status=status.HTTP_400_BAD_REQUEST)\n \n return Response({'status': 'group set'})\n", "issue": "Invite existing users gives an IntegrityError\nSee http://sentry.support.akvo-ops.org/rsr/live/group/797/.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.contrib.auth.models import Group\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom akvo.rsr.models import Employment\nfrom ..serializers import EmploymentSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass EmploymentViewSet(BaseRSRViewSet):\n\n \"\"\"Employment resource.\"\"\"\n\n queryset = Employment.objects.select_related('organisation')\n serializer_class = EmploymentSerializer\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef approve_employment(request, pk=None):\n employment = Employment.objects.get(pk=pk)\n user = request.user\n\n if not user.has_perm('rsr.change_employment', employment):\n raise PermissionDenied\n\n employment.approve(user)\n\n return Response({'status': 'employment approved'})\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef set_group(request, pk=None, group_id=None):\n employment = Employment.objects.get(pk=pk)\n group = Group.objects.get(pk=group_id)\n user = request.user\n\n if not user.has_perm('rsr.change_employment', employment):\n raise PermissionDenied\n\n employment.group = group\n employment.save()\n\n return Response({'status': 'group set'})\n", "path": "akvo/rest/views/employment.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.contrib.auth.models import Group\nfrom django.db import IntegrityError\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom akvo.rsr.models import 
Employment\nfrom ..serializers import EmploymentSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass EmploymentViewSet(BaseRSRViewSet):\n\n \"\"\"Employment resource.\"\"\"\n\n queryset = Employment.objects.select_related('organisation')\n serializer_class = EmploymentSerializer\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef approve_employment(request, pk=None):\n employment = Employment.objects.get(pk=pk)\n user = request.user\n\n if not user.has_perm('rsr.change_employment', employment):\n raise PermissionDenied\n\n employment.approve(user)\n\n return Response({'status': 'employment approved'})\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef set_group(request, pk=None, group_id=None):\n employment = Employment.objects.get(pk=pk)\n group = Group.objects.get(pk=group_id)\n user = request.user\n\n if not user.has_perm('rsr.change_employment', employment):\n raise PermissionDenied\n\n employment.group = group\n try:\n employment.save()\n except IntegrityError:\n return Response({'status': 'group not set', 'error': 'Employment already exists.'},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response({'status': 'group set'})\n", "path": "akvo/rest/views/employment.py"}]}
| 737 | 231 |
gh_patches_debug_3886
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-1169
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dropping scikit-learn dependency < 0.14.1
Dropping it would make it easier to set up the base for the decoder object in nilearn, which currently requires a lot of version-specific backports. #1148
I don't have strong justifications beyond that, though. Let me know if I'm missing something important in nilearn that we need to take into account before dropping 0.13.
FYI: https://packages.debian.org/jessie/python-sklearn
Discussions are welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/version.py`
Content:
```
1 # *- encoding: utf-8 -*-
2 """
3 nilearn version, required package versions, and utilities for checking
4 """
5 # Author: Loïc Estève, Ben Cipollini
6 # License: simplified BSD
7
8 # PEP0440 compatible formatted version, see:
9 # https://www.python.org/dev/peps/pep-0440/
10 #
11 # Generic release markers:
12 # X.Y
13 # X.Y.Z # For bugfix releases
14 #
15 # Admissible pre-release markers:
16 # X.YaN # Alpha release
17 # X.YbN # Beta release
18 # X.YrcN # Release Candidate
19 # X.Y # Final release
20 #
21 # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
22 # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
23 #
24 __version__ = '0.2.5'
25
26 _NILEARN_INSTALL_MSG = 'See %s for installation information.' % (
27 'http://nilearn.github.io/introduction.html#installation')
28
29 # This is a tuple to preserve order, so that dependencies are checked
30 # in some meaningful order (more => less 'core'). We avoid using
31 # collections.OrderedDict to preserve Python 2.6 compatibility.
32 REQUIRED_MODULE_METADATA = (
33 ('numpy', {
34 'min_version': '1.6.1',
35 'required_at_installation': True,
36 'install_info': _NILEARN_INSTALL_MSG}),
37 ('scipy', {
38 'min_version': '0.9.0',
39 'required_at_installation': True,
40 'install_info': _NILEARN_INSTALL_MSG}),
41 ('sklearn', {
42 'min_version': '0.13',
43 'required_at_installation': True,
44 'install_info': _NILEARN_INSTALL_MSG}),
45 ('nibabel', {
46 'min_version': '1.1.0',
47 'required_at_installation': False}))
48
49 OPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'
50
51
52 def _import_module_with_version_check(
53 module_name,
54 minimum_version,
55 install_info=None):
56 """Check that module is installed with a recent enough version
57 """
58 from distutils.version import LooseVersion
59
60 try:
61 module = __import__(module_name)
62 except ImportError as exc:
63 user_friendly_info = ('Module "{0}" could not be found. {1}').format(
64 module_name,
65 install_info or 'Please install it properly to use nilearn.')
66 exc.args += (user_friendly_info,)
67 raise
68
69 # Avoid choking on modules with no __version__ attribute
70 module_version = getattr(module, '__version__', '0.0.0')
71
72 version_too_old = (not LooseVersion(module_version) >=
73 LooseVersion(minimum_version))
74
75 if version_too_old:
76 message = (
77 'A {module_name} version of at least {minimum_version} '
78 'is required to use nilearn. {module_version} was found. '
79 'Please upgrade {module_name}').format(
80 module_name=module_name,
81 minimum_version=minimum_version,
82 module_version=module_version)
83
84 raise ImportError(message)
85
86 return module
87
88
89 def _check_module_dependencies(is_nilearn_installing=False):
90 """Throw an exception if nilearn dependencies are not installed.
91
92 Parameters
93 ----------
94 is_nilearn_installing: boolean
95 if True, only error on missing packages that cannot be auto-installed.
96 if False, error on any missing package.
97
98 Throws
99 -------
100 ImportError
101 """
102
103 for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:
104 if not (is_nilearn_installing and
105 not module_metadata['required_at_installation']):
106 # Skip check only when installing and it's a module that
107 # will be auto-installed.
108 _import_module_with_version_check(
109 module_name=module_name,
110 minimum_version=module_metadata['min_version'],
111 install_info=module_metadata.get('install_info'))
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/version.py b/nilearn/version.py
--- a/nilearn/version.py
+++ b/nilearn/version.py
@@ -39,7 +39,7 @@
'required_at_installation': True,
'install_info': _NILEARN_INSTALL_MSG}),
('sklearn', {
- 'min_version': '0.13',
+ 'min_version': '0.14.1',
'required_at_installation': True,
'install_info': _NILEARN_INSTALL_MSG}),
('nibabel', {
|
{"golden_diff": "diff --git a/nilearn/version.py b/nilearn/version.py\n--- a/nilearn/version.py\n+++ b/nilearn/version.py\n@@ -39,7 +39,7 @@\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('sklearn', {\n- 'min_version': '0.13',\n+ 'min_version': '0.14.1',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('nibabel', {\n", "issue": "Dropping scikit-learn dependency < 0.14.1\nIt makes easier to set base for decoder object in nilearn which actually requires lot of backports for versions. #1148 \n\nI don't have great justifications though. Let me know if I miss something important in nilearn which we need to take into account dropping 0.13.\n\nFYI: https://packages.debian.org/jessie/python-sklearn\n\nDiscussions are welcome.\n\n", "before_files": [{"content": "# *- encoding: utf-8 -*-\n\"\"\"\nnilearn version, required package versions, and utilities for checking\n\"\"\"\n# Author: Lo\u00efc Est\u00e8ve, Ben Cipollini\n# License: simplified BSD\n\n# PEP0440 compatible formatted version, see:\n# https://www.python.org/dev/peps/pep-0440/\n#\n# Generic release markers:\n# X.Y\n# X.Y.Z # For bugfix releases\n#\n# Admissible pre-release markers:\n# X.YaN # Alpha release\n# X.YbN # Beta release\n# X.YrcN # Release Candidate\n# X.Y # Final release\n#\n# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.\n# 'X.Y.dev0' is the canonical version of 'X.Y.dev'\n#\n__version__ = '0.2.5'\n\n_NILEARN_INSTALL_MSG = 'See %s for installation information.' % (\n 'http://nilearn.github.io/introduction.html#installation')\n\n# This is a tuple to preserve order, so that dependencies are checked\n# in some meaningful order (more => less 'core'). We avoid using\n# collections.OrderedDict to preserve Python 2.6 compatibility.\nREQUIRED_MODULE_METADATA = (\n ('numpy', {\n 'min_version': '1.6.1',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('scipy', {\n 'min_version': '0.9.0',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('sklearn', {\n 'min_version': '0.13',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('nibabel', {\n 'min_version': '1.1.0',\n 'required_at_installation': False}))\n\nOPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'\n\n\ndef _import_module_with_version_check(\n module_name,\n minimum_version,\n install_info=None):\n \"\"\"Check that module is installed with a recent enough version\n \"\"\"\n from distutils.version import LooseVersion\n\n try:\n module = __import__(module_name)\n except ImportError as exc:\n user_friendly_info = ('Module \"{0}\" could not be found. {1}').format(\n module_name,\n install_info or 'Please install it properly to use nilearn.')\n exc.args += (user_friendly_info,)\n raise\n\n # Avoid choking on modules with no __version__ attribute\n module_version = getattr(module, '__version__', '0.0.0')\n\n version_too_old = (not LooseVersion(module_version) >=\n LooseVersion(minimum_version))\n\n if version_too_old:\n message = (\n 'A {module_name} version of at least {minimum_version} '\n 'is required to use nilearn. {module_version} was found. 
'\n 'Please upgrade {module_name}').format(\n module_name=module_name,\n minimum_version=minimum_version,\n module_version=module_version)\n\n raise ImportError(message)\n\n return module\n\n\ndef _check_module_dependencies(is_nilearn_installing=False):\n \"\"\"Throw an exception if nilearn dependencies are not installed.\n\n Parameters\n ----------\n is_nilearn_installing: boolean\n if True, only error on missing packages that cannot be auto-installed.\n if False, error on any missing package.\n\n Throws\n -------\n ImportError\n \"\"\"\n\n for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:\n if not (is_nilearn_installing and\n not module_metadata['required_at_installation']):\n # Skip check only when installing and it's a module that\n # will be auto-installed.\n _import_module_with_version_check(\n module_name=module_name,\n minimum_version=module_metadata['min_version'],\n install_info=module_metadata.get('install_info'))\n", "path": "nilearn/version.py"}], "after_files": [{"content": "# *- encoding: utf-8 -*-\n\"\"\"\nnilearn version, required package versions, and utilities for checking\n\"\"\"\n# Author: Lo\u00efc Est\u00e8ve, Ben Cipollini\n# License: simplified BSD\n\n# PEP0440 compatible formatted version, see:\n# https://www.python.org/dev/peps/pep-0440/\n#\n# Generic release markers:\n# X.Y\n# X.Y.Z # For bugfix releases\n#\n# Admissible pre-release markers:\n# X.YaN # Alpha release\n# X.YbN # Beta release\n# X.YrcN # Release Candidate\n# X.Y # Final release\n#\n# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.\n# 'X.Y.dev0' is the canonical version of 'X.Y.dev'\n#\n__version__ = '0.2.5'\n\n_NILEARN_INSTALL_MSG = 'See %s for installation information.' % (\n 'http://nilearn.github.io/introduction.html#installation')\n\n# This is a tuple to preserve order, so that dependencies are checked\n# in some meaningful order (more => less 'core'). We avoid using\n# collections.OrderedDict to preserve Python 2.6 compatibility.\nREQUIRED_MODULE_METADATA = (\n ('numpy', {\n 'min_version': '1.6.1',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('scipy', {\n 'min_version': '0.9.0',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('sklearn', {\n 'min_version': '0.14.1',\n 'required_at_installation': True,\n 'install_info': _NILEARN_INSTALL_MSG}),\n ('nibabel', {\n 'min_version': '1.1.0',\n 'required_at_installation': False}))\n\nOPTIONAL_MATPLOTLIB_MIN_VERSION = '1.1.1'\n\n\ndef _import_module_with_version_check(\n module_name,\n minimum_version,\n install_info=None):\n \"\"\"Check that module is installed with a recent enough version\n \"\"\"\n from distutils.version import LooseVersion\n\n try:\n module = __import__(module_name)\n except ImportError as exc:\n user_friendly_info = ('Module \"{0}\" could not be found. {1}').format(\n module_name,\n install_info or 'Please install it properly to use nilearn.')\n exc.args += (user_friendly_info,)\n raise\n\n # Avoid choking on modules with no __version__ attribute\n module_version = getattr(module, '__version__', '0.0.0')\n\n version_too_old = (not LooseVersion(module_version) >=\n LooseVersion(minimum_version))\n\n if version_too_old:\n message = (\n 'A {module_name} version of at least {minimum_version} '\n 'is required to use nilearn. {module_version} was found. 
'\n 'Please upgrade {module_name}').format(\n module_name=module_name,\n minimum_version=minimum_version,\n module_version=module_version)\n\n raise ImportError(message)\n\n return module\n\n\ndef _check_module_dependencies(is_nilearn_installing=False):\n \"\"\"Throw an exception if nilearn dependencies are not installed.\n\n Parameters\n ----------\n is_nilearn_installing: boolean\n if True, only error on missing packages that cannot be auto-installed.\n if False, error on any missing package.\n\n Throws\n -------\n ImportError\n \"\"\"\n\n for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:\n if not (is_nilearn_installing and\n not module_metadata['required_at_installation']):\n # Skip check only when installing and it's a module that\n # will be auto-installed.\n _import_module_with_version_check(\n module_name=module_name,\n minimum_version=module_metadata['min_version'],\n install_info=module_metadata.get('install_info'))\n", "path": "nilearn/version.py"}]}
| 1,468 | 125 |
gh_patches_debug_19787
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-538
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pages endpoint: skip pages if parent is not visible
If a parent page in the page tree is in any state that makes it invisible in the API, we do not want the children to appear in the pages endpoint. This should allow for easy deactivating of "full chapters".
Pages endpoint: skip pages if parent is not visible
If a parent page in the page tree is in any state that makes it invisible in the API, we do not want the children to appear in the pages endpoint. This should allow for easy deactivating of "full chapters".
--- END ISSUE ---
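As a quick illustration of the behaviour the issue asks for, here is a minimal, self-contained sketch of pruning a page tree so that an invisible parent hides its whole subtree. The `Page` class, its `archived` flag, and `visible_pages` are invented for this sketch and are not the integreat-cms models or API:

```python
# Illustrative sketch only: a generic page tree where a node is emitted
# solely when every ancestor on the path to it is visible.
class Page:
    def __init__(self, name, archived=False, children=None):
        self.name = name
        self.archived = archived
        self.children = children or []


def visible_pages(page):
    """Yield this page and its descendants, pruning a whole subtree
    as soon as one node is not visible."""
    if page.archived:
        return  # hidden parent: skip the entire "chapter"
    yield page
    for child in page.children:
        yield from visible_pages(child)


if __name__ == "__main__":
    tree = Page("root", children=[
        Page("chapter-1", archived=True, children=[Page("1.1"), Page("1.2")]),
        Page("chapter-2", children=[Page("2.1")]),
    ])
    print([p.name for p in visible_pages(tree)])  # ['root', 'chapter-2', '2.1']
```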
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/api/v3/pages.py`
Content:
```
1 from django.http import JsonResponse
2
3 from cms.models import Region
4
5
6 def transform_page(page_translation):
7 if page_translation.page.parent:
8 parent = {
9 "id": page_translation.page.parent.id,
10 "url": page_translation.page.parent.get_translation(
11 page_translation.language.code
12 ).permalink,
13 "path": page_translation.page.parent.get_translation(
14 page_translation.language.code
15 ).slug,
16 }
17 else:
18 parent = None
19 return {
20 "id": page_translation.id,
21 "url": page_translation.permalink,
22 "path": page_translation.slug,
23 "title": page_translation.title,
24 "modified_gmt": page_translation.last_updated,
25 "excerpt": page_translation.text,
26 "content": page_translation.combined_text,
27 "parent": parent,
28 "order": page_translation.page.lft, # use left edge indicator of mptt model for order
29 "available_languages": page_translation.available_languages,
30 "thumbnail": None,
31 "hash": None,
32 }
33
34
35 # pylint: disable=unused-argument
36 def pages(request, region_slug, language_code):
37 region = Region.get_current_region(request)
38 result = []
39 for page in region.pages.all():
40 page_translation = page.get_public_translation(language_code)
41 if page_translation:
42 result.append(transform_page(page_translation))
43 return JsonResponse(
44 result, safe=False
45 ) # Turn off Safe-Mode to allow serializing arrays
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/api/v3/pages.py b/src/api/v3/pages.py
--- a/src/api/v3/pages.py
+++ b/src/api/v3/pages.py
@@ -36,10 +36,20 @@
def pages(request, region_slug, language_code):
region = Region.get_current_region(request)
result = []
- for page in region.pages.all():
+ for page in region.pages.filter(archived=False, parent=None): # get main level
page_translation = page.get_public_translation(language_code)
if page_translation:
result.append(transform_page(page_translation))
+ result = get_children(page, language_code, result)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
+
+
+def get_children(parent, language_code, result):
+ for page in parent.children.filter(archived=False):
+ page_translation = page.get_public_translation(language_code)
+ if page_translation:
+ result.append(transform_page(page_translation))
+ result = get_children(page, language_code, result)
+ return result
|
{"golden_diff": "diff --git a/src/api/v3/pages.py b/src/api/v3/pages.py\n--- a/src/api/v3/pages.py\n+++ b/src/api/v3/pages.py\n@@ -36,10 +36,20 @@\n def pages(request, region_slug, language_code):\n region = Region.get_current_region(request)\n result = []\n- for page in region.pages.all():\n+ for page in region.pages.filter(archived=False, parent=None): # get main level\n page_translation = page.get_public_translation(language_code)\n if page_translation:\n result.append(transform_page(page_translation))\n+ result = get_children(page, language_code, result)\n return JsonResponse(\n result, safe=False\n ) # Turn off Safe-Mode to allow serializing arrays\n+\n+\n+def get_children(parent, language_code, result):\n+ for page in parent.children.filter(archived=False):\n+ page_translation = page.get_public_translation(language_code)\n+ if page_translation:\n+ result.append(transform_page(page_translation))\n+ result = get_children(page, language_code, result)\n+ return result\n", "issue": "Pages endpoint: skip pages if parent is not visible\nIf a parent page in the page tree is in any state that makes it invisible in the API, we do not want the children to appear in the pages endpoint. This should allow for easy deactivating of \"full chapters\". \nPages endpoint: skip pages if parent is not visible\nIf a parent page in the page tree is in any state that makes it invisible in the API, we do not want the children to appear in the pages endpoint. This should allow for easy deactivating of \"full chapters\". \n", "before_files": [{"content": "from django.http import JsonResponse\n\nfrom cms.models import Region\n\n\ndef transform_page(page_translation):\n if page_translation.page.parent:\n parent = {\n \"id\": page_translation.page.parent.id,\n \"url\": page_translation.page.parent.get_translation(\n page_translation.language.code\n ).permalink,\n \"path\": page_translation.page.parent.get_translation(\n page_translation.language.code\n ).slug,\n }\n else:\n parent = None\n return {\n \"id\": page_translation.id,\n \"url\": page_translation.permalink,\n \"path\": page_translation.slug,\n \"title\": page_translation.title,\n \"modified_gmt\": page_translation.last_updated,\n \"excerpt\": page_translation.text,\n \"content\": page_translation.combined_text,\n \"parent\": parent,\n \"order\": page_translation.page.lft, # use left edge indicator of mptt model for order\n \"available_languages\": page_translation.available_languages,\n \"thumbnail\": None,\n \"hash\": None,\n }\n\n\n# pylint: disable=unused-argument\ndef pages(request, region_slug, language_code):\n region = Region.get_current_region(request)\n result = []\n for page in region.pages.all():\n page_translation = page.get_public_translation(language_code)\n if page_translation:\n result.append(transform_page(page_translation))\n return JsonResponse(\n result, safe=False\n ) # Turn off Safe-Mode to allow serializing arrays\n", "path": "src/api/v3/pages.py"}], "after_files": [{"content": "from django.http import JsonResponse\n\nfrom cms.models import Region\n\n\ndef transform_page(page_translation):\n if page_translation.page.parent:\n parent = {\n \"id\": page_translation.page.parent.id,\n \"url\": page_translation.page.parent.get_translation(\n page_translation.language.code\n ).permalink,\n \"path\": page_translation.page.parent.get_translation(\n page_translation.language.code\n ).slug,\n }\n else:\n parent = None\n return {\n \"id\": page_translation.id,\n \"url\": page_translation.permalink,\n \"path\": page_translation.slug,\n 
\"title\": page_translation.title,\n \"modified_gmt\": page_translation.last_updated,\n \"excerpt\": page_translation.text,\n \"content\": page_translation.combined_text,\n \"parent\": parent,\n \"order\": page_translation.page.lft, # use left edge indicator of mptt model for order\n \"available_languages\": page_translation.available_languages,\n \"thumbnail\": None,\n \"hash\": None,\n }\n\n\n# pylint: disable=unused-argument\ndef pages(request, region_slug, language_code):\n region = Region.get_current_region(request)\n result = []\n for page in region.pages.filter(archived=False, parent=None): # get main level\n page_translation = page.get_public_translation(language_code)\n if page_translation:\n result.append(transform_page(page_translation))\n result = get_children(page, language_code, result)\n return JsonResponse(\n result, safe=False\n ) # Turn off Safe-Mode to allow serializing arrays\n\n\ndef get_children(parent, language_code, result):\n for page in parent.children.filter(archived=False):\n page_translation = page.get_public_translation(language_code)\n if page_translation:\n result.append(transform_page(page_translation))\n result = get_children(page, language_code, result)\n return result\n", "path": "src/api/v3/pages.py"}]}
| 764 | 237 |
gh_patches_debug_15390
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-1138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nested attribute access in JS->Python type conversion
Currently the following code fails,
```js
>>> from js import window
>>> window.URL.createObjectURL
Error: Traceback (most recent call last):
File "/lib/python3.7/site-packages/pyodide.py", line 45, in eval_code
return eval(compile(expr, '<eval>', mode='eval'), ns, ns)
File "<eval>", line 1, in <module>
AttributeError: 'JsBoundMethod' object has no attribute 'createObjectURL'
```
(while `window.URL.createObjectURL` is a valid JS object) because nested attributes (i.e. attribute of an attribute) don't seem to be supported. It would have been nice to make it work, though I have not looked at how difficult that would be.
from js import fetch treats fetch as a free function
`fetch` is a member function of `window`.
However, using `from js import fetch` doesn't realize that and leads to the error:
`TypeError: 'fetch' called on an object that does not implement interface Window.`
For Reproducing the Error:
```
%%py
from js import document, Request, fetch, URL
img_tag = document.createElement('img')
req = Request.new('https://i.ibb.co/3f4yJQS/face4.jpg')
def func(response):
return response.blob()
def func2(blob):
objURL = URL.createObjectURL(blob)
img_tag.src = objURL
fetch(req).then(func).then(func2)
document.body.appendChild(img_tag)
```
--- END ISSUE ---
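For readers who hit the `fetch` problem before a fix lands, the usual workaround is to keep the call bound to `window` instead of importing the method on its own. The sketch below assumes a Pyodide session running in a browser (it will not run in plain CPython) and is not part of the patch itself:

```python
from js import window  # available only inside a Pyodide/browser session

# `from js import fetch` hands you fetch detached from window, so calling it
# later fails with "'fetch' called on an object that does not implement
# interface Window". Looking it up through window keeps the binding intact:
response_promise = window.fetch("https://i.ibb.co/3f4yJQS/face4.jpg")
```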
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyodide-py/pyodide/_core.py`
Content:
```
1 # type: ignore
2 import platform
3
4 if platform.system() == "Emscripten":
5 from _pyodide_core import JsProxy, JsBoundMethod, JsException
6 else:
7 # Can add shims here if we are so inclined.
8 class JsException(Exception):
9 """
10 A wrapper around a Javascript Error to allow the Error to be thrown in Python.
11 """
12
13 # Defined in jsproxy.c
14
15 class JsProxy:
16 """A proxy to make a Javascript object behave like a Python object"""
17
18 # Defined in jsproxy.c
19
20 class JsBoundMethod:
21 """A proxy to make it possible to call Javascript bound methods from Python."""
22
23 # Defined in jsproxy.c
24
25
26 __all__ = [JsProxy, JsBoundMethod, JsException]
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyodide-py/pyodide/_core.py b/src/pyodide-py/pyodide/_core.py
--- a/src/pyodide-py/pyodide/_core.py
+++ b/src/pyodide-py/pyodide/_core.py
@@ -2,7 +2,7 @@
import platform
if platform.system() == "Emscripten":
- from _pyodide_core import JsProxy, JsBoundMethod, JsException
+ from _pyodide_core import JsProxy, JsMethod, JsException
else:
# Can add shims here if we are so inclined.
class JsException(Exception):
@@ -17,10 +17,10 @@
# Defined in jsproxy.c
- class JsBoundMethod:
+ class JsMethod:
"""A proxy to make it possible to call Javascript bound methods from Python."""
# Defined in jsproxy.c
-__all__ = [JsProxy, JsBoundMethod, JsException]
+__all__ = [JsProxy, JsMethod, JsException]
|
{"golden_diff": "diff --git a/src/pyodide-py/pyodide/_core.py b/src/pyodide-py/pyodide/_core.py\n--- a/src/pyodide-py/pyodide/_core.py\n+++ b/src/pyodide-py/pyodide/_core.py\n@@ -2,7 +2,7 @@\n import platform\n \n if platform.system() == \"Emscripten\":\n- from _pyodide_core import JsProxy, JsBoundMethod, JsException\n+ from _pyodide_core import JsProxy, JsMethod, JsException\n else:\n # Can add shims here if we are so inclined.\n class JsException(Exception):\n@@ -17,10 +17,10 @@\n \n # Defined in jsproxy.c\n \n- class JsBoundMethod:\n+ class JsMethod:\n \"\"\"A proxy to make it possible to call Javascript bound methods from Python.\"\"\"\n \n # Defined in jsproxy.c\n \n \n-__all__ = [JsProxy, JsBoundMethod, JsException]\n+__all__ = [JsProxy, JsMethod, JsException]\n", "issue": "Nested attribute access in JS->Python type conversion\nCurrently the following code fails,\r\n```js\r\n>>> from js import window\r\n>>> window.URL.createObjectURL\r\nError: Traceback (most recent call last):\r\n File \"/lib/python3.7/site-packages/pyodide.py\", line 45, in eval_code\r\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\r\n File \"<eval>\", line 1, in <module>\r\nAttributeError: 'JsBoundMethod' object has no attribute 'createObjectURL'\r\n```\r\n(while `window.URL.createObjectURL` is a valid JS object) because nested attributes (i.e. attribute of an attribute) don't seem to be supported. It would have been nice to make it work, though I have not looked at how difficult that would be.\nfrom js import fetch treats fetch as a free function\n`fetch` is a member function of `window`.\r\nHowever, using `from js import fetch` doesn't realize that and leads to the error:\r\n\r\n`TypeError: 'fetch' called on an object that does not implement interface Window.`\r\n\r\nFor Reproducing the Error:\r\n```\r\n%%py\r\n\r\nfrom js import document, Request, fetch, URL\r\nimg_tag = document.createElement('img')\r\nreq = Request.new('https://i.ibb.co/3f4yJQS/face4.jpg')\r\n\r\ndef func(response):\r\n return response.blob()\r\n\r\ndef func2(blob):\r\n objURL = URL.createObjectURL(blob)\r\n img_tag.src = objURL\r\n\r\nfetch(req).then(func).then(func2)\r\n\r\ndocument.body.appendChild(img_tag)\r\n```\n", "before_files": [{"content": "# type: ignore\nimport platform\n\nif platform.system() == \"Emscripten\":\n from _pyodide_core import JsProxy, JsBoundMethod, JsException\nelse:\n # Can add shims here if we are so inclined.\n class JsException(Exception):\n \"\"\"\n A wrapper around a Javascript Error to allow the Error to be thrown in Python.\n \"\"\"\n\n # Defined in jsproxy.c\n\n class JsProxy:\n \"\"\"A proxy to make a Javascript object behave like a Python object\"\"\"\n\n # Defined in jsproxy.c\n\n class JsBoundMethod:\n \"\"\"A proxy to make it possible to call Javascript bound methods from Python.\"\"\"\n\n # Defined in jsproxy.c\n\n\n__all__ = [JsProxy, JsBoundMethod, JsException]\n", "path": "src/pyodide-py/pyodide/_core.py"}], "after_files": [{"content": "# type: ignore\nimport platform\n\nif platform.system() == \"Emscripten\":\n from _pyodide_core import JsProxy, JsMethod, JsException\nelse:\n # Can add shims here if we are so inclined.\n class JsException(Exception):\n \"\"\"\n A wrapper around a Javascript Error to allow the Error to be thrown in Python.\n \"\"\"\n\n # Defined in jsproxy.c\n\n class JsProxy:\n \"\"\"A proxy to make a Javascript object behave like a Python object\"\"\"\n\n # Defined in jsproxy.c\n\n class JsMethod:\n \"\"\"A proxy to make it possible to call Javascript bound methods 
from Python.\"\"\"\n\n # Defined in jsproxy.c\n\n\n__all__ = [JsProxy, JsMethod, JsException]\n", "path": "src/pyodide-py/pyodide/_core.py"}]}
| 811 | 235 |
gh_patches_debug_5221
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-1011
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
vds: cannot export sheets containing a date column
**Small description**
It's not possible to save a sheet to a .vds file if said sheet contains a date column.
This results in the error below.
**Expected result**
It should just work(tm).
**Actual result with screenshot**
```stacktrace
Traceback (most recent call last):
File "/nix/store/srkr2wnwq95ylmgiadh28p3jiaadl5yw-visidata-2.4/lib/python3.8/site-packages/visidata/threads.py", line 215, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/nix/store/srkr2wnwq95ylmgiadh28p3jiaadl5yw-visidata-2.4/lib/python3.8/site-packages/visidata/loaders/vds.py", line 32, in save_vds
fp.write(json.dumps(d)+NL)
File "/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type date is not JSON serializable
```
**Steps to reproduce with sample data and a .vd**
❯ cat testsheet.vd
```log
sheet col row longname input keystrokes comment
visidata_menu sheets-stack S open Sheets Stack: join or jump between the active sheets on the current stack
sheets キsheets add-row a append a blank row
sheets name キ edit-cell testsheet e edit contents of current cell
sheets キtestsheet open-row ^J open sheet referenced in current row
testsheet 0 rename-col testcol ^ edit name of current column
testsheet testcol type-date @ set type of current column to date
testsheet add-row a append a blank row
testsheet testcol 0 edit-cell 2021-06-14 e edit contents of current cell
testsheet save-all test.vds g^S save all sheets to given file or directory)
```
**Additional context**
Problem is present on v2.4 and on the develop branch (commit 3350d9fd8c9e64ebf409deae4b31085d12efeb7f)
--- END ISSUE ---
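The failure is easy to reproduce outside VisiData, and the same snippet shows the escape hatch the fix relies on: `json.dumps(..., default=str)` falls back to `str()` for values such as `datetime.date` that the encoder cannot handle. This is a plain-Python illustration, not VisiData code:

```python
import datetime
import json

row = {"testcol": datetime.date(2021, 6, 14)}

try:
    json.dumps(row)  # plain dumps cannot encode a date
except TypeError as exc:
    print("without default:", exc)

# default=str converts unknown objects with str(), so the date is written
# in its ISO form and the row becomes serializable:
print(json.dumps(row, default=str))  # {"testcol": "2021-06-14"}
```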
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/vds.py`
Content:
```
1 'Custom VisiData save format'
2
3 import json
4 from visidata import *
5
6 NL='\n'
7
8 @VisiData.api
9 def open_vds(vd, p):
10 return VdsIndexSheet(p.name, source=p)
11
12
13 @VisiData.api
14 def save_vds(vd, p, *sheets):
15 'Save in custom VisiData format, preserving columns and their attributes.'
16
17 with p.open_text(mode='w') as fp:
18 for vs in sheets:
19 # class and attrs for vs
20 d = { 'name': vs.name, }
21 fp.write('#'+json.dumps(d)+NL)
22
23 # class and attrs for each column in vs
24 for col in vs.visibleCols:
25 d = col.__getstate__()
26 d['col'] = type(col).__name__
27 fp.write('#'+json.dumps(d)+NL)
28
29 with Progress(gerund='saving'):
30 for row in vs.iterdispvals(*vs.visibleCols, format=False):
31 d = {col.name:val for col, val in row.items()}
32 fp.write(json.dumps(d)+NL)
33
34
35 class VdsIndexSheet(IndexSheet):
36 def iterload(self):
37 vs = None
38 with self.source.open_text() as fp:
39 line = fp.readline()
40 while line:
41 if line.startswith('#{'):
42 d = json.loads(line[1:])
43 if 'col' not in d:
44 vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())
45 yield vs
46 line = fp.readline()
47
48
49 class VdsSheet(Sheet):
50 def newRow(self):
51 return {} # rowdef: dict
52
53 def iterload(self):
54 self.colnames = {}
55 self.columns = []
56
57 with self.source.open_text() as fp:
58 fp.seek(self.source_fpos)
59
60 # consume all metadata, create columns
61 line = fp.readline()
62 while line and line.startswith('#{'):
63 d = json.loads(line[1:])
64 if 'col' not in d:
65 raise Exception(d)
66 classname = d.pop('col')
67 if classname == 'Column':
68 classname = 'ItemColumn'
69 d['expr'] = d['name']
70
71 c = globals()[classname](d.pop('name'))
72 self.colnames[c.name] = c
73 self.addColumn(c)
74 for k, v in d.items():
75 setattr(c, k, v)
76
77 line = fp.readline()
78
79 while line and not line.startswith('#{'):
80 d = json.loads(line)
81 yield d
82 line = fp.readline()
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/visidata/loaders/vds.py b/visidata/loaders/vds.py
--- a/visidata/loaders/vds.py
+++ b/visidata/loaders/vds.py
@@ -29,7 +29,7 @@
with Progress(gerund='saving'):
for row in vs.iterdispvals(*vs.visibleCols, format=False):
d = {col.name:val for col, val in row.items()}
- fp.write(json.dumps(d)+NL)
+ fp.write(json.dumps(d, default=str)+NL)
class VdsIndexSheet(IndexSheet):
|
{"golden_diff": "diff --git a/visidata/loaders/vds.py b/visidata/loaders/vds.py\n--- a/visidata/loaders/vds.py\n+++ b/visidata/loaders/vds.py\n@@ -29,7 +29,7 @@\n with Progress(gerund='saving'):\n for row in vs.iterdispvals(*vs.visibleCols, format=False):\n d = {col.name:val for col, val in row.items()}\n- fp.write(json.dumps(d)+NL)\n+ fp.write(json.dumps(d, default=str)+NL)\n \n \n class VdsIndexSheet(IndexSheet):\n", "issue": "vds: cannot export sheets containing a date column\n**Small description**\r\n\r\nIt's not possible to save a sheet to a .vds file if said sheet contains a date column.\r\nThis results in the error below.\r\n\r\n**Expected result**\r\n\r\nIt should just work(tm).\r\n\r\n**Actual result with screenshot**\r\n\r\n```stacktrace\r\nTraceback (most recent call last):\r\n File \"/nix/store/srkr2wnwq95ylmgiadh28p3jiaadl5yw-visidata-2.4/lib/python3.8/site-packages/visidata/threads.py\", line 215, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/nix/store/srkr2wnwq95ylmgiadh28p3jiaadl5yw-visidata-2.4/lib/python3.8/site-packages/visidata/loaders/vds.py\", line 32, in save_vds\r\n fp.write(json.dumps(d)+NL)\r\n File \"/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/__init__.py\", line 231, in dumps\r\n return _default_encoder.encode(obj)\r\n File \"/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py\", line 199, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py\", line 257, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/nix/store/4s0h5aawbap3xhldxhcijvl26751qrjr-python3-3.8.9/lib/python3.8/json/encoder.py\", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nTypeError: Object of type date is not JSON serializable\r\n```\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\n\u276f cat testsheet.vd\r\n```log\r\nsheet\tcol\trow\tlongname\tinput\tkeystrokes\tcomment\r\nvisidata_menu\t\t\tsheets-stack\t\tS\topen Sheets Stack: join or jump between the active sheets on the current stack\r\nsheets\t\t\u30adsheets\tadd-row\t\ta\tappend a blank row\r\nsheets\tname\t\u30ad\tedit-cell\ttestsheet\te\tedit contents of current cell\r\nsheets\t\t\u30adtestsheet\topen-row\t\t^J\topen sheet referenced in current row\r\ntestsheet\t0\t\trename-col\ttestcol\t^\tedit name of current column\r\ntestsheet\ttestcol\t\ttype-date\t\t@\tset type of current column to date\r\ntestsheet\t\t\tadd-row\t\ta\tappend a blank row\r\ntestsheet\ttestcol\t0\tedit-cell\t2021-06-14\te\tedit contents of current cell\r\ntestsheet\t\t\tsave-all\ttest.vds\tg^S\tsave all sheets to given file or directory)\r\n```\r\n\r\n\r\n**Additional context**\r\n\r\nProblem is present on v2.4 and on the develop branch (commit 3350d9fd8c9e64ebf409deae4b31085d12efeb7f)\n", "before_files": [{"content": "'Custom VisiData save format'\n\nimport json\nfrom visidata import *\n\nNL='\\n'\n\[email protected]\ndef open_vds(vd, p):\n return VdsIndexSheet(p.name, source=p)\n\n\[email protected]\ndef save_vds(vd, p, *sheets):\n 'Save in custom VisiData format, preserving columns and their attributes.'\n\n with p.open_text(mode='w') as fp:\n for vs in sheets:\n # class and attrs for vs\n d = { 'name': vs.name, }\n fp.write('#'+json.dumps(d)+NL)\n\n # class and attrs for each column in vs\n for col in vs.visibleCols:\n d = col.__getstate__()\n d['col'] = type(col).__name__\n fp.write('#'+json.dumps(d)+NL)\n\n 
with Progress(gerund='saving'):\n for row in vs.iterdispvals(*vs.visibleCols, format=False):\n d = {col.name:val for col, val in row.items()}\n fp.write(json.dumps(d)+NL)\n\n\nclass VdsIndexSheet(IndexSheet):\n def iterload(self):\n vs = None\n with self.source.open_text() as fp:\n line = fp.readline()\n while line:\n if line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())\n yield vs\n line = fp.readline()\n\n\nclass VdsSheet(Sheet):\n def newRow(self):\n return {} # rowdef: dict\n\n def iterload(self):\n self.colnames = {}\n self.columns = []\n\n with self.source.open_text() as fp:\n fp.seek(self.source_fpos)\n\n # consume all metadata, create columns\n line = fp.readline()\n while line and line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n raise Exception(d)\n classname = d.pop('col')\n if classname == 'Column':\n classname = 'ItemColumn'\n d['expr'] = d['name']\n\n c = globals()[classname](d.pop('name'))\n self.colnames[c.name] = c\n self.addColumn(c)\n for k, v in d.items():\n setattr(c, k, v)\n\n line = fp.readline()\n\n while line and not line.startswith('#{'):\n d = json.loads(line)\n yield d\n line = fp.readline()\n", "path": "visidata/loaders/vds.py"}], "after_files": [{"content": "'Custom VisiData save format'\n\nimport json\nfrom visidata import *\n\nNL='\\n'\n\[email protected]\ndef open_vds(vd, p):\n return VdsIndexSheet(p.name, source=p)\n\n\[email protected]\ndef save_vds(vd, p, *sheets):\n 'Save in custom VisiData format, preserving columns and their attributes.'\n\n with p.open_text(mode='w') as fp:\n for vs in sheets:\n # class and attrs for vs\n d = { 'name': vs.name, }\n fp.write('#'+json.dumps(d)+NL)\n\n # class and attrs for each column in vs\n for col in vs.visibleCols:\n d = col.__getstate__()\n d['col'] = type(col).__name__\n fp.write('#'+json.dumps(d)+NL)\n\n with Progress(gerund='saving'):\n for row in vs.iterdispvals(*vs.visibleCols, format=False):\n d = {col.name:val for col, val in row.items()}\n fp.write(json.dumps(d, default=str)+NL)\n\n\nclass VdsIndexSheet(IndexSheet):\n def iterload(self):\n vs = None\n with self.source.open_text() as fp:\n line = fp.readline()\n while line:\n if line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n vs = VdsSheet(d.pop('name'), columns=[], source=self.source, source_fpos=fp.tell())\n yield vs\n line = fp.readline()\n\n\nclass VdsSheet(Sheet):\n def newRow(self):\n return {} # rowdef: dict\n\n def iterload(self):\n self.colnames = {}\n self.columns = []\n\n with self.source.open_text() as fp:\n fp.seek(self.source_fpos)\n\n # consume all metadata, create columns\n line = fp.readline()\n while line and line.startswith('#{'):\n d = json.loads(line[1:])\n if 'col' not in d:\n raise Exception(d)\n classname = d.pop('col')\n if classname == 'Column':\n classname = 'ItemColumn'\n d['expr'] = d['name']\n\n c = globals()[classname](d.pop('name'))\n self.colnames[c.name] = c\n self.addColumn(c)\n for k, v in d.items():\n setattr(c, k, v)\n\n line = fp.readline()\n\n while line and not line.startswith('#{'):\n d = json.loads(line)\n yield d\n line = fp.readline()\n", "path": "visidata/loaders/vds.py"}]}
| 1,750 | 129 |
gh_patches_debug_33630
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possibly incorrect hook names?
Going through the docs, I see two rather unusual hook names: `construct_wagtail_edit_bird` and `construct_whitelister_element_rules`.
The first seems like a placeholder name that accidentally made it out of the alpha stage. Based on the docs, it seems like it should be called `construct_wagtail_userbar`.
The second seems like a straight up typo. I've never heard the word "whitelister" before. I'm pretty sure this hook should be called `construct_whitelisted_element_rules`.
Changing the names of hooks is obviously a major undertaking, since some code bases will have already implemented them. But adding the new names and deprecating the old ones for a few releases should be entirely possible. I'd be happy to do this in a pull request, since it's only a dozen or lines of code to change, but I don't really know how wagtail handles deprecating old APIs.
Possibly incorrect hook names?
Going through the docs, I see two rather unusual hook names: `construct_wagtail_edit_bird` and `construct_whitelister_element_rules`.
The first seems like a placeholder name that accidentally made it out of the alpha stage. Based on the docs, it seems like it should be called `construct_wagtail_userbar`.
The second seems like a straight up typo. I've never heard the word "whitelister" before. I'm pretty sure this hook should be called `construct_whitelisted_element_rules`.
Changing the names of hooks is obviously a major undertaking, since some code bases will have already implemented them. But adding the new names and deprecating the old ones for a few releases should be entirely possible. I'd be happy to do this in a pull request, since it's only a dozen or lines of code to change, but I don't really know how wagtail handles deprecating old APIs.
--- END ISSUE ---
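The usual shape of such a rename is to run hooks registered under the new name and keep the old name working for a while behind a deprecation warning. The registry, hook names, and `run_userbar_hooks` below are invented for illustration and are not Wagtail's actual hook machinery:

```python
import warnings

# Toy hook registry standing in for a real one.
_hooks = {"construct_wagtail_edit_bird": [], "construct_wagtail_userbar": []}


def get_hooks(name):
    return _hooks.get(name, [])


def run_userbar_hooks(request, items):
    # Hooks registered under the new name run as before.
    for fn in get_hooks("construct_wagtail_userbar"):
        fn(request, items)
    # Hooks still registered under the old name keep working, but warn.
    for fn in get_hooks("construct_wagtail_edit_bird"):
        warnings.warn(
            "'construct_wagtail_edit_bird' was renamed to "
            "'construct_wagtail_userbar'; please update %s" % fn.__name__,
            DeprecationWarning,
        )
        fn(request, items)


def legacy_hook(request, items):
    items.append("added by a hook using the old name")


_hooks["construct_wagtail_edit_bird"].append(legacy_hook)
run_userbar_hooks(request=None, items=[])  # still runs, with a DeprecationWarning
```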
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/views/userbar.py`
Content:
```
1 from django.shortcuts import render
2 from django.contrib.auth.decorators import permission_required
3
4 from wagtail.wagtailadmin.userbar import EditPageItem, AddPageItem, ApproveModerationEditPageItem, RejectModerationEditPageItem
5 from wagtail.wagtailcore import hooks
6 from wagtail.wagtailcore.models import Page, PageRevision
7
8
9 @permission_required('wagtailadmin.access_admin', raise_exception=True)
10 def for_frontend(request, page_id):
11 items = [
12 EditPageItem(Page.objects.get(id=page_id)),
13 AddPageItem(Page.objects.get(id=page_id)),
14 ]
15
16 for fn in hooks.get_hooks('construct_wagtail_edit_bird'):
17 fn(request, items)
18
19 # Render the items
20 rendered_items = [item.render(request) for item in items]
21
22 # Remove any unrendered items
23 rendered_items = [item for item in rendered_items if item]
24
25 # Render the edit bird
26 return render(request, 'wagtailadmin/userbar/base.html', {
27 'items': rendered_items,
28 })
29
30
31 @permission_required('wagtailadmin.access_admin', raise_exception=True)
32 def for_moderation(request, revision_id):
33 items = [
34 EditPageItem(PageRevision.objects.get(id=revision_id).page),
35 AddPageItem(PageRevision.objects.get(id=revision_id).page),
36 ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),
37 RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),
38 ]
39
40 for fn in hooks.get_hooks('construct_wagtail_edit_bird'):
41 fn(request, items)
42
43 # Render the items
44 rendered_items = [item.render(request) for item in items]
45
46 # Remove any unrendered items
47 rendered_items = [item for item in rendered_items if item]
48
49 # Render the edit bird
50 return render(request, 'wagtailadmin/userbar/base.html', {
51 'items': rendered_items,
52 })
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailadmin/views/userbar.py b/wagtail/wagtailadmin/views/userbar.py
--- a/wagtail/wagtailadmin/views/userbar.py
+++ b/wagtail/wagtailadmin/views/userbar.py
@@ -1,3 +1,5 @@
+import warnings
+
from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
@@ -5,6 +7,8 @@
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page, PageRevision
+from wagtail.utils.deprecation import RemovedInWagtail11Warning
+
@permission_required('wagtailadmin.access_admin', raise_exception=True)
def for_frontend(request, page_id):
@@ -13,7 +17,10 @@
AddPageItem(Page.objects.get(id=page_id)),
]
- for fn in hooks.get_hooks('construct_wagtail_edit_bird'):
+ # TODO: Remove in 1.1 release
+ run_deprecated_edit_bird_hook(request, items)
+
+ for fn in hooks.get_hooks('construct_wagtail_userbar'):
fn(request, items)
# Render the items
@@ -37,7 +44,10 @@
RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),
]
- for fn in hooks.get_hooks('construct_wagtail_edit_bird'):
+ # TODO: Remove in 1.1 release
+ run_deprecated_edit_bird_hook(request, items)
+
+ for fn in hooks.get_hooks('construct_wagtail_userbar'):
fn(request, items)
# Render the items
@@ -50,3 +60,13 @@
return render(request, 'wagtailadmin/userbar/base.html', {
'items': rendered_items,
})
+
+
+def run_deprecated_edit_bird_hook(request, items):
+ for fn in hooks.get_hooks('construct_wagtail_edit_bird'):
+ fn(request, items)
+
+ warnings.warn(
+ "The 'construct_wagtail_edit_bird' hook has been renamed to 'construct_wagtail_userbar'."
+ "Please update function '%s' in '%s'." % (fn.__name__, fn.__module__), RemovedInWagtail11Warning
+ )
|
{"golden_diff": "diff --git a/wagtail/wagtailadmin/views/userbar.py b/wagtail/wagtailadmin/views/userbar.py\n--- a/wagtail/wagtailadmin/views/userbar.py\n+++ b/wagtail/wagtailadmin/views/userbar.py\n@@ -1,3 +1,5 @@\n+import warnings\n+\n from django.shortcuts import render\n from django.contrib.auth.decorators import permission_required\n \n@@ -5,6 +7,8 @@\n from wagtail.wagtailcore import hooks\n from wagtail.wagtailcore.models import Page, PageRevision\n \n+from wagtail.utils.deprecation import RemovedInWagtail11Warning\n+\n \n @permission_required('wagtailadmin.access_admin', raise_exception=True)\n def for_frontend(request, page_id):\n@@ -13,7 +17,10 @@\n AddPageItem(Page.objects.get(id=page_id)),\n ]\n \n- for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n+ # TODO: Remove in 1.1 release\n+ run_deprecated_edit_bird_hook(request, items)\n+\n+ for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n \n # Render the items\n@@ -37,7 +44,10 @@\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n \n- for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n+ # TODO: Remove in 1.1 release\n+ run_deprecated_edit_bird_hook(request, items)\n+\n+ for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n \n # Render the items\n@@ -50,3 +60,13 @@\n return render(request, 'wagtailadmin/userbar/base.html', {\n 'items': rendered_items,\n })\n+\n+\n+def run_deprecated_edit_bird_hook(request, items):\n+ for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n+ fn(request, items)\n+\n+ warnings.warn(\n+ \"The 'construct_wagtail_edit_bird' hook has been renamed to 'construct_wagtail_userbar'.\"\n+ \"Please update function '%s' in '%s'.\" % (fn.__name__, fn.__module__), RemovedInWagtail11Warning\n+ )\n", "issue": "Possibly incorrect hook names?\nGoing through the docs, I see two rather unusual hook names: `construct_wagtail_edit_bird` and `construct_whitelister_element_rules`. \n\nThe first seems like a placeholder name that accidentally made it out of the alpha stage. Based on the docs, it seems like it should be called `construct_wagtail_userbar`.\n\nThe second seems like a straight up typo. I've never heard the word \"whitelister\" before. I'm pretty sure this hook should be called `construct_whitelisted_element_rules`.\n\nChanging the names of hooks is obviously a major undertaking, since some code bases will have already implemented them. But adding the new names and deprecating the old ones for a few releases should be entirely possible. I'd be happy to do this in a pull request, since it's only a dozen or lines of code to change, but I don't really know how wagtail handles deprecating old APIs.\n\nPossibly incorrect hook names?\nGoing through the docs, I see two rather unusual hook names: `construct_wagtail_edit_bird` and `construct_whitelister_element_rules`. \n\nThe first seems like a placeholder name that accidentally made it out of the alpha stage. Based on the docs, it seems like it should be called `construct_wagtail_userbar`.\n\nThe second seems like a straight up typo. I've never heard the word \"whitelister\" before. I'm pretty sure this hook should be called `construct_whitelisted_element_rules`.\n\nChanging the names of hooks is obviously a major undertaking, since some code bases will have already implemented them. But adding the new names and deprecating the old ones for a few releases should be entirely possible. 
I'd be happy to do this in a pull request, since it's only a dozen or lines of code to change, but I don't really know how wagtail handles deprecating old APIs.\n\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import permission_required\n\nfrom wagtail.wagtailadmin.userbar import EditPageItem, AddPageItem, ApproveModerationEditPageItem, RejectModerationEditPageItem\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import Page, PageRevision\n\n\n@permission_required('wagtailadmin.access_admin', raise_exception=True)\ndef for_frontend(request, page_id):\n items = [\n EditPageItem(Page.objects.get(id=page_id)),\n AddPageItem(Page.objects.get(id=page_id)),\n ]\n\n for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the edit bird\n return render(request, 'wagtailadmin/userbar/base.html', {\n 'items': rendered_items,\n })\n\n\n@permission_required('wagtailadmin.access_admin', raise_exception=True)\ndef for_moderation(request, revision_id):\n items = [\n EditPageItem(PageRevision.objects.get(id=revision_id).page),\n AddPageItem(PageRevision.objects.get(id=revision_id).page),\n ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n\n for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the edit bird\n return render(request, 'wagtailadmin/userbar/base.html', {\n 'items': rendered_items,\n })\n", "path": "wagtail/wagtailadmin/views/userbar.py"}], "after_files": [{"content": "import warnings\n\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import permission_required\n\nfrom wagtail.wagtailadmin.userbar import EditPageItem, AddPageItem, ApproveModerationEditPageItem, RejectModerationEditPageItem\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import Page, PageRevision\n\nfrom wagtail.utils.deprecation import RemovedInWagtail11Warning\n\n\n@permission_required('wagtailadmin.access_admin', raise_exception=True)\ndef for_frontend(request, page_id):\n items = [\n EditPageItem(Page.objects.get(id=page_id)),\n AddPageItem(Page.objects.get(id=page_id)),\n ]\n\n # TODO: Remove in 1.1 release\n run_deprecated_edit_bird_hook(request, items)\n\n for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the edit bird\n return render(request, 'wagtailadmin/userbar/base.html', {\n 'items': rendered_items,\n })\n\n\n@permission_required('wagtailadmin.access_admin', raise_exception=True)\ndef for_moderation(request, revision_id):\n items = [\n EditPageItem(PageRevision.objects.get(id=revision_id).page),\n AddPageItem(PageRevision.objects.get(id=revision_id).page),\n ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n\n # TODO: Remove in 1.1 release\n 
run_deprecated_edit_bird_hook(request, items)\n\n for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the edit bird\n return render(request, 'wagtailadmin/userbar/base.html', {\n 'items': rendered_items,\n })\n\n\ndef run_deprecated_edit_bird_hook(request, items):\n for fn in hooks.get_hooks('construct_wagtail_edit_bird'):\n fn(request, items)\n\n warnings.warn(\n \"The 'construct_wagtail_edit_bird' hook has been renamed to 'construct_wagtail_userbar'.\"\n \"Please update function '%s' in '%s'.\" % (fn.__name__, fn.__module__), RemovedInWagtail11Warning\n )\n", "path": "wagtail/wagtailadmin/views/userbar.py"}]}
| 1,182 | 514 |
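The after-patch `userbar.py` above keeps the old `construct_wagtail_edit_bird` hook working while warning its registrants to migrate to `construct_wagtail_userbar`. A minimal, hedged sketch of that back-compat pattern follows; the warning class and the `get_hooks` callable are stand-ins rather than Wagtail's real implementations.

```python
# Minimal sketch of the back-compat pattern from the patch above: keep calling the
# old hook name, but warn each registered function to move to the new one.
# RemovedInWagtail11Warning and get_hooks are stand-ins, not Wagtail's actual code.
import warnings


class RemovedInWagtail11Warning(DeprecationWarning):
    """Stand-in for wagtail.utils.deprecation.RemovedInWagtail11Warning."""


def run_deprecated_edit_bird_hook(get_hooks, request, items):
    for fn in get_hooks('construct_wagtail_edit_bird'):   # old, deprecated hook name
        fn(request, items)
        warnings.warn(
            "The 'construct_wagtail_edit_bird' hook has been renamed to "
            "'construct_wagtail_userbar'. Please update function '%s' in '%s'."
            % (fn.__name__, fn.__module__),
            RemovedInWagtail11Warning,
        )
```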
gh_patches_debug_39227
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-3930
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use astropy instead of pyfits to read FITS images
pyfits is not currently installable on 3.5, and it looks like AstroPy is more actively maintained.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/io/_plugins/fits_plugin.py`
Content:
```
1 __all__ = ['imread', 'imread_collection']
2
3 import skimage.io as io
4
5 try:
6 from astropy.io import fits as pyfits
7 except ImportError:
8 try:
9 import pyfits
10 except ImportError:
11 raise ImportError(
12 "PyFITS could not be found. Please refer to\n"
13 "http://www.stsci.edu/resources/software_hardware/pyfits\n"
14 "for further instructions.")
15
16
17 def imread(fname, dtype=None):
18 """Load an image from a FITS file.
19
20 Parameters
21 ----------
22 fname : string
23 Image file name, e.g. ``test.fits``.
24 dtype : dtype, optional
25 For FITS, this argument is ignored because Stefan is planning on
26 removing the dtype argument from imread anyway.
27
28 Returns
29 -------
30 img_array : ndarray
31 Unlike plugins such as PIL, where different color bands/channels are
32 stored in the third dimension, FITS images are greyscale-only and can
33 be N-dimensional, so an array of the native FITS dimensionality is
34 returned, without color channels.
35
36 Currently if no image is found in the file, None will be returned
37
38 Notes
39 -----
40
41 Currently FITS ``imread()`` always returns the first image extension when
42 given a Multi-Extension FITS file; use ``imread_collection()`` (which does
43 lazy loading) to get all the extensions at once.
44
45 """
46
47 hdulist = pyfits.open(fname)
48
49 # Iterate over FITS image extensions, ignoring any other extension types
50 # such as binary tables, and get the first image data array:
51 img_array = None
52 for hdu in hdulist:
53 if isinstance(hdu, pyfits.ImageHDU) or \
54 isinstance(hdu, pyfits.PrimaryHDU):
55 if hdu.data is not None:
56 img_array = hdu.data
57 break
58 hdulist.close()
59
60 return img_array
61
62
63 def imread_collection(load_pattern, conserve_memory=True):
64 """Load a collection of images from one or more FITS files
65
66 Parameters
67 ----------
68 load_pattern : str or list
69 List of extensions to load. Filename globbing is currently
70 unsupported.
71 converve_memory : bool
72 If True, never keep more than one in memory at a specific
73 time. Otherwise, images will be cached once they are loaded.
74
75 Returns
76 -------
77
78 ic : ImageCollection
79 Collection of images.
80
81 """
82
83 intype = type(load_pattern)
84 if intype is not list and intype is not str:
85 raise TypeError("Input must be a filename or list of filenames")
86
87 # Ensure we have a list, otherwise we'll end up iterating over the string:
88 if intype is not list:
89 load_pattern = [load_pattern]
90
91 # Generate a list of filename/extension pairs by opening the list of
92 # files and finding the image extensions in each one:
93 ext_list = []
94 for filename in load_pattern:
95 hdulist = pyfits.open(filename)
96 for n, hdu in zip(range(len(hdulist)), hdulist):
97 if isinstance(hdu, pyfits.ImageHDU) or \
98 isinstance(hdu, pyfits.PrimaryHDU):
99 # Ignore (primary) header units with no data (use '.size'
100 # rather than '.data' to avoid actually loading the image):
101 try:
102 data_size = hdu.size()
103 except TypeError: # (size changed to int in PyFITS 3.1)
104 data_size = hdu.size
105 if data_size > 0:
106 ext_list.append((filename, n))
107 hdulist.close()
108
109 return io.ImageCollection(ext_list, load_func=FITSFactory,
110 conserve_memory=conserve_memory)
111
112
113 def FITSFactory(image_ext):
114 """Load an image extension from a FITS file and return a NumPy array
115
116 Parameters
117 ----------
118
119 image_ext : tuple
120 FITS extension to load, in the format ``(filename, ext_num)``.
121 The FITS ``(extname, extver)`` format is unsupported, since this
122 function is not called directly by the user and
123 ``imread_collection()`` does the work of figuring out which
124 extensions need loading.
125
126 """
127
128 # Expect a length-2 tuple with a filename as the first element:
129 if not isinstance(image_ext, tuple):
130 raise TypeError("Expected a tuple")
131
132 if len(image_ext) != 2:
133 raise ValueError("Expected a tuple of length 2")
134
135 filename = image_ext[0]
136 extnum = image_ext[1]
137
138 if type(filename) is not str or type(extnum) is not int:
139 raise ValueError("Expected a (filename, extension) tuple")
140
141 hdulist = pyfits.open(filename)
142
143 data = hdulist[extnum].data
144
145 hdulist.close()
146
147 if data is None:
148 raise RuntimeError(
149 "Extension %d of %s has no data" % (extnum, filename))
150
151 return data
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/io/_plugins/fits_plugin.py b/skimage/io/_plugins/fits_plugin.py
--- a/skimage/io/_plugins/fits_plugin.py
+++ b/skimage/io/_plugins/fits_plugin.py
@@ -3,15 +3,12 @@
import skimage.io as io
try:
- from astropy.io import fits as pyfits
+ from astropy.io import fits
except ImportError:
- try:
- import pyfits
- except ImportError:
- raise ImportError(
- "PyFITS could not be found. Please refer to\n"
- "http://www.stsci.edu/resources/software_hardware/pyfits\n"
- "for further instructions.")
+ raise ImportError(
+ "Astropy could not be found. It is needed to read FITS files.\n"
+ "Please refer to http://www.astropy.org for installation\n"
+ "instructions.")
def imread(fname, dtype=None):
@@ -44,14 +41,14 @@
"""
- hdulist = pyfits.open(fname)
+ hdulist = fits.open(fname)
# Iterate over FITS image extensions, ignoring any other extension types
# such as binary tables, and get the first image data array:
img_array = None
for hdu in hdulist:
- if isinstance(hdu, pyfits.ImageHDU) or \
- isinstance(hdu, pyfits.PrimaryHDU):
+ if isinstance(hdu, fits.ImageHDU) or \
+ isinstance(hdu, fits.PrimaryHDU):
if hdu.data is not None:
img_array = hdu.data
break
@@ -92,16 +89,16 @@
# files and finding the image extensions in each one:
ext_list = []
for filename in load_pattern:
- hdulist = pyfits.open(filename)
+ hdulist = fits.open(filename)
for n, hdu in zip(range(len(hdulist)), hdulist):
- if isinstance(hdu, pyfits.ImageHDU) or \
- isinstance(hdu, pyfits.PrimaryHDU):
+ if isinstance(hdu, fits.ImageHDU) or \
+ isinstance(hdu, fits.PrimaryHDU):
# Ignore (primary) header units with no data (use '.size'
# rather than '.data' to avoid actually loading the image):
try:
+ data_size = hdu.size # size is int in Astropy 3.1.2
+ except TypeError:
data_size = hdu.size()
- except TypeError: # (size changed to int in PyFITS 3.1)
- data_size = hdu.size
if data_size > 0:
ext_list.append((filename, n))
hdulist.close()
@@ -138,7 +135,7 @@
if type(filename) is not str or type(extnum) is not int:
raise ValueError("Expected a (filename, extension) tuple")
- hdulist = pyfits.open(filename)
+ hdulist = fits.open(filename)
data = hdulist[extnum].data
|
{"golden_diff": "diff --git a/skimage/io/_plugins/fits_plugin.py b/skimage/io/_plugins/fits_plugin.py\n--- a/skimage/io/_plugins/fits_plugin.py\n+++ b/skimage/io/_plugins/fits_plugin.py\n@@ -3,15 +3,12 @@\n import skimage.io as io\n \n try:\n- from astropy.io import fits as pyfits\n+ from astropy.io import fits\n except ImportError:\n- try:\n- import pyfits\n- except ImportError:\n- raise ImportError(\n- \"PyFITS could not be found. Please refer to\\n\"\n- \"http://www.stsci.edu/resources/software_hardware/pyfits\\n\"\n- \"for further instructions.\")\n+ raise ImportError(\n+ \"Astropy could not be found. It is needed to read FITS files.\\n\"\n+ \"Please refer to http://www.astropy.org for installation\\n\"\n+ \"instructions.\")\n \n \n def imread(fname, dtype=None):\n@@ -44,14 +41,14 @@\n \n \"\"\"\n \n- hdulist = pyfits.open(fname)\n+ hdulist = fits.open(fname)\n \n # Iterate over FITS image extensions, ignoring any other extension types\n # such as binary tables, and get the first image data array:\n img_array = None\n for hdu in hdulist:\n- if isinstance(hdu, pyfits.ImageHDU) or \\\n- isinstance(hdu, pyfits.PrimaryHDU):\n+ if isinstance(hdu, fits.ImageHDU) or \\\n+ isinstance(hdu, fits.PrimaryHDU):\n if hdu.data is not None:\n img_array = hdu.data\n break\n@@ -92,16 +89,16 @@\n # files and finding the image extensions in each one:\n ext_list = []\n for filename in load_pattern:\n- hdulist = pyfits.open(filename)\n+ hdulist = fits.open(filename)\n for n, hdu in zip(range(len(hdulist)), hdulist):\n- if isinstance(hdu, pyfits.ImageHDU) or \\\n- isinstance(hdu, pyfits.PrimaryHDU):\n+ if isinstance(hdu, fits.ImageHDU) or \\\n+ isinstance(hdu, fits.PrimaryHDU):\n # Ignore (primary) header units with no data (use '.size'\n # rather than '.data' to avoid actually loading the image):\n try:\n+ data_size = hdu.size # size is int in Astropy 3.1.2\n+ except TypeError:\n data_size = hdu.size()\n- except TypeError: # (size changed to int in PyFITS 3.1)\n- data_size = hdu.size\n if data_size > 0:\n ext_list.append((filename, n))\n hdulist.close()\n@@ -138,7 +135,7 @@\n if type(filename) is not str or type(extnum) is not int:\n raise ValueError(\"Expected a (filename, extension) tuple\")\n \n- hdulist = pyfits.open(filename)\n+ hdulist = fits.open(filename)\n \n data = hdulist[extnum].data\n", "issue": "Use astropy instead of pyfits to read FITS images\npyfits is not currently installable on 3.5, and it looks like AstroPy is more actively maintained.\n", "before_files": [{"content": "__all__ = ['imread', 'imread_collection']\n\nimport skimage.io as io\n\ntry:\n from astropy.io import fits as pyfits\nexcept ImportError:\n try:\n import pyfits\n except ImportError:\n raise ImportError(\n \"PyFITS could not be found. Please refer to\\n\"\n \"http://www.stsci.edu/resources/software_hardware/pyfits\\n\"\n \"for further instructions.\")\n\n\ndef imread(fname, dtype=None):\n \"\"\"Load an image from a FITS file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. 
``test.fits``.\n dtype : dtype, optional\n For FITS, this argument is ignored because Stefan is planning on\n removing the dtype argument from imread anyway.\n\n Returns\n -------\n img_array : ndarray\n Unlike plugins such as PIL, where different color bands/channels are\n stored in the third dimension, FITS images are greyscale-only and can\n be N-dimensional, so an array of the native FITS dimensionality is\n returned, without color channels.\n\n Currently if no image is found in the file, None will be returned\n\n Notes\n -----\n\n Currently FITS ``imread()`` always returns the first image extension when\n given a Multi-Extension FITS file; use ``imread_collection()`` (which does\n lazy loading) to get all the extensions at once.\n\n \"\"\"\n\n hdulist = pyfits.open(fname)\n\n # Iterate over FITS image extensions, ignoring any other extension types\n # such as binary tables, and get the first image data array:\n img_array = None\n for hdu in hdulist:\n if isinstance(hdu, pyfits.ImageHDU) or \\\n isinstance(hdu, pyfits.PrimaryHDU):\n if hdu.data is not None:\n img_array = hdu.data\n break\n hdulist.close()\n\n return img_array\n\n\ndef imread_collection(load_pattern, conserve_memory=True):\n \"\"\"Load a collection of images from one or more FITS files\n\n Parameters\n ----------\n load_pattern : str or list\n List of extensions to load. Filename globbing is currently\n unsupported.\n converve_memory : bool\n If True, never keep more than one in memory at a specific\n time. Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n\n ic : ImageCollection\n Collection of images.\n\n \"\"\"\n\n intype = type(load_pattern)\n if intype is not list and intype is not str:\n raise TypeError(\"Input must be a filename or list of filenames\")\n\n # Ensure we have a list, otherwise we'll end up iterating over the string:\n if intype is not list:\n load_pattern = [load_pattern]\n\n # Generate a list of filename/extension pairs by opening the list of\n # files and finding the image extensions in each one:\n ext_list = []\n for filename in load_pattern:\n hdulist = pyfits.open(filename)\n for n, hdu in zip(range(len(hdulist)), hdulist):\n if isinstance(hdu, pyfits.ImageHDU) or \\\n isinstance(hdu, pyfits.PrimaryHDU):\n # Ignore (primary) header units with no data (use '.size'\n # rather than '.data' to avoid actually loading the image):\n try:\n data_size = hdu.size()\n except TypeError: # (size changed to int in PyFITS 3.1)\n data_size = hdu.size\n if data_size > 0:\n ext_list.append((filename, n))\n hdulist.close()\n\n return io.ImageCollection(ext_list, load_func=FITSFactory,\n conserve_memory=conserve_memory)\n\n\ndef FITSFactory(image_ext):\n \"\"\"Load an image extension from a FITS file and return a NumPy array\n\n Parameters\n ----------\n\n image_ext : tuple\n FITS extension to load, in the format ``(filename, ext_num)``.\n The FITS ``(extname, extver)`` format is unsupported, since this\n function is not called directly by the user and\n ``imread_collection()`` does the work of figuring out which\n extensions need loading.\n\n \"\"\"\n\n # Expect a length-2 tuple with a filename as the first element:\n if not isinstance(image_ext, tuple):\n raise TypeError(\"Expected a tuple\")\n\n if len(image_ext) != 2:\n raise ValueError(\"Expected a tuple of length 2\")\n\n filename = image_ext[0]\n extnum = image_ext[1]\n\n if type(filename) is not str or type(extnum) is not int:\n raise ValueError(\"Expected a (filename, extension) tuple\")\n\n hdulist = pyfits.open(filename)\n\n 
data = hdulist[extnum].data\n\n hdulist.close()\n\n if data is None:\n raise RuntimeError(\n \"Extension %d of %s has no data\" % (extnum, filename))\n\n return data\n", "path": "skimage/io/_plugins/fits_plugin.py"}], "after_files": [{"content": "__all__ = ['imread', 'imread_collection']\n\nimport skimage.io as io\n\ntry:\n from astropy.io import fits\nexcept ImportError:\n raise ImportError(\n \"Astropy could not be found. It is needed to read FITS files.\\n\"\n \"Please refer to http://www.astropy.org for installation\\n\"\n \"instructions.\")\n\n\ndef imread(fname, dtype=None):\n \"\"\"Load an image from a FITS file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.fits``.\n dtype : dtype, optional\n For FITS, this argument is ignored because Stefan is planning on\n removing the dtype argument from imread anyway.\n\n Returns\n -------\n img_array : ndarray\n Unlike plugins such as PIL, where different color bands/channels are\n stored in the third dimension, FITS images are greyscale-only and can\n be N-dimensional, so an array of the native FITS dimensionality is\n returned, without color channels.\n\n Currently if no image is found in the file, None will be returned\n\n Notes\n -----\n\n Currently FITS ``imread()`` always returns the first image extension when\n given a Multi-Extension FITS file; use ``imread_collection()`` (which does\n lazy loading) to get all the extensions at once.\n\n \"\"\"\n\n hdulist = fits.open(fname)\n\n # Iterate over FITS image extensions, ignoring any other extension types\n # such as binary tables, and get the first image data array:\n img_array = None\n for hdu in hdulist:\n if isinstance(hdu, fits.ImageHDU) or \\\n isinstance(hdu, fits.PrimaryHDU):\n if hdu.data is not None:\n img_array = hdu.data\n break\n hdulist.close()\n\n return img_array\n\n\ndef imread_collection(load_pattern, conserve_memory=True):\n \"\"\"Load a collection of images from one or more FITS files\n\n Parameters\n ----------\n load_pattern : str or list\n List of extensions to load. Filename globbing is currently\n unsupported.\n converve_memory : bool\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n\n ic : ImageCollection\n Collection of images.\n\n \"\"\"\n\n intype = type(load_pattern)\n if intype is not list and intype is not str:\n raise TypeError(\"Input must be a filename or list of filenames\")\n\n # Ensure we have a list, otherwise we'll end up iterating over the string:\n if intype is not list:\n load_pattern = [load_pattern]\n\n # Generate a list of filename/extension pairs by opening the list of\n # files and finding the image extensions in each one:\n ext_list = []\n for filename in load_pattern:\n hdulist = fits.open(filename)\n for n, hdu in zip(range(len(hdulist)), hdulist):\n if isinstance(hdu, fits.ImageHDU) or \\\n isinstance(hdu, fits.PrimaryHDU):\n # Ignore (primary) header units with no data (use '.size'\n # rather than '.data' to avoid actually loading the image):\n try:\n data_size = hdu.size # size is int in Astropy 3.1.2\n except TypeError:\n data_size = hdu.size()\n if data_size > 0:\n ext_list.append((filename, n))\n hdulist.close()\n\n return io.ImageCollection(ext_list, load_func=FITSFactory,\n conserve_memory=conserve_memory)\n\n\ndef FITSFactory(image_ext):\n \"\"\"Load an image extension from a FITS file and return a NumPy array\n\n Parameters\n ----------\n\n image_ext : tuple\n FITS extension to load, in the format ``(filename, ext_num)``.\n The FITS ``(extname, extver)`` format is unsupported, since this\n function is not called directly by the user and\n ``imread_collection()`` does the work of figuring out which\n extensions need loading.\n\n \"\"\"\n\n # Expect a length-2 tuple with a filename as the first element:\n if not isinstance(image_ext, tuple):\n raise TypeError(\"Expected a tuple\")\n\n if len(image_ext) != 2:\n raise ValueError(\"Expected a tuple of length 2\")\n\n filename = image_ext[0]\n extnum = image_ext[1]\n\n if type(filename) is not str or type(extnum) is not int:\n raise ValueError(\"Expected a (filename, extension) tuple\")\n\n hdulist = fits.open(filename)\n\n data = hdulist[extnum].data\n\n hdulist.close()\n\n if data is None:\n raise RuntimeError(\n \"Extension %d of %s has no data\" % (extnum, filename))\n\n return data\n", "path": "skimage/io/_plugins/fits_plugin.py"}]}
| 1,741 | 688 |
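The golden diff above amounts to swapping `pyfits` for `astropy.io.fits` while keeping the HDU-scanning logic intact. The sketch below shows the same read-first-image pattern in isolation; it assumes astropy is installed, and the file name is hypothetical.

```python
# Sketch of reading the first image HDU with astropy, mirroring the patched plugin.
# Assumes astropy is installed; "example.fits" is a hypothetical file name.
from astropy.io import fits


def first_image_array(fname):
    img_array = None
    with fits.open(fname) as hdulist:        # context manager closes the file for us
        for hdu in hdulist:
            if isinstance(hdu, (fits.ImageHDU, fits.PrimaryHDU)) and hdu.data is not None:
                img_array = hdu.data         # greyscale ndarray of native FITS dimensionality
                break
    return img_array


# arr = first_image_array("example.fits")   # usage with a hypothetical file
```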
gh_patches_debug_34926
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-3770
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clarify the `num_workers` in `ThreadDataLoader`
**Is your feature request related to a problem? Please describe.**
When I was introducing GPU transforms and the associated `ThreadDataLoader` to users, got several times feedback about the `num_workers` arg, which is confusing that users think it means the multi-threads in `ThreadDataLoader`, but actually it's the multi-processing workers of PyTorch DataLoader.
Would be nice to clarify this arg and the use cases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/data/thread_buffer.py`
Content:
```
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12
13 from queue import Empty, Full, Queue
14 from threading import Thread
15
16 from monai.data import DataLoader, Dataset
17
18
19 class ThreadBuffer:
20 """
21 Iterates over values from self.src in a separate thread but yielding them in the current thread. This allows values
22 to be queued up asynchronously. The internal thread will continue running so long as the source has values or until
23 the stop() method is called.
24
25 One issue raised by using a thread in this way is that during the lifetime of the thread the source object is being
26 iterated over, so if the thread hasn't finished another attempt to iterate over it will raise an exception or yield
27 unexpected results. To ensure the thread releases the iteration and proper cleanup is done the stop() method must
28 be called which will join with the thread.
29
30 Args:
31 src: Source data iterable
32 buffer_size: Number of items to buffer from the source
33 timeout: Time to wait for an item from the buffer, or to wait while the buffer is full when adding items
34 """
35
36 def __init__(self, src, buffer_size: int = 1, timeout: float = 0.01):
37 self.src = src
38 self.buffer_size = buffer_size
39 self.timeout = timeout
40 self.buffer: Queue = Queue(self.buffer_size)
41 self.gen_thread = None
42 self.is_running = False
43
44 def enqueue_values(self):
45 for src_val in self.src:
46 while self.is_running:
47 try:
48 self.buffer.put(src_val, timeout=self.timeout)
49 except Full:
50 pass # try to add the item again
51 else:
52 break # successfully added the item, quit trying
53 else: # quit the thread cleanly when requested to stop
54 break
55
56 def stop(self):
57 self.is_running = False # signal the thread to exit
58
59 if self.gen_thread is not None:
60 self.gen_thread.join()
61
62 self.gen_thread = None
63
64 def __iter__(self):
65
66 self.is_running = True
67 self.gen_thread = Thread(target=self.enqueue_values, daemon=True)
68 self.gen_thread.start()
69
70 try:
71 while self.is_running and (self.gen_thread.is_alive() or not self.buffer.empty()):
72 try:
73 yield self.buffer.get(timeout=self.timeout)
74 except Empty:
75 pass # queue was empty this time, try again
76 finally:
77 self.stop() # ensure thread completion
78
79
80 class ThreadDataLoader(DataLoader):
81 """
82 Subclass of `DataLoader` using a `ThreadBuffer` object to implement `__iter__` method asynchronously. This will
83 iterate over data from the loader as expected however the data is generated on a separate thread. Use this class
84 where a `DataLoader` instance is required and not just an iterable object.
85
86 The default behaviour with `repeats` set to 1 is to yield each batch as it is generated, however with a higher
87 value the generated batch is yielded that many times while underlying dataset asynchronously generates the next.
88 Typically not all relevant information is learned from a batch in a single iteration so training multiple times
89 on the same batch will still produce good training with minimal short-term overfitting while allowing a slow batch
90 generation process more time to produce a result.
91
92 See:
93 * Fischetti et al. "Faster SGD training by minibatch persistency." ArXiv (2018) https://arxiv.org/abs/1806.07353
94 * Dami et al., "Faster Neural Network Training with Data Echoing" ArXiv (2020) https://arxiv.org/abs/1907.05550
95 * Ramezani et al. "GCN meets GPU: Decoupling "When to Sample" from "How to Sample"." NeurIPS (2020).
96 https://proceedings.neurips.cc/paper/2020/file/d714d2c5a796d5814c565d78dd16188d-Paper.pdf
97
98 Args:
99 dataset: input dataset.
100 buffer_size: number of items to buffer from the data source.
101 buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.
102 num_workers: number of the multi-processing workers in PyTorch DataLoader.
103 repeats: number of times to yield the same batch
104 """
105
106 def __init__(
107 self,
108 dataset: Dataset,
109 buffer_size: int = 1,
110 buffer_timeout: float = 0.01,
111 num_workers: int = 0,
112 repeats: int = 1,
113 **kwargs,
114 ):
115 super().__init__(dataset, num_workers, **kwargs)
116 self.buffer_size = buffer_size
117 self.buffer_timeout = buffer_timeout
118 self.repeats = repeats
119
120 def __iter__(self):
121 buffer = ThreadBuffer(src=super().__iter__(), buffer_size=self.buffer_size, timeout=self.buffer_timeout)
122
123 for batch in buffer:
124 for _ in range(self.repeats):
125 yield batch
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py
--- a/monai/data/thread_buffer.py
+++ b/monai/data/thread_buffer.py
@@ -89,6 +89,12 @@
on the same batch will still produce good training with minimal short-term overfitting while allowing a slow batch
generation process more time to produce a result.
+ Another typical usage is to accelerate light-weight preprocessing (usually cached all the deterministic transforms
+ and no IO operations), because it leverages the separate thread to execute preprocessing to avoid unnecessary IPC
+ between multiple workers of DataLoader. And as CUDA may not work well with the multi-processing of DataLoader,
+ `ThreadDataLoader` can be useful for GPU transforms. For more details:
+ https://github.com/Project-MONAI/tutorials/blob/master/acceleration/fast_model_training_guide.md.
+
See:
* Fischetti et al. "Faster SGD training by minibatch persistency." ArXiv (2018) https://arxiv.org/abs/1806.07353
* Dami et al., "Faster Neural Network Training with Data Echoing" ArXiv (2020) https://arxiv.org/abs/1907.05550
@@ -99,20 +105,15 @@
dataset: input dataset.
buffer_size: number of items to buffer from the data source.
buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.
- num_workers: number of the multi-processing workers in PyTorch DataLoader.
- repeats: number of times to yield the same batch
+ repeats: number of times to yield the same batch.
+ kwargs: other arguments for `DataLoader` except for `dataset`.
+
"""
def __init__(
- self,
- dataset: Dataset,
- buffer_size: int = 1,
- buffer_timeout: float = 0.01,
- num_workers: int = 0,
- repeats: int = 1,
- **kwargs,
+ self, dataset: Dataset, buffer_size: int = 1, buffer_timeout: float = 0.01, repeats: int = 1, **kwargs
):
- super().__init__(dataset, num_workers, **kwargs)
+ super().__init__(dataset, **kwargs)
self.buffer_size = buffer_size
self.buffer_timeout = buffer_timeout
self.repeats = repeats
|
{"golden_diff": "diff --git a/monai/data/thread_buffer.py b/monai/data/thread_buffer.py\n--- a/monai/data/thread_buffer.py\n+++ b/monai/data/thread_buffer.py\n@@ -89,6 +89,12 @@\n on the same batch will still produce good training with minimal short-term overfitting while allowing a slow batch\n generation process more time to produce a result.\n \n+ Another typical usage is to accelerate light-weight preprocessing (usually cached all the deterministic transforms\n+ and no IO operations), because it leverages the separate thread to execute preprocessing to avoid unnecessary IPC\n+ between multiple workers of DataLoader. And as CUDA may not work well with the multi-processing of DataLoader,\n+ `ThreadDataLoader` can be useful for GPU transforms. For more details:\n+ https://github.com/Project-MONAI/tutorials/blob/master/acceleration/fast_model_training_guide.md.\n+\n See:\n * Fischetti et al. \"Faster SGD training by minibatch persistency.\" ArXiv (2018) https://arxiv.org/abs/1806.07353\n * Dami et al., \"Faster Neural Network Training with Data Echoing\" ArXiv (2020) https://arxiv.org/abs/1907.05550\n@@ -99,20 +105,15 @@\n dataset: input dataset.\n buffer_size: number of items to buffer from the data source.\n buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.\n- num_workers: number of the multi-processing workers in PyTorch DataLoader.\n- repeats: number of times to yield the same batch\n+ repeats: number of times to yield the same batch.\n+ kwargs: other arguments for `DataLoader` except for `dataset`.\n+\n \"\"\"\n \n def __init__(\n- self,\n- dataset: Dataset,\n- buffer_size: int = 1,\n- buffer_timeout: float = 0.01,\n- num_workers: int = 0,\n- repeats: int = 1,\n- **kwargs,\n+ self, dataset: Dataset, buffer_size: int = 1, buffer_timeout: float = 0.01, repeats: int = 1, **kwargs\n ):\n- super().__init__(dataset, num_workers, **kwargs)\n+ super().__init__(dataset, **kwargs)\n self.buffer_size = buffer_size\n self.buffer_timeout = buffer_timeout\n self.repeats = repeats\n", "issue": "Clarify the `num_workers` in `ThreadDataLoader`\n**Is your feature request related to a problem? Please describe.**\r\nWhen I was introducing GPU transforms and the associated `ThreadDataLoader` to users, got several times feedback about the `num_workers` arg, which is confusing that users think it means the multi-threads in `ThreadDataLoader`, but actually it's the multi-processing workers of PyTorch DataLoader.\r\nWould be nice to clarify this arg and the use cases.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom queue import Empty, Full, Queue\nfrom threading import Thread\n\nfrom monai.data import DataLoader, Dataset\n\n\nclass ThreadBuffer:\n \"\"\"\n Iterates over values from self.src in a separate thread but yielding them in the current thread. This allows values\n to be queued up asynchronously. 
The internal thread will continue running so long as the source has values or until\n the stop() method is called.\n\n One issue raised by using a thread in this way is that during the lifetime of the thread the source object is being\n iterated over, so if the thread hasn't finished another attempt to iterate over it will raise an exception or yield\n unexpected results. To ensure the thread releases the iteration and proper cleanup is done the stop() method must\n be called which will join with the thread.\n\n Args:\n src: Source data iterable\n buffer_size: Number of items to buffer from the source\n timeout: Time to wait for an item from the buffer, or to wait while the buffer is full when adding items\n \"\"\"\n\n def __init__(self, src, buffer_size: int = 1, timeout: float = 0.01):\n self.src = src\n self.buffer_size = buffer_size\n self.timeout = timeout\n self.buffer: Queue = Queue(self.buffer_size)\n self.gen_thread = None\n self.is_running = False\n\n def enqueue_values(self):\n for src_val in self.src:\n while self.is_running:\n try:\n self.buffer.put(src_val, timeout=self.timeout)\n except Full:\n pass # try to add the item again\n else:\n break # successfully added the item, quit trying\n else: # quit the thread cleanly when requested to stop\n break\n\n def stop(self):\n self.is_running = False # signal the thread to exit\n\n if self.gen_thread is not None:\n self.gen_thread.join()\n\n self.gen_thread = None\n\n def __iter__(self):\n\n self.is_running = True\n self.gen_thread = Thread(target=self.enqueue_values, daemon=True)\n self.gen_thread.start()\n\n try:\n while self.is_running and (self.gen_thread.is_alive() or not self.buffer.empty()):\n try:\n yield self.buffer.get(timeout=self.timeout)\n except Empty:\n pass # queue was empty this time, try again\n finally:\n self.stop() # ensure thread completion\n\n\nclass ThreadDataLoader(DataLoader):\n \"\"\"\n Subclass of `DataLoader` using a `ThreadBuffer` object to implement `__iter__` method asynchronously. This will\n iterate over data from the loader as expected however the data is generated on a separate thread. Use this class\n where a `DataLoader` instance is required and not just an iterable object.\n\n The default behaviour with `repeats` set to 1 is to yield each batch as it is generated, however with a higher\n value the generated batch is yielded that many times while underlying dataset asynchronously generates the next.\n Typically not all relevant information is learned from a batch in a single iteration so training multiple times\n on the same batch will still produce good training with minimal short-term overfitting while allowing a slow batch\n generation process more time to produce a result.\n\n See:\n * Fischetti et al. \"Faster SGD training by minibatch persistency.\" ArXiv (2018) https://arxiv.org/abs/1806.07353\n * Dami et al., \"Faster Neural Network Training with Data Echoing\" ArXiv (2020) https://arxiv.org/abs/1907.05550\n * Ramezani et al. 
\"GCN meets GPU: Decoupling \"When to Sample\" from \"How to Sample\".\" NeurIPS (2020).\n https://proceedings.neurips.cc/paper/2020/file/d714d2c5a796d5814c565d78dd16188d-Paper.pdf\n\n Args:\n dataset: input dataset.\n buffer_size: number of items to buffer from the data source.\n buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.\n num_workers: number of the multi-processing workers in PyTorch DataLoader.\n repeats: number of times to yield the same batch\n \"\"\"\n\n def __init__(\n self,\n dataset: Dataset,\n buffer_size: int = 1,\n buffer_timeout: float = 0.01,\n num_workers: int = 0,\n repeats: int = 1,\n **kwargs,\n ):\n super().__init__(dataset, num_workers, **kwargs)\n self.buffer_size = buffer_size\n self.buffer_timeout = buffer_timeout\n self.repeats = repeats\n\n def __iter__(self):\n buffer = ThreadBuffer(src=super().__iter__(), buffer_size=self.buffer_size, timeout=self.buffer_timeout)\n\n for batch in buffer:\n for _ in range(self.repeats):\n yield batch\n", "path": "monai/data/thread_buffer.py"}], "after_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom queue import Empty, Full, Queue\nfrom threading import Thread\n\nfrom monai.data import DataLoader, Dataset\n\n\nclass ThreadBuffer:\n \"\"\"\n Iterates over values from self.src in a separate thread but yielding them in the current thread. This allows values\n to be queued up asynchronously. The internal thread will continue running so long as the source has values or until\n the stop() method is called.\n\n One issue raised by using a thread in this way is that during the lifetime of the thread the source object is being\n iterated over, so if the thread hasn't finished another attempt to iterate over it will raise an exception or yield\n unexpected results. 
To ensure the thread releases the iteration and proper cleanup is done the stop() method must\n be called which will join with the thread.\n\n Args:\n src: Source data iterable\n buffer_size: Number of items to buffer from the source\n timeout: Time to wait for an item from the buffer, or to wait while the buffer is full when adding items\n \"\"\"\n\n def __init__(self, src, buffer_size: int = 1, timeout: float = 0.01):\n self.src = src\n self.buffer_size = buffer_size\n self.timeout = timeout\n self.buffer: Queue = Queue(self.buffer_size)\n self.gen_thread = None\n self.is_running = False\n\n def enqueue_values(self):\n for src_val in self.src:\n while self.is_running:\n try:\n self.buffer.put(src_val, timeout=self.timeout)\n except Full:\n pass # try to add the item again\n else:\n break # successfully added the item, quit trying\n else: # quit the thread cleanly when requested to stop\n break\n\n def stop(self):\n self.is_running = False # signal the thread to exit\n\n if self.gen_thread is not None:\n self.gen_thread.join()\n\n self.gen_thread = None\n\n def __iter__(self):\n\n self.is_running = True\n self.gen_thread = Thread(target=self.enqueue_values, daemon=True)\n self.gen_thread.start()\n\n try:\n while self.is_running and (self.gen_thread.is_alive() or not self.buffer.empty()):\n try:\n yield self.buffer.get(timeout=self.timeout)\n except Empty:\n pass # queue was empty this time, try again\n finally:\n self.stop() # ensure thread completion\n\n\nclass ThreadDataLoader(DataLoader):\n \"\"\"\n Subclass of `DataLoader` using a `ThreadBuffer` object to implement `__iter__` method asynchronously. This will\n iterate over data from the loader as expected however the data is generated on a separate thread. Use this class\n where a `DataLoader` instance is required and not just an iterable object.\n\n The default behaviour with `repeats` set to 1 is to yield each batch as it is generated, however with a higher\n value the generated batch is yielded that many times while underlying dataset asynchronously generates the next.\n Typically not all relevant information is learned from a batch in a single iteration so training multiple times\n on the same batch will still produce good training with minimal short-term overfitting while allowing a slow batch\n generation process more time to produce a result.\n\n Another typical usage is to accelerate light-weight preprocessing (usually cached all the deterministic transforms\n and no IO operations), because it leverages the separate thread to execute preprocessing to avoid unnecessary IPC\n between multiple workers of DataLoader. And as CUDA may not work well with the multi-processing of DataLoader,\n `ThreadDataLoader` can be useful for GPU transforms. For more details:\n https://github.com/Project-MONAI/tutorials/blob/master/acceleration/fast_model_training_guide.md.\n\n See:\n * Fischetti et al. \"Faster SGD training by minibatch persistency.\" ArXiv (2018) https://arxiv.org/abs/1806.07353\n * Dami et al., \"Faster Neural Network Training with Data Echoing\" ArXiv (2020) https://arxiv.org/abs/1907.05550\n * Ramezani et al. 
\"GCN meets GPU: Decoupling \"When to Sample\" from \"How to Sample\".\" NeurIPS (2020).\n https://proceedings.neurips.cc/paper/2020/file/d714d2c5a796d5814c565d78dd16188d-Paper.pdf\n\n Args:\n dataset: input dataset.\n buffer_size: number of items to buffer from the data source.\n buffer_timeout: time to wait for an item from the buffer, or to wait while the buffer is full when adding items.\n repeats: number of times to yield the same batch.\n kwargs: other arguments for `DataLoader` except for `dataset`.\n\n \"\"\"\n\n def __init__(\n self, dataset: Dataset, buffer_size: int = 1, buffer_timeout: float = 0.01, repeats: int = 1, **kwargs\n ):\n super().__init__(dataset, **kwargs)\n self.buffer_size = buffer_size\n self.buffer_timeout = buffer_timeout\n self.repeats = repeats\n\n def __iter__(self):\n buffer = ThreadBuffer(src=super().__iter__(), buffer_size=self.buffer_size, timeout=self.buffer_timeout)\n\n for batch in buffer:\n for _ in range(self.repeats):\n yield batch\n", "path": "monai/data/thread_buffer.py"}]}
| 1,873 | 562 |
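With the patch above applied, `ThreadDataLoader` no longer takes `num_workers` explicitly: anything intended for the underlying PyTorch `DataLoader` (batch size, workers, and so on) is forwarded through `**kwargs`, while `buffer_size`, `buffer_timeout`, and `repeats` configure the thread buffer. A small sketch under that assumption, with toy data standing in for real samples:

```python
# Sketch of using the patched ThreadDataLoader: DataLoader options go in as kwargs,
# buffering/repeats are the loader's own arguments. The data below is a toy stand-in.
from monai.data import Dataset, ThreadDataLoader

samples = [{"image": float(i), "label": i % 2} for i in range(8)]   # toy items
ds = Dataset(data=samples)

loader = ThreadDataLoader(
    ds,
    buffer_size=1,      # items buffered by the background thread
    repeats=1,          # yield each generated batch once
    batch_size=2,       # forwarded to torch.utils.data.DataLoader
    num_workers=0,      # also forwarded; 0 keeps loading in-process
)

for batch in loader:
    pass                # a training step would consume `batch` here
```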
gh_patches_debug_6496
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-3150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parliamentarians' terms have disappeared
## Expected Behavior
All of the parliamentarians' terms, corresponding to the legislatures, should be displayed.
## Current Behavior
Only the 6th legislature through the current one is displayed.
Steps to Reproduce (for bugs)
https://sapl.agudo.rs.leg.br/parlamentar/
## Context
Searching parliamentarians by legislature.
## Screenshots
![Screenshot from 2020-04-14 08-12-05](https://user-images.githubusercontent.com/19169882/79241273-39dc2e80-7e4a-11ea-8e51-1dec5b9ff99a.png)
## Your Environment
Version used (Release): 3.1.160-RC11
Browser name and version: Version 80.0.3987.163 (Official build)
Operating system name and version (desktop or mobile): Ubuntu 16.04 64 bits
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/api/pagination.py`
Content:
```
1 from django.core.paginator import EmptyPage
2 from rest_framework import pagination
3 from rest_framework.response import Response
4
5
6 class StandardPagination(pagination.PageNumberPagination):
7 page_size = 10
8 page_size_query_param = 'page_size'
9 max_page_size = 50
10
11 def get_paginated_response(self, data):
12 try:
13 previous_page_number = self.page.previous_page_number()
14 except EmptyPage:
15 previous_page_number = None
16
17 try:
18 next_page_number = self.page.next_page_number()
19 except EmptyPage:
20 next_page_number = None
21
22 return Response({
23 'pagination': {
24 'previous_page': previous_page_number,
25 'next_page': next_page_number,
26 'start_index': self.page.start_index(),
27 'end_index': self.page.end_index(),
28 'total_entries': self.page.paginator.count,
29 'total_pages': self.page.paginator.num_pages,
30 'page': self.page.number,
31 },
32 'results': data,
33
34 })
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/api/pagination.py b/sapl/api/pagination.py
--- a/sapl/api/pagination.py
+++ b/sapl/api/pagination.py
@@ -8,6 +8,11 @@
page_size_query_param = 'page_size'
max_page_size = 50
+ def paginate_queryset(self, queryset, request, view=None):
+ if request.query_params.get('get_all', False) == 'true':
+ return None
+ return super().paginate_queryset(queryset, request, view=view)
+
def get_paginated_response(self, data):
try:
previous_page_number = self.page.previous_page_number()
|
{"golden_diff": "diff --git a/sapl/api/pagination.py b/sapl/api/pagination.py\n--- a/sapl/api/pagination.py\n+++ b/sapl/api/pagination.py\n@@ -8,6 +8,11 @@\n page_size_query_param = 'page_size'\n max_page_size = 50\n \n+ def paginate_queryset(self, queryset, request, view=None):\n+ if request.query_params.get('get_all', False) == 'true':\n+ return None\n+ return super().paginate_queryset(queryset, request, view=view)\n+\n def get_paginated_response(self, data):\n try:\n previous_page_number = self.page.previous_page_number()\n", "issue": "Per\u00edodos de parlamentares sumiram\n## Comportamento Esperado\r\nDevem aparecer todos os per\u00edodos dos parlamentares, correspondentes \u00e0s legislaturas.\r\n\r\n## Comportamento Atual\r\nAparecem apenas da 6\u00aa legislatura \u00e0 atual.\r\n\r\nPassos para Reproduzir (para bugs)\r\nhttps://sapl.agudo.rs.leg.br/parlamentar/\r\n\r\n## Contexto\r\nPesquisa de parlamentares por legislatura.\r\n\r\n## Imagens do Ocorrido\r\n\r\n\r\n## Seu Ambiente\r\nVers\u00e3o usada (Release): 3.1.160-RC11\r\nNome e vers\u00e3o do navegador: Vers\u00e3o 80.0.3987.163 (Vers\u00e3o oficial)\r\nNome e vers\u00e3o do Sistema Operacional (desktop ou mobile): Ubuntu 16.04 64 bits\r\n\r\n\n", "before_files": [{"content": "from django.core.paginator import EmptyPage\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass StandardPagination(pagination.PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n max_page_size = 50\n\n def get_paginated_response(self, data):\n try:\n previous_page_number = self.page.previous_page_number()\n except EmptyPage:\n previous_page_number = None\n\n try:\n next_page_number = self.page.next_page_number()\n except EmptyPage:\n next_page_number = None\n\n return Response({\n 'pagination': {\n 'previous_page': previous_page_number,\n 'next_page': next_page_number,\n 'start_index': self.page.start_index(),\n 'end_index': self.page.end_index(),\n 'total_entries': self.page.paginator.count,\n 'total_pages': self.page.paginator.num_pages,\n 'page': self.page.number,\n },\n 'results': data,\n\n })\n", "path": "sapl/api/pagination.py"}], "after_files": [{"content": "from django.core.paginator import EmptyPage\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass StandardPagination(pagination.PageNumberPagination):\n page_size = 10\n page_size_query_param = 'page_size'\n max_page_size = 50\n\n def paginate_queryset(self, queryset, request, view=None):\n if request.query_params.get('get_all', False) == 'true':\n return None\n return super().paginate_queryset(queryset, request, view=view)\n\n def get_paginated_response(self, data):\n try:\n previous_page_number = self.page.previous_page_number()\n except EmptyPage:\n previous_page_number = None\n\n try:\n next_page_number = self.page.next_page_number()\n except EmptyPage:\n next_page_number = None\n\n return Response({\n 'pagination': {\n 'previous_page': previous_page_number,\n 'next_page': next_page_number,\n 'start_index': self.page.start_index(),\n 'end_index': self.page.end_index(),\n 'total_entries': self.page.paginator.count,\n 'total_pages': self.page.paginator.num_pages,\n 'page': self.page.number,\n },\n 'results': data,\n\n })\n", "path": "sapl/api/pagination.py"}]}
| 807 | 142 |
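The patched `StandardPagination` above returns `None` from `paginate_queryset` whenever the request carries `?get_all=true`, which is DRF's signal to skip pagination, so a client can fetch the full result set at once. A rough sketch of exercising that behaviour is below; it assumes a configured Django/DRF environment for the sapl project, and the `SimpleNamespace` request is a minimal stand-in providing only the `query_params` attribute the paginator reads.

```python
# Sketch of the patched paginator's behaviour: get_all=true disables paging entirely,
# otherwise normal page_size/page handling applies. Run inside a configured Django
# environment for sapl; the SimpleNamespace request is a minimal stand-in.
from types import SimpleNamespace

from sapl.api.pagination import StandardPagination

paginator = StandardPagination()
all_items = list(range(200))                                      # stand-in for a queryset

req_all = SimpleNamespace(query_params={"get_all": "true"})
assert paginator.paginate_queryset(all_items, req_all) is None    # pagination bypassed

req_paged = SimpleNamespace(query_params={"page_size": "50"})
page = paginator.paginate_queryset(all_items, req_paged)
assert len(page) == 50                                            # normal paging still works
```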
gh_patches_debug_20590
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-795
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove passwords from error messages
EvaP sends error messages to the admins when a server error occurs. Post data sent to the page where the error occurred will be included in the message, but passwords must not be included in these messages.
This currently happens e.g. when a user is logging in who has two accounts like described in #791.
Remove passwords from error messages
EvaP sends error messages to the admins when a server error occurs. Post data sent to the page where the error occurred will be included in the message, but passwords must not be included in these messages.
This currently happens e.g. when a user is logging in who has two accounts like described in #791.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/views.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.auth import login as auth_login
6 from django.contrib.auth.decorators import login_required
7 from django.core.mail import EmailMessage
8 from django.http import HttpResponse
9 from django.shortcuts import redirect, render
10 from django.utils.translation import ugettext as _
11 from django.core.urlresolvers import resolve, Resolver404
12 from django.views.decorators.http import require_POST
13
14 from evap.evaluation.forms import NewKeyForm, LoginUsernameForm
15 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
16
17 logger = logging.getLogger(__name__)
18
19
20 def index(request):
21 """Main entry page into EvaP providing all the login options available. The username/password
22 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
23 The login key mechanism is meant to be used to include external participants, e.g. visiting
24 students or visiting contributors.
25 """
26
27 # parse the form data into the respective form
28 submit_type = request.POST.get("submit_type", "no_submit")
29 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
30 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
31
32 # process form data
33 if request.method == 'POST':
34 if new_key_form.is_valid():
35 # user wants a new login key
36 profile = new_key_form.get_user()
37 profile.generate_login_key()
38 profile.save()
39
40 EmailTemplate.send_login_url_to_user(new_key_form.get_user())
41
42 messages.success(request, _("We sent you an email with a one-time login URL. Please check your inbox."))
43 return redirect('evaluation:index')
44 elif login_username_form.is_valid():
45 # user would like to login with username and password and passed password test
46 auth_login(request, login_username_form.get_user())
47
48 # clean up our test cookie
49 if request.session.test_cookie_worked():
50 request.session.delete_test_cookie()
51
52 # if not logged in by now, render form
53 if not request.user.is_authenticated():
54 # set test cookie to verify whether they work in the next step
55 request.session.set_test_cookie()
56
57 template_data = dict(new_key_form=new_key_form, login_username_form=login_username_form)
58 return render(request, "index.html", template_data)
59 else:
60 user, __ = UserProfile.objects.get_or_create(username=request.user.username)
61
62 # check for redirect variable
63 redirect_to = request.GET.get("next", None)
64 if redirect_to is not None:
65 if redirect_to.startswith("/staff/"):
66 if request.user.is_staff:
67 return redirect(redirect_to)
68 elif redirect_to.startswith("/grades/"):
69 if request.user.is_grade_publisher:
70 return redirect(redirect_to)
71 elif redirect_to.startswith("/contributor/"):
72 if user.is_contributor:
73 return redirect(redirect_to)
74 elif redirect_to.startswith("/student/"):
75 if user.is_participant:
76 return redirect(redirect_to)
77 else:
78 try:
79 resolve(redirect_to)
80 except Resolver404:
81 pass
82 else:
83 return redirect(redirect_to)
84
85 # redirect user to appropriate start page
86 if request.user.is_staff:
87 return redirect('staff:index')
88 elif request.user.is_grade_publisher:
89 return redirect('grades:semester_view', Semester.active_semester().id)
90 elif user.is_student:
91 return redirect('student:index')
92 elif user.is_contributor_or_delegate:
93 return redirect('contributor:index')
94 elif user.is_participant:
95 return redirect('student:index')
96 else:
97 return redirect('results:index')
98
99
100 def faq(request):
101 return render(request, "faq.html", dict(sections=FaqSection.objects.all()))
102
103
104 def legal_notice(request):
105 return render(request, "legal_notice.html", dict())
106
107
108 @require_POST
109 @login_required
110 def feedback_send(request):
111 sender_email = request.user.email
112 message = request.POST.get("message")
113 subject = "Feedback from {}".format(sender_email)
114
115 if message:
116 mail = EmailMessage(
117 subject=subject,
118 body=message,
119 to=[settings.FEEDBACK_EMAIL])
120
121 try:
122 mail.send()
123 logger.info('Sent feedback email: \n{}\n'.format(mail.message()))
124 except Exception:
125 logger.exception('An exception occurred when sending the following feedback email:\n{}\n'.format(mail.message()))
126
127 return HttpResponse()
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -10,6 +10,7 @@
from django.utils.translation import ugettext as _
from django.core.urlresolvers import resolve, Resolver404
from django.views.decorators.http import require_POST
+from django.views.decorators.debug import sensitive_post_parameters
from evap.evaluation.forms import NewKeyForm, LoginUsernameForm
from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester
@@ -17,6 +18,7 @@
logger = logging.getLogger(__name__)
+@sensitive_post_parameters("password")
def index(request):
"""Main entry page into EvaP providing all the login options available. The username/password
login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
|
{"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -10,6 +10,7 @@\n from django.utils.translation import ugettext as _\n from django.core.urlresolvers import resolve, Resolver404\n from django.views.decorators.http import require_POST\n+from django.views.decorators.debug import sensitive_post_parameters\n \n from evap.evaluation.forms import NewKeyForm, LoginUsernameForm\n from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n@@ -17,6 +18,7 @@\n logger = logging.getLogger(__name__)\n \n \n+@sensitive_post_parameters(\"password\")\n def index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n", "issue": "Remove passwords from error messages\nEvaP sends error messages to the admins when a server error occurs. Post data sent to the page where the error occurred will be included in the message, but passwords must not be included in these messages.\nThis currently happens e.g. when a user is logging in who has two accounts like described in #791.\n\nRemove passwords from error messages\nEvaP sends error messages to the admins when a server error occurs. Post data sent to the page where the error occurred will be included in the message, but passwords must not be included in these messages.\nThis currently happens e.g. when a user is logging in who has two accounts like described in #791.\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import resolve, Resolver404\nfrom django.views.decorators.http import require_POST\n\nfrom evap.evaluation.forms import NewKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\nlogger = logging.getLogger(__name__)\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.send_login_url_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"We sent you an email with a one-time login URL. 
Please check your inbox.\"))\n return redirect('evaluation:index')\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated():\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, __ = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/staff/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/grades/\"):\n if request.user.is_grade_publisher:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if user.is_contributor:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/student/\"):\n if user.is_participant:\n return redirect(redirect_to)\n else:\n try:\n resolve(redirect_to)\n except Resolver404:\n pass\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_student:\n return redirect('student:index')\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n elif user.is_participant:\n return redirect('student:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n\n\n@require_POST\n@login_required\ndef feedback_send(request):\n sender_email = request.user.email\n message = request.POST.get(\"message\")\n subject = \"Feedback from {}\".format(sender_email)\n\n if message:\n mail = EmailMessage(\n subject=subject,\n body=message,\n to=[settings.FEEDBACK_EMAIL])\n\n try:\n mail.send()\n logger.info('Sent feedback email: \\n{}\\n'.format(mail.message()))\n except Exception:\n logger.exception('An exception occurred when sending the following feedback email:\\n{}\\n'.format(mail.message()))\n\n return HttpResponse()\n", "path": "evap/evaluation/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import resolve, Resolver404\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.debug import sensitive_post_parameters\n\nfrom evap.evaluation.forms import NewKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate, Semester\n\nlogger = logging.getLogger(__name__)\n\n\n@sensitive_post_parameters(\"password\")\ndef index(request):\n \"\"\"Main entry page 
into EvaP providing all the login options available. The username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_user()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.send_login_url_to_user(new_key_form.get_user())\n\n messages.success(request, _(\"We sent you an email with a one-time login URL. Please check your inbox.\"))\n return redirect('evaluation:index')\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_authenticated():\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n template_data = dict(new_key_form=new_key_form, login_username_form=login_username_form)\n return render(request, \"index.html\", template_data)\n else:\n user, __ = UserProfile.objects.get_or_create(username=request.user.username)\n\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/staff/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/grades/\"):\n if request.user.is_grade_publisher:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if user.is_contributor:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/student/\"):\n if user.is_participant:\n return redirect(redirect_to)\n else:\n try:\n resolve(redirect_to)\n except Resolver404:\n pass\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('staff:index')\n elif request.user.is_grade_publisher:\n return redirect('grades:semester_view', Semester.active_semester().id)\n elif user.is_student:\n return redirect('student:index')\n elif user.is_contributor_or_delegate:\n return redirect('contributor:index')\n elif user.is_participant:\n return redirect('student:index')\n else:\n return redirect('results:index')\n\n\ndef faq(request):\n return render(request, \"faq.html\", dict(sections=FaqSection.objects.all()))\n\n\ndef legal_notice(request):\n return render(request, \"legal_notice.html\", dict())\n\n\n@require_POST\n@login_required\ndef feedback_send(request):\n sender_email = request.user.email\n message = request.POST.get(\"message\")\n subject = \"Feedback from {}\".format(sender_email)\n\n if message:\n mail = EmailMessage(\n subject=subject,\n body=message,\n to=[settings.FEEDBACK_EMAIL])\n\n try:\n mail.send()\n logger.info('Sent feedback email: \\n{}\\n'.format(mail.message()))\n except Exception:\n logger.exception('An exception occurred when sending the following feedback 
email:\\n{}\\n'.format(mail.message()))\n\n return HttpResponse()\n", "path": "evap/evaluation/views.py"}]}
| 1,637 | 201 |
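For context on the decorator introduced by the patch in the record above: Django's `sensitive_post_parameters` masks the named POST fields in the error reports Django emails to admins. A minimal sketch of the pattern follows; the view and field names here are hypothetical and only illustrate how the decorator is applied, they are not taken from the EvaP codebase.

```python
# Minimal sketch of the pattern applied in the fix: the "password" POST field
# is masked in Django's error reports if this view raises an unhandled exception.
# View name and field names are illustrative, not taken from EvaP.
from django.http import HttpResponse
from django.views.decorators.debug import sensitive_post_parameters


@sensitive_post_parameters("password")
def login_view(request):
    username = request.POST.get("username", "")
    # ... authenticate here; any traceback emailed to ADMINS will show the
    # submitted "password" value replaced with a sanitized placeholder.
    return HttpResponse("hello %s" % username)
```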
gh_patches_debug_6190
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-626
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update README
Before blog: update verbiage and add pics from blog/paper.
After blog: add link to blog.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """TorchGeo: datasets, transforms, and models for geospatial data.
5
6 This library is part of the `PyTorch <http://pytorch.org/>`_ project. PyTorch is an open
7 source machine learning framework.
8
9 The :mod:`torchgeo` package consists of popular datasets, model architectures, and
10 common image transformations for geospatial data.
11 """
12
13 __author__ = "Adam J. Stewart"
14 __version__ = "0.3.0.dev0"
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchgeo/__init__.py b/torchgeo/__init__.py
--- a/torchgeo/__init__.py
+++ b/torchgeo/__init__.py
@@ -1,7 +1,7 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
-"""TorchGeo: datasets, transforms, and models for geospatial data.
+"""TorchGeo: datasets, samplers, transforms, and pre-trained models for geospatial data.
This library is part of the `PyTorch <http://pytorch.org/>`_ project. PyTorch is an open
source machine learning framework.
|
{"golden_diff": "diff --git a/torchgeo/__init__.py b/torchgeo/__init__.py\n--- a/torchgeo/__init__.py\n+++ b/torchgeo/__init__.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \n-\"\"\"TorchGeo: datasets, transforms, and models for geospatial data.\n+\"\"\"TorchGeo: datasets, samplers, transforms, and pre-trained models for geospatial data.\n \n This library is part of the `PyTorch <http://pytorch.org/>`_ project. PyTorch is an open\n source machine learning framework.\n", "issue": "Update README\nBefore blog: update verbiage and add pics from blog/paper. \n\nAfter blog: add link to blog.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"TorchGeo: datasets, transforms, and models for geospatial data.\n\nThis library is part of the `PyTorch <http://pytorch.org/>`_ project. PyTorch is an open\nsource machine learning framework.\n\nThe :mod:`torchgeo` package consists of popular datasets, model architectures, and\ncommon image transformations for geospatial data.\n\"\"\"\n\n__author__ = \"Adam J. Stewart\"\n__version__ = \"0.3.0.dev0\"\n", "path": "torchgeo/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"TorchGeo: datasets, samplers, transforms, and pre-trained models for geospatial data.\n\nThis library is part of the `PyTorch <http://pytorch.org/>`_ project. PyTorch is an open\nsource machine learning framework.\n\nThe :mod:`torchgeo` package consists of popular datasets, model architectures, and\ncommon image transformations for geospatial data.\n\"\"\"\n\n__author__ = \"Adam J. Stewart\"\n__version__ = \"0.3.0.dev0\"\n", "path": "torchgeo/__init__.py"}]}
| 432 | 145 |
gh_patches_debug_34052
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-5750
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Purging deleted packages does not delete associated filestore/datastore resources
**CKAN version**
2.9
**Describe the bug**
After deleting and purging dataset packages, the associated datastore/filestore resources are orphaned and not deleted.
**Steps to reproduce**
1. Delete datasets
2. Go to sysadmin/trash
3. Select "Purge All"
4. Check datastore and filestore. Associated resources are still there.
**Expected behavior**
Dataset resources in the filestore and the datastore are deleted when the associated package resources are purged.
**Additional details**
This is related to #4705 and a lot of work has been done on PRs #4867 and #4905, but neither have been merged.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext/datastore/cli.py`
Content:
```
1 # encoding: utf-8
2
3 import logging
4 import os
5
6 import click
7
8 from ckan.model import parse_db_config
9 from ckan.common import config
10
11 import ckanext.datastore as datastore_module
12 from ckanext.datastore.backend.postgres import identifier
13 from ckanext.datastore.blueprint import DUMP_FORMATS, dump_to
14
15 log = logging.getLogger(__name__)
16
17
18 @click.group()
19 def datastore():
20 u'''Perform commands to set up the datastore.
21 '''
22
23
24 @datastore.command(
25 u'set-permissions',
26 short_help=u'Generate SQL for permission configuration.'
27 )
28 def set_permissions():
29 u'''Emit an SQL script that will set the permissions for the datastore
30 users as configured in your configuration file.'''
31
32 write_url = _parse_db_config(u'ckan.datastore.write_url')
33 read_url = _parse_db_config(u'ckan.datastore.read_url')
34 db_url = _parse_db_config(u'sqlalchemy.url')
35
36 # Basic validation that read and write URLs reference the same database.
37 # This obviously doesn't check they're the same database (the hosts/ports
38 # could be different), but it's better than nothing, I guess.
39
40 if write_url[u'db_name'] != read_url[u'db_name']:
41 click.secho(
42 u'The datastore write_url and read_url must refer to the same '
43 u'database!',
44 fg=u'red',
45 bold=True
46 )
47 raise click.Abort()
48
49 sql = permissions_sql(
50 maindb=db_url[u'db_name'],
51 datastoredb=write_url[u'db_name'],
52 mainuser=db_url[u'db_user'],
53 writeuser=write_url[u'db_user'],
54 readuser=read_url[u'db_user']
55 )
56
57 click.echo(sql)
58
59
60 def permissions_sql(maindb, datastoredb, mainuser, writeuser, readuser):
61 template_filename = os.path.join(
62 os.path.dirname(datastore_module.__file__), u'set_permissions.sql'
63 )
64 with open(template_filename) as fp:
65 template = fp.read()
66 return template.format(
67 maindb=identifier(maindb),
68 datastoredb=identifier(datastoredb),
69 mainuser=identifier(mainuser),
70 writeuser=identifier(writeuser),
71 readuser=identifier(readuser)
72 )
73
74
75 @datastore.command()
76 @click.argument(u'resource-id', nargs=1)
77 @click.argument(
78 u'output-file',
79 type=click.File(u'wb'),
80 default=click.get_binary_stream(u'stdout')
81 )
82 @click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS))
83 @click.option(u'--offset', type=click.IntRange(0, None), default=0)
84 @click.option(u'--limit', type=click.IntRange(0))
85 @click.option(u'--bom', is_flag=True) # FIXME: options based on format
86 @click.pass_context
87 def dump(ctx, resource_id, output_file, format, offset, limit, bom):
88 u'''Dump a datastore resource.
89 '''
90 flask_app = ctx.meta['flask_app']
91 with flask_app.test_request_context():
92 dump_to(
93 resource_id,
94 output_file,
95 fmt=format,
96 offset=offset,
97 limit=limit,
98 options={u'bom': bom},
99 sort=u'_id',
100 search_params={}
101 )
102
103
104 def _parse_db_config(config_key=u'sqlalchemy.url'):
105 db_config = parse_db_config(config_key)
106 if not db_config:
107 click.secho(
108 u'Could not extract db details from url: %r' % config[config_key],
109 fg=u'red',
110 bold=True
111 )
112 raise click.Abort()
113 return db_config
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext/datastore/cli.py b/ckanext/datastore/cli.py
--- a/ckanext/datastore/cli.py
+++ b/ckanext/datastore/cli.py
@@ -7,6 +7,7 @@
from ckan.model import parse_db_config
from ckan.common import config
+import ckan.logic as logic
import ckanext.datastore as datastore_module
from ckanext.datastore.backend.postgres import identifier
@@ -111,3 +112,69 @@
)
raise click.Abort()
return db_config
+
+
[email protected](
+ u'purge',
+ short_help=u'purge orphaned resources from the datastore.'
+)
+def purge():
+ u'''Purge orphaned resources from the datastore using the datastore_delete
+ action, which drops tables when called without filters.'''
+
+ site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})
+ context = {u'user': site_user[u'name']}
+
+ result = logic.get_action(u'datastore_search')(
+ context,
+ {u'resource_id': u'_table_metadata'}
+ )
+
+ resource_id_list = []
+ for record in result[u'records']:
+ try:
+ # ignore 'alias' records (views) as they are automatically
+ # deleted when the parent resource table is dropped
+ if record[u'alias_of']:
+ continue
+
+ # we need to do this to trigger resource_show auth function
+ site_user = logic.get_action(u'get_site_user')(
+ {u'ignore_auth': True}, {})
+ context = {u'user': site_user[u'name']}
+
+ logic.get_action(u'resource_show')(
+ context,
+ {u'id': record[u'name']}
+ )
+ except logic.NotFound:
+ resource_id_list.append(record[u'name'])
+ click.echo(u"Resource '%s' orphaned - queued for drop" %
+ record[u'name'])
+ except KeyError:
+ continue
+
+ orphaned_table_count = len(resource_id_list)
+ click.echo(u'%d orphaned tables found.' % orphaned_table_count)
+
+ if not orphaned_table_count:
+ return
+
+ click.confirm(u'Proceed with purge?', abort=True)
+
+ # Drop the orphaned datastore tables. When datastore_delete is called
+ # without filters, it does a drop table cascade
+ drop_count = 0
+ for resource_id in resource_id_list:
+ logic.get_action(u'datastore_delete')(
+ context,
+ {u'resource_id': resource_id, u'force': True}
+ )
+ click.echo(u"Table '%s' dropped)" % resource_id)
+ drop_count += 1
+
+ click.echo(u'Dropped %s tables' % drop_count)
+
+
+def get_commands():
+ return (set_permissions, dump, purge)
|
{"golden_diff": "diff --git a/ckanext/datastore/cli.py b/ckanext/datastore/cli.py\n--- a/ckanext/datastore/cli.py\n+++ b/ckanext/datastore/cli.py\n@@ -7,6 +7,7 @@\n \n from ckan.model import parse_db_config\n from ckan.common import config\n+import ckan.logic as logic\n \n import ckanext.datastore as datastore_module\n from ckanext.datastore.backend.postgres import identifier\n@@ -111,3 +112,69 @@\n )\n raise click.Abort()\n return db_config\n+\n+\[email protected](\n+ u'purge',\n+ short_help=u'purge orphaned resources from the datastore.'\n+)\n+def purge():\n+ u'''Purge orphaned resources from the datastore using the datastore_delete\n+ action, which drops tables when called without filters.'''\n+\n+ site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})\n+ context = {u'user': site_user[u'name']}\n+\n+ result = logic.get_action(u'datastore_search')(\n+ context,\n+ {u'resource_id': u'_table_metadata'}\n+ )\n+\n+ resource_id_list = []\n+ for record in result[u'records']:\n+ try:\n+ # ignore 'alias' records (views) as they are automatically\n+ # deleted when the parent resource table is dropped\n+ if record[u'alias_of']:\n+ continue\n+\n+ # we need to do this to trigger resource_show auth function\n+ site_user = logic.get_action(u'get_site_user')(\n+ {u'ignore_auth': True}, {})\n+ context = {u'user': site_user[u'name']}\n+\n+ logic.get_action(u'resource_show')(\n+ context,\n+ {u'id': record[u'name']}\n+ )\n+ except logic.NotFound:\n+ resource_id_list.append(record[u'name'])\n+ click.echo(u\"Resource '%s' orphaned - queued for drop\" %\n+ record[u'name'])\n+ except KeyError:\n+ continue\n+\n+ orphaned_table_count = len(resource_id_list)\n+ click.echo(u'%d orphaned tables found.' % orphaned_table_count)\n+\n+ if not orphaned_table_count:\n+ return\n+\n+ click.confirm(u'Proceed with purge?', abort=True)\n+\n+ # Drop the orphaned datastore tables. When datastore_delete is called\n+ # without filters, it does a drop table cascade\n+ drop_count = 0\n+ for resource_id in resource_id_list:\n+ logic.get_action(u'datastore_delete')(\n+ context,\n+ {u'resource_id': resource_id, u'force': True}\n+ )\n+ click.echo(u\"Table '%s' dropped)\" % resource_id)\n+ drop_count += 1\n+\n+ click.echo(u'Dropped %s tables' % drop_count)\n+\n+\n+def get_commands():\n+ return (set_permissions, dump, purge)\n", "issue": "Purging deleted packages does not delete associated filestore/datastore resources\n**CKAN version**\r\n2.9\r\n\r\n**Describe the bug**\r\nAfter deleting and purging dataset packages, the associated datastore/filestore resources are orphaned and not deleted.\r\n\r\n**Steps to reproduce**\r\n\r\n1. Delete datasets\r\n2. Go to sysadmin/trash\r\n3. Select \"Purge All\"\r\n4. Check datastore and filestore. 
Associated resources are still there.\r\n\r\n**Expected behavior**\r\nDataset resources in the filestore and the datastore are deleted when the associated package resources are purged.\r\n\r\n**Additional details**\r\nThis is related to #4705 and a lot of work has been done on PRs #4867 and #4905, but neither have been merged.\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nimport os\n\nimport click\n\nfrom ckan.model import parse_db_config\nfrom ckan.common import config\n\nimport ckanext.datastore as datastore_module\nfrom ckanext.datastore.backend.postgres import identifier\nfrom ckanext.datastore.blueprint import DUMP_FORMATS, dump_to\n\nlog = logging.getLogger(__name__)\n\n\[email protected]()\ndef datastore():\n u'''Perform commands to set up the datastore.\n '''\n\n\[email protected](\n u'set-permissions',\n short_help=u'Generate SQL for permission configuration.'\n)\ndef set_permissions():\n u'''Emit an SQL script that will set the permissions for the datastore\n users as configured in your configuration file.'''\n\n write_url = _parse_db_config(u'ckan.datastore.write_url')\n read_url = _parse_db_config(u'ckan.datastore.read_url')\n db_url = _parse_db_config(u'sqlalchemy.url')\n\n # Basic validation that read and write URLs reference the same database.\n # This obviously doesn't check they're the same database (the hosts/ports\n # could be different), but it's better than nothing, I guess.\n\n if write_url[u'db_name'] != read_url[u'db_name']:\n click.secho(\n u'The datastore write_url and read_url must refer to the same '\n u'database!',\n fg=u'red',\n bold=True\n )\n raise click.Abort()\n\n sql = permissions_sql(\n maindb=db_url[u'db_name'],\n datastoredb=write_url[u'db_name'],\n mainuser=db_url[u'db_user'],\n writeuser=write_url[u'db_user'],\n readuser=read_url[u'db_user']\n )\n\n click.echo(sql)\n\n\ndef permissions_sql(maindb, datastoredb, mainuser, writeuser, readuser):\n template_filename = os.path.join(\n os.path.dirname(datastore_module.__file__), u'set_permissions.sql'\n )\n with open(template_filename) as fp:\n template = fp.read()\n return template.format(\n maindb=identifier(maindb),\n datastoredb=identifier(datastoredb),\n mainuser=identifier(mainuser),\n writeuser=identifier(writeuser),\n readuser=identifier(readuser)\n )\n\n\[email protected]()\[email protected](u'resource-id', nargs=1)\[email protected](\n u'output-file',\n type=click.File(u'wb'),\n default=click.get_binary_stream(u'stdout')\n)\[email protected](u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS))\[email protected](u'--offset', type=click.IntRange(0, None), default=0)\[email protected](u'--limit', type=click.IntRange(0))\[email protected](u'--bom', is_flag=True) # FIXME: options based on format\[email protected]_context\ndef dump(ctx, resource_id, output_file, format, offset, limit, bom):\n u'''Dump a datastore resource.\n '''\n flask_app = ctx.meta['flask_app']\n with flask_app.test_request_context():\n dump_to(\n resource_id,\n output_file,\n fmt=format,\n offset=offset,\n limit=limit,\n options={u'bom': bom},\n sort=u'_id',\n search_params={}\n )\n\n\ndef _parse_db_config(config_key=u'sqlalchemy.url'):\n db_config = parse_db_config(config_key)\n if not db_config:\n click.secho(\n u'Could not extract db details from url: %r' % config[config_key],\n fg=u'red',\n bold=True\n )\n raise click.Abort()\n return db_config\n", "path": "ckanext/datastore/cli.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport logging\nimport os\n\nimport click\n\nfrom 
ckan.model import parse_db_config\nfrom ckan.common import config\nimport ckan.logic as logic\n\nimport ckanext.datastore as datastore_module\nfrom ckanext.datastore.backend.postgres import identifier\nfrom ckanext.datastore.blueprint import DUMP_FORMATS, dump_to\n\nlog = logging.getLogger(__name__)\n\n\[email protected]()\ndef datastore():\n u'''Perform commands to set up the datastore.\n '''\n\n\[email protected](\n u'set-permissions',\n short_help=u'Generate SQL for permission configuration.'\n)\ndef set_permissions():\n u'''Emit an SQL script that will set the permissions for the datastore\n users as configured in your configuration file.'''\n\n write_url = _parse_db_config(u'ckan.datastore.write_url')\n read_url = _parse_db_config(u'ckan.datastore.read_url')\n db_url = _parse_db_config(u'sqlalchemy.url')\n\n # Basic validation that read and write URLs reference the same database.\n # This obviously doesn't check they're the same database (the hosts/ports\n # could be different), but it's better than nothing, I guess.\n\n if write_url[u'db_name'] != read_url[u'db_name']:\n click.secho(\n u'The datastore write_url and read_url must refer to the same '\n u'database!',\n fg=u'red',\n bold=True\n )\n raise click.Abort()\n\n sql = permissions_sql(\n maindb=db_url[u'db_name'],\n datastoredb=write_url[u'db_name'],\n mainuser=db_url[u'db_user'],\n writeuser=write_url[u'db_user'],\n readuser=read_url[u'db_user']\n )\n\n click.echo(sql)\n\n\ndef permissions_sql(maindb, datastoredb, mainuser, writeuser, readuser):\n template_filename = os.path.join(\n os.path.dirname(datastore_module.__file__), u'set_permissions.sql'\n )\n with open(template_filename) as fp:\n template = fp.read()\n return template.format(\n maindb=identifier(maindb),\n datastoredb=identifier(datastoredb),\n mainuser=identifier(mainuser),\n writeuser=identifier(writeuser),\n readuser=identifier(readuser)\n )\n\n\[email protected]()\[email protected](u'resource-id', nargs=1)\[email protected](\n u'output-file',\n type=click.File(u'wb'),\n default=click.get_binary_stream(u'stdout')\n)\[email protected](u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS))\[email protected](u'--offset', type=click.IntRange(0, None), default=0)\[email protected](u'--limit', type=click.IntRange(0))\[email protected](u'--bom', is_flag=True) # FIXME: options based on format\[email protected]_context\ndef dump(ctx, resource_id, output_file, format, offset, limit, bom):\n u'''Dump a datastore resource.\n '''\n flask_app = ctx.meta['flask_app']\n with flask_app.test_request_context():\n dump_to(\n resource_id,\n output_file,\n fmt=format,\n offset=offset,\n limit=limit,\n options={u'bom': bom},\n sort=u'_id',\n search_params={}\n )\n\n\ndef _parse_db_config(config_key=u'sqlalchemy.url'):\n db_config = parse_db_config(config_key)\n if not db_config:\n click.secho(\n u'Could not extract db details from url: %r' % config[config_key],\n fg=u'red',\n bold=True\n )\n raise click.Abort()\n return db_config\n\n\[email protected](\n u'purge',\n short_help=u'purge orphaned resources from the datastore.'\n)\ndef purge():\n u'''Purge orphaned resources from the datastore using the datastore_delete\n action, which drops tables when called without filters.'''\n\n site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})\n context = {u'user': site_user[u'name']}\n\n result = logic.get_action(u'datastore_search')(\n context,\n {u'resource_id': u'_table_metadata'}\n )\n\n resource_id_list = []\n for record in result[u'records']:\n try:\n # ignore 
'alias' records (views) as they are automatically\n # deleted when the parent resource table is dropped\n if record[u'alias_of']:\n continue\n\n # we need to do this to trigger resource_show auth function\n site_user = logic.get_action(u'get_site_user')(\n {u'ignore_auth': True}, {})\n context = {u'user': site_user[u'name']}\n\n logic.get_action(u'resource_show')(\n context,\n {u'id': record[u'name']}\n )\n except logic.NotFound:\n resource_id_list.append(record[u'name'])\n click.echo(u\"Resource '%s' orphaned - queued for drop\" %\n record[u'name'])\n except KeyError:\n continue\n\n orphaned_table_count = len(resource_id_list)\n click.echo(u'%d orphaned tables found.' % orphaned_table_count)\n\n if not orphaned_table_count:\n return\n\n click.confirm(u'Proceed with purge?', abort=True)\n\n # Drop the orphaned datastore tables. When datastore_delete is called\n # without filters, it does a drop table cascade\n drop_count = 0\n for resource_id in resource_id_list:\n logic.get_action(u'datastore_delete')(\n context,\n {u'resource_id': resource_id, u'force': True}\n )\n click.echo(u\"Table '%s' dropped)\" % resource_id)\n drop_count += 1\n\n click.echo(u'Dropped %s tables' % drop_count)\n\n\ndef get_commands():\n return (set_permissions, dump, purge)\n", "path": "ckanext/datastore/cli.py"}]}
| 1,465 | 664 |
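The `purge` command added in the record above finds datastore tables whose backing CKAN resource no longer exists and drops them with `datastore_delete` called without filters. A condensed sketch of that orphan-detection loop is shown below; it assumes it runs inside an already configured CKAN application context and uses only the actions that appear in the patch.

```python
# Condensed sketch of the orphan-detection logic behind "datastore purge":
# every datastore table whose backing CKAN resource is gone gets dropped.
# Assumes a configured CKAN application context.
import ckan.logic as logic

site_user = logic.get_action("get_site_user")({"ignore_auth": True}, {})
context = {"user": site_user["name"]}

meta = logic.get_action("datastore_search")(context, {"resource_id": "_table_metadata"})

orphans = []
for record in meta["records"]:
    if record.get("alias_of"):  # views are dropped with their parent table
        continue
    try:
        logic.get_action("resource_show")(context, {"id": record["name"]})
    except logic.NotFound:      # no resource left -> table is orphaned
        orphans.append(record["name"])

for resource_id in orphans:
    # datastore_delete without filters performs a drop table cascade
    logic.get_action("datastore_delete")(context, {"resource_id": resource_id, "force": True})
```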
gh_patches_debug_19386
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1747
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ebola Page: make millions and % smaller
About half the current height looks right.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py`
Content:
```
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import logging
8 import datetime as dt
9 import decimal
10
11 import pylons.config as config
12
13 import ckan.lib.base as base
14 import ckan.logic as logic
15 import ckan.model as model
16 import ckan.common as common
17 import ckan.lib.helpers as h
18
19 render = base.render
20 get_action = logic.get_action
21 c = common.c
22 request = common.request
23 _ = common._
24
25 Decimal = decimal.Decimal
26
27 log = logging.getLogger(__name__)
28
29
30 class CrisisController(base.BaseController):
31
32 def show(self):
33
34 context = {'model': model, 'session': model.Session,
35 'user': c.user or c.author, 'for_view': True,
36 'auth_user_obj': c.userobj}
37
38 datastore_resource_id = self._get_datastore_resource_id(
39 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
40 if datastore_resource_id:
41 c.top_line_items = self._get_top_line_items(
42 context, datastore_resource_id)
43
44 limit = 25
45 c.q = u'ebola'
46
47 page = int(request.params.get('page', 1))
48 data_dict = {'sort': u'metadata_modified desc',
49 'fq': '+dataset_type:dataset',
50 'rows': limit,
51 'q': c.q,
52 'start': (page - 1) * limit
53 }
54 query = get_action("package_search")(context, data_dict)
55
56 def pager_url(q=None, page=None):
57 return h.url_for('show_crisis', page=page)
58
59 c.page = h.Page(
60 collection=query['results'],
61 page=page,
62 url=pager_url,
63 item_count=query['count'],
64 items_per_page=limit
65 )
66 c.items = query['results']
67 c.item_count = query['count']
68
69 c.other_links = {}
70 c.other_links['show_more'] = h.url_for(
71 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
72 'ext_indicator': '0'})
73
74 return render('crisis/crisis.html')
75
76 def _get_decimal_value(self, value):
77 decimal_value = Decimal(str(value)).quantize(
78 Decimal('.1'), rounding=decimal.ROUND_HALF_UP)
79 return decimal_value
80
81 def _format_results(self, result):
82 for r in result['records']:
83 d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
84 r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')
85
86 modified_value = r[u'value']
87 if r[u'units'] == 'ratio':
88 modified_value *= 100.0
89 elif r[u'units'] == 'million':
90 modified_value /= 1000000.0
91
92 int_value = int(modified_value)
93 if int_value == modified_value:
94 r[u'formatted_value'] = '{:,}'.format(int_value)
95 else:
96 if r[u'units'] == 'ratio':
97 r[u'formatted_value'] = '{:,.1f}%'.format(
98 self._get_decimal_value(modified_value))
99 elif r[u'units'] == 'million':
100 r[u'formatted_value'] = '{:,.1f} '.format(
101 self._get_decimal_value(modified_value))
102 r[u'formatted_value'] += ' ' + _('million')
103
104 def _get_top_line_items(self, context, datastore_resource_id):
105 modified_context = dict(context)
106 modified_context['ignore_auth'] = True
107 result = get_action('datastore_search')(
108 modified_context, {'resource_id': datastore_resource_id})
109 if 'records' in result:
110 self._format_results(result)
111 return result['records']
112 return []
113
114 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
115 try:
116 modified_context = dict(context)
117 modified_context['ignore_auth'] = True
118 dataset = get_action('package_show')(
119 modified_context, {'id': dataset_id})
120
121 if 'resources' in dataset:
122 for r in dataset['resources']:
123 if 'datastore_active' in r and r['datastore_active'] \
124 and r['name'] == resource_name:
125 return r['id']
126 return None
127 except:
128 log.warning('No dataset with id ' + dataset_id)
129 return None
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -94,12 +94,12 @@
r[u'formatted_value'] = '{:,}'.format(int_value)
else:
if r[u'units'] == 'ratio':
- r[u'formatted_value'] = '{:,.1f}%'.format(
+ r[u'formatted_value'] = '{:,.1f}'.format(
self._get_decimal_value(modified_value))
elif r[u'units'] == 'million':
- r[u'formatted_value'] = '{:,.1f} '.format(
+ r[u'formatted_value'] = '{:,.1f}'.format(
self._get_decimal_value(modified_value))
- r[u'formatted_value'] += ' ' + _('million')
+ #r[u'formatted_value'] += ' ' + _('million')
def _get_top_line_items(self, context, datastore_resource_id):
modified_context = dict(context)
|
{"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -94,12 +94,12 @@\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n- r[u'formatted_value'] = '{:,.1f}%'.format(\n+ r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n- r[u'formatted_value'] = '{:,.1f} '.format(\n+ r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n- r[u'formatted_value'] += ' ' + _('million')\n+ #r[u'formatted_value'] += ' ' + _('million')\n \n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n", "issue": "Ebola Page: make millions and % smaller\nAbout half the current height looks right.\n\n\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}%'.format(\n self._get_decimal_value(modified_value))\n 
elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f} '.format(\n self._get_decimal_value(modified_value))\n r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}], "after_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = 
'{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]}
| 1,634 | 306 |
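The formatting change in the record above strips the `%` and `million` suffixes from the large top-line figures so the template can render the unit separately (and smaller, as the issue asks). A simplified sketch of the underlying value formatting (unit scaling plus half-up rounding to one decimal) follows; it collapses the controller's integer/decimal branches into a single helper purely for illustration.

```python
# Simplified illustration of the top-line value formatting: scale per unit,
# round half-up to one decimal, and leave the unit suffix out of the string.
import decimal


def format_value(value, units):
    if units == "ratio":
        value *= 100.0            # e.g. 0.123 -> 12.3 (percent)
    elif units == "million":
        value /= 1_000_000.0      # e.g. 3_400_000 -> 3.4 (millions)
    quantized = decimal.Decimal(str(value)).quantize(
        decimal.Decimal(".1"), rounding=decimal.ROUND_HALF_UP
    )
    return "{:,.1f}".format(quantized)


print(format_value(0.123, "ratio"))        # 12.3
print(format_value(3_400_000, "million"))  # 3.4
```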
gh_patches_debug_26743
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-533
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] plot_series inconsistent date/time index
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
**To Reproduce**
```python
y = load_airline()
y = y[:48]
fh=np.arange(1, 13)
y_train, y_test = temporal_train_test_split(y, test_size=len(fh))
plot_series(y_train, y_test, labels=["y_train", "y_test"]);
print(y.shape, y_train.shape[0], y_test.shape[0])
print(y.index)
```
> (48,) 36 12
> PeriodIndex(['1949-01', '1949-02', '1949-03', '1949-04', '1949-05', '1949-06',
> '1949-07', '1949-08', '1949-09', '1949-10', '1949-11', '1949-12',
> '1950-01', '1950-02', '1950-03', '1950-04', '1950-05', '1950-06',
> '1950-07', '1950-08', '1950-09', '1950-10', '1950-11', '1950-12',
> '1951-01', '1951-02', '1951-03', '1951-04', '1951-05', '1951-06',
> '1951-07', '1951-08', '1951-09', '1951-10', '1951-11', '1951-12',
> '1952-01', '1952-02', '1952-03', '1952-04', '1952-05', '1952-06',
> '1952-07', '1952-08', '1952-09', '1952-10', '1952-11', '1952-12'],
> dtype='period[M]', name='Period', freq='M')
>
**Expected behavior**
The X-axis should go till end of 1952. The X-axis on the plot does not reflect the indices correctly. It should go till 1952, but shows values only till 1949

**Versions**
System:
python: 3.6.12 |Anaconda, Inc.| (default, Sep 9 2020, 00:29:25) [MSC v.1916 64 bit (AMD64)]
executable: C:\Users\xxxx\AppData\Local\Continuum\anaconda3\envs\sktime\python.exe
machine: Windows-10-10.0.18362-SP0
Python dependencies:
pip: 20.3
setuptools: 49.6.0
sklearn: 0.23.2
numpy: 1.19.2
scipy: 1.5.2
Cython: 0.29.17
pandas: 1.1.3
matplotlib: 3.3.2
joblib: 0.17.0
numba: None
pmdarima: 1.7.1
tsfresh: None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/utils/plotting/__init__.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4
5 __all__ = ["plot_series"]
6 __author__ = ["Markus Löning"]
7
8 import warnings
9
10 import numpy as np
11
12 from sktime.utils.check_imports import _check_soft_dependencies
13 from sktime.utils.validation.forecasting import check_y
14
15
16 def plot_series(*series, labels=None):
17 """Plot one or more time series
18
19 Parameters
20 ----------
21 series : pd.Series
22 One or more time series
23 labels : list, optional (default=None)
24 Names of series, will be displayed in figure legend
25
26 Returns
27 -------
28 fig : plt.Figure
29 ax : plt.Axis
30 """
31 _check_soft_dependencies("matplotlib", "seaborn")
32 import matplotlib.pyplot as plt
33 import seaborn as sns
34
35 n_series = len(series)
36 if labels is not None:
37 if n_series != len(labels):
38 raise ValueError(
39 "There must be one label for each time series, "
40 "but found inconsistent numbers of series and "
41 "labels."
42 )
43 legend = True
44 else:
45 labels = ["" for _ in range(n_series)]
46 legend = False
47
48 for y in series:
49 check_y(y)
50
51 # create combined index
52 index = series[0].index
53 for y in series[1:]:
54 # check types, note that isinstance() does not work here because index
55 # types inherit from each other, hence we check for type equality
56 if not type(index) is type(y.index): # noqa
57 raise TypeError("Found series with different index types.")
58 index = index.union(y.index)
59
60 # generate integer x-values
61 xs = [np.argwhere(index.isin(y.index)).ravel() for y in series]
62
63 # create figure
64 fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))
65 colors = sns.color_palette("colorblind", n_colors=n_series)
66
67 # plot series
68 for x, y, color, label in zip(xs, series, colors, labels):
69
70 # scatter if little data is available or index is not complete
71 if len(x) <= 3 or not np.array_equal(np.arange(x[0], x[-1] + 1), x):
72 plot_func = sns.scatterplot
73 else:
74 plot_func = sns.lineplot
75
76 plot_func(x=x, y=y, ax=ax, marker="o", label=label, color=color)
77
78 # set combined index as xticklabels, suppress matplotlib warning
79 with warnings.catch_warnings():
80 warnings.filterwarnings("ignore")
81 ax.set(xticklabels=index)
82
83 if legend:
84 ax.legend()
85
86 return fig, ax
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/utils/plotting/__init__.py b/sktime/utils/plotting/__init__.py
--- a/sktime/utils/plotting/__init__.py
+++ b/sktime/utils/plotting/__init__.py
@@ -5,8 +5,6 @@
__all__ = ["plot_series"]
__author__ = ["Markus Löning"]
-import warnings
-
import numpy as np
from sktime.utils.check_imports import _check_soft_dependencies
@@ -30,6 +28,8 @@
"""
_check_soft_dependencies("matplotlib", "seaborn")
import matplotlib.pyplot as plt
+ from matplotlib.ticker import FuncFormatter, MaxNLocator
+ from matplotlib.cbook import flatten
import seaborn as sns
n_series = len(series)
@@ -75,10 +75,19 @@
plot_func(x=x, y=y, ax=ax, marker="o", label=label, color=color)
- # set combined index as xticklabels, suppress matplotlib warning
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore")
- ax.set(xticklabels=index)
+ # combine data points for all series
+ xs_flat = list(flatten(xs))
+
+ # set x label of data point to the matching index
+ def format_fn(tick_val, tick_pos):
+ if int(tick_val) in xs_flat:
+ return index[int(tick_val)]
+ else:
+ return ""
+
+ # dynamically set x label ticks and spacing from index labels
+ ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
+ ax.xaxis.set_major_locator(MaxNLocator(integer=True))
if legend:
ax.legend()
|
{"golden_diff": "diff --git a/sktime/utils/plotting/__init__.py b/sktime/utils/plotting/__init__.py\n--- a/sktime/utils/plotting/__init__.py\n+++ b/sktime/utils/plotting/__init__.py\n@@ -5,8 +5,6 @@\n __all__ = [\"plot_series\"]\n __author__ = [\"Markus L\u00f6ning\"]\n \n-import warnings\n-\n import numpy as np\n \n from sktime.utils.check_imports import _check_soft_dependencies\n@@ -30,6 +28,8 @@\n \"\"\"\n _check_soft_dependencies(\"matplotlib\", \"seaborn\")\n import matplotlib.pyplot as plt\n+ from matplotlib.ticker import FuncFormatter, MaxNLocator\n+ from matplotlib.cbook import flatten\n import seaborn as sns\n \n n_series = len(series)\n@@ -75,10 +75,19 @@\n \n plot_func(x=x, y=y, ax=ax, marker=\"o\", label=label, color=color)\n \n- # set combined index as xticklabels, suppress matplotlib warning\n- with warnings.catch_warnings():\n- warnings.filterwarnings(\"ignore\")\n- ax.set(xticklabels=index)\n+ # combine data points for all series\n+ xs_flat = list(flatten(xs))\n+\n+ # set x label of data point to the matching index\n+ def format_fn(tick_val, tick_pos):\n+ if int(tick_val) in xs_flat:\n+ return index[int(tick_val)]\n+ else:\n+ return \"\"\n+\n+ # dynamically set x label ticks and spacing from index labels\n+ ax.xaxis.set_major_formatter(FuncFormatter(format_fn))\n+ ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n \n if legend:\n ax.legend()\n", "issue": "[BUG] plot_series inconsistent date/time index \n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\n\r\n**To Reproduce**\r\n\r\n```python\r\ny = load_airline()\r\ny = y[:48]\r\nfh=np.arange(1, 13)\r\n\r\ny_train, y_test = temporal_train_test_split(y, test_size=len(fh))\r\nplot_series(y_train, y_test, labels=[\"y_train\", \"y_test\"]);\r\nprint(y.shape, y_train.shape[0], y_test.shape[0])\r\nprint(y.index)\r\n```\r\n\r\n> (48,) 36 12\r\n> PeriodIndex(['1949-01', '1949-02', '1949-03', '1949-04', '1949-05', '1949-06',\r\n> '1949-07', '1949-08', '1949-09', '1949-10', '1949-11', '1949-12',\r\n> '1950-01', '1950-02', '1950-03', '1950-04', '1950-05', '1950-06',\r\n> '1950-07', '1950-08', '1950-09', '1950-10', '1950-11', '1950-12',\r\n> '1951-01', '1951-02', '1951-03', '1951-04', '1951-05', '1951-06',\r\n> '1951-07', '1951-08', '1951-09', '1951-10', '1951-11', '1951-12',\r\n> '1952-01', '1952-02', '1952-03', '1952-04', '1952-05', '1952-06',\r\n> '1952-07', '1952-08', '1952-09', '1952-10', '1952-11', '1952-12'],\r\n> dtype='period[M]', name='Period', freq='M')\r\n> \r\n\r\n**Expected behavior**\r\nThe X-axis should go till end of 1952. The X-axis on the plot does not reflect the indices correctly. 
It should go till 1952, but shows values only till 1949\r\n\r\n\r\n\r\n**Versions**\r\nSystem:\r\n python: 3.6.12 |Anaconda, Inc.| (default, Sep 9 2020, 00:29:25) [MSC v.1916 64 bit (AMD64)]\r\nexecutable: C:\\Users\\xxxx\\AppData\\Local\\Continuum\\anaconda3\\envs\\sktime\\python.exe\r\n machine: Windows-10-10.0.18362-SP0\r\n\r\nPython dependencies:\r\n pip: 20.3\r\n setuptools: 49.6.0\r\n sklearn: 0.23.2\r\n numpy: 1.19.2\r\n scipy: 1.5.2\r\n Cython: 0.29.17\r\n pandas: 1.1.3\r\n matplotlib: 3.3.2\r\n joblib: 0.17.0\r\n numba: None\r\n pmdarima: 1.7.1\r\n tsfresh: None\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__all__ = [\"plot_series\"]\n__author__ = [\"Markus L\u00f6ning\"]\n\nimport warnings\n\nimport numpy as np\n\nfrom sktime.utils.check_imports import _check_soft_dependencies\nfrom sktime.utils.validation.forecasting import check_y\n\n\ndef plot_series(*series, labels=None):\n \"\"\"Plot one or more time series\n\n Parameters\n ----------\n series : pd.Series\n One or more time series\n labels : list, optional (default=None)\n Names of series, will be displayed in figure legend\n\n Returns\n -------\n fig : plt.Figure\n ax : plt.Axis\n \"\"\"\n _check_soft_dependencies(\"matplotlib\", \"seaborn\")\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n n_series = len(series)\n if labels is not None:\n if n_series != len(labels):\n raise ValueError(\n \"There must be one label for each time series, \"\n \"but found inconsistent numbers of series and \"\n \"labels.\"\n )\n legend = True\n else:\n labels = [\"\" for _ in range(n_series)]\n legend = False\n\n for y in series:\n check_y(y)\n\n # create combined index\n index = series[0].index\n for y in series[1:]:\n # check types, note that isinstance() does not work here because index\n # types inherit from each other, hence we check for type equality\n if not type(index) is type(y.index): # noqa\n raise TypeError(\"Found series with different index types.\")\n index = index.union(y.index)\n\n # generate integer x-values\n xs = [np.argwhere(index.isin(y.index)).ravel() for y in series]\n\n # create figure\n fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))\n colors = sns.color_palette(\"colorblind\", n_colors=n_series)\n\n # plot series\n for x, y, color, label in zip(xs, series, colors, labels):\n\n # scatter if little data is available or index is not complete\n if len(x) <= 3 or not np.array_equal(np.arange(x[0], x[-1] + 1), x):\n plot_func = sns.scatterplot\n else:\n plot_func = sns.lineplot\n\n plot_func(x=x, y=y, ax=ax, marker=\"o\", label=label, color=color)\n\n # set combined index as xticklabels, suppress matplotlib warning\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n ax.set(xticklabels=index)\n\n if legend:\n ax.legend()\n\n return fig, ax\n", "path": "sktime/utils/plotting/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\n__all__ = [\"plot_series\"]\n__author__ = [\"Markus L\u00f6ning\"]\n\nimport numpy as np\n\nfrom sktime.utils.check_imports import _check_soft_dependencies\nfrom sktime.utils.validation.forecasting import check_y\n\n\ndef plot_series(*series, labels=None):\n \"\"\"Plot one or more time series\n\n Parameters\n ----------\n series : pd.Series\n One or more time series\n labels : list, optional (default=None)\n Names of 
series, will be displayed in figure legend\n\n Returns\n -------\n fig : plt.Figure\n ax : plt.Axis\n \"\"\"\n _check_soft_dependencies(\"matplotlib\", \"seaborn\")\n import matplotlib.pyplot as plt\n from matplotlib.ticker import FuncFormatter, MaxNLocator\n from matplotlib.cbook import flatten\n import seaborn as sns\n\n n_series = len(series)\n if labels is not None:\n if n_series != len(labels):\n raise ValueError(\n \"There must be one label for each time series, \"\n \"but found inconsistent numbers of series and \"\n \"labels.\"\n )\n legend = True\n else:\n labels = [\"\" for _ in range(n_series)]\n legend = False\n\n for y in series:\n check_y(y)\n\n # create combined index\n index = series[0].index\n for y in series[1:]:\n # check types, note that isinstance() does not work here because index\n # types inherit from each other, hence we check for type equality\n if not type(index) is type(y.index): # noqa\n raise TypeError(\"Found series with different index types.\")\n index = index.union(y.index)\n\n # generate integer x-values\n xs = [np.argwhere(index.isin(y.index)).ravel() for y in series]\n\n # create figure\n fig, ax = plt.subplots(1, figsize=plt.figaspect(0.25))\n colors = sns.color_palette(\"colorblind\", n_colors=n_series)\n\n # plot series\n for x, y, color, label in zip(xs, series, colors, labels):\n\n # scatter if little data is available or index is not complete\n if len(x) <= 3 or not np.array_equal(np.arange(x[0], x[-1] + 1), x):\n plot_func = sns.scatterplot\n else:\n plot_func = sns.lineplot\n\n plot_func(x=x, y=y, ax=ax, marker=\"o\", label=label, color=color)\n\n # combine data points for all series\n xs_flat = list(flatten(xs))\n\n # set x label of data point to the matching index\n def format_fn(tick_val, tick_pos):\n if int(tick_val) in xs_flat:\n return index[int(tick_val)]\n else:\n return \"\"\n\n # dynamically set x label ticks and spacing from index labels\n ax.xaxis.set_major_formatter(FuncFormatter(format_fn))\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n if legend:\n ax.legend()\n\n return fig, ax\n", "path": "sktime/utils/plotting/__init__.py"}]}
| 1,990 | 379 |
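The fix in the sktime record above maps integer tick positions back to index labels with `FuncFormatter` and `MaxNLocator`, so every labelled tick corresponds to a real position in the combined index instead of whatever default ticks matplotlib chose. A minimal standalone sketch of that technique follows; the sample index and values are illustrative and not taken from sktime.

```python
# Plot against integer positions, then translate each integer tick back into
# the matching index label; out-of-range ticks get an empty label.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator

index = [2019, 2020, 2021, 2022]   # stands in for the combined series index
values = [1.0, 1.5, 1.2, 1.8]
xs = range(len(index))

fig, ax = plt.subplots()
ax.plot(xs, values, marker="o")

def format_fn(tick_val, tick_pos):
    i = int(tick_val)
    return str(index[i]) if 0 <= i < len(index) else ""

ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
```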
gh_patches_debug_18214
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-1954
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
command to check lock file
<!-- Checked checkbox should look like this: [x] -->
- [X] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
## Issue
Would it be possible to add a `poetry check` command (oh, I see you've used that already, maybe the command could be extended or have a `poetry lock --check`?) that will return zero if the current poetry.lock is both correct w.r.t. the pyproject.toml (i.e., packages match) *and* matches the output of `poetry lock`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/console/commands/lock.py`
Content:
```
1 from cleo.helpers import option
2
3 from .installer_command import InstallerCommand
4
5
6 class LockCommand(InstallerCommand):
7
8 name = "lock"
9 description = "Locks the project dependencies."
10
11 options = [
12 option(
13 "no-update", None, "Do not update locked versions, only refresh lock file."
14 ),
15 ]
16
17 help = """
18 The <info>lock</info> command reads the <comment>pyproject.toml</> file from the
19 current directory, processes it, and locks the dependencies in the <comment>poetry.lock</>
20 file.
21
22 <info>poetry lock</info>
23 """
24
25 loggers = ["poetry.repositories.pypi_repository"]
26
27 def handle(self) -> int:
28 self._installer.use_executor(
29 self.poetry.config.get("experimental.new-installer", False)
30 )
31
32 self._installer.lock(update=not self.option("no-update"))
33
34 return self._installer.run()
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/console/commands/lock.py b/poetry/console/commands/lock.py
--- a/poetry/console/commands/lock.py
+++ b/poetry/console/commands/lock.py
@@ -12,6 +12,12 @@
option(
"no-update", None, "Do not update locked versions, only refresh lock file."
),
+ option(
+ "check",
+ None,
+ "Check that the <comment>poetry.lock</> file corresponds to the current version "
+ "of <comment>pyproject.toml</>.",
+ ),
]
help = """
@@ -29,6 +35,13 @@
self.poetry.config.get("experimental.new-installer", False)
)
+ if self.option("check"):
+ return (
+ 0
+ if self.poetry.locker.is_locked() and self.poetry.locker.is_fresh()
+ else 1
+ )
+
self._installer.lock(update=not self.option("no-update"))
return self._installer.run()
|
{"golden_diff": "diff --git a/poetry/console/commands/lock.py b/poetry/console/commands/lock.py\n--- a/poetry/console/commands/lock.py\n+++ b/poetry/console/commands/lock.py\n@@ -12,6 +12,12 @@\n option(\n \"no-update\", None, \"Do not update locked versions, only refresh lock file.\"\n ),\n+ option(\n+ \"check\",\n+ None,\n+ \"Check that the <comment>poetry.lock</> file corresponds to the current version \"\n+ \"of <comment>pyproject.toml</>.\",\n+ ),\n ]\n \n help = \"\"\"\n@@ -29,6 +35,13 @@\n self.poetry.config.get(\"experimental.new-installer\", False)\n )\n \n+ if self.option(\"check\"):\n+ return (\n+ 0\n+ if self.poetry.locker.is_locked() and self.poetry.locker.is_fresh()\n+ else 1\n+ )\n+\n self._installer.lock(update=not self.option(\"no-update\"))\n \n return self._installer.run()\n", "issue": "command to check lock file\n<!-- Checked checkbox should look like this: [x] -->\r\n- [X] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n## Issue\r\n\r\nWould it be possible to add a `poetry check` command (oh, I see you'e used that already, maybe the command could be extended or have a `poetry lock --check`?) that will return zero if the current poetry.lock is both correct w.r.t. the pyproject.toml (ie: packages match) *and* that it matches the output of `poetry lock`.\r\n\n", "before_files": [{"content": "from cleo.helpers import option\n\nfrom .installer_command import InstallerCommand\n\n\nclass LockCommand(InstallerCommand):\n\n name = \"lock\"\n description = \"Locks the project dependencies.\"\n\n options = [\n option(\n \"no-update\", None, \"Do not update locked versions, only refresh lock file.\"\n ),\n ]\n\n help = \"\"\"\nThe <info>lock</info> command reads the <comment>pyproject.toml</> file from the\ncurrent directory, processes it, and locks the dependencies in the <comment>poetry.lock</>\nfile.\n\n<info>poetry lock</info>\n\"\"\"\n\n loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self) -> int:\n self._installer.use_executor(\n self.poetry.config.get(\"experimental.new-installer\", False)\n )\n\n self._installer.lock(update=not self.option(\"no-update\"))\n\n return self._installer.run()\n", "path": "poetry/console/commands/lock.py"}], "after_files": [{"content": "from cleo.helpers import option\n\nfrom .installer_command import InstallerCommand\n\n\nclass LockCommand(InstallerCommand):\n\n name = \"lock\"\n description = \"Locks the project dependencies.\"\n\n options = [\n option(\n \"no-update\", None, \"Do not update locked versions, only refresh lock file.\"\n ),\n option(\n \"check\",\n None,\n \"Check that the <comment>poetry.lock</> file corresponds to the current version \"\n \"of <comment>pyproject.toml</>.\",\n ),\n ]\n\n help = \"\"\"\nThe <info>lock</info> command reads the <comment>pyproject.toml</> file from the\ncurrent directory, processes it, and locks the dependencies in the <comment>poetry.lock</>\nfile.\n\n<info>poetry lock</info>\n\"\"\"\n\n loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self) -> int:\n self._installer.use_executor(\n self.poetry.config.get(\"experimental.new-installer\", False)\n )\n\n if self.option(\"check\"):\n return (\n 0\n if self.poetry.locker.is_locked() and self.poetry.locker.is_fresh()\n else 1\n )\n\n self._installer.lock(update=not self.option(\"no-update\"))\n\n return self._installer.run()\n", "path": "poetry/console/commands/lock.py"}]}
| 666 | 243 |
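The patch above makes `poetry lock --check` exit with 0 only when a lock file exists (`is_locked`) and its recorded content hash still matches `pyproject.toml` (`is_fresh`). The sketch below only approximates that freshness idea and is not Poetry's actual implementation; the key names and the hashed subset are assumptions.

```python
# Rough sketch: a lock file is "fresh" when the hash recorded at lock time still
# matches a hash of the dependency-relevant parts of pyproject.toml.
import hashlib
import json

def content_hash(pyproject: dict) -> str:
    poetry_section = pyproject.get("tool", {}).get("poetry", {})
    relevant = {k: poetry_section.get(k) for k in ("dependencies", "dev-dependencies", "extras")}
    return hashlib.sha256(json.dumps(relevant, sort_keys=True).encode()).hexdigest()

def is_fresh(lock_metadata: dict, pyproject: dict) -> bool:
    return lock_metadata.get("content-hash") == content_hash(pyproject)
```

In CI the new flag would typically be run as `poetry lock --check`, failing the build on a non-zero exit code so a stale lock file gets regenerated.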
gh_patches_debug_39342
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-5564
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VS generator not exporting for <Lib> tag in vcxproj
The Conan-handled libs need to be referenced in the .vcxproj <Lib> tag as well; the Conan VS generator currently exports them only in the <Link> tag.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/generators/visualstudio.py`
Content:
```
1 import os
2 import re
3
4 from conans.model import Generator
5 from conans.paths import BUILD_INFO_VISUAL_STUDIO
6 from conans.client.tools.files import VALID_LIB_EXTENSIONS
7
8
9 class VisualStudioGenerator(Generator):
10
11 template = '''<?xml version="1.0" encoding="utf-8"?>
12 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
13 <ImportGroup Label="PropertySheets" />
14 <PropertyGroup Label="UserMacros" />
15 <PropertyGroup Label="Conan-RootDirs">{item_properties}
16 </PropertyGroup>
17 {properties}
18 <ItemGroup />
19 </Project>'''
20
21 properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}>
22 <ConanBinaryDirectories>{bin_dirs};%(ConanBinaryDirectories)</ConanBinaryDirectories>
23 <ConanResourceDirectories>{res_dirs};%(ConanResourceDirectories)$(</ConanResourceDirectories>
24 </PropertyGroup>
25 <PropertyGroup{condition}>
26 <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
27 <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
28 </PropertyGroup>
29 <ItemDefinitionGroup{condition}>
30 <ClCompile>
31 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
32 <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
33 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
34 </ClCompile>
35 <Link>
36 <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
37 <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
38 <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
39 </Link>
40 <Midl>
41 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
42 </Midl>
43 <ResourceCompile>
44 <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
45 <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
46 <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
47 </ResourceCompile>
48 </ItemDefinitionGroup>'''
49
50 item_template = '''
51 <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''
52
53 def _format_items(self):
54 sections = []
55 for dep_name, cpp_info in self._deps_build_info.dependencies:
56 fields = {
57 'root_dir': cpp_info.rootpath,
58 'name': dep_name.replace(".", "-")
59 }
60 section = self.item_template.format(**fields)
61 sections.append(section)
62 return "".join(sections)
63
64 @property
65 def filename(self):
66 return BUILD_INFO_VISUAL_STUDIO
67
68 def _format_properties(self, build_info, condition):
69 def has_valid_ext(lib):
70 ext = os.path.splitext(lib)[1]
71 return ext in VALID_LIB_EXTENSIONS
72
73 fields = {
74 'condition': condition,
75 'bin_dirs': "".join("%s;" % p for p in build_info.bin_paths),
76 'res_dirs': "".join("%s;" % p for p in build_info.res_paths),
77 'include_dirs': "".join("%s;" % p for p in build_info.include_paths),
78 'lib_dirs': "".join("%s;" % p for p in build_info.lib_paths),
79 'libs': "".join(['%s.lib;' % lib if not has_valid_ext(lib)
80 else '%s;' % lib for lib in build_info.libs]),
81 'definitions': "".join("%s;" % d for d in build_info.defines),
82 'compiler_flags': " ".join(build_info.cxxflags + build_info.cflags),
83 'linker_flags': " ".join(build_info.sharedlinkflags),
84 'exe_flags': " ".join(build_info.exelinkflags)
85 }
86 formatted_template = self.properties_template.format(**fields)
87 return formatted_template
88
89 @property
90 def content(self):
91 per_item_props = self._format_items()
92
93 properties = [self._format_properties(self._deps_build_info, condition='')]
94 for config, cpp_info in self._deps_build_info.configs.items():
95 condition = " Condition=\"'$(Configuration)' == '%s'\"" % config
96 properties.append(self._format_properties(cpp_info, condition=condition))
97
98 fields = {
99 'item_properties': per_item_props,
100 'properties': '\n'.join(properties)
101 }
102 formatted_template = self.template.format(**fields)
103
104 userprofile = os.getenv("USERPROFILE")
105 if userprofile:
106 userprofile = userprofile.replace("\\", "\\\\")
107 formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template,
108 flags=re.I)
109 return formatted_template
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py
--- a/conans/client/generators/visualstudio.py
+++ b/conans/client/generators/visualstudio.py
@@ -19,8 +19,12 @@
</Project>'''
properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}>
- <ConanBinaryDirectories>{bin_dirs};%(ConanBinaryDirectories)</ConanBinaryDirectories>
- <ConanResourceDirectories>{res_dirs};%(ConanResourceDirectories)$(</ConanResourceDirectories>
+ <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>
+ <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>
+ <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>
+ <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
+ <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>
+ <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
</PropertyGroup>
<PropertyGroup{condition}>
<LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
@@ -28,22 +32,26 @@
</PropertyGroup>
<ItemDefinitionGroup{condition}>
<ClCompile>
- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Link>
- <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
<AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
</Link>
+ <Lib>
+ <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
+ </Lib>
<Midl>
- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</Midl>
<ResourceCompile>
- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
- <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>
- <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
</ResourceCompile>
</ItemDefinitionGroup>'''
|
{"golden_diff": "diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py\n--- a/conans/client/generators/visualstudio.py\n+++ b/conans/client/generators/visualstudio.py\n@@ -19,8 +19,12 @@\n </Project>'''\n \n properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n- <ConanBinaryDirectories>{bin_dirs};%(ConanBinaryDirectories)</ConanBinaryDirectories>\n- <ConanResourceDirectories>{res_dirs};%(ConanResourceDirectories)$(</ConanResourceDirectories>\n+ <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n+ <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n+ <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n+ <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n+ <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n+ <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n@@ -28,22 +32,26 @@\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n- <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n- <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n+ <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n+ <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n- <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n+ <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n+ <Lib>\n+ <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n+ <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n+ </Lib>\n <Midl>\n- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n- <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n- <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n- <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n+ <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n+ <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n+ <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>'''\n", "issue": "VS generator not exporting for <Lib> tag in vcxproj\nNeed to add Conan-handled lib to be referenced in .vcxproj in <Lib> tags. Conan VS-generator is exporting only in <Link> tag. 
\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\nfrom conans.client.tools.files import VALID_LIB_EXTENSIONS\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n {properties}\n <ItemGroup />\n</Project>'''\n\n properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanBinaryDirectories>{bin_dirs};%(ConanBinaryDirectories)</ConanBinaryDirectories>\n <ConanResourceDirectories>{res_dirs};%(ConanResourceDirectories)$(</ConanResourceDirectories>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>{lib_dirs}%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Midl>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>{include_dirs}%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>{definitions}%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>{compiler_flags} %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self._deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath,\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n def _format_properties(self, build_info, condition):\n def has_valid_ext(lib):\n ext = os.path.splitext(lib)[1]\n return ext in VALID_LIB_EXTENSIONS\n\n fields = {\n 'condition': condition,\n 'bin_dirs': \"\".join(\"%s;\" % p for p in build_info.bin_paths),\n 'res_dirs': \"\".join(\"%s;\" % p for p in build_info.res_paths),\n 'include_dirs': \"\".join(\"%s;\" % p for p in build_info.include_paths),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in build_info.lib_paths),\n 'libs': \"\".join(['%s.lib;' % lib if not has_valid_ext(lib)\n else '%s;' % lib for lib in build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in build_info.defines),\n 'compiler_flags': \" \".join(build_info.cxxflags + build_info.cflags),\n 'linker_flags': \" \".join(build_info.sharedlinkflags),\n 'exe_flags': \" \".join(build_info.exelinkflags)\n }\n formatted_template = self.properties_template.format(**fields)\n 
return formatted_template\n\n @property\n def content(self):\n per_item_props = self._format_items()\n\n properties = [self._format_properties(self._deps_build_info, condition='')]\n for config, cpp_info in self._deps_build_info.configs.items():\n condition = \" Condition=\\\"'$(Configuration)' == '%s'\\\"\" % config\n properties.append(self._format_properties(cpp_info, condition=condition))\n\n fields = {\n 'item_properties': per_item_props,\n 'properties': '\\n'.join(properties)\n }\n formatted_template = self.template.format(**fields)\n\n userprofile = os.getenv(\"USERPROFILE\")\n if userprofile:\n userprofile = userprofile.replace(\"\\\\\", \"\\\\\\\\\")\n formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template,\n flags=re.I)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\nfrom conans.client.tools.files import VALID_LIB_EXTENSIONS\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n {properties}\n <ItemGroup />\n</Project>'''\n\n properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Lib>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n </Lib>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n 
</ItemDefinitionGroup>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self._deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath,\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n def _format_properties(self, build_info, condition):\n def has_valid_ext(lib):\n ext = os.path.splitext(lib)[1]\n return ext in VALID_LIB_EXTENSIONS\n\n fields = {\n 'condition': condition,\n 'bin_dirs': \"\".join(\"%s;\" % p for p in build_info.bin_paths),\n 'res_dirs': \"\".join(\"%s;\" % p for p in build_info.res_paths),\n 'include_dirs': \"\".join(\"%s;\" % p for p in build_info.include_paths),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in build_info.lib_paths),\n 'libs': \"\".join(['%s.lib;' % lib if not has_valid_ext(lib)\n else '%s;' % lib for lib in build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in build_info.defines),\n 'compiler_flags': \" \".join(build_info.cxxflags + build_info.cflags),\n 'linker_flags': \" \".join(build_info.sharedlinkflags),\n 'exe_flags': \" \".join(build_info.exelinkflags)\n }\n formatted_template = self.properties_template.format(**fields)\n return formatted_template\n\n @property\n def content(self):\n per_item_props = self._format_items()\n\n properties = [self._format_properties(self._deps_build_info, condition='')]\n for config, cpp_info in self._deps_build_info.configs.items():\n condition = \" Condition=\\\"'$(Configuration)' == '%s'\\\"\" % config\n properties.append(self._format_properties(cpp_info, condition=condition))\n\n fields = {\n 'item_properties': per_item_props,\n 'properties': '\\n'.join(properties)\n }\n formatted_template = self.template.format(**fields)\n\n userprofile = os.getenv(\"USERPROFILE\")\n if userprofile:\n userprofile = userprofile.replace(\"\\\\\", \"\\\\\\\\\")\n formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template,\n flags=re.I)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}]}
| 1,558 | 725 |
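The patch above adds a `<Lib>` item-definition group so that static-library (librarian) targets receive the Conan-managed library directories and dependencies, which previously reached only the `<Link>` step. The snippet below only mimics the template expansion and is not Conan's real generator code; the library names are made up.

```python
# Expand a <Lib> block the same way the generator template does, so .lib
# projects see the same AdditionalDependencies as linked executables/DLLs.
lib_block = """  <Lib>
    <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
    <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
  </Lib>"""

libs = "".join("%s.lib;" % lib for lib in ("zlib", "bzip2"))
print(lib_block.format(libs=libs))
```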
gh_patches_debug_9185
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-729
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
502 error on category pages
We can reproduce it with the code below.
It seems connected with https://github.com/fidals/shopelectro/issues/648, but this has not been verified yet.
```
import requests
BASE = 'https://www.shopelectro.ru'
broken_urls = []
for c in CategoryPage.objects.all():
response = requests.get(BASE + c.url)
if response.status_code != 200:
broken_urls += (response.status_code, response.url)
print('\n'.join((str(url) for url in broken_urls)))
---
# output:
502
https://www.shopelectro.ru/catalog/categories/batarei-saliutov-moshchnye/
502
https://www.shopelectro.ru/catalog/categories/khlopushki-pnevmaticheskie/
502
https://www.shopelectro.ru/catalog/categories/izmeritelnye-pribory-211/
502
https://www.shopelectro.ru/catalog/categories/dlia-tiporazmera-aaaaa/
502
https://www.shopelectro.ru/catalog/categories/tiporazmer-aa/
502
https://www.shopelectro.ru/catalog/categories/tiporazmer-6f22/
502
https://www.shopelectro.ru/catalog/categories/invertory/
502
https://www.shopelectro.ru/catalog/categories/palchikovye-aa-r6-lr6-fr6-316/
502
https://www.shopelectro.ru/catalog/categories/mizinchikovye-aaa-r03-lr03-fr03-286/
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/celery.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2 from datetime import timedelta
3 import os
4
5 from celery import Celery
6 from kombu import Exchange, Queue
7
8 # set the default Django settings module for the 'celery' program.
9 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')
10
11 app = Celery('shopelectro')
12
13 # Exchanges
14 default_exchange = Exchange('default', type='direct')
15 utils_exchange = Exchange('utils', type='direct')
16
17 # http://docs.celeryproject.org/en/latest/userguide/tasks.html
18 task_queues = (
19 Queue(
20 name='default',
21 exchange=default_exchange,
22 routing_key='default',
23 ),
24 Queue(
25 name='mail',
26 exchange=utils_exchange,
27 routing_key='utils.mail',
28 ),
29 Queue(
30 name='command',
31 exchange=utils_exchange,
32 routing_key='utils.command',
33 )
34 )
35
36 # http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html
37 beat_schedule = {
38 'update-catalog': {
39 'task': 'shopelectro.tasks.update_catalog',
40 'schedule': timedelta(hours=2).total_seconds(),
41 },
42 'check-purchase': {
43 'task': 'shopelectro.tasks.check_purchase',
44 'schedule': timedelta(days=1).total_seconds(),
45 },
46 }
47
48 # http://docs.celeryproject.org/en/master/userguide/routing.html
49 task_routes = {
50 'shopelectro.tasks.update_catalog': {
51 'queue': 'command',
52 'routing_key': 'utils.command',
53 'priority': 30,
54 },
55 'shopelectro.tasks.check_purchase': {
56 'queue': 'command',
57 'routing_key': 'utils.command',
58 'priority': 20,
59 },
60 'ecommerce.tasks.send_mail': {
61 'queue': 'mail',
62 'routing_key': 'utils.mail',
63 'priority': 50,
64 },
65 }
66
67 # Using a string here means the worker don't have to serialize
68 # the configuration object to child processes.
69 # - namespace='CELERY' means all celery-related configuration keys
70 # should have a `CELERY_` prefix.
71 app.config_from_object('django.conf:settings', namespace='CELERY')
72
73 # http://docs.celeryproject.org/en/latest/userguide/configuration.html
74
75 BROCKER_URL = (
76 f'amqp://{os.environ["RABBITMQ_DEFAULT_USER"]}:{os.environ["RABBITMQ_DEFAULT_PASS"]}'
77 f'@{os.environ["RABBITMQ_URL"]}:{os.environ["RABBITMQ_PORT"]}/'
78 )
79 app.conf.update(
80 broker_url=BROCKER_URL,
81 broker_heartbeat=30,
82 task_acks_late=True,
83 task_default_queue='default',
84 task_default_exchange='default',
85 task_default_routing_key='default',
86 task_ignore_result=True,
87 worker_pool_restarts=1000,
88 task_routes=task_routes,
89 task_queues=task_queues,
90 beat_schedule=beat_schedule,
91 )
92
93 # Load task modules from all registered Django app configs.
94 app.autodiscover_tasks()
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/celery.py b/shopelectro/celery.py
--- a/shopelectro/celery.py
+++ b/shopelectro/celery.py
@@ -83,11 +83,11 @@
task_default_queue='default',
task_default_exchange='default',
task_default_routing_key='default',
- task_ignore_result=True,
- worker_pool_restarts=1000,
+ worker_pool_restarts=True,
task_routes=task_routes,
task_queues=task_queues,
beat_schedule=beat_schedule,
+ worker_max_memory_per_child=200000, # after 250MB will restart
)
# Load task modules from all registered Django app configs.
|
{"golden_diff": "diff --git a/shopelectro/celery.py b/shopelectro/celery.py\n--- a/shopelectro/celery.py\n+++ b/shopelectro/celery.py\n@@ -83,11 +83,11 @@\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n- task_ignore_result=True,\n- worker_pool_restarts=1000,\n+ worker_pool_restarts=True,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n+ worker_max_memory_per_child=200000, # after 250MB will restart\n )\n \n # Load task modules from all registered Django app configs.\n", "issue": "502 error on category pages\nWe can reproduce it with code below.\r\n\r\nSeems it's connected with https://github.com/fidals/shopelectro/issues/648, but it's not checked yet\r\n\r\n```\r\nimport requests\r\n\r\nBASE = 'https://www.shopelectro.ru'\r\n\r\nbroken_urls = []\r\nfor c in CategoryPage.objects.all():\r\n\tresponse = requests.get(BASE + c.url)\r\n\tif response.status_code != 200:\r\n\t\tbroken_urls += (response.status_code, response.url)\r\n\r\nprint('\\n'.join((str(url) for url in broken_urls)))\r\n\r\n---\r\n\r\n# output:\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/batarei-saliutov-moshchnye/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/khlopushki-pnevmaticheskie/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/izmeritelnye-pribory-211/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/dlia-tiporazmera-aaaaa/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/tiporazmer-aa/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/tiporazmer-6f22/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/invertory/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/palchikovye-aa-r6-lr6-fr6-316/\r\n502\r\nhttps://www.shopelectro.ru/catalog/categories/mizinchikovye-aaa-r03-lr03-fr03-286/\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration 
object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n task_ignore_result=True,\n worker_pool_restarts=1000,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", "path": "shopelectro/celery.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nimport os\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue\n\n# set the default Django settings module for the 'celery' program.\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopelectro.settings.local')\n\napp = Celery('shopelectro')\n\n# Exchanges\ndefault_exchange = Exchange('default', type='direct')\nutils_exchange = Exchange('utils', type='direct')\n\n# http://docs.celeryproject.org/en/latest/userguide/tasks.html\ntask_queues = (\n Queue(\n name='default',\n exchange=default_exchange,\n routing_key='default',\n ),\n Queue(\n name='mail',\n exchange=utils_exchange,\n routing_key='utils.mail',\n ),\n Queue(\n name='command',\n exchange=utils_exchange,\n routing_key='utils.command',\n )\n)\n\n# http://docs.celeryproject.org/en/latest/userguide/periodic-tasks.html\nbeat_schedule = {\n 'update-catalog': {\n 'task': 'shopelectro.tasks.update_catalog',\n 'schedule': timedelta(hours=2).total_seconds(),\n },\n 'check-purchase': {\n 'task': 'shopelectro.tasks.check_purchase',\n 'schedule': timedelta(days=1).total_seconds(),\n },\n}\n\n# http://docs.celeryproject.org/en/master/userguide/routing.html\ntask_routes = {\n 'shopelectro.tasks.update_catalog': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 30,\n },\n 'shopelectro.tasks.check_purchase': {\n 'queue': 'command',\n 'routing_key': 'utils.command',\n 'priority': 20,\n },\n 'ecommerce.tasks.send_mail': {\n 'queue': 'mail',\n 'routing_key': 'utils.mail',\n 'priority': 50,\n },\n}\n\n# Using a string here means the worker don't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html\n\nBROCKER_URL = (\n f'amqp://{os.environ[\"RABBITMQ_DEFAULT_USER\"]}:{os.environ[\"RABBITMQ_DEFAULT_PASS\"]}'\n f'@{os.environ[\"RABBITMQ_URL\"]}:{os.environ[\"RABBITMQ_PORT\"]}/'\n)\napp.conf.update(\n broker_url=BROCKER_URL,\n broker_heartbeat=30,\n task_acks_late=True,\n task_default_queue='default',\n task_default_exchange='default',\n task_default_routing_key='default',\n worker_pool_restarts=True,\n task_routes=task_routes,\n task_queues=task_queues,\n beat_schedule=beat_schedule,\n worker_max_memory_per_child=200000, # after 250MB will restart\n)\n\n# Load task modules from all registered Django app configs.\napp.autodiscover_tasks()\n", 
"path": "shopelectro/celery.py"}]}
| 1,484 | 167 |
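The patch above replaces the invalid `worker_pool_restarts=1000` with `True`, drops `task_ignore_result`, and caps per-child memory so that long-running Celery workers are recycled rather than exhausting the host. A minimal sketch of the two worker options follows; the broker URL and limits are illustrative.

```python
# worker_max_memory_per_child is measured in kilobytes; once a child process
# exceeds it, Celery replaces that child after the current task finishes.
from celery import Celery

app = Celery("example", broker="amqp://guest:guest@localhost:5672//")
app.conf.update(
    worker_pool_restarts=True,            # allow the pool_restart remote-control command
    worker_max_memory_per_child=200_000,  # ~200 MB per child process
)
```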
gh_patches_debug_5657
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`split_axis` fails when it splits the last axis of an array on py3
This code causes an error.
```
>>> x=chainer.Variable(chainer.cuda.to_gpu(numpy.array([1,2,3], numpy.float32)))
>>> chainer.functions.split_axis(x, [1], 0)
...
struct.error: required argument is not an integer
```
The return value of `numpy.prod(())` is a `float`, and PyCUDA cannot pass a float value as an integer argument on py3 (on py2 it works).
https://github.com/pfnet/chainer/blob/master/chainer/functions/split_axis.py#L60
@ShigekiKarita reported this bug. Thanks!!
https://gist.github.com/ShigekiKarita/f27d1d149ccc3e3f381f
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/split_axis.py`
Content:
```
1 import collections
2
3 import numpy
4
5 from chainer import cuda
6 from chainer import function
7 from chainer.utils import type_check
8
9
10 _args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset'
11 _preamble = '''
12 #define COPY(statement) \
13 int l = i / (rdim * cdimy); \
14 int c = i / rdim % cdimy + coffset; \
15 int r = i % rdim; \
16 int idx = r + rdim * (c + cdimx * l); \
17 statement;
18 '''
19
20
21 class SplitAxis(function.Function):
22
23 """Function that splits multiple arrays towards the specified axis."""
24
25 def __init__(self, indices_or_sections, axis):
26 if not isinstance(indices_or_sections, (int, collections.Iterable)):
27 raise TypeError('indices_or_sections must be integer or 1-D array')
28 self.indices_or_sections = indices_or_sections
29 self.axis = axis
30
31 def check_type_forward(self, in_types):
32 type_check.expect(in_types.size() == 1)
33 type_check.expect(in_types[0].ndim >= self.axis)
34
35 if isinstance(self.indices_or_sections, collections.Iterable):
36 max_index = type_check.Variable(
37 self.indices_or_sections[-1], 'max_index')
38 type_check.expect(in_types[0].shape[self.axis] > max_index)
39 else:
40 sections = type_check.Variable(
41 self.indices_or_sections, 'sections')
42 type_check.expect(in_types[0].shape[self.axis] % sections == 0)
43
44 def forward_cpu(self, x):
45 if isinstance(self.indices_or_sections, collections.Iterable):
46 cdimx = x[0].shape[self.axis]
47 ind = list(self.indices_or_sections)
48 ind.append(cdimx)
49 prev_i = 0
50 for i in ind:
51 cdimy = max(0, min(i, cdimx) - prev_i)
52 if cdimy == 0:
53 raise ValueError('Not support if shape contains 0')
54 prev_i = i
55 return tuple(numpy.split(x[0], self.indices_or_sections, self.axis))
56
57 def forward_gpu(self, x):
58 xshape = x[0].shape
59 self.cdimx = xshape[self.axis]
60 self.rdim = numpy.prod(xshape[self.axis + 1:])
61
62 if isinstance(self.indices_or_sections, collections.Iterable):
63 ind = list(self.indices_or_sections)
64 ind.append(self.cdimx)
65 else:
66 sec = self.indices_or_sections
67 if self.cdimx % sec:
68 raise ValueError(
69 'array split does not result in an equal division')
70 ind = numpy.arange(1, sec + 1) * (self.cdimx // sec)
71 ys = []
72 kernel = cuda.elementwise(
73 _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble)
74 prev_i = 0
75 for i in ind:
76 cdimy = max(0, min(i, self.cdimx) - prev_i)
77 s = list(xshape)
78 s[self.axis] = cdimy
79 y = cuda.empty(s, dtype=x[0].dtype)
80 if cdimy == 0:
81 raise ValueError('Not support if shape contains 0')
82 kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i)
83 prev_i = i
84 ys.append(y)
85 return tuple(ys)
86
87 def backward_cpu(self, x, gys):
88 return numpy.concatenate(gys, axis=self.axis),
89
90 def backward_gpu(self, x, gys):
91 gx = cuda.empty_like(x[0])
92 coffset = 0
93 kernel = cuda.elementwise(
94 _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble)
95 for gy in gys:
96 cdimy = gy.shape[self.axis]
97 if cdimy != 0:
98 kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)
99 coffset += cdimy
100 return gx,
101
102
103 def split_axis(x, indices_or_sections, axis):
104 """Splits given variables along an axis.
105
106 Args:
107 x (tuple of Variables): Variables to be split.
108 indices_or_sections (int or 1-D array): If this argument is an integer,
109 N, the array will be divided into N equal arrays along axis.
110 If it is a 1-D array of sorted integers, it
111 indicates the positions where the array is split.
112 axis (int): Axis that the input array is split along.
113
114 Returns:
115 ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects
116 if the number of outputs is more than 1 or
117 :class:`~chainer.Variable` otherwise.
118
119 .. note::
120 This function raises ``ValueError`` if at least
121 one of the outputs is splitted to zero-size
122 (i.e. `axis`-th value of its shape is zero).
123
124 """
125 return SplitAxis(indices_or_sections, axis)(x)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/split_axis.py b/chainer/functions/split_axis.py
--- a/chainer/functions/split_axis.py
+++ b/chainer/functions/split_axis.py
@@ -57,7 +57,7 @@
def forward_gpu(self, x):
xshape = x[0].shape
self.cdimx = xshape[self.axis]
- self.rdim = numpy.prod(xshape[self.axis + 1:])
+ self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int)
if isinstance(self.indices_or_sections, collections.Iterable):
ind = list(self.indices_or_sections)
|
{"golden_diff": "diff --git a/chainer/functions/split_axis.py b/chainer/functions/split_axis.py\n--- a/chainer/functions/split_axis.py\n+++ b/chainer/functions/split_axis.py\n@@ -57,7 +57,7 @@\n def forward_gpu(self, x):\n xshape = x[0].shape\n self.cdimx = xshape[self.axis]\n- self.rdim = numpy.prod(xshape[self.axis + 1:])\n+ self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int)\n \n if isinstance(self.indices_or_sections, collections.Iterable):\n ind = list(self.indices_or_sections)\n", "issue": "`split_axis` fails when it splits the last axis of an array on py3\nThis code causes an error.\n\n```\n>>> x=chainer.Variable(chainer.cuda.to_gpu(numpy.array([1,2,3], numpy.float32)))\n>>> chainer.functions.split_axis(x, [1], 0)\n...\nstruct.error: required argument is not an integer\n```\n\nA return value of `numpy.prod(())` is `float`, and pycuda cannot pass a float value as an integer argument on py3 (on py2, it works).\nhttps://github.com/pfnet/chainer/blob/master/chainer/functions/split_axis.py#L60\n\n@ShigekiKarita reported this bug. Thanks!!\nhttps://gist.github.com/ShigekiKarita/f27d1d149ccc3e3f381f\n\n", "before_files": [{"content": "import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\n_args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimy); \\\n int c = i / rdim % cdimy + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimx * l); \\\n statement;\n'''\n\n\nclass SplitAxis(function.Function):\n\n \"\"\"Function that splits multiple arrays towards the specified axis.\"\"\"\n\n def __init__(self, indices_or_sections, axis):\n if not isinstance(indices_or_sections, (int, collections.Iterable)):\n raise TypeError('indices_or_sections must be integer or 1-D array')\n self.indices_or_sections = indices_or_sections\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].ndim >= self.axis)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n max_index = type_check.Variable(\n self.indices_or_sections[-1], 'max_index')\n type_check.expect(in_types[0].shape[self.axis] > max_index)\n else:\n sections = type_check.Variable(\n self.indices_or_sections, 'sections')\n type_check.expect(in_types[0].shape[self.axis] % sections == 0)\n\n def forward_cpu(self, x):\n if isinstance(self.indices_or_sections, collections.Iterable):\n cdimx = x[0].shape[self.axis]\n ind = list(self.indices_or_sections)\n ind.append(cdimx)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, cdimx) - prev_i)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n prev_i = i\n return tuple(numpy.split(x[0], self.indices_or_sections, self.axis))\n\n def forward_gpu(self, x):\n xshape = x[0].shape\n self.cdimx = xshape[self.axis]\n self.rdim = numpy.prod(xshape[self.axis + 1:])\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n ind = list(self.indices_or_sections)\n ind.append(self.cdimx)\n else:\n sec = self.indices_or_sections\n if self.cdimx % sec:\n raise ValueError(\n 'array split does not result in an equal division')\n ind = numpy.arange(1, sec + 1) * (self.cdimx // sec)\n ys = []\n kernel = cuda.elementwise(\n _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, self.cdimx) - prev_i)\n s = list(xshape)\n s[self.axis] = cdimy\n y = 
cuda.empty(s, dtype=x[0].dtype)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i)\n prev_i = i\n ys.append(y)\n return tuple(ys)\n\n def backward_cpu(self, x, gys):\n return numpy.concatenate(gys, axis=self.axis),\n\n def backward_gpu(self, x, gys):\n gx = cuda.empty_like(x[0])\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble)\n for gy in gys:\n cdimy = gy.shape[self.axis]\n if cdimy != 0:\n kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)\n coffset += cdimy\n return gx,\n\n\ndef split_axis(x, indices_or_sections, axis):\n \"\"\"Splits given variables along an axis.\n\n Args:\n x (tuple of Variables): Variables to be split.\n indices_or_sections (int or 1-D array): If this argument is an integer,\n N, the array will be divided into N equal arrays along axis.\n If it is a 1-D array of sorted integers, it\n indicates the positions where the array is split.\n axis (int): Axis that the input array is split along.\n\n Returns:\n ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects\n if the number of outputs is more than 1 or\n :class:`~chainer.Variable` otherwise.\n\n .. note::\n This function raises ``ValueError`` if at least\n one of the outputs is splitted to zero-size\n (i.e. `axis`-th value of its shape is zero).\n\n \"\"\"\n return SplitAxis(indices_or_sections, axis)(x)\n", "path": "chainer/functions/split_axis.py"}], "after_files": [{"content": "import collections\n\nimport numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\n_args = 'float* y, float* x, int cdimy, int cdimx, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimy); \\\n int c = i / rdim % cdimy + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimx * l); \\\n statement;\n'''\n\n\nclass SplitAxis(function.Function):\n\n \"\"\"Function that splits multiple arrays towards the specified axis.\"\"\"\n\n def __init__(self, indices_or_sections, axis):\n if not isinstance(indices_or_sections, (int, collections.Iterable)):\n raise TypeError('indices_or_sections must be integer or 1-D array')\n self.indices_or_sections = indices_or_sections\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].ndim >= self.axis)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n max_index = type_check.Variable(\n self.indices_or_sections[-1], 'max_index')\n type_check.expect(in_types[0].shape[self.axis] > max_index)\n else:\n sections = type_check.Variable(\n self.indices_or_sections, 'sections')\n type_check.expect(in_types[0].shape[self.axis] % sections == 0)\n\n def forward_cpu(self, x):\n if isinstance(self.indices_or_sections, collections.Iterable):\n cdimx = x[0].shape[self.axis]\n ind = list(self.indices_or_sections)\n ind.append(cdimx)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, cdimx) - prev_i)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n prev_i = i\n return tuple(numpy.split(x[0], self.indices_or_sections, self.axis))\n\n def forward_gpu(self, x):\n xshape = x[0].shape\n self.cdimx = xshape[self.axis]\n self.rdim = numpy.prod(xshape[self.axis + 1:], dtype=int)\n\n if isinstance(self.indices_or_sections, collections.Iterable):\n ind = list(self.indices_or_sections)\n ind.append(self.cdimx)\n else:\n sec = self.indices_or_sections\n if 
self.cdimx % sec:\n raise ValueError(\n 'array split does not result in an equal division')\n ind = numpy.arange(1, sec + 1) * (self.cdimx // sec)\n ys = []\n kernel = cuda.elementwise(\n _args, 'COPY(y[i] = x[idx])', 'split_fwd', preamble=_preamble)\n prev_i = 0\n for i in ind:\n cdimy = max(0, min(i, self.cdimx) - prev_i)\n s = list(xshape)\n s[self.axis] = cdimy\n y = cuda.empty(s, dtype=x[0].dtype)\n if cdimy == 0:\n raise ValueError('Not support if shape contains 0')\n kernel(y, x[0], cdimy, self.cdimx, self.rdim, prev_i)\n prev_i = i\n ys.append(y)\n return tuple(ys)\n\n def backward_cpu(self, x, gys):\n return numpy.concatenate(gys, axis=self.axis),\n\n def backward_gpu(self, x, gys):\n gx = cuda.empty_like(x[0])\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[idx] = y[i])', 'split_bwd', preamble=_preamble)\n for gy in gys:\n cdimy = gy.shape[self.axis]\n if cdimy != 0:\n kernel(gy, gx, cdimy, self.cdimx, self.rdim, coffset)\n coffset += cdimy\n return gx,\n\n\ndef split_axis(x, indices_or_sections, axis):\n \"\"\"Splits given variables along an axis.\n\n Args:\n x (tuple of Variables): Variables to be split.\n indices_or_sections (int or 1-D array): If this argument is an integer,\n N, the array will be divided into N equal arrays along axis.\n If it is a 1-D array of sorted integers, it\n indicates the positions where the array is split.\n axis (int): Axis that the input array is split along.\n\n Returns:\n ``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects\n if the number of outputs is more than 1 or\n :class:`~chainer.Variable` otherwise.\n\n .. note::\n This function raises ``ValueError`` if at least\n one of the outputs is splitted to zero-size\n (i.e. `axis`-th value of its shape is zero).\n\n \"\"\"\n return SplitAxis(indices_or_sections, axis)(x)\n", "path": "chainer/functions/split_axis.py"}]}
| 1,846 | 138 |
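The one-line fix above pins the dtype of `numpy.prod`, because reducing the empty shape tuple left over after the last axis yields a float by default, which PyCUDA's struct packing rejects where an integer is required. The behaviour can be checked in isolation:

```python
import numpy

shape = (3,)
trailing = shape[1:]  # () when splitting along the last axis

print(type(numpy.prod(trailing)))             # numpy.float64 -> fails struct packing as an int
print(type(numpy.prod(trailing, dtype=int)))  # integer dtype  -> safe to pass as a C int
```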
gh_patches_debug_13030
|
rasdani/github-patches
|
git_diff
|
microsoft__AzureTRE-395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Application Insights configuration for processor function broken
The `createNewWorkspace` function monitoring blade indicates that Application Insights integration is not enabled.

At first glance, the config seems to be there in [processor_function/function.tf](https://github.com/microsoft/AzureTRE/blob/develop/templates/core/terraform/processor_function/function.tf#L24):
```plaintext
APP_INSIGHTS_INSTRUMENTATION_KEY = var.app_insights_instrumentation_key
```
however, the setting key name is incorrect.
Manually enabling the integration in the portal adds additional config keys:
`APPLICATIONINSIGHTS_CONNECTION_STRING`
`APPINSIGHTS_INSTRUMENTATIONKEY`
Need to add above keys with values to function app config.
[Reference](https://docs.microsoft.com/en-us/azure/azure-monitor/app/resource-manager-function-app)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `processor_function/shared/logging.py`
Content:
```
1 import logging
2 import os
3
4 from opencensus.ext.azure.log_exporter import AzureLogHandler
5 from opencensus.trace import config_integration
6 from opencensus.trace.samplers import AlwaysOnSampler
7 from opencensus.trace.tracer import Tracer
8
9
10 def initialize_logging(logging_level: int, correlation_id: str) -> logging.LoggerAdapter:
11 """
12 Adds the Application Insights handler for the root logger and sets the given logging level.
13 Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages.
14
15 :param logging_level: The logging level to set e.g., logging.WARNING.
16 :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights.
17 :returns: A newly created logger adapter.
18 """
19 logger = logging.getLogger()
20 logger.addHandler(logging.StreamHandler()) # For logging into console
21 app_insights_instrumentation_key = os.getenv("APP_INSIGHTS_INSTRUMENTATION_KEY")
22
23 try:
24 logger.addHandler(AzureLogHandler(connection_string=f"InstrumentationKey={app_insights_instrumentation_key}"))
25 except ValueError as e:
26 logger.error(f"Failed to set Application Insights logger handler: {e}")
27
28 config_integration.trace_integrations(['logging'])
29 logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s')
30 Tracer(sampler=AlwaysOnSampler())
31 logger.setLevel(logging_level)
32
33 extra = None
34
35 if correlation_id:
36 extra = {'traceId': correlation_id}
37
38 adapter = logging.LoggerAdapter(logger, extra)
39 adapter.debug(f"Logger adapter initialized with extra: {extra}")
40
41 return adapter
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/processor_function/shared/logging.py b/processor_function/shared/logging.py
--- a/processor_function/shared/logging.py
+++ b/processor_function/shared/logging.py
@@ -18,10 +18,10 @@
"""
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler()) # For logging into console
- app_insights_instrumentation_key = os.getenv("APP_INSIGHTS_INSTRUMENTATION_KEY")
+ app_insights_connection_string = os.getenv("APPLICATIONINSIGHTS_CONNECTION_STRING")
try:
- logger.addHandler(AzureLogHandler(connection_string=f"InstrumentationKey={app_insights_instrumentation_key}"))
+ logger.addHandler(AzureLogHandler(connection_string=app_insights_connection_string))
except ValueError as e:
logger.error(f"Failed to set Application Insights logger handler: {e}")
|
{"golden_diff": "diff --git a/processor_function/shared/logging.py b/processor_function/shared/logging.py\n--- a/processor_function/shared/logging.py\n+++ b/processor_function/shared/logging.py\n@@ -18,10 +18,10 @@\n \"\"\"\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler()) # For logging into console\n- app_insights_instrumentation_key = os.getenv(\"APP_INSIGHTS_INSTRUMENTATION_KEY\")\n+ app_insights_connection_string = os.getenv(\"APPLICATIONINSIGHTS_CONNECTION_STRING\")\n \n try:\n- logger.addHandler(AzureLogHandler(connection_string=f\"InstrumentationKey={app_insights_instrumentation_key}\"))\n+ logger.addHandler(AzureLogHandler(connection_string=app_insights_connection_string))\n except ValueError as e:\n logger.error(f\"Failed to set Application Insights logger handler: {e}\")\n", "issue": "Application Insights configuration for processor function broken\nThe `createNewWorkspace` function monitoring blade indicates that Application Insights integration is not enabled.\r\n\r\n\r\nAt a first glance, the config seems to be there in [processor_function/function.tf](https://github.com/microsoft/AzureTRE/blob/develop/templates/core/terraform/processor_function/function.tf#L24)\r\n```plaintext\r\n APP_INSIGHTS_INSTRUMENTATION_KEY = var.app_insights_instrumentation_key\r\n```\r\nhowever, the setting key name is incorrect.\r\n\r\nManually enabling the integration in the portal adds additional config keys:\r\n\r\n`APPLICATIONINSIGHTS_CONNECTION_STRING`\r\n`APPINSIGHTS_INSTRUMENTATIONKEY`\r\n\r\nNeed to add above keys with values to function app config.\r\n\r\n[Reference](https://docs.microsoft.com/en-us/azure/azure-monitor/app/resource-manager-function-app)\n", "before_files": [{"content": "import logging\nimport os\n\nfrom opencensus.ext.azure.log_exporter import AzureLogHandler\nfrom opencensus.trace import config_integration\nfrom opencensus.trace.samplers import AlwaysOnSampler\nfrom opencensus.trace.tracer import Tracer\n\n\ndef initialize_logging(logging_level: int, correlation_id: str) -> logging.LoggerAdapter:\n \"\"\"\n Adds the Application Insights handler for the root logger and sets the given logging level.\n Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages.\n\n :param logging_level: The logging level to set e.g., logging.WARNING.\n :param correlation_id: Optional. 
The correlation ID that is passed on to the operation_Id in App Insights.\n :returns: A newly created logger adapter.\n \"\"\"\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler()) # For logging into console\n app_insights_instrumentation_key = os.getenv(\"APP_INSIGHTS_INSTRUMENTATION_KEY\")\n\n try:\n logger.addHandler(AzureLogHandler(connection_string=f\"InstrumentationKey={app_insights_instrumentation_key}\"))\n except ValueError as e:\n logger.error(f\"Failed to set Application Insights logger handler: {e}\")\n\n config_integration.trace_integrations(['logging'])\n logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s')\n Tracer(sampler=AlwaysOnSampler())\n logger.setLevel(logging_level)\n\n extra = None\n\n if correlation_id:\n extra = {'traceId': correlation_id}\n\n adapter = logging.LoggerAdapter(logger, extra)\n adapter.debug(f\"Logger adapter initialized with extra: {extra}\")\n\n return adapter\n", "path": "processor_function/shared/logging.py"}], "after_files": [{"content": "import logging\nimport os\n\nfrom opencensus.ext.azure.log_exporter import AzureLogHandler\nfrom opencensus.trace import config_integration\nfrom opencensus.trace.samplers import AlwaysOnSampler\nfrom opencensus.trace.tracer import Tracer\n\n\ndef initialize_logging(logging_level: int, correlation_id: str) -> logging.LoggerAdapter:\n \"\"\"\n Adds the Application Insights handler for the root logger and sets the given logging level.\n Creates and returns a logger adapter that integrates the correlation ID, if given, to the log messages.\n\n :param logging_level: The logging level to set e.g., logging.WARNING.\n :param correlation_id: Optional. The correlation ID that is passed on to the operation_Id in App Insights.\n :returns: A newly created logger adapter.\n \"\"\"\n logger = logging.getLogger()\n logger.addHandler(logging.StreamHandler()) # For logging into console\n app_insights_connection_string = os.getenv(\"APPLICATIONINSIGHTS_CONNECTION_STRING\")\n\n try:\n logger.addHandler(AzureLogHandler(connection_string=app_insights_connection_string))\n except ValueError as e:\n logger.error(f\"Failed to set Application Insights logger handler: {e}\")\n\n config_integration.trace_integrations(['logging'])\n logging.basicConfig(level=logging_level, format='%(asctime)s traceId=%(traceId)s spanId=%(spanId)s %(message)s')\n Tracer(sampler=AlwaysOnSampler())\n logger.setLevel(logging_level)\n\n extra = None\n\n if correlation_id:\n extra = {'traceId': correlation_id}\n\n adapter = logging.LoggerAdapter(logger, extra)\n adapter.debug(f\"Logger adapter initialized with extra: {extra}\")\n\n return adapter\n", "path": "processor_function/shared/logging.py"}]}
| 932 | 181 |
gh_patches_debug_22772
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-559
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doc does not build
Am I the only one?
```
Exception occurred:
File "/home/aa013911/work/nilearn/doc/sphinxext/sphinxgallery/docs_resolv.py", line 348, in embed_code_links
gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])
KeyError: 'gallery_dir'
The full traceback has been saved in /tmp/sphinx-err-sePU7r.log, if you want to report the issue to the developers.
Please also report this if it was a user error, so that a better error message can be provided next time.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinxext/sphinxgallery/__init__.py`
Content:
```
1 """Sphinx Gallery
2 """
3 import os
4 __version__ = '0.0.8'
5
6 def path_static():
7 """Returns path to packaged static files"""
8 return os.path.abspath(os.path.dirname(__file__))+'/_static'
9
```
Path: `doc/sphinxext/sphinxgallery/gen_gallery.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Author: Óscar Nájera
3 # License: 3-clause BSD
4
5 import os
6 from sphinxgallery.gen_rst import generate_dir_rst
7 from sphinxgallery.docs_resolv import embed_code_links
8
9
10 def generate_gallery_rst(app):
11 """Starts the gallery configuration and recursively scans the examples
12 directory in order to populate the examples gallery
13 """
14 try:
15 plot_gallery = eval(app.builder.config.plot_gallery)
16 except TypeError:
17 plot_gallery = bool(app.builder.config.plot_gallery)
18
19 if not plot_gallery:
20 return
21
22 gallery_conf.update(app.config.sphinxgallery_conf)
23
24 # this assures I can call the config in other places
25 app.config.sphinxgallery_conf = gallery_conf
26
27 examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])
28 gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])
29 mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])
30
31 for workdir in [examples_dir, gallery_dir, mod_examples_dir]:
32 if not os.path.exists(workdir):
33 os.makedirs(workdir)
34
35 # we create an index.rst with all examples
36 fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')
37 fhindex.write("""
38
39 .. _examples-index:
40
41 Gallery of Examples
42 ===================
43
44 """)
45 # Here we don't use an os.walk, but we recurse only twice: flat is
46 # better than nested.
47 seen_backrefs = set()
48 generate_dir_rst('.', fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)
49 for directory in sorted(os.listdir(examples_dir)):
50 if os.path.isdir(os.path.join(examples_dir, directory)):
51 generate_dir_rst(directory, fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)
52 fhindex.flush()
53
54
55 gallery_conf = {
56 'examples_dir' : '../examples',
57 'gallery_dir' : 'auto_examples',
58 'mod_example_dir': 'modules/generated',
59 'doc_module' : (),
60 'reference_url' : {},
61 }
62
63 def setup(app):
64 app.add_config_value('plot_gallery', True, 'html')
65 app.add_config_value('sphinxgallery_conf', gallery_conf, 'html')
66 app.add_stylesheet('gallery.css')
67
68 app.connect('builder-inited', generate_gallery_rst)
69
70 app.connect('build-finished', embed_code_links)
71
72 # Sphinx hack: sphinx copies generated images to the build directory
73 # each time the docs are made. If the desired image name already
74 # exists, it appends a digit to prevent overwrites. The problem is,
75 # the directory is never cleared. This means that each time you build
76 # the docs, the number of images in the directory grows.
77 #
78 # This question has been asked on the sphinx development list, but there
79 # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
80 #
81 # The following is a hack that prevents this behavior by clearing the
82 # image build directory each time the docs are built. If sphinx
83 # changes their layout between versions, this will not work (though
84 # it should probably not cause a crash). Tested successfully
85 # on Sphinx 1.0.7
86 build_image_dir = '_build/html/_images'
87 if os.path.exists(build_image_dir):
88 filelist = os.listdir(build_image_dir)
89 for filename in filelist:
90 if filename.endswith('png'):
91 os.remove(os.path.join(build_image_dir, filename))
92
93 def setup_module():
94 # HACK: Stop nosetests running setup() above
95 pass
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/sphinxext/sphinxgallery/__init__.py b/doc/sphinxext/sphinxgallery/__init__.py
--- a/doc/sphinxext/sphinxgallery/__init__.py
+++ b/doc/sphinxext/sphinxgallery/__init__.py
@@ -1,7 +1,7 @@
"""Sphinx Gallery
"""
import os
-__version__ = '0.0.8'
+__version__ = '0.0.9-dev'
def path_static():
"""Returns path to packaged static files"""
diff --git a/doc/sphinxext/sphinxgallery/gen_gallery.py b/doc/sphinxext/sphinxgallery/gen_gallery.py
--- a/doc/sphinxext/sphinxgallery/gen_gallery.py
+++ b/doc/sphinxext/sphinxgallery/gen_gallery.py
@@ -16,14 +16,14 @@
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
- if not plot_gallery:
- return
-
gallery_conf.update(app.config.sphinxgallery_conf)
# this assures I can call the config in other places
app.config.sphinxgallery_conf = gallery_conf
+ if not plot_gallery:
+ return
+
examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])
gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])
mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])
|
{"golden_diff": "diff --git a/doc/sphinxext/sphinxgallery/__init__.py b/doc/sphinxext/sphinxgallery/__init__.py\n--- a/doc/sphinxext/sphinxgallery/__init__.py\n+++ b/doc/sphinxext/sphinxgallery/__init__.py\n@@ -1,7 +1,7 @@\n \"\"\"Sphinx Gallery\n \"\"\"\n import os\n-__version__ = '0.0.8'\n+__version__ = '0.0.9-dev'\n \n def path_static():\n \"\"\"Returns path to packaged static files\"\"\"\ndiff --git a/doc/sphinxext/sphinxgallery/gen_gallery.py b/doc/sphinxext/sphinxgallery/gen_gallery.py\n--- a/doc/sphinxext/sphinxgallery/gen_gallery.py\n+++ b/doc/sphinxext/sphinxgallery/gen_gallery.py\n@@ -16,14 +16,14 @@\n except TypeError:\n plot_gallery = bool(app.builder.config.plot_gallery)\n \n- if not plot_gallery:\n- return\n-\n gallery_conf.update(app.config.sphinxgallery_conf)\n \n # this assures I can call the config in other places\n app.config.sphinxgallery_conf = gallery_conf\n \n+ if not plot_gallery:\n+ return\n+\n examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])\n gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])\n mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])\n", "issue": "Doc does not build\nAm I the only one?\n\n```\nException occurred:\n File \"/home/aa013911/work/nilearn/doc/sphinxext/sphinxgallery/docs_resolv.py\", line 348, in embed_code_links\n gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])\nKeyError: 'gallery_dir'\nThe full traceback has been saved in /tmp/sphinx-err-sePU7r.log, if you want to report the issue to the developers.\nPlease also report this if it was a user error, so that a better error message can be provided next time.\n```\n\n", "before_files": [{"content": "\"\"\"Sphinx Gallery\n\"\"\"\nimport os\n__version__ = '0.0.8'\n\ndef path_static():\n \"\"\"Returns path to packaged static files\"\"\"\n return os.path.abspath(os.path.dirname(__file__))+'/_static'\n", "path": "doc/sphinxext/sphinxgallery/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Author: \u00d3scar N\u00e1jera\n# License: 3-clause BSD\n\nimport os\nfrom sphinxgallery.gen_rst import generate_dir_rst\nfrom sphinxgallery.docs_resolv import embed_code_links\n\n\ndef generate_gallery_rst(app):\n \"\"\"Starts the gallery configuration and recursively scans the examples\n directory in order to populate the examples gallery\n \"\"\"\n try:\n plot_gallery = eval(app.builder.config.plot_gallery)\n except TypeError:\n plot_gallery = bool(app.builder.config.plot_gallery)\n\n if not plot_gallery:\n return\n\n gallery_conf.update(app.config.sphinxgallery_conf)\n\n # this assures I can call the config in other places\n app.config.sphinxgallery_conf = gallery_conf\n\n examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])\n gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])\n mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])\n\n for workdir in [examples_dir, gallery_dir, mod_examples_dir]:\n if not os.path.exists(workdir):\n os.makedirs(workdir)\n\n # we create an index.rst with all examples\n fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')\n fhindex.write(\"\"\"\n\n.. 
_examples-index:\n\nGallery of Examples\n===================\n\n\"\"\")\n # Here we don't use an os.walk, but we recurse only twice: flat is\n # better than nested.\n seen_backrefs = set()\n generate_dir_rst('.', fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)\n for directory in sorted(os.listdir(examples_dir)):\n if os.path.isdir(os.path.join(examples_dir, directory)):\n generate_dir_rst(directory, fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)\n fhindex.flush()\n\n\ngallery_conf = {\n 'examples_dir' : '../examples',\n 'gallery_dir' : 'auto_examples',\n 'mod_example_dir': 'modules/generated',\n 'doc_module' : (),\n 'reference_url' : {},\n}\n\ndef setup(app):\n app.add_config_value('plot_gallery', True, 'html')\n app.add_config_value('sphinxgallery_conf', gallery_conf, 'html')\n app.add_stylesheet('gallery.css')\n\n app.connect('builder-inited', generate_gallery_rst)\n\n app.connect('build-finished', embed_code_links)\n\n # Sphinx hack: sphinx copies generated images to the build directory\n # each time the docs are made. If the desired image name already\n # exists, it appends a digit to prevent overwrites. The problem is,\n # the directory is never cleared. This means that each time you build\n # the docs, the number of images in the directory grows.\n #\n # This question has been asked on the sphinx development list, but there\n # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html\n #\n # The following is a hack that prevents this behavior by clearing the\n # image build directory each time the docs are built. If sphinx\n # changes their layout between versions, this will not work (though\n # it should probably not cause a crash). Tested successfully\n # on Sphinx 1.0.7\n build_image_dir = '_build/html/_images'\n if os.path.exists(build_image_dir):\n filelist = os.listdir(build_image_dir)\n for filename in filelist:\n if filename.endswith('png'):\n os.remove(os.path.join(build_image_dir, filename))\n\ndef setup_module():\n # HACK: Stop nosetests running setup() above\n pass\n", "path": "doc/sphinxext/sphinxgallery/gen_gallery.py"}], "after_files": [{"content": "\"\"\"Sphinx Gallery\n\"\"\"\nimport os\n__version__ = '0.0.9-dev'\n\ndef path_static():\n \"\"\"Returns path to packaged static files\"\"\"\n return os.path.abspath(os.path.dirname(__file__))+'/_static'\n", "path": "doc/sphinxext/sphinxgallery/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Author: \u00d3scar N\u00e1jera\n# License: 3-clause BSD\n\nimport os\nfrom sphinxgallery.gen_rst import generate_dir_rst\nfrom sphinxgallery.docs_resolv import embed_code_links\n\n\ndef generate_gallery_rst(app):\n \"\"\"Starts the gallery configuration and recursively scans the examples\n directory in order to populate the examples gallery\n \"\"\"\n try:\n plot_gallery = eval(app.builder.config.plot_gallery)\n except TypeError:\n plot_gallery = bool(app.builder.config.plot_gallery)\n\n gallery_conf.update(app.config.sphinxgallery_conf)\n\n # this assures I can call the config in other places\n app.config.sphinxgallery_conf = gallery_conf\n\n if not plot_gallery:\n return\n\n examples_dir = os.path.join(app.builder.srcdir, gallery_conf['examples_dir'])\n gallery_dir = os.path.join(app.builder.srcdir, gallery_conf['gallery_dir'])\n mod_examples_dir = os.path.join(app.builder.srcdir, gallery_conf['mod_example_dir'])\n\n for workdir in [examples_dir, gallery_dir, mod_examples_dir]:\n if not os.path.exists(workdir):\n os.makedirs(workdir)\n\n # we create an 
index.rst with all examples\n fhindex = open(os.path.join(gallery_dir, 'index.rst'), 'w')\n fhindex.write(\"\"\"\n\n.. _examples-index:\n\nGallery of Examples\n===================\n\n\"\"\")\n # Here we don't use an os.walk, but we recurse only twice: flat is\n # better than nested.\n seen_backrefs = set()\n generate_dir_rst('.', fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)\n for directory in sorted(os.listdir(examples_dir)):\n if os.path.isdir(os.path.join(examples_dir, directory)):\n generate_dir_rst(directory, fhindex, examples_dir, gallery_dir, gallery_conf, plot_gallery, seen_backrefs)\n fhindex.flush()\n\n\ngallery_conf = {\n 'examples_dir' : '../examples',\n 'gallery_dir' : 'auto_examples',\n 'mod_example_dir': 'modules/generated',\n 'doc_module' : (),\n 'reference_url' : {},\n}\n\ndef setup(app):\n app.add_config_value('plot_gallery', True, 'html')\n app.add_config_value('sphinxgallery_conf', gallery_conf, 'html')\n app.add_stylesheet('gallery.css')\n\n app.connect('builder-inited', generate_gallery_rst)\n\n app.connect('build-finished', embed_code_links)\n\n # Sphinx hack: sphinx copies generated images to the build directory\n # each time the docs are made. If the desired image name already\n # exists, it appends a digit to prevent overwrites. The problem is,\n # the directory is never cleared. This means that each time you build\n # the docs, the number of images in the directory grows.\n #\n # This question has been asked on the sphinx development list, but there\n # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html\n #\n # The following is a hack that prevents this behavior by clearing the\n # image build directory each time the docs are built. If sphinx\n # changes their layout between versions, this will not work (though\n # it should probably not cause a crash). Tested successfully\n # on Sphinx 1.0.7\n build_image_dir = '_build/html/_images'\n if os.path.exists(build_image_dir):\n filelist = os.listdir(build_image_dir)\n for filename in filelist:\n if filename.endswith('png'):\n os.remove(os.path.join(build_image_dir, filename))\n\ndef setup_module():\n # HACK: Stop nosetests running setup() above\n pass\n", "path": "doc/sphinxext/sphinxgallery/gen_gallery.py"}]}
| 1,504 | 312 |
gh_patches_debug_22829
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-891
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JsProxy does not handle Error correctly
Example:
```python
from js import Error
e = Error.new("Hi")
raise e
```
Raises: `TypeError: exceptions must derive from BaseException`.
This came up in https://github.com/iodide-project/pyodide/pull/880#pullrequestreview-555341317. I will try to fix this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyodide-py/pyodide/__init__.py`
Content:
```
1 from ._base import open_url, eval_code, find_imports, as_nested_list
2 from .console import get_completions
3
4 __version__ = "0.15.0"
5
6 __all__ = ["open_url", "eval_code", "find_imports", "as_nested_list", "get_completions"]
7
```
Path: `src/pyodide-py/pyodide/_base.py`
Content:
```
1 """
2 A library of helper utilities for connecting Python to the browser environment.
3 """
4
5 import ast
6 from io import StringIO
7 from textwrap import dedent
8 from typing import Dict, List, Any
9
10
11 def open_url(url: str) -> StringIO:
12 """
13 Fetches a given URL
14
15 Parameters
16 ----------
17 url
18 URL to fetch
19
20 Returns
21 -------
22 a io.StringIO object with the contents of the URL.
23 """
24 from js import XMLHttpRequest
25
26 req = XMLHttpRequest.new()
27 req.open("GET", url, False)
28 req.send(None)
29 return StringIO(req.response)
30
31
32 def eval_code(code: str, ns: Dict[str, Any]) -> None:
33 """Runs a code string
34
35 The last part of the provided code may be an expression.
36
37 Parameters
38 ----------
39 code
40 the Python code to run.
41 ns
42 `locals()` or `globals()` context where to execute code.
43
44 Returns
45 -------
46 None
47 """
48 # handle mis-indented input from multi-line strings
49 code = dedent(code)
50
51 mod = ast.parse(code)
52 if len(mod.body) == 0:
53 return None
54
55 expr: Any
56 if isinstance(mod.body[-1], ast.Expr):
57 expr = ast.Expression(mod.body[-1].value)
58 del mod.body[-1]
59 else:
60 expr = None
61
62 if len(mod.body):
63 exec(compile(mod, "<exec>", mode="exec"), ns, ns)
64 if expr is not None:
65 return eval(compile(expr, "<eval>", mode="eval"), ns, ns)
66 else:
67 return None
68
69
70 def find_imports(code: str) -> List[str]:
71 """
72 Finds the imports in a string of code
73
74 Parameters
75 ----------
76 code
77 the Python code to run.
78
79 Returns
80 -------
81 A list of module names that are imported in the code.
82
83 Examples
84 --------
85 >>> from pyodide import find_imports
86 >>> code = "import numpy as np; import scipy.stats"
87 >>> find_imports(code)
88 ['numpy', 'scipy']
89 """
90 # handle mis-indented input from multi-line strings
91 code = dedent(code)
92
93 mod = ast.parse(code)
94 imports = set()
95 for node in ast.walk(mod):
96 if isinstance(node, ast.Import):
97 for name in node.names:
98 node_name = name.name
99 imports.add(node_name.split(".")[0])
100 elif isinstance(node, ast.ImportFrom):
101 module_name = node.module
102 if module_name is None:
103 continue
104 imports.add(module_name.split(".")[0])
105 return list(sorted(imports))
106
107
108 def as_nested_list(obj) -> List:
109 """Convert a nested JS array to nested Python list.
110
111 Assumes a Javascript object is made of (possibly nested) arrays and
112 converts them to nested Python lists.
113
114 Parameters
115 ----------
116 obj
117 a Javscript object made of nested arrays.
118
119 Returns
120 -------
121 Python list, or a nested Python list
122 """
123 try:
124 it = iter(obj)
125 return [as_nested_list(x) for x in it]
126 except TypeError:
127 return obj
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyodide-py/pyodide/__init__.py b/src/pyodide-py/pyodide/__init__.py
--- a/src/pyodide-py/pyodide/__init__.py
+++ b/src/pyodide-py/pyodide/__init__.py
@@ -1,6 +1,13 @@
-from ._base import open_url, eval_code, find_imports, as_nested_list
+from ._base import open_url, eval_code, find_imports, as_nested_list, JsException
from .console import get_completions
__version__ = "0.15.0"
-__all__ = ["open_url", "eval_code", "find_imports", "as_nested_list", "get_completions"]
+__all__ = [
+ "open_url",
+ "eval_code",
+ "find_imports",
+ "as_nested_list",
+ "get_completions",
+ "JsException",
+]
diff --git a/src/pyodide-py/pyodide/_base.py b/src/pyodide-py/pyodide/_base.py
--- a/src/pyodide-py/pyodide/_base.py
+++ b/src/pyodide-py/pyodide/_base.py
@@ -1,6 +1,8 @@
"""
A library of helper utilities for connecting Python to the browser environment.
"""
+# Added by C:
+# JsException (from jsproxy.c)
import ast
from io import StringIO
@@ -8,6 +10,16 @@
from typing import Dict, List, Any
+class JsException(Exception):
+ """
+ A wrapper around a Javascript Error to allow the Error to be thrown in Python.
+ """
+
+ # This gets overwritten in jsproxy.c, it is just here for autodoc and humans
+ # reading this file.
+ pass
+
+
def open_url(url: str) -> StringIO:
"""
Fetches a given URL
|
{"golden_diff": "diff --git a/src/pyodide-py/pyodide/__init__.py b/src/pyodide-py/pyodide/__init__.py\n--- a/src/pyodide-py/pyodide/__init__.py\n+++ b/src/pyodide-py/pyodide/__init__.py\n@@ -1,6 +1,13 @@\n-from ._base import open_url, eval_code, find_imports, as_nested_list\n+from ._base import open_url, eval_code, find_imports, as_nested_list, JsException\n from .console import get_completions\n \n __version__ = \"0.15.0\"\n \n-__all__ = [\"open_url\", \"eval_code\", \"find_imports\", \"as_nested_list\", \"get_completions\"]\n+__all__ = [\n+ \"open_url\",\n+ \"eval_code\",\n+ \"find_imports\",\n+ \"as_nested_list\",\n+ \"get_completions\",\n+ \"JsException\",\n+]\ndiff --git a/src/pyodide-py/pyodide/_base.py b/src/pyodide-py/pyodide/_base.py\n--- a/src/pyodide-py/pyodide/_base.py\n+++ b/src/pyodide-py/pyodide/_base.py\n@@ -1,6 +1,8 @@\n \"\"\"\n A library of helper utilities for connecting Python to the browser environment.\n \"\"\"\n+# Added by C:\n+# JsException (from jsproxy.c)\n \n import ast\n from io import StringIO\n@@ -8,6 +10,16 @@\n from typing import Dict, List, Any\n \n \n+class JsException(Exception):\n+ \"\"\"\n+ A wrapper around a Javascript Error to allow the Error to be thrown in Python.\n+ \"\"\"\n+\n+ # This gets overwritten in jsproxy.c, it is just here for autodoc and humans\n+ # reading this file.\n+ pass\n+\n+\n def open_url(url: str) -> StringIO:\n \"\"\"\n Fetches a given URL\n", "issue": "JsProxy does not handle Error correctly\nExample:\r\n```python\r\nfrom js import Error\r\ne = Error.new(\"Hi\")\r\nraise e\r\n```\r\nRaises: `TypeError: exceptions must derive from BaseException`.\r\nThis came up in https://github.com/iodide-project/pyodide/pull/880#pullrequestreview-555341317. I will try to fix this.\n", "before_files": [{"content": "from ._base import open_url, eval_code, find_imports, as_nested_list\nfrom .console import get_completions\n\n__version__ = \"0.15.0\"\n\n__all__ = [\"open_url\", \"eval_code\", \"find_imports\", \"as_nested_list\", \"get_completions\"]\n", "path": "src/pyodide-py/pyodide/__init__.py"}, {"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nimport ast\nfrom io import StringIO\nfrom textwrap import dedent\nfrom typing import Dict, List, Any\n\n\ndef open_url(url: str) -> StringIO:\n \"\"\"\n Fetches a given URL\n\n Parameters\n ----------\n url\n URL to fetch\n\n Returns\n -------\n a io.StringIO object with the contents of the URL.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, False)\n req.send(None)\n return StringIO(req.response)\n\n\ndef eval_code(code: str, ns: Dict[str, Any]) -> None:\n \"\"\"Runs a code string\n\n The last part of the provided code may be an expression.\n\n Parameters\n ----------\n code\n the Python code to run.\n ns\n `locals()` or `globals()` context where to execute code.\n\n Returns\n -------\n None\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n expr: Any\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, \"<exec>\", mode=\"exec\"), ns, ns)\n if expr is not None:\n return eval(compile(expr, \"<eval>\", mode=\"eval\"), ns, ns)\n else:\n return None\n\n\ndef find_imports(code: str) -> List[str]:\n \"\"\"\n Finds the imports in a string of code\n\n Parameters\n ----------\n code\n 
the Python code to run.\n\n Returns\n -------\n A list of module names that are imported in the code.\n\n Examples\n --------\n >>> from pyodide import find_imports\n >>> code = \"import numpy as np; import scipy.stats\"\n >>> find_imports(code)\n ['numpy', 'scipy']\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n node_name = name.name\n imports.add(node_name.split(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n module_name = node.module\n if module_name is None:\n continue\n imports.add(module_name.split(\".\")[0])\n return list(sorted(imports))\n\n\ndef as_nested_list(obj) -> List:\n \"\"\"Convert a nested JS array to nested Python list.\n\n Assumes a Javascript object is made of (possibly nested) arrays and\n converts them to nested Python lists.\n\n Parameters\n ----------\n obj\n a Javscript object made of nested arrays.\n\n Returns\n -------\n Python list, or a nested Python list\n \"\"\"\n try:\n it = iter(obj)\n return [as_nested_list(x) for x in it]\n except TypeError:\n return obj\n", "path": "src/pyodide-py/pyodide/_base.py"}], "after_files": [{"content": "from ._base import open_url, eval_code, find_imports, as_nested_list, JsException\nfrom .console import get_completions\n\n__version__ = \"0.15.0\"\n\n__all__ = [\n \"open_url\",\n \"eval_code\",\n \"find_imports\",\n \"as_nested_list\",\n \"get_completions\",\n \"JsException\",\n]\n", "path": "src/pyodide-py/pyodide/__init__.py"}, {"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n# Added by C:\n# JsException (from jsproxy.c)\n\nimport ast\nfrom io import StringIO\nfrom textwrap import dedent\nfrom typing import Dict, List, Any\n\n\nclass JsException(Exception):\n \"\"\"\n A wrapper around a Javascript Error to allow the Error to be thrown in Python.\n \"\"\"\n\n # This gets overwritten in jsproxy.c, it is just here for autodoc and humans\n # reading this file.\n pass\n\n\ndef open_url(url: str) -> StringIO:\n \"\"\"\n Fetches a given URL\n\n Parameters\n ----------\n url\n URL to fetch\n\n Returns\n -------\n a io.StringIO object with the contents of the URL.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open(\"GET\", url, False)\n req.send(None)\n return StringIO(req.response)\n\n\ndef eval_code(code: str, ns: Dict[str, Any]) -> None:\n \"\"\"Runs a code string\n\n The last part of the provided code may be an expression.\n\n Parameters\n ----------\n code\n the Python code to run.\n ns\n `locals()` or `globals()` context where to execute code.\n\n Returns\n -------\n None\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n expr: Any\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, \"<exec>\", mode=\"exec\"), ns, ns)\n if expr is not None:\n return eval(compile(expr, \"<eval>\", mode=\"eval\"), ns, ns)\n else:\n return None\n\n\ndef find_imports(code: str) -> List[str]:\n \"\"\"\n Finds the imports in a string of code\n\n Parameters\n ----------\n code\n the Python code to run.\n\n Returns\n -------\n A list of module names that are imported in the code.\n\n Examples\n --------\n >>> from pyodide import find_imports\n >>> code = \"import numpy 
as np; import scipy.stats\"\n >>> find_imports(code)\n ['numpy', 'scipy']\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n node_name = name.name\n imports.add(node_name.split(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n module_name = node.module\n if module_name is None:\n continue\n imports.add(module_name.split(\".\")[0])\n return list(sorted(imports))\n\n\ndef as_nested_list(obj) -> List:\n \"\"\"Convert a nested JS array to nested Python list.\n\n Assumes a Javascript object is made of (possibly nested) arrays and\n converts them to nested Python lists.\n\n Parameters\n ----------\n obj\n a Javscript object made of nested arrays.\n\n Returns\n -------\n Python list, or a nested Python list\n \"\"\"\n try:\n it = iter(obj)\n return [as_nested_list(x) for x in it]\n except TypeError:\n return obj\n", "path": "src/pyodide-py/pyodide/_base.py"}]}
| 1,422 | 429 |
gh_patches_debug_20992
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor bug in export orders as CSV task
**Describe the bug**
Minor bug in exporting orders as CSV. Trying to decode string which throws a type error:
```python
if order.user and order.user.first_name else '',
str(order.user.last_name, 'utf-8')
```
**To Reproduce**
Steps to reproduce the behavior:
1. Start export orders as csv task
2. Make sure user is linked for that particular order
3. See the error
**Expected behavior**
It should work properly
**Stacktrace**
```sh
File "/home/mayank/OpenSource/open-event-server/app/api/helpers/tasks.py", line 238, in export_order_csv_task
content = export_orders_csv(orders)
File "/home/mayank/OpenSource/open-event-server/app/api/helpers/csv_jobs_util.py", line 13, in export_orders_csv
if order.user and order.user.first_name else '',
TypeError: decoding str is not supported
```
**Additional details (please complete the following information):**
- OS: Ubuntu 18.04
- Python Version 3.6
- `HEAD` Commit hash [e.g. `4629c62`]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/csv_jobs_util.py`
Content:
```
1 def export_orders_csv(orders):
2 headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',
3 'Discount Code', 'First Name', 'Last Name', 'Email']
4
5 rows = [headers]
6 for order in orders:
7 if order.status != "deleted":
8 column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',
9 str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',
10 str(order.amount) if order.amount else '', str(order.get_tickets_count()),
11 str(order.discount_code.code) if order.discount_code else '',
12 str(order.user.first_name, 'utf-8')
13 if order.user and order.user.first_name else '',
14 str(order.user.last_name, 'utf-8')
15 if order.user and order.user.last_name else '',
16 str(order.user._email, 'utf-8') if order.user and order.user._email else '']
17 rows.append(column)
18
19 return rows
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py
--- a/app/api/helpers/csv_jobs_util.py
+++ b/app/api/helpers/csv_jobs_util.py
@@ -9,11 +9,11 @@
str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',
str(order.amount) if order.amount else '', str(order.get_tickets_count()),
str(order.discount_code.code) if order.discount_code else '',
- str(order.user.first_name, 'utf-8')
+ str(order.user.first_name)
if order.user and order.user.first_name else '',
- str(order.user.last_name, 'utf-8')
+ str(order.user.last_name)
if order.user and order.user.last_name else '',
- str(order.user._email, 'utf-8') if order.user and order.user._email else '']
+ str(order.user.email) if order.user and order.user.email else '']
rows.append(column)
return rows
|
{"golden_diff": "diff --git a/app/api/helpers/csv_jobs_util.py b/app/api/helpers/csv_jobs_util.py\n--- a/app/api/helpers/csv_jobs_util.py\n+++ b/app/api/helpers/csv_jobs_util.py\n@@ -9,11 +9,11 @@\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.get_tickets_count()),\n str(order.discount_code.code) if order.discount_code else '',\n- str(order.user.first_name, 'utf-8')\n+ str(order.user.first_name)\n if order.user and order.user.first_name else '',\n- str(order.user.last_name, 'utf-8')\n+ str(order.user.last_name)\n if order.user and order.user.last_name else '',\n- str(order.user._email, 'utf-8') if order.user and order.user._email else '']\n+ str(order.user.email) if order.user and order.user.email else '']\n rows.append(column)\n \n return rows\n", "issue": "Minor bug in export orders as CSV task\n**Describe the bug**\r\nMinor bug in exporting orders as CSV. Trying to decode string which throws a type error:\r\n```python\r\n if order.user and order.user.first_name else '',\r\n str(order.user.last_name, 'utf-8')\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Start export orders as csv task \r\n2. Make sure user is linked for that particular order\r\n3. See the error\r\n\r\n**Expected behavior**\r\nIt should work properly\r\n\r\n**Stacktrace**\r\n```sh\r\n File \"/home/mayank/OpenSource/open-event-server/app/api/helpers/tasks.py\", line 238, in export_order_csv_task\r\n content = export_orders_csv(orders)\r\n File \"/home/mayank/OpenSource/open-event-server/app/api/helpers/csv_jobs_util.py\", line 13, in export_orders_csv\r\n if order.user and order.user.first_name else '',\r\nTypeError: decoding str is not supported\r\n```\r\n**Additional details (please complete the following information):**\r\n - OS: Ubuntu 18.04\r\n - Python Version 3.6\r\n - `HEAD` Commit hash [e.g. 
`4629c62`]\n", "before_files": [{"content": "def export_orders_csv(orders):\n headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',\n 'Discount Code', 'First Name', 'Last Name', 'Email']\n\n rows = [headers]\n for order in orders:\n if order.status != \"deleted\":\n column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.get_tickets_count()),\n str(order.discount_code.code) if order.discount_code else '',\n str(order.user.first_name, 'utf-8')\n if order.user and order.user.first_name else '',\n str(order.user.last_name, 'utf-8')\n if order.user and order.user.last_name else '',\n str(order.user._email, 'utf-8') if order.user and order.user._email else '']\n rows.append(column)\n\n return rows\n", "path": "app/api/helpers/csv_jobs_util.py"}], "after_files": [{"content": "def export_orders_csv(orders):\n headers = ['Order#', 'Order Date', 'Status', 'Payment Type', 'Total Amount', 'Quantity',\n 'Discount Code', 'First Name', 'Last Name', 'Email']\n\n rows = [headers]\n for order in orders:\n if order.status != \"deleted\":\n column = [str(order.get_invoice_number()), str(order.created_at) if order.created_at else '',\n str(order.status) if order.status else '', str(order.paid_via) if order.paid_via else '',\n str(order.amount) if order.amount else '', str(order.get_tickets_count()),\n str(order.discount_code.code) if order.discount_code else '',\n str(order.user.first_name)\n if order.user and order.user.first_name else '',\n str(order.user.last_name)\n if order.user and order.user.last_name else '',\n str(order.user.email) if order.user and order.user.email else '']\n rows.append(column)\n\n return rows\n", "path": "app/api/helpers/csv_jobs_util.py"}]}
| 767 | 219 |
gh_patches_debug_56926
|
rasdani/github-patches
|
git_diff
|
kivy__python-for-android-615
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenSSL 1.0.2e outdated (replaced by 1.0.2f)
A new version has been published today. The recipe no longer can download version 1.0.2e.
Does `disable-solver.patch` still apply?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/openssl/__init__.py`
Content:
```
1 from functools import partial
2
3 from pythonforandroid.toolchain import Recipe, shprint, current_directory
4 import sh
5
6
7 class OpenSSLRecipe(Recipe):
8 version = '1.0.2e'
9 url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
10
11 def should_build(self, arch):
12 return not self.has_libs(arch, 'libssl.so', 'libcrypto.so')
13
14 def check_symbol(self, env, sofile, symbol):
15 nm = env.get('NM', 'nm')
16 syms = sh.sh('-c', "{} -gp {} | cut -d' ' -f3".format(
17 nm, sofile), _env=env).splitlines()
18 if symbol in syms:
19 return True
20 print('{} missing symbol {}; rebuilding'.format(sofile, symbol))
21 return False
22
23 def get_recipe_env(self, arch=None):
24 env = super(OpenSSLRecipe, self).get_recipe_env(arch)
25 env['CFLAGS'] += ' ' + env['LDFLAGS']
26 env['CC'] += ' ' + env['LDFLAGS']
27 return env
28
29 def select_build_arch(self, arch):
30 aname = arch.arch
31 if 'arm64' in aname:
32 return 'linux-aarch64'
33 if 'v7a' in aname:
34 return 'android-armv7'
35 if 'arm' in aname:
36 return 'android'
37 return 'linux-armv4'
38
39 def build_arch(self, arch):
40 env = self.get_recipe_env(arch)
41 with current_directory(self.get_build_dir(arch.arch)):
42 # sh fails with code 255 trying to execute ./Configure
43 # so instead we manually run perl passing in Configure
44 perl = sh.Command('perl')
45 buildarch = self.select_build_arch(arch)
46 shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)
47 self.apply_patch('disable-sover.patch', arch.arch)
48
49 check_crypto = partial(self.check_symbol, env, 'libcrypto.so')
50 # check_ssl = partial(self.check_symbol, env, 'libssl.so')
51 while True:
52 shprint(sh.make, 'build_libs', _env=env)
53 if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):
54 break
55 shprint(sh.make, 'clean', _env=env)
56
57 self.install_libs(arch, 'libssl.so', 'libcrypto.so')
58
59 recipe = OpenSSLRecipe()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py
--- a/pythonforandroid/recipes/openssl/__init__.py
+++ b/pythonforandroid/recipes/openssl/__init__.py
@@ -5,7 +5,7 @@
class OpenSSLRecipe(Recipe):
- version = '1.0.2e'
+ version = '1.0.2f'
url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'
def should_build(self, arch):
|
{"golden_diff": "diff --git a/pythonforandroid/recipes/openssl/__init__.py b/pythonforandroid/recipes/openssl/__init__.py\n--- a/pythonforandroid/recipes/openssl/__init__.py\n+++ b/pythonforandroid/recipes/openssl/__init__.py\n@@ -5,7 +5,7 @@\n \n \n class OpenSSLRecipe(Recipe):\n- version = '1.0.2e'\n+ version = '1.0.2f'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n \n def should_build(self, arch):\n", "issue": "OpenSSL 1.0.2e outdated (replaced by 1.0.2f)\nA new version has been published today. The recipe no longer can download version 1.0.2e.\n\nDoes `disable-solver.patch` still apply?\n\n", "before_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2e'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl.so', 'libcrypto.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n\n check_crypto = partial(self.check_symbol, env, 'libcrypto.so')\n # check_ssl = partial(self.check_symbol, env, 'libssl.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl.so', 'libcrypto.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}], "after_files": [{"content": "from functools import partial\n\nfrom pythonforandroid.toolchain import Recipe, shprint, current_directory\nimport sh\n\n\nclass OpenSSLRecipe(Recipe):\n version = '1.0.2f'\n url = 'https://www.openssl.org/source/openssl-{version}.tar.gz'\n\n def should_build(self, arch):\n return not self.has_libs(arch, 'libssl.so', 'libcrypto.so')\n\n def check_symbol(self, env, sofile, symbol):\n nm = env.get('NM', 'nm')\n syms = sh.sh('-c', \"{} -gp {} | cut -d' ' -f3\".format(\n nm, sofile), _env=env).splitlines()\n if symbol in syms:\n return True\n print('{} missing symbol {}; rebuilding'.format(sofile, symbol))\n return False\n\n def get_recipe_env(self, arch=None):\n env = super(OpenSSLRecipe, self).get_recipe_env(arch)\n env['CFLAGS'] += ' ' + env['LDFLAGS']\n env['CC'] += ' ' + env['LDFLAGS']\n return env\n\n def select_build_arch(self, arch):\n aname = arch.arch\n if 'arm64' in aname:\n return 'linux-aarch64'\n if 'v7a' in aname:\n return 
'android-armv7'\n if 'arm' in aname:\n return 'android'\n return 'linux-armv4'\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # sh fails with code 255 trying to execute ./Configure\n # so instead we manually run perl passing in Configure\n perl = sh.Command('perl')\n buildarch = self.select_build_arch(arch)\n shprint(perl, 'Configure', 'shared', 'no-dso', 'no-krb5', buildarch, _env=env)\n self.apply_patch('disable-sover.patch', arch.arch)\n\n check_crypto = partial(self.check_symbol, env, 'libcrypto.so')\n # check_ssl = partial(self.check_symbol, env, 'libssl.so')\n while True:\n shprint(sh.make, 'build_libs', _env=env)\n if all(map(check_crypto, ('SSLeay', 'MD5_Transform', 'MD4_Init'))):\n break\n shprint(sh.make, 'clean', _env=env)\n\n self.install_libs(arch, 'libssl.so', 'libcrypto.so')\n\nrecipe = OpenSSLRecipe()\n", "path": "pythonforandroid/recipes/openssl/__init__.py"}]}
| 1,001 | 125 |
gh_patches_debug_520
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-303
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggest use of httplib2 v0.9.2 in readme if using python > 3
Python3.x implementations of the client, passes bytestring to httplib2 header encoding method. Httplib2 < 0.9.2 does not support this.
Documentation should make a note that implementations in python3 should explicitly install httplib2 0.9.2. PyPi index targets 0.8.0, which is no bueno for those unaware of this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 6):
25 print('google-api-python-client requires python version >= 2.6.',
26 file=sys.stderr)
27 sys.exit(1)
28 if (3, 1) <= sys.version_info < (3, 3):
29 print('google-api-python-client requires python3 version >= 3.3.',
30 file=sys.stderr)
31 sys.exit(1)
32
33 from setuptools import setup
34 import pkg_resources
35
36 def _DetectBadness():
37 import os
38 if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:
39 return
40 o2c_pkg = None
41 try:
42 o2c_pkg = pkg_resources.get_distribution('oauth2client')
43 except pkg_resources.DistributionNotFound:
44 pass
45 oauth2client = None
46 try:
47 import oauth2client
48 except ImportError:
49 pass
50 if o2c_pkg is None and oauth2client is not None:
51 raise RuntimeError(
52 'Previous version of google-api-python-client detected; due to a '
53 'packaging issue, we cannot perform an in-place upgrade. Please remove '
54 'the old version and re-install this package.'
55 )
56
57 _DetectBadness()
58
59 packages = [
60 'apiclient',
61 'googleapiclient',
62 'googleapiclient/discovery_cache',
63 ]
64
65 install_requires = [
66 'httplib2>=0.8,<1dev',
67 'oauth2client>=1.5.0,<5.0.0dev',
68 'six>=1.6.1,<2dev',
69 'uritemplate>=3.0.0,<4dev',
70 ]
71
72 if sys.version_info < (2, 7):
73 install_requires.append('argparse')
74
75 long_desc = """The Google API Client for Python is a client library for
76 accessing the Plus, Moderator, and many other Google APIs."""
77
78 import googleapiclient
79 version = googleapiclient.__version__
80
81 setup(
82 name="google-api-python-client",
83 version=version,
84 description="Google API Client Library for Python",
85 long_description=long_desc,
86 author="Google Inc.",
87 url="http://github.com/google/google-api-python-client/",
88 install_requires=install_requires,
89 packages=packages,
90 package_data={},
91 license="Apache 2.0",
92 keywords="google api client",
93 classifiers=[
94 'Programming Language :: Python :: 2',
95 'Programming Language :: Python :: 2.6',
96 'Programming Language :: Python :: 2.7',
97 'Programming Language :: Python :: 3',
98 'Programming Language :: Python :: 3.3',
99 'Programming Language :: Python :: 3.4',
100 'Development Status :: 5 - Production/Stable',
101 'Intended Audience :: Developers',
102 'License :: OSI Approved :: Apache Software License',
103 'Operating System :: OS Independent',
104 'Topic :: Internet :: WWW/HTTP',
105 ],
106 )
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,7 @@
]
install_requires = [
- 'httplib2>=0.8,<1dev',
+ 'httplib2>=0.9.2,<1dev',
'oauth2client>=1.5.0,<5.0.0dev',
'six>=1.6.1,<2dev',
'uritemplate>=3.0.0,<4dev',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n ]\n \n install_requires = [\n- 'httplib2>=0.8,<1dev',\n+ 'httplib2>=0.9.2,<1dev',\n 'oauth2client>=1.5.0,<5.0.0dev',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n", "issue": "Suggest use of httplib2 v0.9.2 in readme if using python > 3\nPython3.x implementations of the client, passes bytestring to httplib2 header encoding method. Httplib2 < 0.9.2 does not support this. \n\nDocumentation should make a note that implementations in python3 should explicitly install httplib2 0.9.2. PyPi index targets 0.8.0, which is no bueno for those unaware of this issue. \n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.8,<1dev',\n 'oauth2client>=1.5.0,<5.0.0dev',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 6):\n print('google-api-python-client requires python version >= 2.6.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.9.2,<1dev',\n 'oauth2client>=1.5.0,<5.0.0dev',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n]\n\nif sys.version_info < (2, 7):\n install_requires.append('argparse')\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}]}
| 1,374 | 115 |
gh_patches_debug_504
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3702
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"the" plugin uses incorrect regex
### Problem
Running this command:
```sh
~ ❯ beet move artist:trance -p
Moving 40 items.
M:\TET - Travailleur En Trance\[2008] Cobra Coded Escalation\01-01 - Cobra Reporting In.mp3
-> M:\‒ Travailleur En Trance, TET\[2008] Cobra Coded Escalation\01-01 - Cobra Reporting In.mp3
```
Led to this problem:
"TET" is recognized by the "the" plugin as something it should move. This is because the regex used in the.py, line 26, is
```
PATTERN_THE = u'^[the]{3}\\s'
```
which matches "TET". It should probably be:
```
PATTERN_THE = u'^the\\s'
```
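A quick check in the interpreter (an editor's illustration, not part of the original report) shows why the character class misfires while the literal pattern behaves as intended:

```python
import re

# [the]{3} matches ANY three characters drawn from the set {t, h, e}, so
# "TET", "eth" and "hee" all qualify; the literal word "the" is only one case.
old = re.compile(r'^[the]{3}\s', re.IGNORECASE)
new = re.compile(r'^the\s', re.IGNORECASE)

print(bool(old.match("TET - Travailleur En Trance")))  # True  -> false positive
print(bool(new.match("TET - Travailleur En Trance")))  # False -> fixed
print(bool(new.match("The Beatles")))                  # True  -> articles still move
```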
### Setup
* OS: Windows 10 2004
* Python version: 3.8
* beets version: latest trunk
* Turning off plugins made problem go away (yes/no): obviously, if I disable 'the' it no longer does this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/the.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Blemjhoo Tezoulbr <[email protected]>.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Moves patterns in path formats (suitable for moving articles)."""
17
18 from __future__ import division, absolute_import, print_function
19
20 import re
21 from beets.plugins import BeetsPlugin
22
23 __author__ = '[email protected]'
24 __version__ = '1.1'
25
26 PATTERN_THE = u'^[the]{3}\\s'
27 PATTERN_A = u'^[a][n]?\\s'
28 FORMAT = u'{0}, {1}'
29
30
31 class ThePlugin(BeetsPlugin):
32
33 patterns = []
34
35 def __init__(self):
36 super(ThePlugin, self).__init__()
37
38 self.template_funcs['the'] = self.the_template_func
39
40 self.config.add({
41 'the': True,
42 'a': True,
43 'format': u'{0}, {1}',
44 'strip': False,
45 'patterns': [],
46 })
47
48 self.patterns = self.config['patterns'].as_str_seq()
49 for p in self.patterns:
50 if p:
51 try:
52 re.compile(p)
53 except re.error:
54 self._log.error(u'invalid pattern: {0}', p)
55 else:
56 if not (p.startswith('^') or p.endswith('$')):
57 self._log.warning(u'warning: \"{0}\" will not '
58 u'match string start/end', p)
59 if self.config['a']:
60 self.patterns = [PATTERN_A] + self.patterns
61 if self.config['the']:
62 self.patterns = [PATTERN_THE] + self.patterns
63 if not self.patterns:
64 self._log.warning(u'no patterns defined!')
65
66 def unthe(self, text, pattern):
67 """Moves pattern in the path format string or strips it
68
69 text -- text to handle
70 pattern -- regexp pattern (case ignore is already on)
71 strip -- if True, pattern will be removed
72 """
73 if text:
74 r = re.compile(pattern, flags=re.IGNORECASE)
75 try:
76 t = r.findall(text)[0]
77 except IndexError:
78 return text
79 else:
80 r = re.sub(r, '', text).strip()
81 if self.config['strip']:
82 return r
83 else:
84 fmt = self.config['format'].as_str()
85 return fmt.format(r, t.strip()).strip()
86 else:
87 return u''
88
89 def the_template_func(self, text):
90 if not self.patterns:
91 return text
92 if text:
93 for p in self.patterns:
94 r = self.unthe(text, p)
95 if r != text:
96 self._log.debug(u'\"{0}\" -> \"{1}\"', text, r)
97 break
98 return r
99 else:
100 return u''
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/beetsplug/the.py b/beetsplug/the.py
--- a/beetsplug/the.py
+++ b/beetsplug/the.py
@@ -23,7 +23,7 @@
__author__ = '[email protected]'
__version__ = '1.1'
-PATTERN_THE = u'^[the]{3}\\s'
+PATTERN_THE = u'^the\\s'
PATTERN_A = u'^[a][n]?\\s'
FORMAT = u'{0}, {1}'
|
{"golden_diff": "diff --git a/beetsplug/the.py b/beetsplug/the.py\n--- a/beetsplug/the.py\n+++ b/beetsplug/the.py\n@@ -23,7 +23,7 @@\n __author__ = '[email protected]'\n __version__ = '1.1'\n \n-PATTERN_THE = u'^[the]{3}\\\\s'\n+PATTERN_THE = u'^the\\\\s'\n PATTERN_A = u'^[a][n]?\\\\s'\n FORMAT = u'{0}, {1}'\n", "issue": "\"the\" plugin uses incorrect regex\n### Problem\r\n\r\nRunning this command:\r\n\r\n```sh\r\n~ \u276f beet move artist:trance -p\r\nMoving 40 items.\r\nM:\\TET - Travailleur En Trance\\[2008] Cobra Coded Escalation\\01-01 - Cobra Reporting In.mp3\r\n -> M:\\\u2012 Travailleur En Trance, TET\\[2008] Cobra Coded Escalation\\01-01 - Cobra Reporting In.mp3\r\n```\r\n\r\nLed to this problem:\r\n\r\n\"TET\" is recognized by the \"the\" plugin as something it should move. This is because the regex used in the.py, line 26, is\r\n```\r\nPATTERN_THE = u'^[the]{3}\\\\s'\r\n```\r\nwhich matches \"TET\". It should probably be:\r\n```\r\nPATTERN_THE = u'^the\\\\s'\r\n```\r\n\r\n### Setup\r\n\r\n* OS: Windows 10 2004\r\n* Python version: 3.8\r\n* beets version: latest trunk\r\n* Turning off plugins made problem go away (yes/no): obviously, if I disable 'the' it no longer does this\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Moves patterns in path formats (suitable for moving articles).\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport re\nfrom beets.plugins import BeetsPlugin\n\n__author__ = '[email protected]'\n__version__ = '1.1'\n\nPATTERN_THE = u'^[the]{3}\\\\s'\nPATTERN_A = u'^[a][n]?\\\\s'\nFORMAT = u'{0}, {1}'\n\n\nclass ThePlugin(BeetsPlugin):\n\n patterns = []\n\n def __init__(self):\n super(ThePlugin, self).__init__()\n\n self.template_funcs['the'] = self.the_template_func\n\n self.config.add({\n 'the': True,\n 'a': True,\n 'format': u'{0}, {1}',\n 'strip': False,\n 'patterns': [],\n })\n\n self.patterns = self.config['patterns'].as_str_seq()\n for p in self.patterns:\n if p:\n try:\n re.compile(p)\n except re.error:\n self._log.error(u'invalid pattern: {0}', p)\n else:\n if not (p.startswith('^') or p.endswith('$')):\n self._log.warning(u'warning: \\\"{0}\\\" will not '\n u'match string start/end', p)\n if self.config['a']:\n self.patterns = [PATTERN_A] + self.patterns\n if self.config['the']:\n self.patterns = [PATTERN_THE] + self.patterns\n if not self.patterns:\n self._log.warning(u'no patterns defined!')\n\n def unthe(self, text, pattern):\n \"\"\"Moves pattern in the path format string or strips it\n\n text -- text to handle\n pattern -- regexp pattern (case ignore is already on)\n strip -- if True, pattern will be removed\n \"\"\"\n if text:\n r = re.compile(pattern, flags=re.IGNORECASE)\n try:\n t = r.findall(text)[0]\n except IndexError:\n return text\n else:\n r = re.sub(r, '', text).strip()\n if self.config['strip']:\n return r\n 
else:\n fmt = self.config['format'].as_str()\n return fmt.format(r, t.strip()).strip()\n else:\n return u''\n\n def the_template_func(self, text):\n if not self.patterns:\n return text\n if text:\n for p in self.patterns:\n r = self.unthe(text, p)\n if r != text:\n self._log.debug(u'\\\"{0}\\\" -> \\\"{1}\\\"', text, r)\n break\n return r\n else:\n return u''\n", "path": "beetsplug/the.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Moves patterns in path formats (suitable for moving articles).\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport re\nfrom beets.plugins import BeetsPlugin\n\n__author__ = '[email protected]'\n__version__ = '1.1'\n\nPATTERN_THE = u'^the\\\\s'\nPATTERN_A = u'^[a][n]?\\\\s'\nFORMAT = u'{0}, {1}'\n\n\nclass ThePlugin(BeetsPlugin):\n\n patterns = []\n\n def __init__(self):\n super(ThePlugin, self).__init__()\n\n self.template_funcs['the'] = self.the_template_func\n\n self.config.add({\n 'the': True,\n 'a': True,\n 'format': u'{0}, {1}',\n 'strip': False,\n 'patterns': [],\n })\n\n self.patterns = self.config['patterns'].as_str_seq()\n for p in self.patterns:\n if p:\n try:\n re.compile(p)\n except re.error:\n self._log.error(u'invalid pattern: {0}', p)\n else:\n if not (p.startswith('^') or p.endswith('$')):\n self._log.warning(u'warning: \\\"{0}\\\" will not '\n u'match string start/end', p)\n if self.config['a']:\n self.patterns = [PATTERN_A] + self.patterns\n if self.config['the']:\n self.patterns = [PATTERN_THE] + self.patterns\n if not self.patterns:\n self._log.warning(u'no patterns defined!')\n\n def unthe(self, text, pattern):\n \"\"\"Moves pattern in the path format string or strips it\n\n text -- text to handle\n pattern -- regexp pattern (case ignore is already on)\n strip -- if True, pattern will be removed\n \"\"\"\n if text:\n r = re.compile(pattern, flags=re.IGNORECASE)\n try:\n t = r.findall(text)[0]\n except IndexError:\n return text\n else:\n r = re.sub(r, '', text).strip()\n if self.config['strip']:\n return r\n else:\n fmt = self.config['format'].as_str()\n return fmt.format(r, t.strip()).strip()\n else:\n return u''\n\n def the_template_func(self, text):\n if not self.patterns:\n return text\n if text:\n for p in self.patterns:\n r = self.unthe(text, p)\n if r != text:\n self._log.debug(u'\\\"{0}\\\" -> \\\"{1}\\\"', text, r)\n break\n return r\n else:\n return u''\n", "path": "beetsplug/the.py"}]}
| 1,454 | 115 |
gh_patches_debug_777
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-1840
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cryptography Package has a Security Vulnerability
Discovered using pipenv's security check feature that there is a vulnerability in the cryptography package, versions < 2.3.
> Checking installed package safety...
36351: cryptography >=1.9.0,<2.3 resolved (2.2.2 installed)!
python-cryptography versions >=1.9.0 and <2.3 did not enforce a minimum tag length for finalize_with_tag API. If a user did not validate the input length prior to passing it to finalize_with_tag an attacker could craft an invalid payload with a shortened tag (e.g. 1 byte) such that they would have a 1 in 256 chance of passing the MAC check. GCM tag forgeries can cause key leakage.
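For illustration only (an editor's sketch, not taken from the advisory), callers stuck on an affected version can enforce the tag length themselves before passing an attacker-supplied tag to `finalize_with_tag`:

```python
# Assumes AES-GCM with the full 16-byte tag; names and structure are illustrative.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def gcm_decrypt(key, iv, ciphertext, tag, associated_data=b""):
    if len(tag) < 16:  # reject the truncated tags that vulnerable versions accept
        raise ValueError("GCM tag too short")
    decryptor = Cipher(algorithms.AES(key), modes.GCM(iv),
                       backend=default_backend()).decryptor()
    decryptor.authenticate_additional_data(associated_data)
    return decryptor.update(ciphertext) + decryptor.finalize_with_tag(tag)
```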
More details here: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10903
setup.py should be updated to require cryptography>=2.3.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 import setuptools
4 from setuptools import setup, find_packages
5 import sys
6
7
8 install_requires = [
9 "Jinja2>=2.7.3",
10 "boto>=2.36.0",
11 "boto3>=1.6.16,<1.8",
12 "botocore>=1.9.16,<1.11",
13 "cryptography>=2.0.0",
14 "requests>=2.5",
15 "xmltodict",
16 "six>1.9",
17 "werkzeug",
18 "pyaml",
19 "pytz",
20 "python-dateutil<3.0.0,>=2.1",
21 "python-jose<3.0.0",
22 "mock",
23 "docker>=2.5.1",
24 "jsondiff==1.1.1",
25 "aws-xray-sdk<0.96,>=0.93",
26 "responses>=0.9.0",
27 ]
28
29 extras_require = {
30 'server': ['flask'],
31 }
32
33 # https://hynek.me/articles/conditional-python-dependencies/
34 if int(setuptools.__version__.split(".", 1)[0]) < 18:
35 if sys.version_info[0:2] < (3, 3):
36 install_requires.append("backports.tempfile")
37 else:
38 extras_require[":python_version<'3.3'"] = ["backports.tempfile"]
39
40
41 setup(
42 name='moto',
43 version='1.3.6',
44 description='A library that allows your python tests to easily'
45 ' mock out the boto library',
46 author='Steve Pulec',
47 author_email='[email protected]',
48 url='https://github.com/spulec/moto',
49 entry_points={
50 'console_scripts': [
51 'moto_server = moto.server:main',
52 ],
53 },
54 packages=find_packages(exclude=("tests", "tests.*")),
55 install_requires=install_requires,
56 extras_require=extras_require,
57 include_package_data=True,
58 license="Apache",
59 test_suite="tests",
60 classifiers=[
61 "Programming Language :: Python :: 2",
62 "Programming Language :: Python :: 2.7",
63 "Programming Language :: Python :: 3",
64 "Programming Language :: Python :: 3.3",
65 "Programming Language :: Python :: 3.4",
66 "Programming Language :: Python :: 3.5",
67 "Programming Language :: Python :: 3.6",
68 "License :: OSI Approved :: Apache Software License",
69 "Topic :: Software Development :: Testing",
70 ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
"boto>=2.36.0",
"boto3>=1.6.16,<1.8",
"botocore>=1.9.16,<1.11",
- "cryptography>=2.0.0",
+ "cryptography>=2.3.0",
"requests>=2.5",
"xmltodict",
"six>1.9",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n- \"cryptography>=2.0.0\",\n+ \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n", "issue": "Cryptography Package has a Security Vulnerability\nDiscovered using pipenv's security check feature that there's a vulnerability in the cryptography package versions<2.3.\r\n \r\n> Checking installed package safety...\r\n36351: cryptography >=1.9.0,<2.3 resolved (2.2.2 installed)!\r\npython-cryptography versions >=1.9.0 and <2.3 did not enforce a minimum tag length for finalize_with_tag API. If a user did not validate the input length prior to passing it to finalize_with_tag an attacker could craft an invalid payload with a shortened tag (e.g. 1 byte) such that they would have a 1 in 256 chance of passing the MAC check. GCM tag forgeries can cause key leakage.\r\n \r\nMore details here: http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-10903\r\n\r\nsetup.py should be updated to require cryptography>=2.3.0.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.0.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n \"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.6',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nimport setuptools\nfrom setuptools import setup, find_packages\nimport sys\n\n\ninstall_requires = [\n \"Jinja2>=2.7.3\",\n \"boto>=2.36.0\",\n \"boto3>=1.6.16,<1.8\",\n \"botocore>=1.9.16,<1.11\",\n \"cryptography>=2.3.0\",\n \"requests>=2.5\",\n \"xmltodict\",\n \"six>1.9\",\n \"werkzeug\",\n \"pyaml\",\n \"pytz\",\n \"python-dateutil<3.0.0,>=2.1\",\n \"python-jose<3.0.0\",\n \"mock\",\n 
\"docker>=2.5.1\",\n \"jsondiff==1.1.1\",\n \"aws-xray-sdk<0.96,>=0.93\",\n \"responses>=0.9.0\",\n]\n\nextras_require = {\n 'server': ['flask'],\n}\n\n# https://hynek.me/articles/conditional-python-dependencies/\nif int(setuptools.__version__.split(\".\", 1)[0]) < 18:\n if sys.version_info[0:2] < (3, 3):\n install_requires.append(\"backports.tempfile\")\nelse:\n extras_require[\":python_version<'3.3'\"] = [\"backports.tempfile\"]\n\n\nsetup(\n name='moto',\n version='1.3.6',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='[email protected]',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n include_package_data=True,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]}
| 1,170 | 125 |
gh_patches_debug_49499
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1896
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.104
On the docket:
+ [x] Pull in Pip fixes. #1805
+ [x] pex fails to overwrite zipapp pex with loose pex #1879
+ [x] Make lock update sensitive to artifacts. #1887
+ [x] PEXBuilder is not robust to ephemeral .pyc compiles. #1889
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.103"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.103"
+__version__ = "2.1.104"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.103\"\n+__version__ = \"2.1.104\"\n", "issue": "Release 2.1.104\nOn the docket:\r\n+ [x] Pull in Pip fixes. #1805\r\n+ [x] pex fails to overwrite zipapp pex with loose pex #1879\r\n+ [x] Make lock update sensitive to artifacts. #1887 \r\n+ [x] PEXBuilder is not robust to ephemeral .pyc compiles. #1889 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.103\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.104\"\n", "path": "pex/version.py"}]}
| 403 | 98 |
gh_patches_debug_2793
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-579
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Backslash not showing in equation for Tversky Loss
## 📚 Documentation
The backslashes in the denominator of the Tversky loss equation are not rendered correctly with MathJax. As shown in this screenshot, it only inserts a little space between P and G and thus does not render the equation correctly.

<!-- A clear and concise description of what content in https://kornia.readthedocs.io is an issue. If this has to do with the general https://kornia.org website, please file an issue at https://github.com/kornia/kornia.github.io/issues/new/choose instead. If this has to do with https://kornia.org/tutorials, please file an issue at https://github.com/kornia/tutorials/issues/new -->
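As an editorial aside (not part of the original report): in LaTeX/MathJax, `\ ` is merely an explicit space, so `|P \ G|` typesets as "|P G|"; writing the operator as `\setminus` produces the intended set difference, e.g.:

```latex
% before (renders the backslash as a plain space):
%   \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}
% after:
\frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
```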
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/losses/tversky.py`
Content:
```
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5 import torch.nn.functional as F
6
7 from kornia.utils import one_hot
8
9 # based on:
10 # https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
11
12
13 def tversky_loss(input: torch.Tensor, target: torch.Tensor,
14 alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:
15 r"""Function that computes Tversky loss.
16
17 See :class:`~kornia.losses.TverskyLoss` for details.
18 """
19 if not torch.is_tensor(input):
20 raise TypeError("Input type is not a torch.Tensor. Got {}"
21 .format(type(input)))
22
23 if not len(input.shape) == 4:
24 raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
25 .format(input.shape))
26
27 if not input.shape[-2:] == target.shape[-2:]:
28 raise ValueError("input and target shapes must be the same. Got: {} and {}"
29 .format(input.shape, input.shape))
30
31 if not input.device == target.device:
32 raise ValueError(
33 "input and target must be in the same device. Got: {} and {}" .format(
34 input.device, target.device))
35
36 # compute softmax over the classes axis
37 input_soft: torch.Tensor = F.softmax(input, dim=1)
38
39 # create the labels one hot tensor
40 target_one_hot: torch.Tensor = one_hot(
41 target, num_classes=input.shape[1],
42 device=input.device, dtype=input.dtype)
43
44 # compute the actual dice score
45 dims = (1, 2, 3)
46 intersection = torch.sum(input_soft * target_one_hot, dims)
47 fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)
48 fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)
49
50 numerator = intersection
51 denominator = intersection + alpha * fps + beta * fns
52 tversky_loss = numerator / (denominator + eps)
53 return torch.mean(-tversky_loss + 1.)
54
55
56 class TverskyLoss(nn.Module):
57 r"""Criterion that computes Tversky Coeficient loss.
58
59 According to [1], we compute the Tversky Coefficient as follows:
60
61 .. math::
62
63 \text{S}(P, G, \alpha; \beta) =
64 \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}
65
66 where:
67 - :math:`P` and :math:`G` are the predicted and ground truth binary
68 labels.
69 - :math:`\alpha` and :math:`\beta` control the magnitude of the
70 penalties for FPs and FNs, respectively.
71
72 Notes:
73 - :math:`\alpha = \beta = 0.5` => dice coeff
74 - :math:`\alpha = \beta = 1` => tanimoto coeff
75 - :math:`\alpha + \beta = 1` => F beta coeff
76
77 Shape:
78 - Input: :math:`(N, C, H, W)` where C = number of classes.
79 - Target: :math:`(N, H, W)` where each value is
80 :math:`0 ≤ targets[i] ≤ C−1`.
81
82 Examples:
83 >>> N = 5 # num_classes
84 >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)
85 >>> input = torch.randn(1, N, 3, 5, requires_grad=True)
86 >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
87 >>> output = loss(input, target)
88 >>> output.backward()
89
90 References:
91 [1]: https://arxiv.org/abs/1706.05721
92 """
93
94 def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:
95 super(TverskyLoss, self).__init__()
96 self.alpha: float = alpha
97 self.beta: float = beta
98 self.eps: float = eps
99
100 def forward( # type: ignore
101 self,
102 input: torch.Tensor,
103 target: torch.Tensor) -> torch.Tensor:
104 return tversky_loss(input, target, self.alpha, self.beta, self.eps)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kornia/losses/tversky.py b/kornia/losses/tversky.py
--- a/kornia/losses/tversky.py
+++ b/kornia/losses/tversky.py
@@ -61,7 +61,7 @@
.. math::
\text{S}(P, G, \alpha; \beta) =
- \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}
+ \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
where:
- :math:`P` and :math:`G` are the predicted and ground truth binary
|
{"golden_diff": "diff --git a/kornia/losses/tversky.py b/kornia/losses/tversky.py\n--- a/kornia/losses/tversky.py\n+++ b/kornia/losses/tversky.py\n@@ -61,7 +61,7 @@\n .. math::\n \n \\text{S}(P, G, \\alpha; \\beta) =\n- \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n+ \\frac{|PG|}{|PG| + \\alpha |P \\setminus G| + \\beta |G \\setminus P|}\n \n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n", "issue": "Backslash not showing in equation for Tversky Loss\n## \ud83d\udcda Documentation\r\n\r\nThe backslashs in the denominator of the Tversky loss equation is not rendered correctly with MathJax. As shown in this screenshot it only inserts a little space between P and G and thus does not correctly render the equation.\r\n\r\n\r\n\r\n\r\n<!-- A clear and concise description of what content in https://kornia.readthedocs.io is an issue. If this has to do with the general https://kornia.org website, please file an issue at https://github.com/kornia/kornia.github.io/issues/new/choose instead. If this has to do with https://kornia.org/tutorials, please file an issue at https://github.com/kornia/tutorials/issues/new -->\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.utils import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\ndef tversky_loss(input: torch.Tensor, target: torch.Tensor,\n alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~kornia.losses.TverskyLoss` for details.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {} and {}\"\n .format(input.shape, input.shape))\n\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {} and {}\" .format(\n input.device, target.device))\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = one_hot(\n target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)\n fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + alpha * fps + beta * fns\n tversky_loss = numerator / (denominator + eps)\n return torch.mean(-tversky_loss + 1.)\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. 
math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 \u2264 targets[i] \u2264 C\u22121`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n\n def forward( # type: ignore\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(input, target, self.alpha, self.beta, self.eps)\n", "path": "kornia/losses/tversky.py"}], "after_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.utils import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\ndef tversky_loss(input: torch.Tensor, target: torch.Tensor,\n alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~kornia.losses.TverskyLoss` for details.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {} and {}\"\n .format(input.shape, input.shape))\n\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {} and {}\" .format(\n input.device, target.device))\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = one_hot(\n target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)\n fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + alpha * fps + beta * fns\n tversky_loss = numerator / (denominator + eps)\n return torch.mean(-tversky_loss + 1.)\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. 
math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\setminus G| + \\beta |G \\setminus P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 \u2264 targets[i] \u2264 C\u22121`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n\n def forward( # type: ignore\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(input, target, self.alpha, self.beta, self.eps)\n", "path": "kornia/losses/tversky.py"}]}
| 1,677 | 169 |
gh_patches_debug_18960
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-9119
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Locked pages report renders all users in "locked_by" filter
### Issue Summary
Currently, the select field for the "locked_by" filter on the "Locked pages" report renders all users from the user table, completely unfiltered.
This breaks the view for one of our projects, which tries to list all 500k users...
Only the users who have actually locked a page should be listed.
I will try to make a PR for that; in the meantime, this quickfix works for us (with wagtail 4.0, django 3.2):
```python
# in one of our apps.py
from django.apps import AppConfig
class BaseConfig(AppConfig):
name = "base"
def ready(self):
patch_locked_pages_filter()
def patch_locked_pages_filter():
import django_filters
from django.contrib.auth import get_user_model
from wagtail.admin.views.reports.locked_pages import LockedPagesView, LockedPagesReportFilterSet
def get_users_for_filter():
User = get_user_model()
return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)
class PatchedLockedPagesReportFilterSet(LockedPagesReportFilterSet):
locked_by = django_filters.ModelChoiceFilter(
field_name="locked_by", queryset=lambda request: get_users_for_filter()
)
LockedPagesView.filterset_class = PatchedLockedPagesReportFilterSet
```
### Steps to Reproduce
1. Start a new project with `wagtail start myproject`
2. Create many users, no matter if they have access to the CMS or not
3. Open `/admin/reports/locked/` and find all those users in the "locked_by" dropdown on the right
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Python version: 3.10
- Django version: 3.2
- Wagtail version: 4.0, 2.15,... (according to the git history seems to be there from the beginning)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/views/reports/locked_pages.py`
Content:
```
1 import datetime
2
3 import django_filters
4 from django.conf import settings
5 from django.core.exceptions import PermissionDenied
6 from django.utils.translation import gettext_lazy as _
7
8 from wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet
9 from wagtail.models import Page, UserPagePermissionsProxy
10
11 from .base import PageReportView
12
13
14 class LockedPagesReportFilterSet(WagtailFilterSet):
15 locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)
16
17 class Meta:
18 model = Page
19 fields = ["locked_by", "locked_at", "live"]
20
21
22 class LockedPagesView(PageReportView):
23 template_name = "wagtailadmin/reports/locked_pages.html"
24 title = _("Locked pages")
25 header_icon = "lock"
26 list_export = PageReportView.list_export + [
27 "locked_at",
28 "locked_by",
29 ]
30 filterset_class = LockedPagesReportFilterSet
31
32 def get_filename(self):
33 return "locked-pages-report-{}".format(
34 datetime.datetime.today().strftime("%Y-%m-%d")
35 )
36
37 def get_queryset(self):
38 pages = (
39 (
40 UserPagePermissionsProxy(self.request.user).editable_pages()
41 | Page.objects.filter(locked_by=self.request.user)
42 )
43 .filter(locked=True)
44 .specific(defer=True)
45 )
46
47 if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
48 pages = pages.select_related("locale")
49
50 self.queryset = pages
51 return super().get_queryset()
52
53 def dispatch(self, request, *args, **kwargs):
54 if not UserPagePermissionsProxy(request.user).can_remove_locks():
55 raise PermissionDenied
56 return super().dispatch(request, *args, **kwargs)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/views/reports/locked_pages.py b/wagtail/admin/views/reports/locked_pages.py
--- a/wagtail/admin/views/reports/locked_pages.py
+++ b/wagtail/admin/views/reports/locked_pages.py
@@ -2,6 +2,7 @@
import django_filters
from django.conf import settings
+from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
@@ -11,8 +12,16 @@
from .base import PageReportView
+def get_users_for_filter():
+ User = get_user_model()
+ return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)
+
+
class LockedPagesReportFilterSet(WagtailFilterSet):
locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)
+ locked_by = django_filters.ModelChoiceFilter(
+ field_name="locked_by", queryset=lambda request: get_users_for_filter()
+ )
class Meta:
model = Page
|
{"golden_diff": "diff --git a/wagtail/admin/views/reports/locked_pages.py b/wagtail/admin/views/reports/locked_pages.py\n--- a/wagtail/admin/views/reports/locked_pages.py\n+++ b/wagtail/admin/views/reports/locked_pages.py\n@@ -2,6 +2,7 @@\n \n import django_filters\n from django.conf import settings\n+from django.contrib.auth import get_user_model\n from django.core.exceptions import PermissionDenied\n from django.utils.translation import gettext_lazy as _\n \n@@ -11,8 +12,16 @@\n from .base import PageReportView\n \n \n+def get_users_for_filter():\n+ User = get_user_model()\n+ return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)\n+\n+\n class LockedPagesReportFilterSet(WagtailFilterSet):\n locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)\n+ locked_by = django_filters.ModelChoiceFilter(\n+ field_name=\"locked_by\", queryset=lambda request: get_users_for_filter()\n+ )\n \n class Meta:\n model = Page\n", "issue": "Locked pages report renders all users in \"locked_by\" filter\n### Issue Summary\r\n\r\nCurrently, the select-field for the \"locked_by\" filter on the \"Locked pages\"-report renders all users unfiltered from the user table.\r\nThis breaks the view for one of our projects, which tries to list all 500k users...\r\nActually, only the users which have locked a page should be listed.\r\n\r\nI will try to make a PR for that, in the meantime this quickfix works for us (with wagtail 4.0, django 3.2):\r\n```python\r\n# in one of our apps.py\r\nfrom django.apps import AppConfig\r\n\r\nclass BaseConfig(AppConfig):\r\n name = \"base\"\r\n\r\n def ready(self):\r\n patch_locked_pages_filter()\r\n\r\ndef patch_locked_pages_filter():\r\n import django_filters\r\n from django.contrib.auth import get_user_model\r\n\r\n from wagtail.admin.views.reports.locked_pages import LockedPagesView, LockedPagesReportFilterSet\r\n\r\n def get_users_for_filter():\r\n User = get_user_model()\r\n return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)\r\n\r\n class PatchedLockedPagesReportFilterSet(LockedPagesReportFilterSet):\r\n locked_by = django_filters.ModelChoiceFilter(\r\n field_name=\"locked_by\", queryset=lambda request: get_users_for_filter()\r\n )\r\n\r\n LockedPagesView.filterset_class = PatchedLockedPagesReportFilterSet\r\n\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. Create many users, no matter if they have access to the CMS or not\r\n3. Open `/admin/reports/locked/` and find all those users in the \"locked_by\" dropdown on the right\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: 3.10\r\n- Django version: 3.2\r\n- Wagtail version: 4.0, 2.15,... 
(according to the git history seems to be there from the beginning)\r\n\n", "before_files": [{"content": "import datetime\n\nimport django_filters\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\nfrom .base import PageReportView\n\n\nclass LockedPagesReportFilterSet(WagtailFilterSet):\n locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)\n\n class Meta:\n model = Page\n fields = [\"locked_by\", \"locked_at\", \"live\"]\n\n\nclass LockedPagesView(PageReportView):\n template_name = \"wagtailadmin/reports/locked_pages.html\"\n title = _(\"Locked pages\")\n header_icon = \"lock\"\n list_export = PageReportView.list_export + [\n \"locked_at\",\n \"locked_by\",\n ]\n filterset_class = LockedPagesReportFilterSet\n\n def get_filename(self):\n return \"locked-pages-report-{}\".format(\n datetime.datetime.today().strftime(\"%Y-%m-%d\")\n )\n\n def get_queryset(self):\n pages = (\n (\n UserPagePermissionsProxy(self.request.user).editable_pages()\n | Page.objects.filter(locked_by=self.request.user)\n )\n .filter(locked=True)\n .specific(defer=True)\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n pages = pages.select_related(\"locale\")\n\n self.queryset = pages\n return super().get_queryset()\n\n def dispatch(self, request, *args, **kwargs):\n if not UserPagePermissionsProxy(request.user).can_remove_locks():\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n", "path": "wagtail/admin/views/reports/locked_pages.py"}], "after_files": [{"content": "import datetime\n\nimport django_filters\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\nfrom .base import PageReportView\n\n\ndef get_users_for_filter():\n User = get_user_model()\n return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)\n\n\nclass LockedPagesReportFilterSet(WagtailFilterSet):\n locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)\n locked_by = django_filters.ModelChoiceFilter(\n field_name=\"locked_by\", queryset=lambda request: get_users_for_filter()\n )\n\n class Meta:\n model = Page\n fields = [\"locked_by\", \"locked_at\", \"live\"]\n\n\nclass LockedPagesView(PageReportView):\n template_name = \"wagtailadmin/reports/locked_pages.html\"\n title = _(\"Locked pages\")\n header_icon = \"lock\"\n list_export = PageReportView.list_export + [\n \"locked_at\",\n \"locked_by\",\n ]\n filterset_class = LockedPagesReportFilterSet\n\n def get_filename(self):\n return \"locked-pages-report-{}\".format(\n datetime.datetime.today().strftime(\"%Y-%m-%d\")\n )\n\n def get_queryset(self):\n pages = (\n (\n UserPagePermissionsProxy(self.request.user).editable_pages()\n | Page.objects.filter(locked_by=self.request.user)\n )\n .filter(locked=True)\n .specific(defer=True)\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n pages = pages.select_related(\"locale\")\n\n self.queryset = pages\n return super().get_queryset()\n\n def dispatch(self, request, *args, **kwargs):\n if not 
UserPagePermissionsProxy(request.user).can_remove_locks():\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n", "path": "wagtail/admin/views/reports/locked_pages.py"}]}
| 1,186 | 236 |
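The fix above relies on a django-filter feature: `ModelChoiceFilter` accepts a callable for `queryset`, which is invoked with the current request, so the dropdown is built lazily from only the users that actually hold a page lock instead of the whole user table. A minimal standalone sketch of the same pattern (the filterset and helper names are illustrative; only the `locked_pages` reverse relation comes from the patch itself):

```python
import django_filters
from django.contrib.auth import get_user_model


def users_with_page_locks():
    User = get_user_model()
    # Only users that actually hold a lock; relies on the `locked_pages`
    # reverse relation that Page.locked_by provides.
    return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)


class LockedByFilterSet(django_filters.FilterSet):
    # django-filter calls the callable with the current request and expects a queryset.
    locked_by = django_filters.ModelChoiceFilter(
        field_name="locked_by",
        queryset=lambda request: users_with_page_locks(),
    )
```

Because the queryset is resolved per request, the choices stay current without any extra caching or migration work.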
gh_patches_debug_207
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-239
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type check to Identity Function
Related to #123
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/identity.py`
Content:
```
1 from chainer import function
2
3
4 class Identity(function.Function):
5
6 """Identity function."""
7
8 def forward(self, xs):
9 return xs
10
11 def backward(self, xs, gys):
12 return gys
13
14
15 def identity(*inputs):
16 """Just returns input variables."""
17 return Identity()(*inputs)
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/identity.py b/chainer/functions/identity.py
--- a/chainer/functions/identity.py
+++ b/chainer/functions/identity.py
@@ -5,6 +5,9 @@
"""Identity function."""
+ def check_type_forward(self, in_types):
+ pass
+
def forward(self, xs):
return xs
|
{"golden_diff": "diff --git a/chainer/functions/identity.py b/chainer/functions/identity.py\n--- a/chainer/functions/identity.py\n+++ b/chainer/functions/identity.py\n@@ -5,6 +5,9 @@\n \n \"\"\"Identity function.\"\"\"\n \n+ def check_type_forward(self, in_types):\n+ pass\n+\n def forward(self, xs):\n return xs\n", "issue": "Add type check to Identity Function\nRelated to #123\n\n", "before_files": [{"content": "from chainer import function\n\n\nclass Identity(function.Function):\n\n \"\"\"Identity function.\"\"\"\n\n def forward(self, xs):\n return xs\n\n def backward(self, xs, gys):\n return gys\n\n\ndef identity(*inputs):\n \"\"\"Just returns input variables.\"\"\"\n return Identity()(*inputs)\n", "path": "chainer/functions/identity.py"}], "after_files": [{"content": "from chainer import function\n\n\nclass Identity(function.Function):\n\n \"\"\"Identity function.\"\"\"\n\n def check_type_forward(self, in_types):\n pass\n\n def forward(self, xs):\n return xs\n\n def backward(self, xs, gys):\n return gys\n\n\ndef identity(*inputs):\n \"\"\"Just returns input variables.\"\"\"\n return Identity()(*inputs)\n", "path": "chainer/functions/identity.py"}]}
| 369 | 80 |
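The `check_type_forward` added by the patch is deliberately empty because `Identity` accepts any number of inputs of any dtype. For contrast, a Chainer function of this era that does constrain its inputs would spell the constraints out with `chainer.utils.type_check`; the class below is purely illustrative and is not part of the patch:

```python
import numpy
from chainer import function
from chainer.utils import type_check


class SingleFloat32Input(function.Function):
    """Illustrative function that only accepts a single float32 input."""

    def check_type_forward(self, in_types):
        # Exactly one input array, and it must be float32.
        type_check.expect(in_types.size() == 1)
        x_type = in_types[0]
        type_check.expect(x_type.dtype == numpy.float32)

    def forward(self, xs):
        return xs

    def backward(self, xs, gys):
        return gys
```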
gh_patches_debug_4580
|
rasdani/github-patches
|
git_diff
|
pypa__pip-5136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Name normalization issue in 10.0.0b1 yields false-positive installation warnings
* Pip version: 10.0.0b1
* Python version: 2.7.14
* Operating system: Linux
### Description:
The new pip check mode used during installation wrongly reports that packages are not being installed. This only happens for package containing a `.` (dot) in the name.
### What I've run:
```
pip install pyramid
Collecting pyramid
Using cached pyramid-1.9.1-py2.py3-none-any.whl
Collecting zope.interface>=3.8.0 (from pyramid)
Using cached zope.interface-4.4.3-cp36-cp36m-manylinux1_x86_64.whl
Collecting hupper (from pyramid)
Using cached hupper-1.1-py2.py3-none-any.whl
Collecting zope.deprecation>=3.5.0 (from pyramid)
Using cached zope.deprecation-4.3.0-py2.py3-none-any.whl
Collecting translationstring>=0.4 (from pyramid)
Using cached translationstring-1.3-py2.py3-none-any.whl
Requirement already satisfied: setuptools in ./venv-ex/lib/python3.6/site-packages (from pyramid) (39.0.1)
Collecting WebOb>=1.7.0rc2 (from pyramid)
Using cached WebOb-1.8.0rc1-py2.py3-none-any.whl
Collecting venusian>=1.0a3 (from pyramid)
Using cached venusian-1.1.0-py2.py3-none-any.whl
Collecting repoze.lru>=0.4 (from pyramid)
Using cached repoze.lru-0.7-py3-none-any.whl
Collecting plaster-pastedeploy (from pyramid)
Using cached plaster_pastedeploy-0.5-py2.py3-none-any.whl
Collecting PasteDeploy>=1.5.0 (from pyramid)
Using cached PasteDeploy-1.5.2-py2.py3-none-any.whl
Collecting plaster (from pyramid)
Using cached plaster-1.0-py2.py3-none-any.whl
pyramid 1.9.1 requires repoze.lru>=0.4, which is not installed.
pyramid 1.9.1 requires zope.deprecation>=3.5.0, which is not installed.
pyramid 1.9.1 requires zope.interface>=3.8.0, which is not installed.
Installing collected packages: zope.interface, hupper, zope.deprecation, translationstring, WebOb, venusian, repoze.lru, PasteDeploy, plaster, plaster-pastedeploy, pyramid
Successfully installed PasteDeploy-1.5.2 WebOb-1.8.0rc1 hupper-1.1 plaster-1.0 plaster-pastedeploy-0.5 pyramid-1.9.1 repoze.lru-0.7 translationstring-1.3 venusian-1.1.0 zope.deprecation-4.3.0 zope.interface-4.4.3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/operations/check.py`
Content:
```
1 """Validation of dependencies of packages
2 """
3
4 from collections import namedtuple
5
6 from pip._vendor.packaging.utils import canonicalize_name
7
8 from pip._internal.operations.prepare import make_abstract_dist
9
10 from pip._internal.utils.misc import get_installed_distributions
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from pip._internal.req.req_install import InstallRequirement
15 from typing import Any, Dict, Iterator, Set, Tuple, List
16
17 # Shorthands
18 PackageSet = Dict[str, 'PackageDetails']
19 Missing = Tuple[str, Any]
20 Conflicting = Tuple[str, str, Any]
21
22 MissingDict = Dict[str, List[Missing]]
23 ConflictingDict = Dict[str, List[Conflicting]]
24 CheckResult = Tuple[MissingDict, ConflictingDict]
25
26 PackageDetails = namedtuple('PackageDetails', ['version', 'requires'])
27
28
29 def create_package_set_from_installed(**kwargs):
30 # type: (**Any) -> PackageSet
31 """Converts a list of distributions into a PackageSet.
32 """
33 retval = {}
34 for dist in get_installed_distributions(**kwargs):
35 name = canonicalize_name(dist.project_name)
36 retval[name] = PackageDetails(dist.version, dist.requires())
37 return retval
38
39
40 def check_package_set(package_set):
41 # type: (PackageSet) -> CheckResult
42 """Check if a package set is consistent
43 """
44 missing = dict()
45 conflicting = dict()
46
47 for package_name in package_set:
48 # Info about dependencies of package_name
49 missing_deps = set() # type: Set[Missing]
50 conflicting_deps = set() # type: Set[Conflicting]
51
52 for req in package_set[package_name].requires:
53 name = canonicalize_name(req.project_name) # type: str
54
55 # Check if it's missing
56 if name not in package_set:
57 missed = True
58 if req.marker is not None:
59 missed = req.marker.evaluate()
60 if missed:
61 missing_deps.add((name, req))
62 continue
63
64 # Check if there's a conflict
65 version = package_set[name].version # type: str
66 if version not in req.specifier:
67 conflicting_deps.add((name, version, req))
68
69 def str_key(x):
70 return str(x)
71
72 if missing_deps:
73 missing[package_name] = sorted(missing_deps, key=str_key)
74 if conflicting_deps:
75 conflicting[package_name] = sorted(conflicting_deps, key=str_key)
76
77 return missing, conflicting
78
79
80 def check_install_conflicts(to_install):
81 # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]
82 """For checking if the dependency graph would be consistent after \
83 installing given requirements
84 """
85 # Start from the current state
86 state = create_package_set_from_installed()
87 _simulate_installation_of(to_install, state)
88 return state, check_package_set(state)
89
90
91 # NOTE from @pradyunsg
92 # This required a minor update in dependency link handling logic over at
93 # operations.prepare.IsSDist.dist() to get it working
94 def _simulate_installation_of(to_install, state):
95 # type: (List[InstallRequirement], PackageSet) -> None
96 """Computes the version of packages after installing to_install.
97 """
98
99 # Modify it as installing requirement_set would (assuming no errors)
100 for inst_req in to_install:
101 dist = make_abstract_dist(inst_req).dist(finder=None)
102 state[dist.key] = PackageDetails(dist.version, dist.requires())
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py
--- a/src/pip/_internal/operations/check.py
+++ b/src/pip/_internal/operations/check.py
@@ -99,4 +99,5 @@
# Modify it as installing requirement_set would (assuming no errors)
for inst_req in to_install:
dist = make_abstract_dist(inst_req).dist(finder=None)
- state[dist.key] = PackageDetails(dist.version, dist.requires())
+ name = canonicalize_name(dist.key)
+ state[name] = PackageDetails(dist.version, dist.requires())
|
{"golden_diff": "diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py\n--- a/src/pip/_internal/operations/check.py\n+++ b/src/pip/_internal/operations/check.py\n@@ -99,4 +99,5 @@\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n- state[dist.key] = PackageDetails(dist.version, dist.requires())\n+ name = canonicalize_name(dist.key)\n+ state[name] = PackageDetails(dist.version, dist.requires())\n", "issue": "Name normalization issue in 10.0.0b1 yields false-positive installation warnings\n* Pip version: 10.0.0b1\r\n* Python version: 2.7.14\r\n* Operating system: Linux\r\n\r\n### Description:\r\n\r\nThe new pip check mode used during installation wrongly reports that packages are not being installed. This only happens for package containing a `.` (dot) in the name.\r\n\r\n### What I've run:\r\n\r\n```\r\npip install pyramid\r\nCollecting pyramid\r\n Using cached pyramid-1.9.1-py2.py3-none-any.whl\r\nCollecting zope.interface>=3.8.0 (from pyramid)\r\n Using cached zope.interface-4.4.3-cp36-cp36m-manylinux1_x86_64.whl\r\nCollecting hupper (from pyramid)\r\n Using cached hupper-1.1-py2.py3-none-any.whl\r\nCollecting zope.deprecation>=3.5.0 (from pyramid)\r\n Using cached zope.deprecation-4.3.0-py2.py3-none-any.whl\r\nCollecting translationstring>=0.4 (from pyramid)\r\n Using cached translationstring-1.3-py2.py3-none-any.whl\r\nRequirement already satisfied: setuptools in ./venv-ex/lib/python3.6/site-packages (from pyramid) (39.0.1)\r\nCollecting WebOb>=1.7.0rc2 (from pyramid)\r\n Using cached WebOb-1.8.0rc1-py2.py3-none-any.whl\r\nCollecting venusian>=1.0a3 (from pyramid)\r\n Using cached venusian-1.1.0-py2.py3-none-any.whl\r\nCollecting repoze.lru>=0.4 (from pyramid)\r\n Using cached repoze.lru-0.7-py3-none-any.whl\r\nCollecting plaster-pastedeploy (from pyramid)\r\n Using cached plaster_pastedeploy-0.5-py2.py3-none-any.whl\r\nCollecting PasteDeploy>=1.5.0 (from pyramid)\r\n Using cached PasteDeploy-1.5.2-py2.py3-none-any.whl\r\nCollecting plaster (from pyramid)\r\n Using cached plaster-1.0-py2.py3-none-any.whl\r\npyramid 1.9.1 requires repoze.lru>=0.4, which is not installed.\r\npyramid 1.9.1 requires zope.deprecation>=3.5.0, which is not installed.\r\npyramid 1.9.1 requires zope.interface>=3.8.0, which is not installed.\r\nInstalling collected packages: zope.interface, hupper, zope.deprecation, translationstring, WebOb, venusian, repoze.lru, PasteDeploy, plaster, plaster-pastedeploy, pyramid\r\nSuccessfully installed PasteDeploy-1.5.2 WebOb-1.8.0rc1 hupper-1.1 plaster-1.0 plaster-pastedeploy-0.5 pyramid-1.9.1 repoze.lru-0.7 translationstring-1.3 venusian-1.1.0 zope.deprecation-4.3.0 zope.interface-4.4.3\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Validation of dependencies of packages\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.operations.prepare import make_abstract_dist\n\nfrom pip._internal.utils.misc import get_installed_distributions\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from pip._internal.req.req_install import InstallRequirement\n from typing import Any, Dict, Iterator, Set, Tuple, List\n\n # Shorthands\n PackageSet = Dict[str, 'PackageDetails']\n Missing = Tuple[str, Any]\n Conflicting = Tuple[str, str, Any]\n\n MissingDict = Dict[str, List[Missing]]\n ConflictingDict = Dict[str, List[Conflicting]]\n 
CheckResult = Tuple[MissingDict, ConflictingDict]\n\nPackageDetails = namedtuple('PackageDetails', ['version', 'requires'])\n\n\ndef create_package_set_from_installed(**kwargs):\n # type: (**Any) -> PackageSet\n \"\"\"Converts a list of distributions into a PackageSet.\n \"\"\"\n retval = {}\n for dist in get_installed_distributions(**kwargs):\n name = canonicalize_name(dist.project_name)\n retval[name] = PackageDetails(dist.version, dist.requires())\n return retval\n\n\ndef check_package_set(package_set):\n # type: (PackageSet) -> CheckResult\n \"\"\"Check if a package set is consistent\n \"\"\"\n missing = dict()\n conflicting = dict()\n\n for package_name in package_set:\n # Info about dependencies of package_name\n missing_deps = set() # type: Set[Missing]\n conflicting_deps = set() # type: Set[Conflicting]\n\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name) # type: str\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version # type: str\n if version not in req.specifier:\n conflicting_deps.add((name, version, req))\n\n def str_key(x):\n return str(x)\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str_key)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str_key)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install):\n # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]\n \"\"\"For checking if the dependency graph would be consistent after \\\n installing given requirements\n \"\"\"\n # Start from the current state\n state = create_package_set_from_installed()\n _simulate_installation_of(to_install, state)\n return state, check_package_set(state)\n\n\n# NOTE from @pradyunsg\n# This required a minor update in dependency link handling logic over at\n# operations.prepare.IsSDist.dist() to get it working\ndef _simulate_installation_of(to_install, state):\n # type: (List[InstallRequirement], PackageSet) -> None\n \"\"\"Computes the version of packages after installing to_install.\n \"\"\"\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n state[dist.key] = PackageDetails(dist.version, dist.requires())\n", "path": "src/pip/_internal/operations/check.py"}], "after_files": [{"content": "\"\"\"Validation of dependencies of packages\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.operations.prepare import make_abstract_dist\n\nfrom pip._internal.utils.misc import get_installed_distributions\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from pip._internal.req.req_install import InstallRequirement\n from typing import Any, Dict, Iterator, Set, Tuple, List\n\n # Shorthands\n PackageSet = Dict[str, 'PackageDetails']\n Missing = Tuple[str, Any]\n Conflicting = Tuple[str, str, Any]\n\n MissingDict = Dict[str, List[Missing]]\n ConflictingDict = Dict[str, List[Conflicting]]\n CheckResult = Tuple[MissingDict, ConflictingDict]\n\nPackageDetails = namedtuple('PackageDetails', ['version', 'requires'])\n\n\ndef create_package_set_from_installed(**kwargs):\n # type: (**Any) -> PackageSet\n \"\"\"Converts a list of distributions into a 
PackageSet.\n \"\"\"\n retval = {}\n for dist in get_installed_distributions(**kwargs):\n name = canonicalize_name(dist.project_name)\n retval[name] = PackageDetails(dist.version, dist.requires())\n return retval\n\n\ndef check_package_set(package_set):\n # type: (PackageSet) -> CheckResult\n \"\"\"Check if a package set is consistent\n \"\"\"\n missing = dict()\n conflicting = dict()\n\n for package_name in package_set:\n # Info about dependencies of package_name\n missing_deps = set() # type: Set[Missing]\n conflicting_deps = set() # type: Set[Conflicting]\n\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name) # type: str\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version # type: str\n if version not in req.specifier:\n conflicting_deps.add((name, version, req))\n\n def str_key(x):\n return str(x)\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str_key)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str_key)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install):\n # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]\n \"\"\"For checking if the dependency graph would be consistent after \\\n installing given requirements\n \"\"\"\n # Start from the current state\n state = create_package_set_from_installed()\n _simulate_installation_of(to_install, state)\n return state, check_package_set(state)\n\n\n# NOTE from @pradyunsg\n# This required a minor update in dependency link handling logic over at\n# operations.prepare.IsSDist.dist() to get it working\ndef _simulate_installation_of(to_install, state):\n # type: (List[InstallRequirement], PackageSet) -> None\n \"\"\"Computes the version of packages after installing to_install.\n \"\"\"\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n name = canonicalize_name(dist.key)\n state[name] = PackageDetails(dist.version, dist.requires())\n", "path": "src/pip/_internal/operations/check.py"}]}
| 1,939 | 136 |
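The underlying mismatch is in the dictionary keys: `create_package_set_from_installed` stores entries under PEP 503 canonical names, while the pre-fix `_simulate_installation_of` used the raw `dist.key`. `canonicalize_name` lowercases and collapses runs of `.`, `_` and `-` into a single `-`, so every dotted project name was stored under one key and looked up under another. A quick illustration using pip's vendored copy, as in the module above:

```python
from pip._vendor.packaging.utils import canonicalize_name

print(canonicalize_name("zope.interface"))  # zope-interface
print(canonicalize_name("repoze.lru"))      # repoze-lru
print(canonicalize_name("PasteDeploy"))     # pastedeploy

# Before the fix the simulated state held "zope.interface" while the dependency
# check looked up "zope-interface", so every dotted name was reported missing.
```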
gh_patches_debug_4138
|
rasdani/github-patches
|
git_diff
|
google__turbinia-793
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
path_spec.parent is None exception
```
FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py", line 881, in run_wrapper
self.evidence_setup(evidence)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py", line 499, in evidence_setup
evidence.preprocess(self.tmp_dir, required_states=self.REQUIRED_STATES)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py", line 341, in preprocess
self._preprocess(tmp_dir, required_states)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py", line 575, in _preprocess
encryption_type = partitions.GetPartitionEncryptionType(path_spec)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/processors/partitions.py", line 66, in GetPartitionEncryptionType
if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
AttributeError: 'NoneType' object has no attribute 'type_indicator'
No state_manager initialized, not updating Task info
Trying last ditch attempt to close result
Task Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]
Task Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/processors/partitions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # https://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Evidence processor to enumerate partitions."""
16
17 import logging
18
19 from dfvfs.helpers import volume_scanner
20 from dfvfs.lib import definitions as dfvfs_definitions
21 from dfvfs.lib import errors as dfvfs_errors
22
23 from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator
24 from turbinia import TurbiniaException
25
26 log = logging.getLogger('turbinia')
27
28
29 def Enumerate(evidence):
30 """Uses dfVFS to enumerate partitions in a disk / image.
31
32 Args:
33 evidence: Evidence object to be scanned.
34
35 Raises:
36 TurbiniaException if source evidence can't be scanned.
37
38 Returns:
39 list[dfVFS.path_spec]: path specs for identified partitions
40 """
41 dfvfs_definitions.PREFERRED_GPT_BACK_END = (
42 dfvfs_definitions.TYPE_INDICATOR_GPT)
43 mediator = UnattendedVolumeScannerMediator()
44 mediator.credentials = evidence.credentials
45 path_specs = []
46 try:
47 scanner = volume_scanner.VolumeScanner(mediator=mediator)
48 path_specs = scanner.GetBasePathSpecs(evidence.local_path)
49 except dfvfs_errors.ScannerError as e:
50 raise TurbiniaException(
51 'Could not enumerate partitions [{0!s}]: {1!s}'.format(
52 evidence.local_path, e))
53
54 return path_specs
55
56
57 def GetPartitionEncryptionType(path_spec):
58 """Checks a partition for encryption.
59
60 Args:
61 path_spec (dfVFS.path_spec): Partition path_spec.
62
63 Returns:
64 String representing the type of encryption, or None.
65 """
66 encryption_type = None
67 if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
68 encryption_type = 'BDE'
69 return encryption_type
70
71
72 def GetPathSpecByLocation(path_specs, location):
73 """Finds a path_spec from a list of path_specs for a given location.
74
75 Args:
76 path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.
77 location (str): dfVFS location to search for.
78
79 Returns:
80 dfVFS.path_spec for the given location or None if not found.
81 """
82 for path_spec in path_specs:
83 child_path_spec = path_spec
84 fs_location = getattr(path_spec, 'location', None)
85 while path_spec.HasParent():
86 type_indicator = path_spec.type_indicator
87 if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
88 dfvfs_definitions.TYPE_INDICATOR_GPT):
89 if fs_location in ('\\', '/'):
90 fs_location = getattr(path_spec, 'location', None)
91 break
92 path_spec = path_spec.parent
93 if fs_location == location:
94 return child_path_spec
95 return None
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py
--- a/turbinia/processors/partitions.py
+++ b/turbinia/processors/partitions.py
@@ -64,6 +64,10 @@
String representing the type of encryption, or None.
"""
encryption_type = None
+
+ if not path_spec.HasParent():
+ return None
+
if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
encryption_type = 'BDE'
return encryption_type
|
{"golden_diff": "diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py\n--- a/turbinia/processors/partitions.py\n+++ b/turbinia/processors/partitions.py\n@@ -64,6 +64,10 @@\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n+\n+ if not path_spec.HasParent():\n+ return None\n+\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n", "issue": "path_spec.parent is None exception\n```\r\nFsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py\", line 881, in run_wrapper\r\n self.evidence_setup(evidence)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py\", line 499, in evidence_setup\r\n evidence.preprocess(self.tmp_dir, required_states=self.REQUIRED_STATES)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py\", line 341, in preprocess\r\n self._preprocess(tmp_dir, required_states)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py\", line 575, in _preprocess\r\n encryption_type = partitions.GetPartitionEncryptionType(path_spec)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/processors/partitions.py\", line 66, in GetPartitionEncryptionType\r\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\r\nAttributeError: 'NoneType' object has no attribute 'type_indicator'\r\n\r\nNo state_manager initialized, not updating Task info\r\nTrying last ditch attempt to close result\r\nTask Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]\r\nTask Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. 
Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n\n if not path_spec.HasParent():\n return None\n\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}]}
| 1,753 | 127 |
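dfVFS represents a location as a chain of path specs, and the outermost spec (a plain OS path, for example) has no parent, which is why the unguarded `path_spec.parent.type_indicator` access raised the traceback above. The added `HasParent()` guard is the usual defensive pattern; a condensed sketch:

```python
from dfvfs.lib import definitions as dfvfs_definitions


def get_partition_encryption_type(path_spec):
    """Return 'BDE' when the partition sits on a BitLocker layer, else None."""
    # Top-level path specs have no parent, so check before dereferencing it.
    if not path_spec.HasParent():
        return None
    if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
        return 'BDE'
    return None
```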
gh_patches_debug_10351
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-3993
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In Bplans, required fields (start-date & end-date) for publication are not marked as such
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/sabinas-bebauungsplan/bplan/
**user:** A Bplan initiator
**expected behaviour:** As a Bplan initiator, I want the form to tell me which fields are required so that I can publish
**behaviour:** Because the start- and enddate are not marked as required for publication, it's unclear that not filling these in is hindering publication
**important screensize:** any
**device & browser:** ubuntu chrome
**Comment/Question:**
These are just missing the red exclamation point icons. It's a simple fix in forms.py
Screenshot?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/bplan/forms.py`
Content:
```
1 from django import forms
2
3 from meinberlin.apps.extprojects.forms import ExternalProjectCreateForm
4 from meinberlin.apps.extprojects.forms import ExternalProjectForm
5
6 from . import models
7
8
9 class StatementForm(forms.ModelForm):
10 class Meta:
11 model = models.Statement
12 fields = ['name', 'email', 'statement',
13 'street_number', 'postal_code_city']
14
15
16 class BplanProjectCreateForm(ExternalProjectCreateForm):
17
18 class Meta:
19 model = models.Bplan
20 fields = ['name', 'description', 'tile_image', 'tile_image_copyright']
21
22
23 class BplanProjectForm(ExternalProjectForm):
24
25 class Meta:
26 model = models.Bplan
27 fields = ['name', 'identifier', 'url', 'description', 'tile_image',
28 'tile_image_copyright', 'is_archived', 'office_worker_email',
29 'start_date', 'end_date']
30 required_for_project_publish = ['name', 'url', 'description',
31 'office_worker_email']
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35 self.fields['name'].widget.attrs.update({
36 'autocomplete': 'off', 'autofill': 'off'
37 })
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py
--- a/meinberlin/apps/bplan/forms.py
+++ b/meinberlin/apps/bplan/forms.py
@@ -28,7 +28,8 @@
'tile_image_copyright', 'is_archived', 'office_worker_email',
'start_date', 'end_date']
required_for_project_publish = ['name', 'url', 'description',
- 'office_worker_email']
+ 'office_worker_email',
+ 'start_date', 'end_date']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
{"golden_diff": "diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py\n--- a/meinberlin/apps/bplan/forms.py\n+++ b/meinberlin/apps/bplan/forms.py\n@@ -28,7 +28,8 @@\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n- 'office_worker_email']\n+ 'office_worker_email',\n+ 'start_date', 'end_date']\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "In Bplans required field (start-date & end-date) for publication is not marked as such\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/sabinas-bebauungsplan/bplan/\r\n**user:** A Bplan initiator\r\n**expected behaviour:** As a Bplan initiator, I want the form to tell me which fields are required so that I can publish\r\n**behaviour:** Because the start- and enddate are not marked as required for publication, it's unclear that not filling these in is hindering publication\r\n**important screensize:** any\r\n**device & browser:** ubuntu chrome\r\n**Comment/Question:**\r\nThese are just missing red the exclamation point icons. It's a simple fix in forms.py\r\n\r\nScreenshot?\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs.update({\n 'autocomplete': 'off', 'autofill': 'off'\n })\n", "path": "meinberlin/apps/bplan/forms.py"}], "after_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email',\n 'start_date', 'end_date']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs.update({\n 'autocomplete': 'off', 'autofill': 'off'\n })\n", "path": "meinberlin/apps/bplan/forms.py"}]}
| 814 | 149 |
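The publication markers in the dashboard are driven by the form's `required_for_project_publish` list, so the regression is easy to pin down with a check against the form class. A minimal sketch of such a test (the test module and function name are assumed; only the `Meta` attribute it inspects comes from the patched form):

```python
from meinberlin.apps.bplan.forms import BplanProjectForm


def test_dates_are_required_for_publication():
    # The dashboard reads this list to decide which fields block publication.
    required = BplanProjectForm.Meta.required_for_project_publish
    assert 'start_date' in required
    assert 'end_date' in required
```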
gh_patches_debug_12785
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-8076
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catch ValueError during network discovery for when JSON parsing fails
<!--
Instructions:
* Fill out the sections below, replace …'s with information about your issue
* Use the 'preview' function above this text box to verify formatting before submitting
-->
### Observed behavior
<!--
Description of the behavior that was observed, including screenshots or other references when applicable
-->
When trying to import content channels (from local network or internet), no error message is produced even though the connection isn't established. (The "trying to connect" message stays on forever) This happens when there is a typo in the address. (I used kolibri-demo.learningequality.omg) When there's no internet, an error message DOES get produced.
### Expected behavior
<!--
Description of what behavior was expected but did not occur
-->
An error message should've been produced.
### User-facing consequences
<!--
Implications and real-world consequences for learners, coaches, admins, and other users of the application
-->
The user will not know how long they have to wait to see if the connection is being established.
### Errors and logs
<!--
Relevant logs from:
* the command line
* ~/.kolibri/logs/kolibri.txt
* the browser console
Please wrap errors in triple backticks for clean formatting like this:
```
01:10 info: something happened
01:12 error: something bad happened
```
-->
…
### Steps to reproduce
<!--
Precise steps that someone else can follow in order to see this behavior
-->
Import content channels from local network or internet >input "kolibri-demo.learningequality.omg" or something else with a typo in it.
### Context
<!--
Tell us about your environment, including:
* Kolibri version
* Operating system
* Browser
-->
Kolibri version: 0.14.3
Operating system: Android 5.1
Browser: Android App
Device: BLU Advance 5.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/discovery/utils/network/client.py`
Content:
```
1 import logging
2
3 import requests
4 from six.moves.urllib.parse import urljoin
5
6 from . import errors
7 from .urls import get_normalized_url_variations
8
9 logger = logging.getLogger(__name__)
10
11
12 class NetworkClient(object):
13 DEFAULT_TIMEOUT_IN_SECS = 5
14
15 def __init__(self, base_url=None, address=None, timeout=None, **kwargs):
16 """If an explicit base_url is already known, provide that. If a vague address is provided, we can try to infer the base_url"""
17 if not base_url and not address:
18 raise Exception(
19 "You must provide either a `base_url` or `address` argument"
20 )
21 self.timeout = timeout or self.DEFAULT_TIMEOUT_IN_SECS
22 self.session = requests.Session(**kwargs)
23 if base_url:
24 self.base_url = self._attempt_connections([base_url])
25 else:
26 # normalize the URL and try a number of variations until we find one that's able to connect
27 logger.info(
28 "Attempting connections to variations of the URL: {}".format(address)
29 )
30 self.base_url = self._attempt_connections(
31 get_normalized_url_variations(address)
32 )
33
34 def _attempt_connections(self, urls):
35 # try each of the URLs in turn, returning the first one that succeeds
36 for url in urls:
37 try:
38 logger.info("Attempting connection to: {}".format(url))
39 response = self.get(
40 "/api/public/info/",
41 base_url=url,
42 timeout=self.timeout,
43 allow_redirects=True,
44 )
45 # check that we successfully connected, and if we were redirected that it's still the right endpoint
46 if response.status_code == 200 and response.url.rstrip("/").endswith(
47 "/api/public/info"
48 ):
49 self.info = response.json()
50 if self.info["application"] not in ["studio", "kolibri"]:
51 raise requests.RequestException(
52 "Server is not running Kolibri or Studio"
53 )
54 logger.info("Success! We connected to: {}".format(response.url))
55 return response.url.rstrip("/").replace("api/public/info", "")
56 except (requests.RequestException) as e:
57 logger.info("Unable to connect: {}".format(e))
58
59 # we weren't able to connect to any of the URL variations, so all we can do is throw
60 raise errors.NetworkLocationNotFound()
61
62 def get(self, path, **kwargs):
63 return self.request("get", path, **kwargs)
64
65 def head(self, path, **kwargs):
66 return self.request("head", path, **kwargs)
67
68 def request(self, method, path, base_url=None, **kwargs):
69 base_url = base_url or self.base_url
70 url = urljoin(base_url, path)
71 response = getattr(self.session, method)(url, **kwargs)
72 response.raise_for_status()
73 return response
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/core/discovery/utils/network/client.py b/kolibri/core/discovery/utils/network/client.py
--- a/kolibri/core/discovery/utils/network/client.py
+++ b/kolibri/core/discovery/utils/network/client.py
@@ -55,6 +55,10 @@
return response.url.rstrip("/").replace("api/public/info", "")
except (requests.RequestException) as e:
logger.info("Unable to connect: {}".format(e))
+ except ValueError:
+ logger.info(
+ "Invalid JSON returned when attempting to connect to a remote server"
+ )
# we weren't able to connect to any of the URL variations, so all we can do is throw
raise errors.NetworkLocationNotFound()
|
{"golden_diff": "diff --git a/kolibri/core/discovery/utils/network/client.py b/kolibri/core/discovery/utils/network/client.py\n--- a/kolibri/core/discovery/utils/network/client.py\n+++ b/kolibri/core/discovery/utils/network/client.py\n@@ -55,6 +55,10 @@\n return response.url.rstrip(\"/\").replace(\"api/public/info\", \"\")\n except (requests.RequestException) as e:\n logger.info(\"Unable to connect: {}\".format(e))\n+ except ValueError:\n+ logger.info(\n+ \"Invalid JSON returned when attempting to connect to a remote server\"\n+ )\n \n # we weren't able to connect to any of the URL variations, so all we can do is throw\n raise errors.NetworkLocationNotFound()\n", "issue": "Catch ValueError during network discovery for when JSON parsing fails\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n### Observed behavior\r\n<!--\r\nDescription of the behavior that was observed, including screenshots or other references when applicable\r\n-->\r\n\r\nWhen trying to import content channels (from local network or internet), no error message is produced even though the connection isn't established. (The \"trying to connect\" message stays on forever) This happens when there is a typo in the address. (I used kolibri-demo.learningequality.omg) When there's no internet, an error message DOES get produced. \r\n\r\n### Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nAn error message should've been produced.\r\n\r\n### User-facing consequences\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nThe user will not know how long they have to wait to see if the connection is being established.\r\n\r\n### Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n\u2026\r\n\r\n### Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\nImport content channels from local network or internet >input \"kolibri-demo.learningequality.omg\" or something else with a typo in it.\r\n\r\n### Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\nKolibri version: 0.14.3\r\nOperating system: Android 5.1\r\nBrowser: Android App\r\nDevice: BLU Advance 5.0\r\n\n", "before_files": [{"content": "import logging\n\nimport requests\nfrom six.moves.urllib.parse import urljoin\n\nfrom . import errors\nfrom .urls import get_normalized_url_variations\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkClient(object):\n DEFAULT_TIMEOUT_IN_SECS = 5\n\n def __init__(self, base_url=None, address=None, timeout=None, **kwargs):\n \"\"\"If an explicit base_url is already known, provide that. 
If a vague address is provided, we can try to infer the base_url\"\"\"\n if not base_url and not address:\n raise Exception(\n \"You must provide either a `base_url` or `address` argument\"\n )\n self.timeout = timeout or self.DEFAULT_TIMEOUT_IN_SECS\n self.session = requests.Session(**kwargs)\n if base_url:\n self.base_url = self._attempt_connections([base_url])\n else:\n # normalize the URL and try a number of variations until we find one that's able to connect\n logger.info(\n \"Attempting connections to variations of the URL: {}\".format(address)\n )\n self.base_url = self._attempt_connections(\n get_normalized_url_variations(address)\n )\n\n def _attempt_connections(self, urls):\n # try each of the URLs in turn, returning the first one that succeeds\n for url in urls:\n try:\n logger.info(\"Attempting connection to: {}\".format(url))\n response = self.get(\n \"/api/public/info/\",\n base_url=url,\n timeout=self.timeout,\n allow_redirects=True,\n )\n # check that we successfully connected, and if we were redirected that it's still the right endpoint\n if response.status_code == 200 and response.url.rstrip(\"/\").endswith(\n \"/api/public/info\"\n ):\n self.info = response.json()\n if self.info[\"application\"] not in [\"studio\", \"kolibri\"]:\n raise requests.RequestException(\n \"Server is not running Kolibri or Studio\"\n )\n logger.info(\"Success! We connected to: {}\".format(response.url))\n return response.url.rstrip(\"/\").replace(\"api/public/info\", \"\")\n except (requests.RequestException) as e:\n logger.info(\"Unable to connect: {}\".format(e))\n\n # we weren't able to connect to any of the URL variations, so all we can do is throw\n raise errors.NetworkLocationNotFound()\n\n def get(self, path, **kwargs):\n return self.request(\"get\", path, **kwargs)\n\n def head(self, path, **kwargs):\n return self.request(\"head\", path, **kwargs)\n\n def request(self, method, path, base_url=None, **kwargs):\n base_url = base_url or self.base_url\n url = urljoin(base_url, path)\n response = getattr(self.session, method)(url, **kwargs)\n response.raise_for_status()\n return response\n", "path": "kolibri/core/discovery/utils/network/client.py"}], "after_files": [{"content": "import logging\n\nimport requests\nfrom six.moves.urllib.parse import urljoin\n\nfrom . import errors\nfrom .urls import get_normalized_url_variations\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkClient(object):\n DEFAULT_TIMEOUT_IN_SECS = 5\n\n def __init__(self, base_url=None, address=None, timeout=None, **kwargs):\n \"\"\"If an explicit base_url is already known, provide that. 
If a vague address is provided, we can try to infer the base_url\"\"\"\n if not base_url and not address:\n raise Exception(\n \"You must provide either a `base_url` or `address` argument\"\n )\n self.timeout = timeout or self.DEFAULT_TIMEOUT_IN_SECS\n self.session = requests.Session(**kwargs)\n if base_url:\n self.base_url = self._attempt_connections([base_url])\n else:\n # normalize the URL and try a number of variations until we find one that's able to connect\n logger.info(\n \"Attempting connections to variations of the URL: {}\".format(address)\n )\n self.base_url = self._attempt_connections(\n get_normalized_url_variations(address)\n )\n\n def _attempt_connections(self, urls):\n # try each of the URLs in turn, returning the first one that succeeds\n for url in urls:\n try:\n logger.info(\"Attempting connection to: {}\".format(url))\n response = self.get(\n \"/api/public/info/\",\n base_url=url,\n timeout=self.timeout,\n allow_redirects=True,\n )\n # check that we successfully connected, and if we were redirected that it's still the right endpoint\n if response.status_code == 200 and response.url.rstrip(\"/\").endswith(\n \"/api/public/info\"\n ):\n self.info = response.json()\n if self.info[\"application\"] not in [\"studio\", \"kolibri\"]:\n raise requests.RequestException(\n \"Server is not running Kolibri or Studio\"\n )\n logger.info(\"Success! We connected to: {}\".format(response.url))\n return response.url.rstrip(\"/\").replace(\"api/public/info\", \"\")\n except (requests.RequestException) as e:\n logger.info(\"Unable to connect: {}\".format(e))\n except ValueError:\n logger.info(\n \"Invalid JSON returned when attempting to connect to a remote server\"\n )\n\n # we weren't able to connect to any of the URL variations, so all we can do is throw\n raise errors.NetworkLocationNotFound()\n\n def get(self, path, **kwargs):\n return self.request(\"get\", path, **kwargs)\n\n def head(self, path, **kwargs):\n return self.request(\"head\", path, **kwargs)\n\n def request(self, method, path, base_url=None, **kwargs):\n base_url = base_url or self.base_url\n url = urljoin(base_url, path)\n response = getattr(self.session, method)(url, **kwargs)\n response.raise_for_status()\n return response\n", "path": "kolibri/core/discovery/utils/network/client.py"}]}
| 1,421 | 160 |
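`Response.json()` raises a `ValueError` (a `json.JSONDecodeError`) whenever the body is not valid JSON, for example an HTML landing page served with a 200 status, and the original `except (requests.RequestException)` clause was not written to catch it, so the error propagated out of the loop instead of ending in `NetworkLocationNotFound`. A standalone illustration that assumes nothing beyond `requests` itself:

```python
import requests

# Build a fake 200 response whose body is an HTML page rather than JSON;
# poking _content directly is only for this demonstration.
response = requests.Response()
response.status_code = 200
response._content = b"<html>not an api</html>"

try:
    response.json()
except ValueError as e:
    print("Invalid JSON returned: {}".format(e))
```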
gh_patches_debug_12113
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1465
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gRPC server interceptor raises AttributeError
When a gRPC service is not found, the Datadog interceptor raises an AttributeError:
```
ERROR grpc._server:_server.py:732 Exception servicing handler: 'NoneType' object has no attribute '_pin'
...
File "/src/ddtrace/contrib/grpc/server_interceptor.py", line 153, in intercept_service
return self._fn(continuation, handler_call_details)
File "/src/ddtrace/contrib/grpc/server_interceptor.py", line 22, in interceptor_function
return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)
File "/src/ddtrace/contrib/grpc/server_interceptor.py", line 58, in __init__
self._pin = pin
AttributeError: 'NoneType' object has no attribute '_pin'
```
The gRPC documentation [points](https://grpc.github.io/grpc/python/grpc.html#grpc.ServerInterceptor.intercept_service) out that `continuation()` in a server-side interceptor can return a `None`, which is not handled by the Datadog interceptor.
This applies to `ddtrace>=0.28.0`, since that is when the server interceptor was added.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/grpc/server_interceptor.py`
Content:
```
1 import grpc
2 from ddtrace.vendor import wrapt
3
4 from ddtrace import config
5 from ddtrace.ext import errors
6 from ddtrace.compat import to_unicode
7
8 from ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY
9 from ...ext import SpanTypes
10 from ...propagation.http import HTTPPropagator
11 from . import constants
12 from .utils import parse_method_path
13
14
15 def create_server_interceptor(pin):
16 def interceptor_function(continuation, handler_call_details):
17 if not pin.enabled:
18 return continuation(handler_call_details)
19
20 rpc_method_handler = continuation(handler_call_details)
21 return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)
22
23 return _ServerInterceptor(interceptor_function)
24
25
26 def _handle_server_exception(server_context, span):
27 if server_context is not None and \
28 hasattr(server_context, '_state') and \
29 server_context._state is not None:
30 code = to_unicode(server_context._state.code)
31 details = to_unicode(server_context._state.details)
32 span.error = 1
33 span.set_tag(errors.ERROR_MSG, details)
34 span.set_tag(errors.ERROR_TYPE, code)
35
36
37 def _wrap_response_iterator(response_iterator, server_context, span):
38 try:
39 for response in response_iterator:
40 yield response
41 except Exception:
42 span.set_traceback()
43 _handle_server_exception(server_context, span)
44 raise
45 finally:
46 span.finish()
47
48
49 class _TracedRpcMethodHandler(wrapt.ObjectProxy):
50 def __init__(self, pin, handler_call_details, wrapped):
51 super(_TracedRpcMethodHandler, self).__init__(wrapped)
52 self._pin = pin
53 self._handler_call_details = handler_call_details
54
55 def _fn(self, method_kind, behavior, args, kwargs):
56 if config.grpc_server.distributed_tracing_enabled:
57 headers = dict(self._handler_call_details.invocation_metadata)
58 propagator = HTTPPropagator()
59 context = propagator.extract(headers)
60
61 if context.trace_id:
62 self._pin.tracer.context_provider.activate(context)
63
64 tracer = self._pin.tracer
65
66 span = tracer.trace(
67 'grpc',
68 span_type=SpanTypes.GRPC,
69 service=self._pin.service,
70 resource=self._handler_call_details.method,
71 )
72 span.set_tag(SPAN_MEASURED_KEY)
73
74 method_path = self._handler_call_details.method
75 method_package, method_service, method_name = parse_method_path(method_path)
76 span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path)
77 span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)
78 span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)
79 span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name)
80 span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)
81 span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_SERVER)
82
83 sample_rate = config.grpc_server.get_analytics_sample_rate()
84 if sample_rate is not None:
85 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
86
87 # access server context by taking second argument as server context
88 # if not found, skip using context to tag span with server state information
89 server_context = args[1] if isinstance(args[1], grpc.ServicerContext) else None
90
91 if self._pin.tags:
92 span.set_tags(self._pin.tags)
93
94 try:
95 response_or_iterator = behavior(*args, **kwargs)
96
97 if self.__wrapped__.response_streaming:
98 response_or_iterator = _wrap_response_iterator(response_or_iterator, server_context, span)
99 except Exception:
100 span.set_traceback()
101 _handle_server_exception(server_context, span)
102 raise
103 finally:
104 if not self.__wrapped__.response_streaming:
105 span.finish()
106
107 return response_or_iterator
108
109 def unary_unary(self, *args, **kwargs):
110 return self._fn(
111 constants.GRPC_METHOD_KIND_UNARY,
112 self.__wrapped__.unary_unary,
113 args,
114 kwargs
115 )
116
117 def unary_stream(self, *args, **kwargs):
118 return self._fn(
119 constants.GRPC_METHOD_KIND_SERVER_STREAMING,
120 self.__wrapped__.unary_stream,
121 args,
122 kwargs
123 )
124
125 def stream_unary(self, *args, **kwargs):
126 return self._fn(
127 constants.GRPC_METHOD_KIND_CLIENT_STREAMING,
128 self.__wrapped__.stream_unary,
129 args,
130 kwargs
131 )
132
133 def stream_stream(self, *args, **kwargs):
134 return self._fn(
135 constants.GRPC_METHOD_KIND_BIDI_STREAMING,
136 self.__wrapped__.stream_stream,
137 args,
138 kwargs
139 )
140
141
142 class _ServerInterceptor(grpc.ServerInterceptor):
143 def __init__(self, interceptor_function):
144 self._fn = interceptor_function
145
146 def intercept_service(self, continuation, handler_call_details):
147 return self._fn(continuation, handler_call_details)
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/grpc/server_interceptor.py b/ddtrace/contrib/grpc/server_interceptor.py
--- a/ddtrace/contrib/grpc/server_interceptor.py
+++ b/ddtrace/contrib/grpc/server_interceptor.py
@@ -18,7 +18,15 @@
return continuation(handler_call_details)
rpc_method_handler = continuation(handler_call_details)
- return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)
+
+ # continuation returns an RpcMethodHandler instance if the RPC is
+ # considered serviced, or None otherwise
+ # https://grpc.github.io/grpc/python/grpc.html#grpc.ServerInterceptor.intercept_service
+
+ if rpc_method_handler:
+ return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)
+
+ return rpc_method_handler
return _ServerInterceptor(interceptor_function)
|
{"golden_diff": "diff --git a/ddtrace/contrib/grpc/server_interceptor.py b/ddtrace/contrib/grpc/server_interceptor.py\n--- a/ddtrace/contrib/grpc/server_interceptor.py\n+++ b/ddtrace/contrib/grpc/server_interceptor.py\n@@ -18,7 +18,15 @@\n return continuation(handler_call_details)\n \n rpc_method_handler = continuation(handler_call_details)\n- return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)\n+\n+ # continuation returns an RpcMethodHandler instance if the RPC is\n+ # considered serviced, or None otherwise\n+ # https://grpc.github.io/grpc/python/grpc.html#grpc.ServerInterceptor.intercept_service\n+\n+ if rpc_method_handler:\n+ return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)\n+\n+ return rpc_method_handler\n \n return _ServerInterceptor(interceptor_function)\n", "issue": "gRPC server interceptor raises AttributeError\nWhen a gRPC service is not found, the Datadog interceptor raises an AttributeError:\r\n\r\n```\r\nERROR grpc._server:_server.py:732 Exception servicing handler: 'NoneType' object has no attribute '_pin'\r\n...\r\n File \"/src/ddtrace/contrib/grpc/server_interceptor.py\", line 153, in intercept_service\r\n return self._fn(continuation, handler_call_details)\r\n File \"/src/ddtrace/contrib/grpc/server_interceptor.py\", line 22, in interceptor_function\r\n return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)\r\n File \"/src/ddtrace/contrib/grpc/server_interceptor.py\", line 58, in __init__\r\n self._pin = pin\r\nAttributeError: 'NoneType' object has no attribute '_pin'\r\n```\r\n\r\nThe gRPC documentation [points](https://grpc.github.io/grpc/python/grpc.html#grpc.ServerInterceptor.intercept_service) out that `continuation()` in a server-side interceptor can return a `None`, which is not handled by the Datadog interceptor.\r\n\r\nThis applies to `ddtrace>=0.28.0` when the server interceptor was added.\n", "before_files": [{"content": "import grpc\nfrom ddtrace.vendor import wrapt\n\nfrom ddtrace import config\nfrom ddtrace.ext import errors\nfrom ddtrace.compat import to_unicode\n\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...propagation.http import HTTPPropagator\nfrom . 
import constants\nfrom .utils import parse_method_path\n\n\ndef create_server_interceptor(pin):\n def interceptor_function(continuation, handler_call_details):\n if not pin.enabled:\n return continuation(handler_call_details)\n\n rpc_method_handler = continuation(handler_call_details)\n return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)\n\n return _ServerInterceptor(interceptor_function)\n\n\ndef _handle_server_exception(server_context, span):\n if server_context is not None and \\\n hasattr(server_context, '_state') and \\\n server_context._state is not None:\n code = to_unicode(server_context._state.code)\n details = to_unicode(server_context._state.details)\n span.error = 1\n span.set_tag(errors.ERROR_MSG, details)\n span.set_tag(errors.ERROR_TYPE, code)\n\n\ndef _wrap_response_iterator(response_iterator, server_context, span):\n try:\n for response in response_iterator:\n yield response\n except Exception:\n span.set_traceback()\n _handle_server_exception(server_context, span)\n raise\n finally:\n span.finish()\n\n\nclass _TracedRpcMethodHandler(wrapt.ObjectProxy):\n def __init__(self, pin, handler_call_details, wrapped):\n super(_TracedRpcMethodHandler, self).__init__(wrapped)\n self._pin = pin\n self._handler_call_details = handler_call_details\n\n def _fn(self, method_kind, behavior, args, kwargs):\n if config.grpc_server.distributed_tracing_enabled:\n headers = dict(self._handler_call_details.invocation_metadata)\n propagator = HTTPPropagator()\n context = propagator.extract(headers)\n\n if context.trace_id:\n self._pin.tracer.context_provider.activate(context)\n\n tracer = self._pin.tracer\n\n span = tracer.trace(\n 'grpc',\n span_type=SpanTypes.GRPC,\n service=self._pin.service,\n resource=self._handler_call_details.method,\n )\n span.set_tag(SPAN_MEASURED_KEY)\n\n method_path = self._handler_call_details.method\n method_package, method_service, method_name = parse_method_path(method_path)\n span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path)\n span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\n span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)\n span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name)\n span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)\n span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_SERVER)\n\n sample_rate = config.grpc_server.get_analytics_sample_rate()\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n # access server context by taking second argument as server context\n # if not found, skip using context to tag span with server state information\n server_context = args[1] if isinstance(args[1], grpc.ServicerContext) else None\n\n if self._pin.tags:\n span.set_tags(self._pin.tags)\n\n try:\n response_or_iterator = behavior(*args, **kwargs)\n\n if self.__wrapped__.response_streaming:\n response_or_iterator = _wrap_response_iterator(response_or_iterator, server_context, span)\n except Exception:\n span.set_traceback()\n _handle_server_exception(server_context, span)\n raise\n finally:\n if not self.__wrapped__.response_streaming:\n span.finish()\n\n return response_or_iterator\n\n def unary_unary(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_UNARY,\n self.__wrapped__.unary_unary,\n args,\n kwargs\n )\n\n def unary_stream(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_SERVER_STREAMING,\n self.__wrapped__.unary_stream,\n args,\n kwargs\n )\n\n def stream_unary(self, *args, 
**kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_CLIENT_STREAMING,\n self.__wrapped__.stream_unary,\n args,\n kwargs\n )\n\n def stream_stream(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_BIDI_STREAMING,\n self.__wrapped__.stream_stream,\n args,\n kwargs\n )\n\n\nclass _ServerInterceptor(grpc.ServerInterceptor):\n def __init__(self, interceptor_function):\n self._fn = interceptor_function\n\n def intercept_service(self, continuation, handler_call_details):\n return self._fn(continuation, handler_call_details)\n", "path": "ddtrace/contrib/grpc/server_interceptor.py"}], "after_files": [{"content": "import grpc\nfrom ddtrace.vendor import wrapt\n\nfrom ddtrace import config\nfrom ddtrace.ext import errors\nfrom ddtrace.compat import to_unicode\n\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY, SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...propagation.http import HTTPPropagator\nfrom . import constants\nfrom .utils import parse_method_path\n\n\ndef create_server_interceptor(pin):\n def interceptor_function(continuation, handler_call_details):\n if not pin.enabled:\n return continuation(handler_call_details)\n\n rpc_method_handler = continuation(handler_call_details)\n\n # continuation returns an RpcMethodHandler instance if the RPC is\n # considered serviced, or None otherwise\n # https://grpc.github.io/grpc/python/grpc.html#grpc.ServerInterceptor.intercept_service\n\n if rpc_method_handler:\n return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)\n\n return rpc_method_handler\n\n return _ServerInterceptor(interceptor_function)\n\n\ndef _handle_server_exception(server_context, span):\n if server_context is not None and \\\n hasattr(server_context, '_state') and \\\n server_context._state is not None:\n code = to_unicode(server_context._state.code)\n details = to_unicode(server_context._state.details)\n span.error = 1\n span.set_tag(errors.ERROR_MSG, details)\n span.set_tag(errors.ERROR_TYPE, code)\n\n\ndef _wrap_response_iterator(response_iterator, server_context, span):\n try:\n for response in response_iterator:\n yield response\n except Exception:\n span.set_traceback()\n _handle_server_exception(server_context, span)\n raise\n finally:\n span.finish()\n\n\nclass _TracedRpcMethodHandler(wrapt.ObjectProxy):\n def __init__(self, pin, handler_call_details, wrapped):\n super(_TracedRpcMethodHandler, self).__init__(wrapped)\n self._pin = pin\n self._handler_call_details = handler_call_details\n\n def _fn(self, method_kind, behavior, args, kwargs):\n if config.grpc_server.distributed_tracing_enabled:\n headers = dict(self._handler_call_details.invocation_metadata)\n propagator = HTTPPropagator()\n context = propagator.extract(headers)\n\n if context.trace_id:\n self._pin.tracer.context_provider.activate(context)\n\n tracer = self._pin.tracer\n\n span = tracer.trace(\n 'grpc',\n span_type=SpanTypes.GRPC,\n service=self._pin.service,\n resource=self._handler_call_details.method,\n )\n span.set_tag(SPAN_MEASURED_KEY)\n\n method_path = self._handler_call_details.method\n method_package, method_service, method_name = parse_method_path(method_path)\n span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path)\n span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)\n span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)\n span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name)\n span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)\n span.set_tag(constants.GRPC_SPAN_KIND_KEY, 
constants.GRPC_SPAN_KIND_VALUE_SERVER)\n\n sample_rate = config.grpc_server.get_analytics_sample_rate()\n if sample_rate is not None:\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)\n\n # access server context by taking second argument as server context\n # if not found, skip using context to tag span with server state information\n server_context = args[1] if isinstance(args[1], grpc.ServicerContext) else None\n\n if self._pin.tags:\n span.set_tags(self._pin.tags)\n\n try:\n response_or_iterator = behavior(*args, **kwargs)\n\n if self.__wrapped__.response_streaming:\n response_or_iterator = _wrap_response_iterator(response_or_iterator, server_context, span)\n except Exception:\n span.set_traceback()\n _handle_server_exception(server_context, span)\n raise\n finally:\n if not self.__wrapped__.response_streaming:\n span.finish()\n\n return response_or_iterator\n\n def unary_unary(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_UNARY,\n self.__wrapped__.unary_unary,\n args,\n kwargs\n )\n\n def unary_stream(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_SERVER_STREAMING,\n self.__wrapped__.unary_stream,\n args,\n kwargs\n )\n\n def stream_unary(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_CLIENT_STREAMING,\n self.__wrapped__.stream_unary,\n args,\n kwargs\n )\n\n def stream_stream(self, *args, **kwargs):\n return self._fn(\n constants.GRPC_METHOD_KIND_BIDI_STREAMING,\n self.__wrapped__.stream_stream,\n args,\n kwargs\n )\n\n\nclass _ServerInterceptor(grpc.ServerInterceptor):\n def __init__(self, interceptor_function):\n self._fn = interceptor_function\n\n def intercept_service(self, continuation, handler_call_details):\n return self._fn(continuation, handler_call_details)\n", "path": "ddtrace/contrib/grpc/server_interceptor.py"}]}
| 1,914 | 188 |
gh_patches_debug_18834
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3308
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider kopps is broken
During the global build at 2021-06-16-14-42-20, spider **kopps** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/kopps.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/kopps.py`
Content:
```
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4
5 DAY_MAPPING = {
6 "Mon": "Mo",
7 "Tues": "Tu",
8 "Wed": "We",
9 "Thur": "Th",
10 "Fri": "Fr",
11 "Sat": "Sa",
12 "Sun": "Su"
13 }
14 class KoppsSpider(scrapy.Spider):
15 name = "kopps"
16 item_attributes = { 'brand': "Kopps" }
17 allowed_domains = ["www.kopps.com"]
18 download_delay = 1.5
19 start_urls = (
20 'https://www.kopps.com/',
21 )
22
23 def parse_day(self, day):
24 if re.search('-', day):
25 days = day.split('-')
26 osm_days = []
27 if len(days) == 2:
28 for day in days:
29 osm_day = DAY_MAPPING[day.strip()]
30 osm_days.append(osm_day)
31 return "-".join(osm_days)
32
33 def parse_times(self, times):
34 if times.strip() == 'Open 24 hours':
35 return '24/7'
36 hours_to = [x.strip() for x in times.split('-')]
37 cleaned_times = []
38
39 for hour in hours_to:
40 if re.search('pm$', hour):
41 hour = re.sub('pm', '', hour).strip()
42 hour_min = hour.split(":")
43 if int(hour_min[0]) < 12:
44 hour_min[0] = str(12 + int(hour_min[0]))
45 cleaned_times.append(":".join(hour_min))
46
47 if re.search('am$', hour):
48 hour = re.sub('am', '', hour).strip()
49 hour_min = hour.split(":")
50 if len(hour_min[0]) <2:
51 hour_min[0] = hour_min[0].zfill(2)
52 else:
53 hour_min[0] = str( int(hour_min[0]))
54
55 cleaned_times.append(":".join(hour_min))
56 return "-".join(cleaned_times)
57
58 def parse_hours(self, lis):
59 hours = []
60 for li in lis:
61 day_times = li.xpath('normalize-space(./text())').extract_first()
62 day = re.findall(r"^[a-zA-Z-]+" , day_times)
63 if(len(day)>0):
64 day = day[0]
65 else:
66 day = 'Mon-Sun'
67 times = re.findall(r"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m" ,day_times)
68 times = times[0]
69 if times and day:
70 parsed_time = self.parse_times(times)
71 parsed_day = self.parse_day(day)
72 hours.append(parsed_day + ' ' + parsed_time)
73
74 return "; ".join(hours)
75
76
77 def parse(self, response):
78 locations = response.xpath('//div[@id="locations"]/div/div')
79
80 for location in locations:
81 properties = {
82 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),
83 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),
84 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\r\n' ,''),
85 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],
86 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\r\n' ,''),
87 'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(),
88 'website': response.url,
89 'lat':re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],
90 'lon': re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],
91 }
92
93 hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))
94 if hours:
95 properties['opening_hours'] = hours
96
97 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/kopps.py b/locations/spiders/kopps.py
--- a/locations/spiders/kopps.py
+++ b/locations/spiders/kopps.py
@@ -26,8 +26,9 @@
osm_days = []
if len(days) == 2:
for day in days:
- osm_day = DAY_MAPPING[day.strip()]
- osm_days.append(osm_day)
+ if day.strip() in DAY_MAPPING:
+ osm_day = DAY_MAPPING[day.strip()]
+ osm_days.append(osm_day)
return "-".join(osm_days)
def parse_times(self, times):
@@ -69,7 +70,8 @@
if times and day:
parsed_time = self.parse_times(times)
parsed_day = self.parse_day(day)
- hours.append(parsed_day + ' ' + parsed_time)
+ if parsed_day and parsed_time:
+ hours.append(parsed_day + ' ' + parsed_time)
return "; ".join(hours)
|
{"golden_diff": "diff --git a/locations/spiders/kopps.py b/locations/spiders/kopps.py\n--- a/locations/spiders/kopps.py\n+++ b/locations/spiders/kopps.py\n@@ -26,8 +26,9 @@\n osm_days = []\n if len(days) == 2:\n for day in days:\n- osm_day = DAY_MAPPING[day.strip()]\n- osm_days.append(osm_day)\n+ if day.strip() in DAY_MAPPING:\n+ osm_day = DAY_MAPPING[day.strip()]\n+ osm_days.append(osm_day)\n return \"-\".join(osm_days)\n \n def parse_times(self, times):\n@@ -69,7 +70,8 @@\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n- hours.append(parsed_day + ' ' + parsed_time)\n+ if parsed_day and parsed_time:\n+ hours.append(parsed_day + ' ' + parsed_time)\n \n return \"; \".join(hours)\n", "issue": "Spider kopps is broken\nDuring the global build at 2021-06-16-14-42-20, spider **kopps** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/kopps.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n \"Mon\": \"Mo\",\n \"Tues\": \"Tu\",\n \"Wed\": \"We\",\n \"Thur\": \"Th\",\n \"Fri\": \"Fr\",\n \"Sat\": \"Sa\",\n \"Sun\": \"Su\"\n}\nclass KoppsSpider(scrapy.Spider):\n name = \"kopps\"\n item_attributes = { 'brand': \"Kopps\" }\n allowed_domains = [\"www.kopps.com\"]\n download_delay = 1.5\n start_urls = (\n 'https://www.kopps.com/',\n )\n\n def parse_day(self, day):\n if re.search('-', day):\n days = day.split('-')\n osm_days = []\n if len(days) == 2:\n for day in days:\n osm_day = DAY_MAPPING[day.strip()]\n osm_days.append(osm_day)\n return \"-\".join(osm_days)\n\n def parse_times(self, times):\n if times.strip() == 'Open 24 hours':\n return '24/7'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str( int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day_times = li.xpath('normalize-space(./text())').extract_first()\n day = re.findall(r\"^[a-zA-Z-]+\" , day_times)\n if(len(day)>0):\n day = day[0]\n else:\n day = 'Mon-Sun'\n times = re.findall(r\"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m\" ,day_times)\n times = times[0]\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n\n def parse(self, response):\n locations = response.xpath('//div[@id=\"locations\"]/div/div')\n\n for location in locations:\n properties = {\n 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),\n 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),\n 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\\r\\n' 
,''),\n 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],\n 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\\r\\n' ,''),\n 'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(),\n 'website': response.url,\n 'lat':re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],\n 'lon': re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],\n }\n\n hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/kopps.py"}], "after_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n \"Mon\": \"Mo\",\n \"Tues\": \"Tu\",\n \"Wed\": \"We\",\n \"Thur\": \"Th\",\n \"Fri\": \"Fr\",\n \"Sat\": \"Sa\",\n \"Sun\": \"Su\"\n}\nclass KoppsSpider(scrapy.Spider):\n name = \"kopps\"\n item_attributes = { 'brand': \"Kopps\" }\n allowed_domains = [\"www.kopps.com\"]\n download_delay = 1.5\n start_urls = (\n 'https://www.kopps.com/',\n )\n\n def parse_day(self, day):\n if re.search('-', day):\n days = day.split('-')\n osm_days = []\n if len(days) == 2:\n for day in days:\n if day.strip() in DAY_MAPPING:\n osm_day = DAY_MAPPING[day.strip()]\n osm_days.append(osm_day)\n return \"-\".join(osm_days)\n\n def parse_times(self, times):\n if times.strip() == 'Open 24 hours':\n return '24/7'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str( int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day_times = li.xpath('normalize-space(./text())').extract_first()\n day = re.findall(r\"^[a-zA-Z-]+\" , day_times)\n if(len(day)>0):\n day = day[0]\n else:\n day = 'Mon-Sun'\n times = re.findall(r\"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m\" ,day_times)\n times = times[0]\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n if parsed_day and parsed_time:\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n\n def parse(self, response):\n locations = response.xpath('//div[@id=\"locations\"]/div/div')\n\n for location in locations:\n properties = {\n 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),\n 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),\n 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\\r\\n' ,''),\n 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],\n 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\\r\\n' ,''),\n 'ref': 
location.xpath('normalize-space(./div/address/a/@href)').extract_first(),\n 'website': response.url,\n 'lat':re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],\n 'lon': re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],\n }\n\n hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/kopps.py"}]}
| 1,588 | 228 |
gh_patches_debug_12705
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-bolts-315
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add caching for used dataset
## 🚀 Feature
atm for each CI run the datasets are downloaded (it seems so, as I observed some downloading failures)
### Motivation
Speeds up runs and avoids random connection failures
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pl_bolts/models/mnist_module.py`
Content:
```
1 import os
2 from argparse import ArgumentParser
3 from warnings import warn
4
5 import torch
6 from pytorch_lightning import LightningModule, Trainer
7 from torch.nn import functional as F
8 from torch.utils.data import DataLoader, random_split
9
10 try:
11 from torchvision import transforms
12 from torchvision.datasets import MNIST
13 except ModuleNotFoundError:
14 warn('You want to use `torchvision` which is not installed yet,' # pragma: no-cover
15 ' install it with `pip install torchvision`.')
16
17
18 class LitMNIST(LightningModule):
19 def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir='', **kwargs):
20 super().__init__()
21 self.save_hyperparameters()
22
23 self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)
24 self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)
25
26 self.mnist_train = None
27 self.mnist_val = None
28
29 def forward(self, x):
30 x = x.view(x.size(0), -1)
31 x = torch.relu(self.l1(x))
32 x = torch.relu(self.l2(x))
33 return x
34
35 def training_step(self, batch, batch_idx):
36 x, y = batch
37 y_hat = self(x)
38 loss = F.cross_entropy(y_hat, y)
39 self.log('train_loss', loss)
40 return loss
41
42 def validation_step(self, batch, batch_idx):
43 x, y = batch
44 y_hat = self(x)
45 loss = F.cross_entropy(y_hat, y)
46 self.log('val_loss', loss)
47
48 def test_step(self, batch, batch_idx):
49 x, y = batch
50 y_hat = self(x)
51 loss = F.cross_entropy(y_hat, y)
52 self.log('test_loss', loss)
53
54 def configure_optimizers(self):
55 return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
56
57 def prepare_data(self):
58 MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())
59
60 def train_dataloader(self):
61 dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
62 mnist_train, _ = random_split(dataset, [55000, 5000])
63 loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
64 return loader
65
66 def val_dataloader(self):
67 dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
68 _, mnist_val = random_split(dataset, [55000, 5000])
69 loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
70 return loader
71
72 def test_dataloader(self):
73 test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
74 loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
75 return loader
76
77 @staticmethod
78 def add_model_specific_args(parent_parser):
79 parser = ArgumentParser(parents=[parent_parser], add_help=False)
80 parser.add_argument('--batch_size', type=int, default=32)
81 parser.add_argument('--num_workers', type=int, default=4)
82 parser.add_argument('--hidden_dim', type=int, default=128)
83 parser.add_argument('--data_dir', type=str, default='')
84 parser.add_argument('--learning_rate', type=float, default=0.0001)
85 return parser
86
87
88 def cli_main():
89 # args
90 parser = ArgumentParser()
91 parser = Trainer.add_argparse_args(parser)
92 parser = LitMNIST.add_model_specific_args(parser)
93 args = parser.parse_args()
94
95 # model
96 model = LitMNIST(**vars(args))
97
98 # training
99 trainer = Trainer.from_argparse_args(args)
100 trainer.fit(model)
101
102
103 if __name__ == '__main__': # pragma: no cover
104 cli_main()
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py
--- a/pl_bolts/models/mnist_module.py
+++ b/pl_bolts/models/mnist_module.py
@@ -1,4 +1,3 @@
-import os
from argparse import ArgumentParser
from warnings import warn
@@ -70,7 +69,7 @@
return loader
def test_dataloader(self):
- test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
+ test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())
loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
return loader
|
{"golden_diff": "diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py\n--- a/pl_bolts/models/mnist_module.py\n+++ b/pl_bolts/models/mnist_module.py\n@@ -1,4 +1,3 @@\n-import os\n from argparse import ArgumentParser\n from warnings import warn\n \n@@ -70,7 +69,7 @@\n return loader\n \n def test_dataloader(self):\n- test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())\n+ test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())\n loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n", "issue": "add caching for used dataset\n## \ud83d\ude80 Feature\r\n\r\natm for each CI run the datasets are downloaded (seems so as I observed som downloading failers)\r\n\r\n### Motivation\r\n\r\nSpeed-up runs and avoids random connection failers\r\n\n", "before_files": [{"content": "import os\nfrom argparse import ArgumentParser\nfrom warnings import warn\n\nimport torch\nfrom pytorch_lightning import LightningModule, Trainer\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\n\ntry:\n from torchvision import transforms\n from torchvision.datasets import MNIST\nexcept ModuleNotFoundError:\n warn('You want to use `torchvision` which is not installed yet,' # pragma: no-cover\n ' install it with `pip install torchvision`.')\n\n\nclass LitMNIST(LightningModule):\n def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir='', **kwargs):\n super().__init__()\n self.save_hyperparameters()\n\n self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)\n self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)\n\n self.mnist_train = None\n self.mnist_val = None\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = torch.relu(self.l1(x))\n x = torch.relu(self.l2(x))\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('val_loss', loss)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('test_loss', loss)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n\n def prepare_data(self):\n MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())\n\n def train_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n mnist_train, _ = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def val_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n _, mnist_val = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def test_dataloader(self):\n test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())\n loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n @staticmethod\n def 
add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--hidden_dim', type=int, default=128)\n parser.add_argument('--data_dir', type=str, default='')\n parser.add_argument('--learning_rate', type=float, default=0.0001)\n return parser\n\n\ndef cli_main():\n # args\n parser = ArgumentParser()\n parser = Trainer.add_argparse_args(parser)\n parser = LitMNIST.add_model_specific_args(parser)\n args = parser.parse_args()\n\n # model\n model = LitMNIST(**vars(args))\n\n # training\n trainer = Trainer.from_argparse_args(args)\n trainer.fit(model)\n\n\nif __name__ == '__main__': # pragma: no cover\n cli_main()\n", "path": "pl_bolts/models/mnist_module.py"}], "after_files": [{"content": "from argparse import ArgumentParser\nfrom warnings import warn\n\nimport torch\nfrom pytorch_lightning import LightningModule, Trainer\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\n\ntry:\n from torchvision import transforms\n from torchvision.datasets import MNIST\nexcept ModuleNotFoundError:\n warn('You want to use `torchvision` which is not installed yet,' # pragma: no-cover\n ' install it with `pip install torchvision`.')\n\n\nclass LitMNIST(LightningModule):\n def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir='', **kwargs):\n super().__init__()\n self.save_hyperparameters()\n\n self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)\n self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)\n\n self.mnist_train = None\n self.mnist_val = None\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = torch.relu(self.l1(x))\n x = torch.relu(self.l2(x))\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('val_loss', loss)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('test_loss', loss)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n\n def prepare_data(self):\n MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())\n\n def train_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n mnist_train, _ = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def val_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n _, mnist_val = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def test_dataloader(self):\n test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())\n loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], 
add_help=False)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--hidden_dim', type=int, default=128)\n parser.add_argument('--data_dir', type=str, default='')\n parser.add_argument('--learning_rate', type=float, default=0.0001)\n return parser\n\n\ndef cli_main():\n # args\n parser = ArgumentParser()\n parser = Trainer.add_argparse_args(parser)\n parser = LitMNIST.add_model_specific_args(parser)\n args = parser.parse_args()\n\n # model\n model = LitMNIST(**vars(args))\n\n # training\n trainer = Trainer.from_argparse_args(args)\n trainer.fit(model)\n\n\nif __name__ == '__main__': # pragma: no cover\n cli_main()\n", "path": "pl_bolts/models/mnist_module.py"}]}
| 1,415 | 169 |
gh_patches_debug_5031
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2190
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Visual quirk in the login failure message
### Describe the bug
When you fail to log in on desktop (or probably any wider screen), the message saying this shows up on the left and the login fields are offset to the right. I would expect the message to appear on top, with the login fields staying in the same horizontal position.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to the login page
2. Enter invalid credentials
### Expected behaviour
The login fields do not move to the right. The error message is either shown above the login fields or next to them.
### Screenshots

### Additional context
Not sure in what other situations these message boxes are used, but it may occur in other places too. Haven't checked.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/thaliawebsite/templatetags/alert.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5
6 @register.inclusion_tag("includes/alert.html")
7 def alert(alert_type="info", message=None, dismissible=False, extra_classes=""):
8 if dismissible:
9 extra_classes += " alert-dimissable"
10 return {
11 "type": alert_type,
12 "message": message,
13 "dismissible": dismissible,
14 "extra_classes": extra_classes,
15 }
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/thaliawebsite/templatetags/alert.py b/website/thaliawebsite/templatetags/alert.py
--- a/website/thaliawebsite/templatetags/alert.py
+++ b/website/thaliawebsite/templatetags/alert.py
@@ -6,7 +6,7 @@
@register.inclusion_tag("includes/alert.html")
def alert(alert_type="info", message=None, dismissible=False, extra_classes=""):
if dismissible:
- extra_classes += " alert-dimissable"
+ extra_classes += " alert-dismissible"
return {
"type": alert_type,
"message": message,
|
{"golden_diff": "diff --git a/website/thaliawebsite/templatetags/alert.py b/website/thaliawebsite/templatetags/alert.py\n--- a/website/thaliawebsite/templatetags/alert.py\n+++ b/website/thaliawebsite/templatetags/alert.py\n@@ -6,7 +6,7 @@\n @register.inclusion_tag(\"includes/alert.html\")\n def alert(alert_type=\"info\", message=None, dismissible=False, extra_classes=\"\"):\n if dismissible:\n- extra_classes += \" alert-dimissable\"\n+ extra_classes += \" alert-dismissible\"\n return {\n \"type\": alert_type,\n \"message\": message,\n", "issue": "Visual quirk in the login failure message\n### Describe the bug\r\nWhen you fail to log in on desktop (or probably any wider screen) the message saying this shows up on the left and the login fields are offset to the right. I would expect the message to appear on top, while the login fields were to stay in the same horizontal position.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to the login page\r\n2. Enter invalid credentials\r\n\r\n### Expected behaviour\r\nThe login fields do not move to the right. The error message is either shown above the login fields or next to them.\r\n\r\n### Screenshots\r\n\r\n\r\n\r\n### Additional context\r\nNot sure in what other situations these message boxes are used, but it may occur in other places too. Haven't checked.\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/alert.html\")\ndef alert(alert_type=\"info\", message=None, dismissible=False, extra_classes=\"\"):\n if dismissible:\n extra_classes += \" alert-dimissable\"\n return {\n \"type\": alert_type,\n \"message\": message,\n \"dismissible\": dismissible,\n \"extra_classes\": extra_classes,\n }\n", "path": "website/thaliawebsite/templatetags/alert.py"}], "after_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/alert.html\")\ndef alert(alert_type=\"info\", message=None, dismissible=False, extra_classes=\"\"):\n if dismissible:\n extra_classes += \" alert-dismissible\"\n return {\n \"type\": alert_type,\n \"message\": message,\n \"dismissible\": dismissible,\n \"extra_classes\": extra_classes,\n }\n", "path": "website/thaliawebsite/templatetags/alert.py"}]}
| 621 | 144 |
gh_patches_debug_31883
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-1385
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PS-1] RPC service `push_model` implementation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/ps/parameters.py`
Content:
```
1 import tensorflow as tf
2
3 from elasticdl.python.common.ndarray import tensor_to_ndarray
4 from elasticdl.python.ps.embedding_table import create_embedding_table
5
6
7 class Parameters(object):
8 """
9 There are two kinds of parameters:
10
11 1. non-embedding parameters, or dense tensor parameters. We save it
12 in a hashmap `non-embedding_params`, the key is the parameter name,
13 the value is a tf.Variable` object.
14 2. embedding parameters, or row-sparse parameters. We save it in a
15 hashmap `embedding_params`, the key is the embedding layer name,
16 the value is an `EmbeddingTable` object.
17
18 """
19
20 def __init__(self):
21 self.init_status = False
22 self.non_embedding_params = {}
23 self.embedding_params = {}
24
25 def get_embedding_param(self, name, indices):
26 if name not in self.embedding_params:
27 raise ValueError(
28 "Please initialize embedding param %s first!", name
29 )
30 return self.embedding_params[name].get(indices)
31
32 def set_embedding_param(self, name, indices, values):
33 if name not in self.embedding_params:
34 raise ValueError(
35 "Please initialize embedding param %s first!", name
36 )
37 self.embedding_params[name].set(indices, values)
38
39 def init_from_model_pb(self, model_pb):
40 if not self.init_status:
41 tensors_pb = model_pb.param
42 embeddings_pb = model_pb.embedding_table_info
43 self._init_non_embedding_params(tensors_pb)
44 self._init_embedding_params(embeddings_pb)
45 self.init_status = True
46
47 def _init_non_embedding_params(self, tensors_pb):
48 for pb in tensors_pb:
49 name = pb.name
50 arr = tensor_to_ndarray(pb)
51 var = tf.Variable(name=name, initial_value=arr, trainable=True)
52 self.non_embedding_params[name] = var
53
54 def _init_embedding_params(self, embeddings_pb):
55 for pb in embeddings_pb:
56 self.embedding_params[pb.name] = create_embedding_table(pb)
57
```
Path: `elasticdl/python/ps/servicer.py`
Content:
```
1 from google.protobuf import empty_pb2
2
3 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
4
5
6 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
7 """PS service implementation"""
8
9 def __init__(
10 self,
11 parameters,
12 grads_to_wait,
13 optimizer,
14 lr_staleness_modulation=False,
15 use_async=False,
16 ):
17 self._parameters = parameters
18 self._grads_to_wait = grads_to_wait
19 self._optimizer = optimizer
20 self._lr_staleness_modulation = lr_staleness_modulation
21 self._use_async = use_async
22 self._version = 0
23
24 def pull_variable(self, request, _):
25 # TODO: implement this RPC service
26 return elasticdl_pb2.PullVariableResponse()
27
28 def pull_embedding_vector(self, request, _):
29 # TODO: implement this RPC service
30 return elasticdl_pb2.Tensor()
31
32 def push_model(self, request, _):
33 # TODO: implement this RPC service
34 return empty_pb2.Empty()
35
36 def push_gradient(self, request, _):
37 # TODO: implement this RPC service
38 return elasticdl_pb2.PushGradientResponse()
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py
--- a/elasticdl/python/ps/parameters.py
+++ b/elasticdl/python/ps/parameters.py
@@ -18,6 +18,7 @@
"""
def __init__(self):
+ self.version = 0
self.init_status = False
self.non_embedding_params = {}
self.embedding_params = {}
@@ -42,6 +43,7 @@
embeddings_pb = model_pb.embedding_table_info
self._init_non_embedding_params(tensors_pb)
self._init_embedding_params(embeddings_pb)
+ self.version = model_pb.version
self.init_status = True
def _init_non_embedding_params(self, tensors_pb):
diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -1,3 +1,5 @@
+import threading
+
from google.protobuf import empty_pb2
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
@@ -20,6 +22,7 @@
self._lr_staleness_modulation = lr_staleness_modulation
self._use_async = use_async
self._version = 0
+ self._lock = threading.Lock()
def pull_variable(self, request, _):
# TODO: implement this RPC service
@@ -30,7 +33,8 @@
return elasticdl_pb2.Tensor()
def push_model(self, request, _):
- # TODO: implement this RPC service
+ with self._lock:
+ self._parameters.init_from_model_pb(request)
return empty_pb2.Empty()
def push_gradient(self, request, _):
|
{"golden_diff": "diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py\n--- a/elasticdl/python/ps/parameters.py\n+++ b/elasticdl/python/ps/parameters.py\n@@ -18,6 +18,7 @@\n \"\"\"\n \n def __init__(self):\n+ self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n@@ -42,6 +43,7 @@\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n+ self.version = model_pb.version\n self.init_status = True\n \n def _init_non_embedding_params(self, tensors_pb):\ndiff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -1,3 +1,5 @@\n+import threading\n+\n from google.protobuf import empty_pb2\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n@@ -20,6 +22,7 @@\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n+ self._lock = threading.Lock()\n \n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n@@ -30,7 +33,8 @@\n return elasticdl_pb2.Tensor()\n \n def push_model(self, request, _):\n- # TODO: implement this RPC service\n+ with self._lock:\n+ self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n \n def push_gradient(self, request, _):\n", "issue": "[PS-1] RPC service `push_model` implementation\n\n", "before_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.ndarray import tensor_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. 
We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "from google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n\n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PullVariableResponse()\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.Tensor()\n\n def push_model(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}], "after_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.ndarray import tensor_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. 
We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.version = model_pb.version\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "import threading\n\nfrom google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n self._lock = threading.Lock()\n\n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PullVariableResponse()\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.Tensor()\n\n def push_model(self, request, _):\n with self._lock:\n self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]}
| 1,160 | 413 |
gh_patches_debug_6
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1273
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PEP257 docstrings for file "./docs/__init__.py"
Cover `./docs/__init__.py` file with docstrings and follow [PEP257](https://www.python.org/dev/peps/pep-0257/). We use [pydocstyle](https://pypi.org/project/pydocstyle/) for validation.
Current validation log:
```
./docs/__init__.py:1 at module level:
D104: Missing docstring in public package
```
Subtask for #742
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/__init__.py b/docs/__init__.py
--- a/docs/__init__.py
+++ b/docs/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+"""Main package for docs."""
|
{"golden_diff": "diff --git a/docs/__init__.py b/docs/__init__.py\n--- a/docs/__init__.py\n+++ b/docs/__init__.py\n@@ -0,0 +1,3 @@\n+# -*- coding: utf-8 -*-\n+\n+\"\"\"Main package for docs.\"\"\"\n", "issue": "PEP257 docstrings for file \"./docs/__init__.py\"\nCover `./docs/__init__.py` file with docstrings and follow [PEP257](https://www.python.org/dev/peps/pep-0257/). We use [pydocstyle](https://pypi.org/project/pydocstyle/) for validation.\r\n\r\nCurrent validation log:\r\n\r\n```\r\n./docs/__init__.py:1 at module level:\r\n D104: Missing docstring in public package\r\n```\r\n\r\nSubtask for #742 \n", "before_files": [{"content": "", "path": "docs/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Main package for docs.\"\"\"\n", "path": "docs/__init__.py"}]}
| 376 | 58 |
gh_patches_debug_17994
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-6938
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Anthropologie spider produces transposed coordinates
https://www.alltheplaces.xyz/map/#7.69/-75.171/39.95

The cause is the upstream data:
https://www.anthropologie.com/stores/rittenhouse-square-philadelphia

It might be worth doing any of the following:
- Suspend the lat/long from the parser for now
- Contact the company (I'll probably do that shortly) about the bug
- Any kind of high level validations that can check the expected bounds for a scraper, vs the results?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/anthropologie.py`
Content:
```
1 from scrapy.spiders import SitemapSpider
2
3 from locations.structured_data_spider import StructuredDataSpider
4
5
6 class AnthropologieSpider(SitemapSpider, StructuredDataSpider):
7 name = "anthropologie"
8 item_attributes = {"brand": "Anthropologie", "brand_wikidata": "Q4773903"}
9 allowed_domains = ["anthropologie.com"]
10 sitemap_urls = ["https://www.anthropologie.com/store_sitemap.xml"]
11 sitemap_rules = [("/stores/", "parse_sd")]
12 requires_proxy = True
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/anthropologie.py b/locations/spiders/anthropologie.py
--- a/locations/spiders/anthropologie.py
+++ b/locations/spiders/anthropologie.py
@@ -1,5 +1,6 @@
from scrapy.spiders import SitemapSpider
+from locations.items import set_closed
from locations.structured_data_spider import StructuredDataSpider
@@ -10,3 +11,17 @@
sitemap_urls = ["https://www.anthropologie.com/store_sitemap.xml"]
sitemap_rules = [("/stores/", "parse_sd")]
requires_proxy = True
+
+ def pre_process_data(self, ld_data, **kwargs):
+ ld_data["geo"]["latitude"], ld_data["geo"]["longitude"] = (
+ ld_data["geo"]["longitude"],
+ ld_data["geo"]["latitude"],
+ )
+
+ def post_process_item(self, item, response, ld_data, **kwargs):
+ item["branch"] = item.pop("name").removeprefix(" - Anthropologie Store")
+
+ if item["branch"].startswith("Closed - ") or item["branch"].endswith(" - Closed"):
+ set_closed(item)
+
+ yield item
|
{"golden_diff": "diff --git a/locations/spiders/anthropologie.py b/locations/spiders/anthropologie.py\n--- a/locations/spiders/anthropologie.py\n+++ b/locations/spiders/anthropologie.py\n@@ -1,5 +1,6 @@\n from scrapy.spiders import SitemapSpider\n \n+from locations.items import set_closed\n from locations.structured_data_spider import StructuredDataSpider\n \n \n@@ -10,3 +11,17 @@\n sitemap_urls = [\"https://www.anthropologie.com/store_sitemap.xml\"]\n sitemap_rules = [(\"/stores/\", \"parse_sd\")]\n requires_proxy = True\n+\n+ def pre_process_data(self, ld_data, **kwargs):\n+ ld_data[\"geo\"][\"latitude\"], ld_data[\"geo\"][\"longitude\"] = (\n+ ld_data[\"geo\"][\"longitude\"],\n+ ld_data[\"geo\"][\"latitude\"],\n+ )\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ item[\"branch\"] = item.pop(\"name\").removeprefix(\" - Anthropologie Store\")\n+\n+ if item[\"branch\"].startswith(\"Closed - \") or item[\"branch\"].endswith(\" - Closed\"):\n+ set_closed(item)\n+\n+ yield item\n", "issue": "Anthropologie spider produces transposed coordinates\nhttps://www.alltheplaces.xyz/map/#7.69/-75.171/39.95\r\n\r\n\r\n\r\nThe cause is the upstream data:\r\n\r\nhttps://www.anthropologie.com/stores/rittenhouse-square-philadelphia\r\n\r\n\r\nIt might be worth doing any of the following:\r\n\r\n- Suspend the lat/long from the parser for now\r\n- Contact the company (I'll probably do that shortly) about the bug\r\n- Any kind of high level validations that can check the expected bounds for a scraper, vs the results?\r\n\r\n\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass AnthropologieSpider(SitemapSpider, StructuredDataSpider):\n name = \"anthropologie\"\n item_attributes = {\"brand\": \"Anthropologie\", \"brand_wikidata\": \"Q4773903\"}\n allowed_domains = [\"anthropologie.com\"]\n sitemap_urls = [\"https://www.anthropologie.com/store_sitemap.xml\"]\n sitemap_rules = [(\"/stores/\", \"parse_sd\")]\n requires_proxy = True\n", "path": "locations/spiders/anthropologie.py"}], "after_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.items import set_closed\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass AnthropologieSpider(SitemapSpider, StructuredDataSpider):\n name = \"anthropologie\"\n item_attributes = {\"brand\": \"Anthropologie\", \"brand_wikidata\": \"Q4773903\"}\n allowed_domains = [\"anthropologie.com\"]\n sitemap_urls = [\"https://www.anthropologie.com/store_sitemap.xml\"]\n sitemap_rules = [(\"/stores/\", \"parse_sd\")]\n requires_proxy = True\n\n def pre_process_data(self, ld_data, **kwargs):\n ld_data[\"geo\"][\"latitude\"], ld_data[\"geo\"][\"longitude\"] = (\n ld_data[\"geo\"][\"longitude\"],\n ld_data[\"geo\"][\"latitude\"],\n )\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n item[\"branch\"] = item.pop(\"name\").removeprefix(\" - Anthropologie Store\")\n\n if item[\"branch\"].startswith(\"Closed - \") or item[\"branch\"].endswith(\" - Closed\"):\n set_closed(item)\n\n yield item\n", "path": "locations/spiders/anthropologie.py"}]}
| 630 | 268 |
gh_patches_debug_25775
|
rasdani/github-patches
|
git_diff
|
apache__tvm-2759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TEST][FLAKY] test_dlpack
Both #2749 and #2353 encountered seg fault error at test_dlpack.
http://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2749/2/pipeline
http://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2353/48/pipeline
cc @eqy , could you help look at this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/tvm/_ffi/_ctypes/ndarray.py`
Content:
```
1 # pylint: disable=invalid-name
2 """Runtime NDArray api"""
3 from __future__ import absolute_import
4
5 import ctypes
6 from ..base import _LIB, check_call, c_str
7 from ..runtime_ctypes import TVMArrayHandle, TVMNDArrayContainerHandle
8 from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle
9
10
11 TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
12 _c_str_dltensor = c_str('dltensor')
13 _c_str_used_dltensor = c_str('used_dltensor')
14
15
16 # used for PyCapsule manipulation
17 if hasattr(ctypes, 'pythonapi'):
18 ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p
19 ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
20 ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
21
22
23 def _from_dlpack(dltensor):
24 dltensor = ctypes.py_object(dltensor)
25 if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
26 ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
27 handle = TVMArrayHandle()
28 check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
29 ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
30 ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
31 return _make_array(handle, False, False)
32 raise ValueError("Expect a dltensor field, PyCapsule can only be consumed once")
33
34
35 def _dlpack_deleter(pycapsule):
36 pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
37 if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
38 ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
39 _LIB.TVMDLManagedTensorCallDeleter(ptr)
40 ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
41
42 _c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)
43
44
45 class NDArrayBase(object):
46 """A simple Device/CPU Array object in runtime."""
47 __slots__ = ["handle", "is_view"]
48 # pylint: disable=no-member
49 def __init__(self, handle, is_view=False):
50 """Initialize the function with handle
51
52 Parameters
53 ----------
54 handle : TVMArrayHandle
55 the handle to the underlying C++ TVMArray
56 """
57 self.handle = handle
58 self.is_view = is_view
59
60 def __del__(self):
61 if not self.is_view and _LIB:
62 check_call(_LIB.TVMArrayFree(self.handle))
63
64 @property
65 def _tvm_handle(self):
66 return ctypes.cast(self.handle, ctypes.c_void_p).value
67
68 def to_dlpack(self):
69 """Produce an array from a DLPack Tensor without copying memory
70
71 Returns
72 -------
73 dlpack : DLPack tensor view of the array data
74 """
75 handle = ctypes.c_void_p()
76 check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))
77 return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)
78
79
80 def _make_array(handle, is_view, is_container):
81 global _TVM_ND_CLS
82 handle = ctypes.cast(handle, TVMArrayHandle)
83 fcreate = _CLASS_NDARRAY
84 if is_container and _TVM_ND_CLS:
85 array_type_info = ctypes.cast(handle, TVMNDArrayContainerHandle).array_type_info.value
86 if array_type_info > 0:
87 fcreate = _TVM_ND_CLS[array_type_info]
88 return fcreate(handle, is_view)
89
90 _TVM_COMPATS = ()
91
92 def _reg_extension(cls, fcreate):
93 global _TVM_COMPATS
94 _TVM_COMPATS += (cls,)
95 if fcreate:
96 fret = lambda x: fcreate(_return_handle(x))
97 RETURN_SWITCH[cls._tvm_tcode] = fret
98 C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)
99
100 _TVM_ND_CLS = {}
101
102 def _reg_ndarray(cls, fcreate):
103 global _TVM_ND_CLS
104 _TVM_ND_CLS[cls._array_type_code] = fcreate
105
106 _CLASS_NDARRAY = None
107
108 def _set_class_ndarray(cls):
109 global _CLASS_NDARRAY
110 _CLASS_NDARRAY = cls
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/tvm/_ffi/_ctypes/ndarray.py b/python/tvm/_ffi/_ctypes/ndarray.py
--- a/python/tvm/_ffi/_ctypes/ndarray.py
+++ b/python/tvm/_ffi/_ctypes/ndarray.py
@@ -24,6 +24,8 @@
dltensor = ctypes.py_object(dltensor)
if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
+ # enforce type to make sure it works for all ctypes
+ ptr = ctypes.cast(ptr, ctypes.c_void_p)
handle = TVMArrayHandle()
check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
@@ -36,6 +38,8 @@
pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
+ # enforce type to make sure it works for all ctypes
+ ptr = ctypes.cast(ctypes.c_void_p, ptr)
_LIB.TVMDLManagedTensorCallDeleter(ptr)
ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
|
{"golden_diff": "diff --git a/python/tvm/_ffi/_ctypes/ndarray.py b/python/tvm/_ffi/_ctypes/ndarray.py\n--- a/python/tvm/_ffi/_ctypes/ndarray.py\n+++ b/python/tvm/_ffi/_ctypes/ndarray.py\n@@ -24,6 +24,8 @@\n dltensor = ctypes.py_object(dltensor)\n if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)\n+ # enforce type to make sure it works for all ctypes\n+ ptr = ctypes.cast(ptr, ctypes.c_void_p)\n handle = TVMArrayHandle()\n check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))\n ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)\n@@ -36,6 +38,8 @@\n pycapsule = ctypes.cast(pycapsule, ctypes.py_object)\n if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)\n+ # enforce type to make sure it works for all ctypes\n+ ptr = ctypes.cast(ctypes.c_void_p, ptr)\n _LIB.TVMDLManagedTensorCallDeleter(ptr)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n", "issue": "[TEST][FLAKY] test_dlpack\nBoth #2749 and #2353 encountered seg fault error at test_dlpack.\r\nhttp://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2749/2/pipeline\r\nhttp://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2353/48/pipeline\r\n\r\ncc @eqy , could you help look at this?\n", "before_files": [{"content": "# pylint: disable=invalid-name\n\"\"\"Runtime NDArray api\"\"\"\nfrom __future__ import absolute_import\n\nimport ctypes\nfrom ..base import _LIB, check_call, c_str\nfrom ..runtime_ctypes import TVMArrayHandle, TVMNDArrayContainerHandle\nfrom .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle\n\n\nTVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)\n_c_str_dltensor = c_str('dltensor')\n_c_str_used_dltensor = c_str('used_dltensor')\n\n\n# used for PyCapsule manipulation\nif hasattr(ctypes, 'pythonapi'):\n ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p\n ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p\n ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object\n\n\ndef _from_dlpack(dltensor):\n dltensor = ctypes.py_object(dltensor)\n if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)\n handle = TVMArrayHandle()\n check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))\n ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n return _make_array(handle, False, False)\n raise ValueError(\"Expect a dltensor field, PyCapsule can only be consumed once\")\n\n\ndef _dlpack_deleter(pycapsule):\n pycapsule = ctypes.cast(pycapsule, ctypes.py_object)\n if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)\n _LIB.TVMDLManagedTensorCallDeleter(ptr)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n\n_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)\n\n\nclass NDArrayBase(object):\n \"\"\"A simple Device/CPU Array object in runtime.\"\"\"\n __slots__ = [\"handle\", \"is_view\"]\n # pylint: disable=no-member\n def __init__(self, handle, is_view=False):\n \"\"\"Initialize the function with handle\n\n Parameters\n ----------\n handle : TVMArrayHandle\n the handle to the underlying C++ TVMArray\n \"\"\"\n self.handle = 
handle\n self.is_view = is_view\n\n def __del__(self):\n if not self.is_view and _LIB:\n check_call(_LIB.TVMArrayFree(self.handle))\n\n @property\n def _tvm_handle(self):\n return ctypes.cast(self.handle, ctypes.c_void_p).value\n\n def to_dlpack(self):\n \"\"\"Produce an array from a DLPack Tensor without copying memory\n\n Returns\n -------\n dlpack : DLPack tensor view of the array data\n \"\"\"\n handle = ctypes.c_void_p()\n check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))\n return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)\n\n\ndef _make_array(handle, is_view, is_container):\n global _TVM_ND_CLS\n handle = ctypes.cast(handle, TVMArrayHandle)\n fcreate = _CLASS_NDARRAY\n if is_container and _TVM_ND_CLS:\n array_type_info = ctypes.cast(handle, TVMNDArrayContainerHandle).array_type_info.value\n if array_type_info > 0:\n fcreate = _TVM_ND_CLS[array_type_info]\n return fcreate(handle, is_view)\n\n_TVM_COMPATS = ()\n\ndef _reg_extension(cls, fcreate):\n global _TVM_COMPATS\n _TVM_COMPATS += (cls,)\n if fcreate:\n fret = lambda x: fcreate(_return_handle(x))\n RETURN_SWITCH[cls._tvm_tcode] = fret\n C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)\n\n_TVM_ND_CLS = {}\n\ndef _reg_ndarray(cls, fcreate):\n global _TVM_ND_CLS\n _TVM_ND_CLS[cls._array_type_code] = fcreate\n\n_CLASS_NDARRAY = None\n\ndef _set_class_ndarray(cls):\n global _CLASS_NDARRAY\n _CLASS_NDARRAY = cls\n", "path": "python/tvm/_ffi/_ctypes/ndarray.py"}], "after_files": [{"content": "# pylint: disable=invalid-name\n\"\"\"Runtime NDArray api\"\"\"\nfrom __future__ import absolute_import\n\nimport ctypes\nfrom ..base import _LIB, check_call, c_str\nfrom ..runtime_ctypes import TVMArrayHandle, TVMNDArrayContainerHandle\nfrom .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle\n\n\nTVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)\n_c_str_dltensor = c_str('dltensor')\n_c_str_used_dltensor = c_str('used_dltensor')\n\n\n# used for PyCapsule manipulation\nif hasattr(ctypes, 'pythonapi'):\n ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p\n ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p\n ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object\n\n\ndef _from_dlpack(dltensor):\n dltensor = ctypes.py_object(dltensor)\n if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)\n # enforce type to make sure it works for all ctypes\n ptr = ctypes.cast(ptr, ctypes.c_void_p)\n handle = TVMArrayHandle()\n check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))\n ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n return _make_array(handle, False, False)\n raise ValueError(\"Expect a dltensor field, PyCapsule can only be consumed once\")\n\n\ndef _dlpack_deleter(pycapsule):\n pycapsule = ctypes.cast(pycapsule, ctypes.py_object)\n if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)\n # enforce type to make sure it works for all ctypes\n ptr = ctypes.cast(ctypes.c_void_p, ptr)\n _LIB.TVMDLManagedTensorCallDeleter(ptr)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n\n_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)\n\n\nclass NDArrayBase(object):\n \"\"\"A simple Device/CPU Array object 
in runtime.\"\"\"\n __slots__ = [\"handle\", \"is_view\"]\n # pylint: disable=no-member\n def __init__(self, handle, is_view=False):\n \"\"\"Initialize the function with handle\n\n Parameters\n ----------\n handle : TVMArrayHandle\n the handle to the underlying C++ TVMArray\n \"\"\"\n self.handle = handle\n self.is_view = is_view\n\n def __del__(self):\n if not self.is_view and _LIB:\n check_call(_LIB.TVMArrayFree(self.handle))\n\n @property\n def _tvm_handle(self):\n return ctypes.cast(self.handle, ctypes.c_void_p).value\n\n def to_dlpack(self):\n \"\"\"Produce an array from a DLPack Tensor without copying memory\n\n Returns\n -------\n dlpack : DLPack tensor view of the array data\n \"\"\"\n handle = ctypes.c_void_p()\n check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))\n return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)\n\n\ndef _make_array(handle, is_view, is_container):\n global _TVM_ND_CLS\n handle = ctypes.cast(handle, TVMArrayHandle)\n fcreate = _CLASS_NDARRAY\n if is_container and _TVM_ND_CLS:\n array_type_info = ctypes.cast(handle, TVMNDArrayContainerHandle).array_type_info.value\n if array_type_info > 0:\n fcreate = _TVM_ND_CLS[array_type_info]\n return fcreate(handle, is_view)\n\n_TVM_COMPATS = ()\n\ndef _reg_extension(cls, fcreate):\n global _TVM_COMPATS\n _TVM_COMPATS += (cls,)\n if fcreate:\n fret = lambda x: fcreate(_return_handle(x))\n RETURN_SWITCH[cls._tvm_tcode] = fret\n C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)\n\n_TVM_ND_CLS = {}\n\ndef _reg_ndarray(cls, fcreate):\n global _TVM_ND_CLS\n _TVM_ND_CLS[cls._array_type_code] = fcreate\n\n_CLASS_NDARRAY = None\n\ndef _set_class_ndarray(cls):\n global _CLASS_NDARRAY\n _CLASS_NDARRAY = cls\n", "path": "python/tvm/_ffi/_ctypes/ndarray.py"}]}
| 1,663 | 347 |