| Column | Type | Size (min-max) |
|---|---|---|
| `problem_id` | string | 18-22 chars |
| `source` | string | 1 distinct value |
| `task_type` | string | 1 distinct value |
| `in_source_id` | string | 13-58 chars |
| `prompt` | string | 1.1k-25.4k chars |
| `golden_diff` | string | 145-5.13k chars |
| `verification_info` | string | 582-39.1k chars |
| `num_tokens` | int64 | 271-4.1k |
| `num_tokens_diff` | int64 | 47-1.02k |
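The records below are shown inline because the long text cells (`prompt`, `golden_diff`, `verification_info`) do not fit a wide table. To work with the data programmatically, here is a minimal sketch using the Hugging Face `datasets` library; the dataset ID is assumed from the `source` column and the split name is a guess, so both may need adjusting:

```python
from datasets import load_dataset

# Dataset ID assumed from the `source` column; the split name is a guess.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)  # problem_id, source, task_type, in_source_id, prompt, ...
row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])       # issue text plus the relevant file segments
print(row["golden_diff"][:300])  # reference patch in git diff format
```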
problem_id: gh_patches_debug_3617 | source: rasdani/github-patches | task_type: git_diff | in_source_id: pandas-dev__pandas-8758

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pandas.core.groupby.SeriesGroupBy.hist doesn't take kwargs
I'd like to pass certain kwargs to the hist function of a SeriesGroupBy object (alpha, legend, etc). The plot function provides more kwargs (e.g. legend, but not alpha) but no **kwargs to forward to matplotlib. The documentation (viewed with ? in ipython) suggests that there should be a kwds argument, but in reality this is not the case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/util/decorators.py`
Content:
```
1 from pandas.compat import StringIO, callable
2 from pandas.lib import cache_readonly
3 import sys
4 import warnings
5 from functools import wraps
6
7
8 def deprecate(name, alternative, alt_name=None):
9 alt_name = alt_name or alternative.__name__
10
11 def wrapper(*args, **kwargs):
12 warnings.warn("%s is deprecated. Use %s instead" % (name, alt_name),
13 FutureWarning)
14 return alternative(*args, **kwargs)
15 return wrapper
16
17
18 def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):
19 """Decorator to deprecate a keyword argument of a function
20
21 Parameters
22 ----------
23 old_arg_name : str
24 Name of argument in function to deprecate
25 new_arg_name : str
26 Name of prefered argument in function
27 mapping : dict or callable
28 If mapping is present, use it to translate old arguments to
29 new arguments. A callable must do its own value checking;
30 values not found in a dict will be forwarded unchanged.
31
32 Examples
33 --------
34 The following deprecates 'cols', using 'columns' instead
35
36 >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
37 ... def f(columns=''):
38 ... print(columns)
39 ...
40 >>> f(columns='should work ok')
41 should work ok
42 >>> f(cols='should raise warning')
43 FutureWarning: cols is deprecated, use columns instead
44 warnings.warn(msg, FutureWarning)
45 should raise warning
46 >>> f(cols='should error', columns="can't pass do both")
47 TypeError: Can only specify 'cols' or 'columns', not both
48 >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no', False})
49 ... def f(new=False):
50 ... print('yes!' if new else 'no!')
51 ...
52 >>> f(old='yes')
53 FutureWarning: old='yes' is deprecated, use new=True instead
54 warnings.warn(msg, FutureWarning)
55 yes!
56
57 """
58 if mapping is not None and not hasattr(mapping, 'get') and \
59 not callable(mapping):
60 raise TypeError("mapping from old to new argument values "
61 "must be dict or callable!")
62 def _deprecate_kwarg(func):
63 @wraps(func)
64 def wrapper(*args, **kwargs):
65 old_arg_value = kwargs.pop(old_arg_name, None)
66 if old_arg_value is not None:
67 if mapping is not None:
68 if hasattr(mapping, 'get'):
69 new_arg_value = mapping.get(old_arg_value,
70 old_arg_value)
71 else:
72 new_arg_value = mapping(old_arg_value)
73 msg = "the %s=%r keyword is deprecated, " \
74 "use %s=%r instead" % \
75 (old_arg_name, old_arg_value,
76 new_arg_name, new_arg_value)
77 else:
78 new_arg_value = old_arg_value
79 msg = "the '%s' keyword is deprecated, " \
80 "use '%s' instead" % (old_arg_name, new_arg_name)
81 warnings.warn(msg, FutureWarning)
82 if kwargs.get(new_arg_name, None) is not None:
83 msg = "Can only specify '%s' or '%s', not both" % \
84 (old_arg_name, new_arg_name)
85 raise TypeError(msg)
86 else:
87 kwargs[new_arg_name] = new_arg_value
88 return func(*args, **kwargs)
89 return wrapper
90 return _deprecate_kwarg
91
92
93 # Substitution and Appender are derived from matplotlib.docstring (1.1.0)
94 # module http://matplotlib.sourceforge.net/users/license.html
95
96
97 class Substitution(object):
98 """
99 A decorator to take a function's docstring and perform string
100 substitution on it.
101
102 This decorator should be robust even if func.__doc__ is None
103 (for example, if -OO was passed to the interpreter)
104
105 Usage: construct a docstring.Substitution with a sequence or
106 dictionary suitable for performing substitution; then
107 decorate a suitable function with the constructed object. e.g.
108
109 sub_author_name = Substitution(author='Jason')
110
111 @sub_author_name
112 def some_function(x):
113 "%(author)s wrote this function"
114
115 # note that some_function.__doc__ is now "Jason wrote this function"
116
117 One can also use positional arguments.
118
119 sub_first_last_names = Substitution('Edgar Allen', 'Poe')
120
121 @sub_first_last_names
122 def some_function(x):
123 "%s %s wrote the Raven"
124 """
125 def __init__(self, *args, **kwargs):
126 if (args and kwargs):
127 raise AssertionError( "Only positional or keyword args are allowed")
128
129 self.params = args or kwargs
130
131 def __call__(self, func):
132 func.__doc__ = func.__doc__ and func.__doc__ % self.params
133 return func
134
135 def update(self, *args, **kwargs):
136 "Assume self.params is a dict and update it with supplied args"
137 self.params.update(*args, **kwargs)
138
139 @classmethod
140 def from_params(cls, params):
141 """
142 In the case where the params is a mutable sequence (list or dictionary)
143 and it may change before this class is called, one may explicitly use a
144 reference to the params rather than using *args or **kwargs which will
145 copy the values and not reference them.
146 """
147 result = cls()
148 result.params = params
149 return result
150
151
152 class Appender(object):
153 """
154 A function decorator that will append an addendum to the docstring
155 of the target function.
156
157 This decorator should be robust even if func.__doc__ is None
158 (for example, if -OO was passed to the interpreter).
159
160 Usage: construct a docstring.Appender with a string to be joined to
161 the original docstring. An optional 'join' parameter may be supplied
162 which will be used to join the docstring and addendum. e.g.
163
164 add_copyright = Appender("Copyright (c) 2009", join='\n')
165
166 @add_copyright
167 def my_dog(has='fleas'):
168 "This docstring will have a copyright below"
169 pass
170 """
171 def __init__(self, addendum, join='', indents=0):
172 if indents > 0:
173 self.addendum = indent(addendum, indents=indents)
174 else:
175 self.addendum = addendum
176 self.join = join
177
178 def __call__(self, func):
179 func.__doc__ = func.__doc__ if func.__doc__ else ''
180 self.addendum = self.addendum if self.addendum else ''
181 docitems = [func.__doc__, self.addendum]
182 func.__doc__ = self.join.join(docitems)
183 return func
184
185
186 def indent(text, indents=1):
187 if not text or not isinstance(text, str):
188 return ''
189 jointext = ''.join(['\n'] + [' '] * indents)
190 return jointext.join(text.split('\n'))
191
192
193 def suppress_stdout(f):
194 def wrapped(*args, **kwargs):
195 try:
196 sys.stdout = StringIO()
197 f(*args, **kwargs)
198 finally:
199 sys.stdout = sys.__stdout__
200
201 return wrapped
202
203
204 class KnownFailureTest(Exception):
205 '''Raise this exception to mark a test as a known failing test.'''
206 pass
207
208
209 def knownfailureif(fail_condition, msg=None):
210 """
211 Make function raise KnownFailureTest exception if given condition is true.
212
213 If the condition is a callable, it is used at runtime to dynamically
214 make the decision. This is useful for tests that may require costly
215 imports, to delay the cost until the test suite is actually executed.
216
217 Parameters
218 ----------
219 fail_condition : bool or callable
220 Flag to determine whether to mark the decorated test as a known
221 failure (if True) or not (if False).
222 msg : str, optional
223 Message to give on raising a KnownFailureTest exception.
224 Default is None.
225
226 Returns
227 -------
228 decorator : function
229 Decorator, which, when applied to a function, causes SkipTest
230 to be raised when `skip_condition` is True, and the function
231 to be called normally otherwise.
232
233 Notes
234 -----
235 The decorator itself is decorated with the ``nose.tools.make_decorator``
236 function in order to transmit function name, and various other metadata.
237
238 """
239 if msg is None:
240 msg = 'Test skipped due to known failure'
241
242 # Allow for both boolean or callable known failure conditions.
243 if callable(fail_condition):
244 fail_val = fail_condition
245 else:
246 fail_val = lambda: fail_condition
247
248 def knownfail_decorator(f):
249 # Local import to avoid a hard nose dependency and only incur the
250 # import time overhead at actual test-time.
251 import nose
252
253 def knownfailer(*args, **kwargs):
254 if fail_val():
255 raise KnownFailureTest(msg)
256 else:
257 return f(*args, **kwargs)
258 return nose.tools.make_decorator(f)(knownfailer)
259
260 return knownfail_decorator
261
262 def make_signature(func) :
263 """
264 Returns a string repr of the arg list of a func call, with any defaults
265
266 Examples
267 --------
268
269 >>> def f(a,b,c=2) :
270 >>> return a*b*c
271 >>> print(_make_signature(f))
272 a,b,c=2
273 """
274 from inspect import getargspec
275 spec = getargspec(func)
276 if spec.defaults == None :
277 n_wo_defaults = len(spec.args)
278 defaults = ('',) * n_wo_defaults
279 else :
280 n_wo_defaults = len(spec.args) - len(spec.defaults)
281 defaults = ('',) * n_wo_defaults + spec.defaults
282 args = []
283 for i, (var, default) in enumerate(zip(spec.args, defaults)) :
284 args.append(var if default=='' else var+'='+repr(default))
285 return args, spec.args
286
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py
--- a/pandas/util/decorators.py
+++ b/pandas/util/decorators.py
@@ -282,5 +282,9 @@
args = []
for i, (var, default) in enumerate(zip(spec.args, defaults)) :
args.append(var if default=='' else var+'='+repr(default))
+ if spec.varargs:
+ args.append('*' + spec.varargs)
+ if spec.keywords:
+ args.append('**' + spec.keywords)
return args, spec.args
verification_info:
{"golden_diff": "diff --git a/pandas/util/decorators.py b/pandas/util/decorators.py\n--- a/pandas/util/decorators.py\n+++ b/pandas/util/decorators.py\n@@ -282,5 +282,9 @@\n args = []\n for i, (var, default) in enumerate(zip(spec.args, defaults)) :\n args.append(var if default=='' else var+'='+repr(default))\n+ if spec.varargs:\n+ args.append('*' + spec.varargs)\n+ if spec.keywords:\n+ args.append('**' + spec.keywords)\n return args, spec.args\n", "issue": "pandas.core.groupby.SeriesGroupBy.hist doesn't take kwargs\nI'd like to pass certain kwargs to the hist function of a SeriesGroupBy object (alpha, legend, etc). The plot function provides more kwargs (e.g. legend, but not alpha) but no **kwargs to forward to matplotlib. The documentation (viewed with ? in ipython) suggests that there should be a kwds argument, but in reality this is not the case.\n\n", "before_files": [{"content": "from pandas.compat import StringIO, callable\nfrom pandas.lib import cache_readonly\nimport sys\nimport warnings\nfrom functools import wraps\n\n\ndef deprecate(name, alternative, alt_name=None):\n alt_name = alt_name or alternative.__name__\n\n def wrapper(*args, **kwargs):\n warnings.warn(\"%s is deprecated. Use %s instead\" % (name, alt_name),\n FutureWarning)\n return alternative(*args, **kwargs)\n return wrapper\n\n\ndef deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):\n \"\"\"Decorator to deprecate a keyword argument of a function\n\n Parameters\n ----------\n old_arg_name : str\n Name of argument in function to deprecate\n new_arg_name : str\n Name of prefered argument in function\n mapping : dict or callable\n If mapping is present, use it to translate old arguments to\n new arguments. A callable must do its own value checking;\n values not found in a dict will be forwarded unchanged.\n\n Examples\n --------\n The following deprecates 'cols', using 'columns' instead\n\n >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')\n ... def f(columns=''):\n ... print(columns)\n ...\n >>> f(columns='should work ok')\n should work ok\n >>> f(cols='should raise warning')\n FutureWarning: cols is deprecated, use columns instead\n warnings.warn(msg, FutureWarning)\n should raise warning\n >>> f(cols='should error', columns=\"can't pass do both\")\n TypeError: Can only specify 'cols' or 'columns', not both\n >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no', False})\n ... def f(new=False):\n ... print('yes!' 
if new else 'no!')\n ...\n >>> f(old='yes')\n FutureWarning: old='yes' is deprecated, use new=True instead\n warnings.warn(msg, FutureWarning)\n yes!\n\n \"\"\"\n if mapping is not None and not hasattr(mapping, 'get') and \\\n not callable(mapping):\n raise TypeError(\"mapping from old to new argument values \"\n \"must be dict or callable!\")\n def _deprecate_kwarg(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n old_arg_value = kwargs.pop(old_arg_name, None)\n if old_arg_value is not None:\n if mapping is not None:\n if hasattr(mapping, 'get'):\n new_arg_value = mapping.get(old_arg_value,\n old_arg_value)\n else:\n new_arg_value = mapping(old_arg_value)\n msg = \"the %s=%r keyword is deprecated, \" \\\n \"use %s=%r instead\" % \\\n (old_arg_name, old_arg_value,\n new_arg_name, new_arg_value)\n else:\n new_arg_value = old_arg_value\n msg = \"the '%s' keyword is deprecated, \" \\\n \"use '%s' instead\" % (old_arg_name, new_arg_name)\n warnings.warn(msg, FutureWarning)\n if kwargs.get(new_arg_name, None) is not None:\n msg = \"Can only specify '%s' or '%s', not both\" % \\\n (old_arg_name, new_arg_name)\n raise TypeError(msg)\n else:\n kwargs[new_arg_name] = new_arg_value\n return func(*args, **kwargs)\n return wrapper\n return _deprecate_kwarg\n\n\n# Substitution and Appender are derived from matplotlib.docstring (1.1.0)\n# module http://matplotlib.sourceforge.net/users/license.html\n\n\nclass Substitution(object):\n \"\"\"\n A decorator to take a function's docstring and perform string\n substitution on it.\n\n This decorator should be robust even if func.__doc__ is None\n (for example, if -OO was passed to the interpreter)\n\n Usage: construct a docstring.Substitution with a sequence or\n dictionary suitable for performing substitution; then\n decorate a suitable function with the constructed object. e.g.\n\n sub_author_name = Substitution(author='Jason')\n\n @sub_author_name\n def some_function(x):\n \"%(author)s wrote this function\"\n\n # note that some_function.__doc__ is now \"Jason wrote this function\"\n\n One can also use positional arguments.\n\n sub_first_last_names = Substitution('Edgar Allen', 'Poe')\n\n @sub_first_last_names\n def some_function(x):\n \"%s %s wrote the Raven\"\n \"\"\"\n def __init__(self, *args, **kwargs):\n if (args and kwargs):\n raise AssertionError( \"Only positional or keyword args are allowed\")\n\n self.params = args or kwargs\n\n def __call__(self, func):\n func.__doc__ = func.__doc__ and func.__doc__ % self.params\n return func\n\n def update(self, *args, **kwargs):\n \"Assume self.params is a dict and update it with supplied args\"\n self.params.update(*args, **kwargs)\n\n @classmethod\n def from_params(cls, params):\n \"\"\"\n In the case where the params is a mutable sequence (list or dictionary)\n and it may change before this class is called, one may explicitly use a\n reference to the params rather than using *args or **kwargs which will\n copy the values and not reference them.\n \"\"\"\n result = cls()\n result.params = params\n return result\n\n\nclass Appender(object):\n \"\"\"\n A function decorator that will append an addendum to the docstring\n of the target function.\n\n This decorator should be robust even if func.__doc__ is None\n (for example, if -OO was passed to the interpreter).\n\n Usage: construct a docstring.Appender with a string to be joined to\n the original docstring. An optional 'join' parameter may be supplied\n which will be used to join the docstring and addendum. 
e.g.\n\n add_copyright = Appender(\"Copyright (c) 2009\", join='\\n')\n\n @add_copyright\n def my_dog(has='fleas'):\n \"This docstring will have a copyright below\"\n pass\n \"\"\"\n def __init__(self, addendum, join='', indents=0):\n if indents > 0:\n self.addendum = indent(addendum, indents=indents)\n else:\n self.addendum = addendum\n self.join = join\n\n def __call__(self, func):\n func.__doc__ = func.__doc__ if func.__doc__ else ''\n self.addendum = self.addendum if self.addendum else ''\n docitems = [func.__doc__, self.addendum]\n func.__doc__ = self.join.join(docitems)\n return func\n\n\ndef indent(text, indents=1):\n if not text or not isinstance(text, str):\n return ''\n jointext = ''.join(['\\n'] + [' '] * indents)\n return jointext.join(text.split('\\n'))\n\n\ndef suppress_stdout(f):\n def wrapped(*args, **kwargs):\n try:\n sys.stdout = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stdout = sys.__stdout__\n\n return wrapped\n\n\nclass KnownFailureTest(Exception):\n '''Raise this exception to mark a test as a known failing test.'''\n pass\n\n\ndef knownfailureif(fail_condition, msg=None):\n \"\"\"\n Make function raise KnownFailureTest exception if given condition is true.\n\n If the condition is a callable, it is used at runtime to dynamically\n make the decision. This is useful for tests that may require costly\n imports, to delay the cost until the test suite is actually executed.\n\n Parameters\n ----------\n fail_condition : bool or callable\n Flag to determine whether to mark the decorated test as a known\n failure (if True) or not (if False).\n msg : str, optional\n Message to give on raising a KnownFailureTest exception.\n Default is None.\n\n Returns\n -------\n decorator : function\n Decorator, which, when applied to a function, causes SkipTest\n to be raised when `skip_condition` is True, and the function\n to be called normally otherwise.\n\n Notes\n -----\n The decorator itself is decorated with the ``nose.tools.make_decorator``\n function in order to transmit function name, and various other metadata.\n\n \"\"\"\n if msg is None:\n msg = 'Test skipped due to known failure'\n\n # Allow for both boolean or callable known failure conditions.\n if callable(fail_condition):\n fail_val = fail_condition\n else:\n fail_val = lambda: fail_condition\n\n def knownfail_decorator(f):\n # Local import to avoid a hard nose dependency and only incur the\n # import time overhead at actual test-time.\n import nose\n\n def knownfailer(*args, **kwargs):\n if fail_val():\n raise KnownFailureTest(msg)\n else:\n return f(*args, **kwargs)\n return nose.tools.make_decorator(f)(knownfailer)\n\n return knownfail_decorator\n\ndef make_signature(func) :\n \"\"\"\n Returns a string repr of the arg list of a func call, with any defaults\n\n Examples\n --------\n\n >>> def f(a,b,c=2) :\n >>> return a*b*c\n >>> print(_make_signature(f))\n a,b,c=2\n \"\"\"\n from inspect import getargspec\n spec = getargspec(func)\n if spec.defaults == None :\n n_wo_defaults = len(spec.args)\n defaults = ('',) * n_wo_defaults\n else :\n n_wo_defaults = len(spec.args) - len(spec.defaults)\n defaults = ('',) * n_wo_defaults + spec.defaults\n args = []\n for i, (var, default) in enumerate(zip(spec.args, defaults)) :\n args.append(var if default=='' else var+'='+repr(default))\n return args, spec.args\n\n", "path": "pandas/util/decorators.py"}], "after_files": [{"content": "from pandas.compat import StringIO, callable\nfrom pandas.lib import cache_readonly\nimport sys\nimport warnings\nfrom functools import 
wraps\n\n\ndef deprecate(name, alternative, alt_name=None):\n alt_name = alt_name or alternative.__name__\n\n def wrapper(*args, **kwargs):\n warnings.warn(\"%s is deprecated. Use %s instead\" % (name, alt_name),\n FutureWarning)\n return alternative(*args, **kwargs)\n return wrapper\n\n\ndef deprecate_kwarg(old_arg_name, new_arg_name, mapping=None):\n \"\"\"Decorator to deprecate a keyword argument of a function\n\n Parameters\n ----------\n old_arg_name : str\n Name of argument in function to deprecate\n new_arg_name : str\n Name of prefered argument in function\n mapping : dict or callable\n If mapping is present, use it to translate old arguments to\n new arguments. A callable must do its own value checking;\n values not found in a dict will be forwarded unchanged.\n\n Examples\n --------\n The following deprecates 'cols', using 'columns' instead\n\n >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')\n ... def f(columns=''):\n ... print(columns)\n ...\n >>> f(columns='should work ok')\n should work ok\n >>> f(cols='should raise warning')\n FutureWarning: cols is deprecated, use columns instead\n warnings.warn(msg, FutureWarning)\n should raise warning\n >>> f(cols='should error', columns=\"can't pass do both\")\n TypeError: Can only specify 'cols' or 'columns', not both\n >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no', False})\n ... def f(new=False):\n ... print('yes!' if new else 'no!')\n ...\n >>> f(old='yes')\n FutureWarning: old='yes' is deprecated, use new=True instead\n warnings.warn(msg, FutureWarning)\n yes!\n\n \"\"\"\n if mapping is not None and not hasattr(mapping, 'get') and \\\n not callable(mapping):\n raise TypeError(\"mapping from old to new argument values \"\n \"must be dict or callable!\")\n def _deprecate_kwarg(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n old_arg_value = kwargs.pop(old_arg_name, None)\n if old_arg_value is not None:\n if mapping is not None:\n if hasattr(mapping, 'get'):\n new_arg_value = mapping.get(old_arg_value,\n old_arg_value)\n else:\n new_arg_value = mapping(old_arg_value)\n msg = \"the %s=%r keyword is deprecated, \" \\\n \"use %s=%r instead\" % \\\n (old_arg_name, old_arg_value,\n new_arg_name, new_arg_value)\n else:\n new_arg_value = old_arg_value\n msg = \"the '%s' keyword is deprecated, \" \\\n \"use '%s' instead\" % (old_arg_name, new_arg_name)\n warnings.warn(msg, FutureWarning)\n if kwargs.get(new_arg_name, None) is not None:\n msg = \"Can only specify '%s' or '%s', not both\" % \\\n (old_arg_name, new_arg_name)\n raise TypeError(msg)\n else:\n kwargs[new_arg_name] = new_arg_value\n return func(*args, **kwargs)\n return wrapper\n return _deprecate_kwarg\n\n\n# Substitution and Appender are derived from matplotlib.docstring (1.1.0)\n# module http://matplotlib.sourceforge.net/users/license.html\n\n\nclass Substitution(object):\n \"\"\"\n A decorator to take a function's docstring and perform string\n substitution on it.\n\n This decorator should be robust even if func.__doc__ is None\n (for example, if -OO was passed to the interpreter)\n\n Usage: construct a docstring.Substitution with a sequence or\n dictionary suitable for performing substitution; then\n decorate a suitable function with the constructed object. 
e.g.\n\n sub_author_name = Substitution(author='Jason')\n\n @sub_author_name\n def some_function(x):\n \"%(author)s wrote this function\"\n\n # note that some_function.__doc__ is now \"Jason wrote this function\"\n\n One can also use positional arguments.\n\n sub_first_last_names = Substitution('Edgar Allen', 'Poe')\n\n @sub_first_last_names\n def some_function(x):\n \"%s %s wrote the Raven\"\n \"\"\"\n def __init__(self, *args, **kwargs):\n if (args and kwargs):\n raise AssertionError( \"Only positional or keyword args are allowed\")\n\n self.params = args or kwargs\n\n def __call__(self, func):\n func.__doc__ = func.__doc__ and func.__doc__ % self.params\n return func\n\n def update(self, *args, **kwargs):\n \"Assume self.params is a dict and update it with supplied args\"\n self.params.update(*args, **kwargs)\n\n @classmethod\n def from_params(cls, params):\n \"\"\"\n In the case where the params is a mutable sequence (list or dictionary)\n and it may change before this class is called, one may explicitly use a\n reference to the params rather than using *args or **kwargs which will\n copy the values and not reference them.\n \"\"\"\n result = cls()\n result.params = params\n return result\n\n\nclass Appender(object):\n \"\"\"\n A function decorator that will append an addendum to the docstring\n of the target function.\n\n This decorator should be robust even if func.__doc__ is None\n (for example, if -OO was passed to the interpreter).\n\n Usage: construct a docstring.Appender with a string to be joined to\n the original docstring. An optional 'join' parameter may be supplied\n which will be used to join the docstring and addendum. e.g.\n\n add_copyright = Appender(\"Copyright (c) 2009\", join='\\n')\n\n @add_copyright\n def my_dog(has='fleas'):\n \"This docstring will have a copyright below\"\n pass\n \"\"\"\n def __init__(self, addendum, join='', indents=0):\n if indents > 0:\n self.addendum = indent(addendum, indents=indents)\n else:\n self.addendum = addendum\n self.join = join\n\n def __call__(self, func):\n func.__doc__ = func.__doc__ if func.__doc__ else ''\n self.addendum = self.addendum if self.addendum else ''\n docitems = [func.__doc__, self.addendum]\n func.__doc__ = self.join.join(docitems)\n return func\n\n\ndef indent(text, indents=1):\n if not text or not isinstance(text, str):\n return ''\n jointext = ''.join(['\\n'] + [' '] * indents)\n return jointext.join(text.split('\\n'))\n\n\ndef suppress_stdout(f):\n def wrapped(*args, **kwargs):\n try:\n sys.stdout = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stdout = sys.__stdout__\n\n return wrapped\n\n\nclass KnownFailureTest(Exception):\n '''Raise this exception to mark a test as a known failing test.'''\n pass\n\n\ndef knownfailureif(fail_condition, msg=None):\n \"\"\"\n Make function raise KnownFailureTest exception if given condition is true.\n\n If the condition is a callable, it is used at runtime to dynamically\n make the decision. 
This is useful for tests that may require costly\n imports, to delay the cost until the test suite is actually executed.\n\n Parameters\n ----------\n fail_condition : bool or callable\n Flag to determine whether to mark the decorated test as a known\n failure (if True) or not (if False).\n msg : str, optional\n Message to give on raising a KnownFailureTest exception.\n Default is None.\n\n Returns\n -------\n decorator : function\n Decorator, which, when applied to a function, causes SkipTest\n to be raised when `skip_condition` is True, and the function\n to be called normally otherwise.\n\n Notes\n -----\n The decorator itself is decorated with the ``nose.tools.make_decorator``\n function in order to transmit function name, and various other metadata.\n\n \"\"\"\n if msg is None:\n msg = 'Test skipped due to known failure'\n\n # Allow for both boolean or callable known failure conditions.\n if callable(fail_condition):\n fail_val = fail_condition\n else:\n fail_val = lambda: fail_condition\n\n def knownfail_decorator(f):\n # Local import to avoid a hard nose dependency and only incur the\n # import time overhead at actual test-time.\n import nose\n\n def knownfailer(*args, **kwargs):\n if fail_val():\n raise KnownFailureTest(msg)\n else:\n return f(*args, **kwargs)\n return nose.tools.make_decorator(f)(knownfailer)\n\n return knownfail_decorator\n\ndef make_signature(func) :\n \"\"\"\n Returns a string repr of the arg list of a func call, with any defaults\n\n Examples\n --------\n\n >>> def f(a,b,c=2) :\n >>> return a*b*c\n >>> print(_make_signature(f))\n a,b,c=2\n \"\"\"\n from inspect import getargspec\n spec = getargspec(func)\n if spec.defaults == None :\n n_wo_defaults = len(spec.args)\n defaults = ('',) * n_wo_defaults\n else :\n n_wo_defaults = len(spec.args) - len(spec.defaults)\n defaults = ('',) * n_wo_defaults + spec.defaults\n args = []\n for i, (var, default) in enumerate(zip(spec.args, defaults)) :\n args.append(var if default=='' else var+'='+repr(default))\n if spec.varargs:\n args.append('*' + spec.varargs)\n if spec.keywords:\n args.append('**' + spec.keywords)\n return args, spec.args\n\n", "path": "pandas/util/decorators.py"}]}
num_tokens: 3,279 | num_tokens_diff: 133
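Each `verification_info` cell is a single JSON string that bundles the reference patch together with the issue text and the file contents before and after the fix. A hedged sketch of decoding one cell, assuming only the keys visible in the record above (`golden_diff`, `issue`, `before_files`, `after_files`, with each file entry carrying `path` and `content`):

```python
import json

def decode_verification_info(cell: str) -> dict:
    """Parse a verification_info cell into its parts.

    Assumes the keys seen in the records above: golden_diff, issue,
    before_files, after_files (lists of {"path", "content"} dicts).
    """
    info = json.loads(cell)
    return {
        "golden_diff": info["golden_diff"],
        "issue": info["issue"],
        "before_paths": [f["path"] for f in info.get("before_files", [])],
        "after_paths": [f["path"] for f in info.get("after_files", [])],
    }
```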
problem_id: gh_patches_debug_15505 | source: rasdani/github-patches | task_type: git_diff | in_source_id: quantumlib__Cirq-1721

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Override of PhasedXPowGate causes determinant of gate to jump discontinuously
```
>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1.01, exponent=0.5)))
0.9999999999999998j
>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1., exponent=0.5)))
-0.9999999999999998j
>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=0.99, exponent=0.5)))
0.9999999999999998j
```
and the gates also change badly
```
>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1.01, exponent=0.5))
array([[ 0.5 +0.5j , -0.4840479 +0.51545866j],
[-0.51545866+0.4840479j , 0.5 +0.5j ]])
>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1, exponent=0.5))
array([[0.5-0.5j, 0.5+0.5j],
[0.5+0.5j, 0.5-0.5j]])
>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=0.99, exponent=0.5))
array([[ 0.5 +0.5j , -0.51545866+0.4840479j ],
[-0.4840479 +0.51545866j, 0.5 +0.5j ]])
```
These discontinuous jumps are likely coming from the __new__ override code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/ops/phased_x_gate.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """An `XPowGate` conjugated by `ZPowGate`s."""
16 from typing import Union, Sequence, Tuple, Optional, cast
17
18 import math
19 import numpy as np
20 import sympy
21
22 import cirq
23 from cirq import value, protocols
24 from cirq._compat import proper_repr
25 from cirq.ops import gate_features, raw_types, op_tree
26 from cirq.type_workarounds import NotImplementedType
27
28 @value.value_equality
29 class PhasedXPowGate(gate_features.SingleQubitGate):
30 """A gate equivalent to the circuit βββZ^-pβββX^tβββZ^pβββ."""
31
32 def __new__(cls,
33 *,
34 phase_exponent: Union[float, sympy.Symbol],
35 exponent: Union[float, sympy.Symbol] = 1.0,
36 global_shift: float = 0.0):
37 """Substitutes a raw X or raw Y if possible.
38
39 Args:
40 phase_exponent: The exponent on the Z gates conjugating the X gate.
41 exponent: The exponent on the X gate conjugated by Zs.
42 global_shift: How much to shift the operation's eigenvalues at
43 exponent=1.
44 """
45 p = value.canonicalize_half_turns(phase_exponent)
46 if p == 0:
47 return cirq.ops.common_gates.XPowGate(
48 exponent=exponent,
49 global_shift=global_shift)
50 if p == 0.5:
51 return cirq.ops.common_gates.YPowGate(
52 exponent=exponent,
53 global_shift=global_shift)
54 if p == 1 and not isinstance(exponent, sympy.Symbol):
55 return cirq.ops.common_gates.XPowGate(
56 exponent=-exponent,
57 global_shift=global_shift)
58 if p == -0.5 and not isinstance(exponent, sympy.Symbol):
59 return cirq.ops.common_gates.YPowGate(
60 exponent=-exponent,
61 global_shift=global_shift)
62 return super().__new__(cls)
63
64 def __init__(self,
65 *,
66 phase_exponent: Union[float, sympy.Symbol],
67 exponent: Union[float, sympy.Symbol] = 1.0,
68 global_shift: float = 0.0) -> None:
69 """
70 Args:
71 phase_exponent: The exponent on the Z gates conjugating the X gate.
72 exponent: The exponent on the X gate conjugated by Zs.
73 global_shift: How much to shift the operation's eigenvalues at
74 exponent=1.
75 """
76 self._phase_exponent = value.canonicalize_half_turns(phase_exponent)
77 self._exponent = exponent
78 self._global_shift = global_shift
79
80 def _qasm_(self,
81 args: protocols.QasmArgs,
82 qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:
83 if cirq.is_parameterized(self):
84 return None
85
86 args.validate_version('2.0')
87
88 e = cast(float, value.canonicalize_half_turns(self._exponent))
89 p = cast(float, self.phase_exponent)
90 epsilon = 10**-args.precision
91
92 if abs(e + 0.5) <= epsilon:
93 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n',
94 p + 0.5, -p - 0.5, qubits[0])
95
96 if abs(e - 0.5) <= epsilon:
97 return args.format('u2({0:half_turns}, {1:half_turns}) {2};\n',
98 p - 0.5, -p + 0.5, qubits[0])
99
100 return args.format(
101 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\n',
102 -e, p + 0.5, -p - 0.5, qubits[0])
103
104 def _decompose_(self, qubits: Sequence[raw_types.Qid]
105 ) -> op_tree.OP_TREE:
106 assert len(qubits) == 1
107 q = qubits[0]
108 z = cirq.Z(q)**self._phase_exponent
109 x = cirq.X(q)**self._exponent
110 if protocols.is_parameterized(z):
111 return NotImplemented
112 return z**-1, x, z
113
114 @property
115 def exponent(self) -> Union[float, sympy.Symbol]:
116 """The exponent on the central X gate conjugated by the Z gates."""
117 return self._exponent
118
119 @property
120 def phase_exponent(self) -> Union[float, sympy.Symbol]:
121 """The exponent on the Z gates conjugating the X gate."""
122 return self._phase_exponent
123
124 def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':
125 new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)
126 if new_exponent is NotImplemented:
127 return NotImplemented
128 return PhasedXPowGate(phase_exponent=self._phase_exponent,
129 exponent=new_exponent,
130 global_shift=self._global_shift)
131
132 def _trace_distance_bound_(self):
133 """See `cirq.SupportsTraceDistanceBound`."""
134 return protocols.trace_distance_bound(cirq.X**self._exponent)
135
136 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
137 """See `cirq.SupportsUnitary`."""
138 if self._is_parameterized_():
139 return NotImplemented
140 z = protocols.unitary(cirq.Z**self._phase_exponent)
141 x = protocols.unitary(cirq.X**self._exponent)
142 p = np.exp(1j * np.pi * self._global_shift * self._exponent)
143 return np.dot(np.dot(z, x), np.conj(z)) * p
144
145 def _pauli_expansion_(self) -> value.LinearDict[str]:
146 if self._is_parameterized_():
147 return NotImplemented
148 phase_angle = np.pi * self._phase_exponent / 2
149 angle = np.pi * self._exponent / 2
150 phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))
151 return value.LinearDict({
152 'I': phase * np.cos(angle),
153 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),
154 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),
155 })
156
157 def _is_parameterized_(self) -> bool:
158 """See `cirq.SupportsParameterization`."""
159 return (protocols.is_parameterized(self._exponent) or
160 protocols.is_parameterized(self._phase_exponent))
161
162 def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':
163 """See `cirq.SupportsParameterization`."""
164 return PhasedXPowGate(
165 phase_exponent=param_resolver.value_of(self._phase_exponent),
166 exponent=param_resolver.value_of(self._exponent),
167 global_shift=self._global_shift)
168
169 def _phase_by_(self, phase_turns, qubit_index):
170 """See `cirq.SupportsPhase`."""
171 assert qubit_index == 0
172 return PhasedXPowGate(
173 exponent=self._exponent,
174 phase_exponent=self._phase_exponent + phase_turns * 2,
175 global_shift=self._global_shift)
176
177 def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs
178 ) -> protocols.CircuitDiagramInfo:
179 """See `cirq.SupportsCircuitDiagramInfo`."""
180
181 if (isinstance(self.phase_exponent, sympy.Symbol) or
182 args.precision is None):
183 s = 'PhasedX({})'.format(self.phase_exponent)
184 else:
185 s = 'PhasedX({{:.{}}})'.format(args.precision).format(
186 self.phase_exponent)
187 return protocols.CircuitDiagramInfo(
188 wire_symbols=(s,),
189 exponent=value.canonicalize_half_turns(self._exponent))
190
191 def __str__(self):
192 info = protocols.circuit_diagram_info(self)
193 if info.exponent == 1:
194 return info.wire_symbols[0]
195 return '{}^{}'.format(info.wire_symbols[0], info.exponent)
196
197 def __repr__(self):
198 args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]
199 if self.exponent != 1:
200 args.append('exponent={}'.format(proper_repr(self.exponent)))
201 if self._global_shift != 0:
202 args.append('global_shift={!r}'.format(self._global_shift))
203 return 'cirq.PhasedXPowGate({})'.format(', '.join(args))
204
205 def _period(self):
206 exponents = [self._global_shift, 1 + self._global_shift]
207 real_periods = [abs(2/e) for e in exponents if e != 0]
208 int_periods = [int(np.round(e)) for e in real_periods]
209 if any(i != r for i, r in zip(real_periods, int_periods)):
210 return None
211 if len(int_periods) == 1:
212 return int_periods[0]
213 return int_periods[0] * int_periods[1] / math.gcd(*int_periods)
214
215 @property
216 def _canonical_exponent(self):
217 period = self._period()
218 if not period or isinstance(self._exponent, sympy.Symbol):
219 return self._exponent
220 else:
221 return self._exponent % period
222
223 def _value_equality_values_(self):
224 return self.phase_exponent, self._canonical_exponent, self._global_shift
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py
--- a/cirq/ops/phased_x_gate.py
+++ b/cirq/ops/phased_x_gate.py
@@ -48,17 +48,8 @@
exponent=exponent,
global_shift=global_shift)
if p == 0.5:
- return cirq.ops.common_gates.YPowGate(
- exponent=exponent,
- global_shift=global_shift)
- if p == 1 and not isinstance(exponent, sympy.Symbol):
- return cirq.ops.common_gates.XPowGate(
- exponent=-exponent,
- global_shift=global_shift)
- if p == -0.5 and not isinstance(exponent, sympy.Symbol):
- return cirq.ops.common_gates.YPowGate(
- exponent=-exponent,
- global_shift=global_shift)
+ return cirq.ops.common_gates.YPowGate(exponent=exponent,
+ global_shift=global_shift)
return super().__new__(cls)
def __init__(self,
verification_info:
{"golden_diff": "diff --git a/cirq/ops/phased_x_gate.py b/cirq/ops/phased_x_gate.py\n--- a/cirq/ops/phased_x_gate.py\n+++ b/cirq/ops/phased_x_gate.py\n@@ -48,17 +48,8 @@\n exponent=exponent,\n global_shift=global_shift)\n if p == 0.5:\n- return cirq.ops.common_gates.YPowGate(\n- exponent=exponent,\n- global_shift=global_shift)\n- if p == 1 and not isinstance(exponent, sympy.Symbol):\n- return cirq.ops.common_gates.XPowGate(\n- exponent=-exponent,\n- global_shift=global_shift)\n- if p == -0.5 and not isinstance(exponent, sympy.Symbol):\n- return cirq.ops.common_gates.YPowGate(\n- exponent=-exponent,\n- global_shift=global_shift)\n+ return cirq.ops.common_gates.YPowGate(exponent=exponent,\n+ global_shift=global_shift)\n return super().__new__(cls)\n \n def __init__(self,\n", "issue": "Override of PhasedXPowGate causes determinant of gate to jump discontinuously\n```\r\n>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1.01, exponent=0.5)))\r\n0.9999999999999998j\r\n>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1., exponent=0.5)))\r\n-0.9999999999999998j\r\n>>> np.linalg.det(cirq.unitary(cirq.PhasedXPowGate(phase_exponent=0.99, exponent=0.5)))\r\n0.9999999999999998j\r\n```\r\nand the gates also change badly\r\n```\r\n>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1.01, exponent=0.5))\r\narray([[ 0.5 +0.5j , -0.4840479 +0.51545866j],\r\n [-0.51545866+0.4840479j , 0.5 +0.5j ]])\r\n>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=1, exponent=0.5))\r\narray([[0.5-0.5j, 0.5+0.5j],\r\n [0.5+0.5j, 0.5-0.5j]])\r\n>>> cirq.unitary(cirq.PhasedXPowGate(phase_exponent=0.99, exponent=0.5))\r\narray([[ 0.5 +0.5j , -0.51545866+0.4840479j ],\r\n [-0.4840479 +0.51545866j, 0.5 +0.5j ]])\r\n```\r\n\r\nThese discontinuous jumps are likely coming from the __new__ override code.\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import gate_features, raw_types, op_tree\nfrom cirq.type_workarounds import NotImplementedType\n\[email protected]_equality\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __new__(cls,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0):\n \"\"\"Substitutes a raw X or raw Y if possible.\n\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n p = value.canonicalize_half_turns(phase_exponent)\n if p == 0:\n return 
cirq.ops.common_gates.XPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 0.5:\n return cirq.ops.common_gates.YPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 1 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.XPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n if p == -0.5 and not isinstance(exponent, sympy.Symbol):\n return cirq.ops.common_gates.YPowGate(\n exponent=-exponent,\n global_shift=global_shift)\n return super().__new__(cls)\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p + 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self):\n \"\"\"See `cirq.SupportsTraceDistanceBound`.\"\"\"\n return protocols.trace_distance_bound(cirq.X**self._exponent)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': 
-1j * phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (protocols.is_parameterized(self._exponent) or\n protocols.is_parameterized(self._phase_exponent))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def _period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n else:\n return self._exponent % period\n\n def _value_equality_values_(self):\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An `XPowGate` conjugated by `ZPowGate`s.\"\"\"\nfrom typing import Union, Sequence, Tuple, Optional, cast\n\nimport math\nimport numpy as np\nimport sympy\n\nimport cirq\nfrom cirq import value, protocols\nfrom cirq._compat import proper_repr\nfrom cirq.ops import 
gate_features, raw_types, op_tree\nfrom cirq.type_workarounds import NotImplementedType\n\[email protected]_equality\nclass PhasedXPowGate(gate_features.SingleQubitGate):\n \"\"\"A gate equivalent to the circuit \u2500\u2500\u2500Z^-p\u2500\u2500\u2500X^t\u2500\u2500\u2500Z^p\u2500\u2500\u2500.\"\"\"\n\n def __new__(cls,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0):\n \"\"\"Substitutes a raw X or raw Y if possible.\n\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n p = value.canonicalize_half_turns(phase_exponent)\n if p == 0:\n return cirq.ops.common_gates.XPowGate(\n exponent=exponent,\n global_shift=global_shift)\n if p == 0.5:\n return cirq.ops.common_gates.YPowGate(exponent=exponent,\n global_shift=global_shift)\n return super().__new__(cls)\n\n def __init__(self,\n *,\n phase_exponent: Union[float, sympy.Symbol],\n exponent: Union[float, sympy.Symbol] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"\n Args:\n phase_exponent: The exponent on the Z gates conjugating the X gate.\n exponent: The exponent on the X gate conjugated by Zs.\n global_shift: How much to shift the operation's eigenvalues at\n exponent=1.\n \"\"\"\n self._phase_exponent = value.canonicalize_half_turns(phase_exponent)\n self._exponent = exponent\n self._global_shift = global_shift\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.Qid, ...]) -> Optional[str]:\n if cirq.is_parameterized(self):\n return None\n\n args.validate_version('2.0')\n\n e = cast(float, value.canonicalize_half_turns(self._exponent))\n p = cast(float, self.phase_exponent)\n epsilon = 10**-args.precision\n\n if abs(e + 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p + 0.5, -p - 0.5, qubits[0])\n\n if abs(e - 0.5) <= epsilon:\n return args.format('u2({0:half_turns}, {1:half_turns}) {2};\\n',\n p - 0.5, -p + 0.5, qubits[0])\n\n return args.format(\n 'u3({0:half_turns}, {1:half_turns}, {2:half_turns}) {3};\\n',\n -e, p + 0.5, -p - 0.5, qubits[0])\n\n def _decompose_(self, qubits: Sequence[raw_types.Qid]\n ) -> op_tree.OP_TREE:\n assert len(qubits) == 1\n q = qubits[0]\n z = cirq.Z(q)**self._phase_exponent\n x = cirq.X(q)**self._exponent\n if protocols.is_parameterized(z):\n return NotImplemented\n return z**-1, x, z\n\n @property\n def exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the central X gate conjugated by the Z gates.\"\"\"\n return self._exponent\n\n @property\n def phase_exponent(self) -> Union[float, sympy.Symbol]:\n \"\"\"The exponent on the Z gates conjugating the X gate.\"\"\"\n return self._phase_exponent\n\n def __pow__(self, exponent: Union[float, sympy.Symbol]) -> 'PhasedXPowGate':\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return PhasedXPowGate(phase_exponent=self._phase_exponent,\n exponent=new_exponent,\n global_shift=self._global_shift)\n\n def _trace_distance_bound_(self):\n \"\"\"See `cirq.SupportsTraceDistanceBound`.\"\"\"\n return protocols.trace_distance_bound(cirq.X**self._exponent)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n \"\"\"See `cirq.SupportsUnitary`.\"\"\"\n if self._is_parameterized_():\n return NotImplemented\n z = 
protocols.unitary(cirq.Z**self._phase_exponent)\n x = protocols.unitary(cirq.X**self._exponent)\n p = np.exp(1j * np.pi * self._global_shift * self._exponent)\n return np.dot(np.dot(z, x), np.conj(z)) * p\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if self._is_parameterized_():\n return NotImplemented\n phase_angle = np.pi * self._phase_exponent / 2\n angle = np.pi * self._exponent / 2\n phase = 1j**(2 * self._exponent * (self._global_shift + 0.5))\n return value.LinearDict({\n 'I': phase * np.cos(angle),\n 'X': -1j * phase * np.sin(angle) * np.cos(2 * phase_angle),\n 'Y': -1j * phase * np.sin(angle) * np.sin(2 * phase_angle),\n })\n\n def _is_parameterized_(self) -> bool:\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return (protocols.is_parameterized(self._exponent) or\n protocols.is_parameterized(self._phase_exponent))\n\n def _resolve_parameters_(self, param_resolver) -> 'PhasedXPowGate':\n \"\"\"See `cirq.SupportsParameterization`.\"\"\"\n return PhasedXPowGate(\n phase_exponent=param_resolver.value_of(self._phase_exponent),\n exponent=param_resolver.value_of(self._exponent),\n global_shift=self._global_shift)\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n assert qubit_index == 0\n return PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=self._phase_exponent + phase_turns * 2,\n global_shift=self._global_shift)\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n \"\"\"See `cirq.SupportsCircuitDiagramInfo`.\"\"\"\n\n if (isinstance(self.phase_exponent, sympy.Symbol) or\n args.precision is None):\n s = 'PhasedX({})'.format(self.phase_exponent)\n else:\n s = 'PhasedX({{:.{}}})'.format(args.precision).format(\n self.phase_exponent)\n return protocols.CircuitDiagramInfo(\n wire_symbols=(s,),\n exponent=value.canonicalize_half_turns(self._exponent))\n\n def __str__(self):\n info = protocols.circuit_diagram_info(self)\n if info.exponent == 1:\n return info.wire_symbols[0]\n return '{}^{}'.format(info.wire_symbols[0], info.exponent)\n\n def __repr__(self):\n args = ['phase_exponent={}'.format(proper_repr(self.phase_exponent))]\n if self.exponent != 1:\n args.append('exponent={}'.format(proper_repr(self.exponent)))\n if self._global_shift != 0:\n args.append('global_shift={!r}'.format(self._global_shift))\n return 'cirq.PhasedXPowGate({})'.format(', '.join(args))\n\n def _period(self):\n exponents = [self._global_shift, 1 + self._global_shift]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n int_periods = [int(np.round(e)) for e in real_periods]\n if any(i != r for i, r in zip(real_periods, int_periods)):\n return None\n if len(int_periods) == 1:\n return int_periods[0]\n return int_periods[0] * int_periods[1] / math.gcd(*int_periods)\n\n @property\n def _canonical_exponent(self):\n period = self._period()\n if not period or isinstance(self._exponent, sympy.Symbol):\n return self._exponent\n else:\n return self._exponent % period\n\n def _value_equality_values_(self):\n return self.phase_exponent, self._canonical_exponent, self._global_shift\n", "path": "cirq/ops/phased_x_gate.py"}]}
| 3,547 | 245 |
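Note on the `cirq/ops/phased_x_gate.py` record that ends above: the file expresses `PhasedXPowGate` both as the conjugation Z^p · X^t · Z^-p (in `_unitary_` and `_decompose_`) and as an I/X/Y Pauli expansion (in `_pauli_expansion_`). The two agree, and a quick numpy-only sanity check of that agreement is sketched below; `global_shift = 0` is assumed and the helper names are mine, not cirq's.

```python
import numpy as np

# Check that the conjugation form used by _unitary_ (Z^p . X^t . Z^-p)
# reproduces the I/X/Y coefficients returned by _pauli_expansion_.
I2 = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]], dtype=complex)

def phased_x_unitary(p, t):
    z = np.diag([1, np.exp(1j * np.pi * p)])                      # unitary of Z**p
    x = np.exp(1j * np.pi * t / 2) * (np.cos(np.pi * t / 2) * I2
                                      - 1j * np.sin(np.pi * t / 2) * X)  # unitary of X**t
    return z @ x @ np.conj(z)                                     # same product as _unitary_

def pauli_expansion(p, t):
    phase = 1j ** t                                               # 1j**(2 * t * (0 + 0.5))
    angle, phase_angle = np.pi * t / 2, np.pi * p / 2
    return (phase * np.cos(angle) * I2
            - 1j * phase * np.sin(angle) * np.cos(2 * phase_angle) * X
            - 1j * phase * np.sin(angle) * np.sin(2 * phase_angle) * Y)

for p, t in [(0.25, 0.5), (0.3, 1.7), (-0.8, 0.1)]:
    assert np.allclose(phased_x_unitary(p, t), pauli_expansion(p, t))
```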
gh_patches_debug_26942
|
rasdani/github-patches
|
git_diff
|
PokemonGoF__PokemonGo-Bot-2461
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
should_throttle_retry crash
Everything working fine, bot is walking then crashes to this. Happen multiple times so far. Everything is up-to-date.
```
Traceback (most recent call last):
File "pokecli.py", line 445, in <module>
main()
File "pokecli.py", line 71, in main
bot.tick()
File "C:\Trainers\PokemonGo-Bot01\pokemongo_bot\__init__.py", line 92, in tick
if worker.work() == WorkerResult.RUNNING:
File "C:\Trainers\PokemonGo-Bot01\pokemongo_bot\cell_workers\incubate_eggs.py", line 41, in work
self._apply_incubators()
File "C:\Trainers\PokemonGo-Bot01\pokemongo_bot\cell_workers\incubate_eggs.py", line 52, in _apply_incubators
pokemon_id=egg["id"]
File "c:\pokemongo-bot\src\pgoapi\pgoapi\pgoapi.py", line 84, in function
return request.call()
File "C:\Trainers\PokemonGo-Bot01\pokemongo_bot\api_wrapper.py", line 112, in call
if should_throttle_retry:
UnboundLocalError: local variable 'should_throttle_retry' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pokemongo_bot/api_wrapper.py`
Content:
```
1 import time
2
3 from pgoapi.exceptions import (ServerSideRequestThrottlingException,
4 NotLoggedInException, ServerBusyOrOfflineException,
5 NoPlayerPositionSetException, EmptySubrequestChainException,
6 UnexpectedResponseException)
7 from pgoapi.pgoapi import PGoApi, PGoApiRequest, RpcApi
8 from pgoapi.protos.POGOProtos.Networking.Requests_pb2 import RequestType
9
10 import pokemongo_bot.logger as logger
11 from human_behaviour import sleep
12
13 class ApiWrapper(PGoApi):
14 def __init__(self):
15 PGoApi.__init__(self)
16 self.useVanillaRequest = False
17
18 def create_request(self):
19 RequestClass = ApiRequest
20 if self.useVanillaRequest:
21 RequestClass = PGoApiRequest
22
23 return RequestClass(
24 self._api_endpoint,
25 self._auth_provider,
26 self._position_lat,
27 self._position_lng,
28 self._position_alt
29 )
30
31 def login(self, *args):
32 # login needs base class "create_request"
33 self.useVanillaRequest = True
34 try:
35 ret_value = PGoApi.login(self, *args)
36 finally:
37 # cleanup code
38 self.useVanillaRequest = False
39 return ret_value
40
41
42 class ApiRequest(PGoApiRequest):
43 def __init__(self, *args):
44 PGoApiRequest.__init__(self, *args)
45 self.request_callers = []
46 self.last_api_request_time = None
47 self.requests_per_seconds = 2
48
49 def can_call(self):
50 if not self._req_method_list:
51 raise EmptySubrequestChainException()
52
53 if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):
54 raise NoPlayerPositionSetException()
55
56 if self._auth_provider is None or not self._auth_provider.is_login():
57 self.log.info('Not logged in')
58 raise NotLoggedInException()
59
60 return True
61
62 def _call(self):
63 return PGoApiRequest.call(self)
64
65 def _pop_request_callers(self):
66 r = self.request_callers
67 self.request_callers = []
68 return [i.upper() for i in r]
69
70 def is_response_valid(self, result, request_callers):
71 if not result or result is None or not isinstance(result, dict):
72 return False
73
74 if not 'responses' in result or not 'status_code' in result:
75 return False
76
77 if not isinstance(result['responses'], dict):
78 return False
79
80 # the response can still programatically be valid at this point
81 # but still be wrong. we need to check if the server did sent what we asked it
82 for request_caller in request_callers:
83 if not request_caller in result['responses']:
84 return False
85
86 return True
87
88 def call(self, max_retry=15):
89 request_callers = self._pop_request_callers()
90 if not self.can_call():
91 return False # currently this is never ran, exceptions are raised before
92
93 request_timestamp = None
94 api_req_method_list = self._req_method_list
95 result = None
96 try_cnt = 0
97 throttling_retry = 0
98 unexpected_response_retry = 0
99 while True:
100 request_timestamp = self.throttle_sleep()
101 # self._call internally clear this field, so save it
102 self._req_method_list = [req_method for req_method in api_req_method_list]
103 try:
104 result = self._call()
105 should_throttle_retry = False
106 should_unexpected_response_retry = False
107 except ServerSideRequestThrottlingException:
108 should_throttle_retry = True
109 except UnexpectedResponseException:
110 should_unexpected_response_retry = True
111
112 if should_throttle_retry:
113 throttling_retry += 1
114 if throttling_retry >= max_retry:
115 raise ServerSideRequestThrottlingException('Server throttled too many times')
116 sleep(1) # huge sleep ?
117 continue # skip response checking
118
119 if should_unexpected_response_retry:
120 unexpected_reponse_retry += 1
121 if unexpected_response_retry >= 5:
122 logger.log('Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.', 'red')
123 sleep(30)
124 else:
125 sleep(2)
126 continue
127
128 if not self.is_response_valid(result, request_callers):
129 try_cnt += 1
130 if try_cnt > 3:
131 logger.log('Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry), 'red')
132 if try_cnt >= max_retry:
133 raise ServerBusyOrOfflineException()
134 sleep(1)
135 else:
136 break
137
138 self.last_api_request_time = request_timestamp
139 return result
140
141 def __getattr__(self, func):
142 if func.upper() in RequestType.keys():
143 self.request_callers.append(func)
144 return PGoApiRequest.__getattr__(self, func)
145
146 def throttle_sleep(self):
147 now_milliseconds = time.time() * 1000
148 required_delay_between_requests = 1000 / self.requests_per_seconds
149
150 difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)
151
152 if self.last_api_request_time != None and difference < required_delay_between_requests:
153 sleep_time = required_delay_between_requests - difference
154 time.sleep(sleep_time / 1000)
155
156 return now_milliseconds
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pokemongo_bot/api_wrapper.py b/pokemongo_bot/api_wrapper.py
--- a/pokemongo_bot/api_wrapper.py
+++ b/pokemongo_bot/api_wrapper.py
@@ -100,10 +100,10 @@
request_timestamp = self.throttle_sleep()
# self._call internally clear this field, so save it
self._req_method_list = [req_method for req_method in api_req_method_list]
+ should_throttle_retry = False
+ should_unexpected_response_retry = False
try:
result = self._call()
- should_throttle_retry = False
- should_unexpected_response_retry = False
except ServerSideRequestThrottlingException:
should_throttle_retry = True
except UnexpectedResponseException:
@@ -117,7 +117,7 @@
continue # skip response checking
if should_unexpected_response_retry:
- unexpected_reponse_retry += 1
+ unexpected_response_retry += 1
if unexpected_response_retry >= 5:
logger.log('Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.', 'red')
sleep(30)
|
{"golden_diff": "diff --git a/pokemongo_bot/api_wrapper.py b/pokemongo_bot/api_wrapper.py\n--- a/pokemongo_bot/api_wrapper.py\n+++ b/pokemongo_bot/api_wrapper.py\n@@ -100,10 +100,10 @@\n request_timestamp = self.throttle_sleep()\n # self._call internally clear this field, so save it\n self._req_method_list = [req_method for req_method in api_req_method_list]\n+ should_throttle_retry = False\n+ should_unexpected_response_retry = False\n try:\n result = self._call()\n- should_throttle_retry = False\n- should_unexpected_response_retry = False\n except ServerSideRequestThrottlingException:\n should_throttle_retry = True\n except UnexpectedResponseException:\n@@ -117,7 +117,7 @@\n continue # skip response checking\n \n if should_unexpected_response_retry:\n- unexpected_reponse_retry += 1\n+ unexpected_response_retry += 1\n if unexpected_response_retry >= 5:\n logger.log('Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.', 'red')\n sleep(30)\n", "issue": "should_throttle_retry crash\nEverything working fine, bot is walking then crashes to this. Happen multiple times so far. Everything is up-to-date.\n\n```\nTraceback (most recent call last):\n File \"pokecli.py\", line 445, in <module>\n main()\n File \"pokecli.py\", line 71, in main\n bot.tick()\n File \"C:\\Trainers\\PokemonGo-Bot01\\pokemongo_bot\\__init__.py\", line 92, in tick\n if worker.work() == WorkerResult.RUNNING:\n File \"C:\\Trainers\\PokemonGo-Bot01\\pokemongo_bot\\cell_workers\\incubate_eggs.py\", line 41, in work\n self._apply_incubators()\n File \"C:\\Trainers\\PokemonGo-Bot01\\pokemongo_bot\\cell_workers\\incubate_eggs.py\", line 52, in _apply_incubators\n pokemon_id=egg[\"id\"]\n File \"c:\\pokemongo-bot\\src\\pgoapi\\pgoapi\\pgoapi.py\", line 84, in function\n return request.call()\n File \"C:\\Trainers\\PokemonGo-Bot01\\pokemongo_bot\\api_wrapper.py\", line 112, in call\n if should_throttle_retry:\nUnboundLocalError: local variable 'should_throttle_retry' referenced before assignment\n```\n\n", "before_files": [{"content": "import time\n\nfrom pgoapi.exceptions import (ServerSideRequestThrottlingException,\n NotLoggedInException, ServerBusyOrOfflineException,\n NoPlayerPositionSetException, EmptySubrequestChainException,\n UnexpectedResponseException)\nfrom pgoapi.pgoapi import PGoApi, PGoApiRequest, RpcApi\nfrom pgoapi.protos.POGOProtos.Networking.Requests_pb2 import RequestType\n\nimport pokemongo_bot.logger as logger\nfrom human_behaviour import sleep\n\nclass ApiWrapper(PGoApi):\n def __init__(self):\n PGoApi.__init__(self)\n self.useVanillaRequest = False\n\n def create_request(self):\n RequestClass = ApiRequest\n if self.useVanillaRequest:\n RequestClass = PGoApiRequest\n\n return RequestClass(\n self._api_endpoint,\n self._auth_provider,\n self._position_lat,\n self._position_lng,\n self._position_alt\n )\n\n def login(self, *args):\n # login needs base class \"create_request\"\n self.useVanillaRequest = True\n try:\n ret_value = PGoApi.login(self, *args)\n finally:\n # cleanup code\n self.useVanillaRequest = False\n return ret_value\n\n\nclass ApiRequest(PGoApiRequest):\n def __init__(self, *args):\n PGoApiRequest.__init__(self, *args)\n self.request_callers = []\n self.last_api_request_time = None\n self.requests_per_seconds = 2\n\n def can_call(self):\n if not self._req_method_list:\n raise EmptySubrequestChainException()\n\n if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):\n raise NoPlayerPositionSetException()\n\n if 
self._auth_provider is None or not self._auth_provider.is_login():\n self.log.info('Not logged in')\n raise NotLoggedInException()\n\n return True\n\n def _call(self):\n return PGoApiRequest.call(self)\n\n def _pop_request_callers(self):\n r = self.request_callers\n self.request_callers = []\n return [i.upper() for i in r]\n\n def is_response_valid(self, result, request_callers):\n if not result or result is None or not isinstance(result, dict):\n return False\n\n if not 'responses' in result or not 'status_code' in result:\n return False\n\n if not isinstance(result['responses'], dict):\n return False\n\n # the response can still programatically be valid at this point\n # but still be wrong. we need to check if the server did sent what we asked it\n for request_caller in request_callers:\n if not request_caller in result['responses']:\n return False\n\n return True\n\n def call(self, max_retry=15):\n request_callers = self._pop_request_callers()\n if not self.can_call():\n return False # currently this is never ran, exceptions are raised before\n\n request_timestamp = None\n api_req_method_list = self._req_method_list\n result = None\n try_cnt = 0\n throttling_retry = 0\n unexpected_response_retry = 0\n while True:\n request_timestamp = self.throttle_sleep()\n # self._call internally clear this field, so save it\n self._req_method_list = [req_method for req_method in api_req_method_list]\n try:\n result = self._call()\n should_throttle_retry = False\n should_unexpected_response_retry = False\n except ServerSideRequestThrottlingException:\n should_throttle_retry = True\n except UnexpectedResponseException:\n should_unexpected_response_retry = True\n\n if should_throttle_retry:\n throttling_retry += 1\n if throttling_retry >= max_retry:\n raise ServerSideRequestThrottlingException('Server throttled too many times')\n sleep(1) # huge sleep ?\n continue # skip response checking\n\n if should_unexpected_response_retry:\n unexpected_reponse_retry += 1\n if unexpected_response_retry >= 5:\n logger.log('Server is not responding correctly to our requests. 
Waiting for 30 seconds to reconnect.', 'red')\n sleep(30)\n else:\n sleep(2)\n continue\n \n if not self.is_response_valid(result, request_callers):\n try_cnt += 1\n if try_cnt > 3:\n logger.log('Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry), 'red')\n if try_cnt >= max_retry:\n raise ServerBusyOrOfflineException()\n sleep(1)\n else:\n break\n\n self.last_api_request_time = request_timestamp\n return result\n\n def __getattr__(self, func):\n if func.upper() in RequestType.keys():\n self.request_callers.append(func)\n return PGoApiRequest.__getattr__(self, func)\n\n def throttle_sleep(self):\n now_milliseconds = time.time() * 1000\n required_delay_between_requests = 1000 / self.requests_per_seconds\n\n difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)\n\n if self.last_api_request_time != None and difference < required_delay_between_requests:\n sleep_time = required_delay_between_requests - difference\n time.sleep(sleep_time / 1000)\n\n return now_milliseconds\n", "path": "pokemongo_bot/api_wrapper.py"}], "after_files": [{"content": "import time\n\nfrom pgoapi.exceptions import (ServerSideRequestThrottlingException,\n NotLoggedInException, ServerBusyOrOfflineException,\n NoPlayerPositionSetException, EmptySubrequestChainException,\n UnexpectedResponseException)\nfrom pgoapi.pgoapi import PGoApi, PGoApiRequest, RpcApi\nfrom pgoapi.protos.POGOProtos.Networking.Requests_pb2 import RequestType\n\nimport pokemongo_bot.logger as logger\nfrom human_behaviour import sleep\n\nclass ApiWrapper(PGoApi):\n def __init__(self):\n PGoApi.__init__(self)\n self.useVanillaRequest = False\n\n def create_request(self):\n RequestClass = ApiRequest\n if self.useVanillaRequest:\n RequestClass = PGoApiRequest\n\n return RequestClass(\n self._api_endpoint,\n self._auth_provider,\n self._position_lat,\n self._position_lng,\n self._position_alt\n )\n\n def login(self, *args):\n # login needs base class \"create_request\"\n self.useVanillaRequest = True\n try:\n ret_value = PGoApi.login(self, *args)\n finally:\n # cleanup code\n self.useVanillaRequest = False\n return ret_value\n\n\nclass ApiRequest(PGoApiRequest):\n def __init__(self, *args):\n PGoApiRequest.__init__(self, *args)\n self.request_callers = []\n self.last_api_request_time = None\n self.requests_per_seconds = 2\n\n def can_call(self):\n if not self._req_method_list:\n raise EmptySubrequestChainException()\n\n if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):\n raise NoPlayerPositionSetException()\n\n if self._auth_provider is None or not self._auth_provider.is_login():\n self.log.info('Not logged in')\n raise NotLoggedInException()\n\n return True\n\n def _call(self):\n return PGoApiRequest.call(self)\n\n def _pop_request_callers(self):\n r = self.request_callers\n self.request_callers = []\n return [i.upper() for i in r]\n\n def is_response_valid(self, result, request_callers):\n if not result or result is None or not isinstance(result, dict):\n return False\n\n if not 'responses' in result or not 'status_code' in result:\n return False\n\n if not isinstance(result['responses'], dict):\n return False\n\n # the response can still programatically be valid at this point\n # but still be wrong. 
we need to check if the server did sent what we asked it\n for request_caller in request_callers:\n if not request_caller in result['responses']:\n return False\n\n return True\n\n def call(self, max_retry=15):\n request_callers = self._pop_request_callers()\n if not self.can_call():\n return False # currently this is never ran, exceptions are raised before\n\n request_timestamp = None\n api_req_method_list = self._req_method_list\n result = None\n try_cnt = 0\n throttling_retry = 0\n unexpected_response_retry = 0\n while True:\n request_timestamp = self.throttle_sleep()\n # self._call internally clear this field, so save it\n self._req_method_list = [req_method for req_method in api_req_method_list]\n should_throttle_retry = False\n should_unexpected_response_retry = False\n try:\n result = self._call()\n except ServerSideRequestThrottlingException:\n should_throttle_retry = True\n except UnexpectedResponseException:\n should_unexpected_response_retry = True\n\n if should_throttle_retry:\n throttling_retry += 1\n if throttling_retry >= max_retry:\n raise ServerSideRequestThrottlingException('Server throttled too many times')\n sleep(1) # huge sleep ?\n continue # skip response checking\n\n if should_unexpected_response_retry:\n unexpected_response_retry += 1\n if unexpected_response_retry >= 5:\n logger.log('Server is not responding correctly to our requests. Waiting for 30 seconds to reconnect.', 'red')\n sleep(30)\n else:\n sleep(2)\n continue\n \n if not self.is_response_valid(result, request_callers):\n try_cnt += 1\n if try_cnt > 3:\n logger.log('Server seems to be busy or offline - try again - {}/{}'.format(try_cnt, max_retry), 'red')\n if try_cnt >= max_retry:\n raise ServerBusyOrOfflineException()\n sleep(1)\n else:\n break\n\n self.last_api_request_time = request_timestamp\n return result\n\n def __getattr__(self, func):\n if func.upper() in RequestType.keys():\n self.request_callers.append(func)\n return PGoApiRequest.__getattr__(self, func)\n\n def throttle_sleep(self):\n now_milliseconds = time.time() * 1000\n required_delay_between_requests = 1000 / self.requests_per_seconds\n\n difference = now_milliseconds - (self.last_api_request_time if self.last_api_request_time else 0)\n\n if self.last_api_request_time != None and difference < required_delay_between_requests:\n sleep_time = required_delay_between_requests - difference\n time.sleep(sleep_time / 1000)\n\n return now_milliseconds\n", "path": "pokemongo_bot/api_wrapper.py"}]}
| 2,147 | 263 |
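Note on the PokemonGo-Bot record above (gh_patches_debug_26942): the crash occurs because the retry flags are bound only on the success path. When `self._call()` raises `UnexpectedResponseException`, only `should_unexpected_response_retry` is assigned, so the later `if should_throttle_retry:` reads an unbound local, exactly as the traceback shows. The golden diff moves both initialisations above the `try` and also fixes the `unexpected_reponse_retry` typo. A minimal, self-contained sketch of the pattern and its fix follows; the exception classes and function names are illustrative, not the bot's real API.

```python
class ThrottledError(Exception):
    pass

class UnexpectedResponseError(Exception):
    pass

def call_with_retry_flags(do_call):
    # These two bindings are what the patch adds: without them, an
    # UnexpectedResponseError leaves should_throttle_retry unbound and the
    # first `if` below raises UnboundLocalError.
    should_throttle_retry = False
    should_unexpected_response_retry = False
    try:
        result = do_call()
    except ThrottledError:
        should_throttle_retry = True
    except UnexpectedResponseError:
        should_unexpected_response_retry = True

    if should_throttle_retry:
        return "retry: throttled"
    if should_unexpected_response_retry:
        return "retry: unexpected response"
    return result

def failing_call():
    raise UnexpectedResponseError()

print(call_with_retry_flags(failing_call))   # -> "retry: unexpected response"
```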
gh_patches_debug_7169
|
rasdani/github-patches
|
git_diff
|
google__osv.dev-447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clarify max query limit on bulk query endpoint
Looks like there's [a limit to the number of queries](https://github.com/google/osv/blob/168a6b2c593d4fac2defbc8546e634256cdfa5b0/gcp/api/server.py#L92-L94) that can be submitted at once with the bulk query endpoint, which is [set to 1000](https://github.com/google/osv/blob/168a6b2c593d4fac2defbc8546e634256cdfa5b0/gcp/api/server.py#L42).
It's understandable but not documented in the api docs as far as I can see.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcp/api/osv_service_v1_pb2_grpc.py`
Content:
```
1 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 """Client and server classes corresponding to protobuf-defined services."""
3 import grpc
4
5 from osv import vulnerability_pb2 as osv_dot_vulnerability__pb2
6 import osv_service_v1_pb2 as osv__service__v1__pb2
7
8
9 class OSVStub(object):
10 """Open source vulnerability database.
11 """
12
13 def __init__(self, channel):
14 """Constructor.
15
16 Args:
17 channel: A grpc.Channel.
18 """
19 self.GetVulnById = channel.unary_unary(
20 '/osv.v1.OSV/GetVulnById',
21 request_serializer=osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,
22 response_deserializer=osv_dot_vulnerability__pb2.Vulnerability.FromString,
23 )
24 self.QueryAffected = channel.unary_unary(
25 '/osv.v1.OSV/QueryAffected',
26 request_serializer=osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,
27 response_deserializer=osv__service__v1__pb2.VulnerabilityList.FromString,
28 )
29 self.QueryAffectedBatch = channel.unary_unary(
30 '/osv.v1.OSV/QueryAffectedBatch',
31 request_serializer=osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,
32 response_deserializer=osv__service__v1__pb2.BatchVulnerabilityList.FromString,
33 )
34
35
36 class OSVServicer(object):
37 """Open source vulnerability database.
38 """
39
40 def GetVulnById(self, request, context):
41 """Return a `Vulnerability` object for a given OSV ID.
42 """
43 context.set_code(grpc.StatusCode.UNIMPLEMENTED)
44 context.set_details('Method not implemented!')
45 raise NotImplementedError('Method not implemented!')
46
47 def QueryAffected(self, request, context):
48 """Query vulnerabilities for a particular project at a given commit or
49 version.
50 """
51 context.set_code(grpc.StatusCode.UNIMPLEMENTED)
52 context.set_details('Method not implemented!')
53 raise NotImplementedError('Method not implemented!')
54
55 def QueryAffectedBatch(self, request, context):
56 """Query vulnerabilities (batched) for given package versions and commits.
57 """
58 context.set_code(grpc.StatusCode.UNIMPLEMENTED)
59 context.set_details('Method not implemented!')
60 raise NotImplementedError('Method not implemented!')
61
62
63 def add_OSVServicer_to_server(servicer, server):
64 rpc_method_handlers = {
65 'GetVulnById': grpc.unary_unary_rpc_method_handler(
66 servicer.GetVulnById,
67 request_deserializer=osv__service__v1__pb2.GetVulnByIdParameters.FromString,
68 response_serializer=osv_dot_vulnerability__pb2.Vulnerability.SerializeToString,
69 ),
70 'QueryAffected': grpc.unary_unary_rpc_method_handler(
71 servicer.QueryAffected,
72 request_deserializer=osv__service__v1__pb2.QueryAffectedParameters.FromString,
73 response_serializer=osv__service__v1__pb2.VulnerabilityList.SerializeToString,
74 ),
75 'QueryAffectedBatch': grpc.unary_unary_rpc_method_handler(
76 servicer.QueryAffectedBatch,
77 request_deserializer=osv__service__v1__pb2.QueryAffectedBatchParameters.FromString,
78 response_serializer=osv__service__v1__pb2.BatchVulnerabilityList.SerializeToString,
79 ),
80 }
81 generic_handler = grpc.method_handlers_generic_handler(
82 'osv.v1.OSV', rpc_method_handlers)
83 server.add_generic_rpc_handlers((generic_handler,))
84
85
86 # This class is part of an EXPERIMENTAL API.
87 class OSV(object):
88 """Open source vulnerability database.
89 """
90
91 @staticmethod
92 def GetVulnById(request,
93 target,
94 options=(),
95 channel_credentials=None,
96 call_credentials=None,
97 insecure=False,
98 compression=None,
99 wait_for_ready=None,
100 timeout=None,
101 metadata=None):
102 return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/GetVulnById',
103 osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,
104 osv_dot_vulnerability__pb2.Vulnerability.FromString,
105 options, channel_credentials,
106 insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
107
108 @staticmethod
109 def QueryAffected(request,
110 target,
111 options=(),
112 channel_credentials=None,
113 call_credentials=None,
114 insecure=False,
115 compression=None,
116 wait_for_ready=None,
117 timeout=None,
118 metadata=None):
119 return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffected',
120 osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,
121 osv__service__v1__pb2.VulnerabilityList.FromString,
122 options, channel_credentials,
123 insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
124
125 @staticmethod
126 def QueryAffectedBatch(request,
127 target,
128 options=(),
129 channel_credentials=None,
130 call_credentials=None,
131 insecure=False,
132 compression=None,
133 wait_for_ready=None,
134 timeout=None,
135 metadata=None):
136 return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffectedBatch',
137 osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,
138 osv__service__v1__pb2.BatchVulnerabilityList.FromString,
139 options, channel_credentials,
140 insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gcp/api/osv_service_v1_pb2_grpc.py b/gcp/api/osv_service_v1_pb2_grpc.py
--- a/gcp/api/osv_service_v1_pb2_grpc.py
+++ b/gcp/api/osv_service_v1_pb2_grpc.py
@@ -54,6 +54,7 @@
def QueryAffectedBatch(self, request, context):
"""Query vulnerabilities (batched) for given package versions and commits.
+ This currently allows a maximum of 1000 package versions to be included in a single query.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
|
{"golden_diff": "diff --git a/gcp/api/osv_service_v1_pb2_grpc.py b/gcp/api/osv_service_v1_pb2_grpc.py\n--- a/gcp/api/osv_service_v1_pb2_grpc.py\n+++ b/gcp/api/osv_service_v1_pb2_grpc.py\n@@ -54,6 +54,7 @@\n \n def QueryAffectedBatch(self, request, context):\n \"\"\"Query vulnerabilities (batched) for given package versions and commits.\n+ This currently allows a maximum of 1000 package versions to be included in a single query.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n", "issue": "Clarify max query limit on bulk query endpoint\nLooks like there's [a limit to the number of queries](https://github.com/google/osv/blob/168a6b2c593d4fac2defbc8546e634256cdfa5b0/gcp/api/server.py#L92-L94) that can be submitted at once with the bulk query endpoint, which is [set to 1000](https://github.com/google/osv/blob/168a6b2c593d4fac2defbc8546e634256cdfa5b0/gcp/api/server.py#L42).\r\n\r\nIt's understandable but not documented in the api docs as far as I can see.\n", "before_files": [{"content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom osv import vulnerability_pb2 as osv_dot_vulnerability__pb2\nimport osv_service_v1_pb2 as osv__service__v1__pb2\n\n\nclass OSVStub(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.GetVulnById = channel.unary_unary(\n '/osv.v1.OSV/GetVulnById',\n request_serializer=osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,\n response_deserializer=osv_dot_vulnerability__pb2.Vulnerability.FromString,\n )\n self.QueryAffected = channel.unary_unary(\n '/osv.v1.OSV/QueryAffected',\n request_serializer=osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,\n response_deserializer=osv__service__v1__pb2.VulnerabilityList.FromString,\n )\n self.QueryAffectedBatch = channel.unary_unary(\n '/osv.v1.OSV/QueryAffectedBatch',\n request_serializer=osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,\n response_deserializer=osv__service__v1__pb2.BatchVulnerabilityList.FromString,\n )\n\n\nclass OSVServicer(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n def GetVulnById(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def QueryAffected(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def QueryAffectedBatch(self, request, context):\n \"\"\"Query vulnerabilities (batched) for given package versions and commits.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_OSVServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetVulnById': grpc.unary_unary_rpc_method_handler(\n servicer.GetVulnById,\n request_deserializer=osv__service__v1__pb2.GetVulnByIdParameters.FromString,\n response_serializer=osv_dot_vulnerability__pb2.Vulnerability.SerializeToString,\n ),\n 'QueryAffected': 
grpc.unary_unary_rpc_method_handler(\n servicer.QueryAffected,\n request_deserializer=osv__service__v1__pb2.QueryAffectedParameters.FromString,\n response_serializer=osv__service__v1__pb2.VulnerabilityList.SerializeToString,\n ),\n 'QueryAffectedBatch': grpc.unary_unary_rpc_method_handler(\n servicer.QueryAffectedBatch,\n request_deserializer=osv__service__v1__pb2.QueryAffectedBatchParameters.FromString,\n response_serializer=osv__service__v1__pb2.BatchVulnerabilityList.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'osv.v1.OSV', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass OSV(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n @staticmethod\n def GetVulnById(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/GetVulnById',\n osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,\n osv_dot_vulnerability__pb2.Vulnerability.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def QueryAffected(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffected',\n osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,\n osv__service__v1__pb2.VulnerabilityList.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def QueryAffectedBatch(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffectedBatch',\n osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,\n osv__service__v1__pb2.BatchVulnerabilityList.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n", "path": "gcp/api/osv_service_v1_pb2_grpc.py"}], "after_files": [{"content": "# Generated by the gRPC Python protocol compiler plugin. 
DO NOT EDIT!\n\"\"\"Client and server classes corresponding to protobuf-defined services.\"\"\"\nimport grpc\n\nfrom osv import vulnerability_pb2 as osv_dot_vulnerability__pb2\nimport osv_service_v1_pb2 as osv__service__v1__pb2\n\n\nclass OSVStub(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.GetVulnById = channel.unary_unary(\n '/osv.v1.OSV/GetVulnById',\n request_serializer=osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,\n response_deserializer=osv_dot_vulnerability__pb2.Vulnerability.FromString,\n )\n self.QueryAffected = channel.unary_unary(\n '/osv.v1.OSV/QueryAffected',\n request_serializer=osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,\n response_deserializer=osv__service__v1__pb2.VulnerabilityList.FromString,\n )\n self.QueryAffectedBatch = channel.unary_unary(\n '/osv.v1.OSV/QueryAffectedBatch',\n request_serializer=osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,\n response_deserializer=osv__service__v1__pb2.BatchVulnerabilityList.FromString,\n )\n\n\nclass OSVServicer(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n def GetVulnById(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def QueryAffected(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def QueryAffectedBatch(self, request, context):\n \"\"\"Query vulnerabilities (batched) for given package versions and commits.\n This currently allows a maximum of 1000 package versions to be included in a single query.\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n\ndef add_OSVServicer_to_server(servicer, server):\n rpc_method_handlers = {\n 'GetVulnById': grpc.unary_unary_rpc_method_handler(\n servicer.GetVulnById,\n request_deserializer=osv__service__v1__pb2.GetVulnByIdParameters.FromString,\n response_serializer=osv_dot_vulnerability__pb2.Vulnerability.SerializeToString,\n ),\n 'QueryAffected': grpc.unary_unary_rpc_method_handler(\n servicer.QueryAffected,\n request_deserializer=osv__service__v1__pb2.QueryAffectedParameters.FromString,\n response_serializer=osv__service__v1__pb2.VulnerabilityList.SerializeToString,\n ),\n 'QueryAffectedBatch': grpc.unary_unary_rpc_method_handler(\n servicer.QueryAffectedBatch,\n request_deserializer=osv__service__v1__pb2.QueryAffectedBatchParameters.FromString,\n response_serializer=osv__service__v1__pb2.BatchVulnerabilityList.SerializeToString,\n ),\n }\n generic_handler = grpc.method_handlers_generic_handler(\n 'osv.v1.OSV', rpc_method_handlers)\n server.add_generic_rpc_handlers((generic_handler,))\n\n\n # This class is part of an EXPERIMENTAL API.\nclass OSV(object):\n \"\"\"Open source vulnerability database.\n \"\"\"\n\n @staticmethod\n def GetVulnById(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return 
grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/GetVulnById',\n osv__service__v1__pb2.GetVulnByIdParameters.SerializeToString,\n osv_dot_vulnerability__pb2.Vulnerability.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def QueryAffected(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffected',\n osv__service__v1__pb2.QueryAffectedParameters.SerializeToString,\n osv__service__v1__pb2.VulnerabilityList.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n\n @staticmethod\n def QueryAffectedBatch(request,\n target,\n options=(),\n channel_credentials=None,\n call_credentials=None,\n insecure=False,\n compression=None,\n wait_for_ready=None,\n timeout=None,\n metadata=None):\n return grpc.experimental.unary_unary(request, target, '/osv.v1.OSV/QueryAffectedBatch',\n osv__service__v1__pb2.QueryAffectedBatchParameters.SerializeToString,\n osv__service__v1__pb2.BatchVulnerabilityList.FromString,\n options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n", "path": "gcp/api/osv_service_v1_pb2_grpc.py"}]}
| 1,948 | 148 |
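Note on the osv.dev record above (gh_patches_debug_7169): the golden diff only adds documentation for the server-side cap of 1000 package versions per `QueryAffectedBatch` call. A client that respects that cap would typically chunk its query list before issuing requests; a minimal sketch is below. The constant value comes from the issue text, while the helper name and the stub usage shown in the comment are assumptions, not part of the generated gRPC code.

```python
MAX_QUERIES_PER_BATCH = 1000  # cap described in the issue (gcp/api/server.py)

def batched(queries, size=MAX_QUERIES_PER_BATCH):
    """Yield successive slices of `queries` no longer than `size`."""
    for start in range(0, len(queries), size):
        yield queries[start:start + size]

# Hypothetical use against the generated stub:
#   for chunk in batched(all_queries):
#       response = stub.QueryAffectedBatch(build_batch_request(chunk))
print([len(chunk) for chunk in batched(list(range(2500)))])   # -> [1000, 1000, 500]
```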
gh_patches_debug_10116
|
rasdani/github-patches
|
git_diff
|
prowler-cloud__prowler-2430
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: cloudfront_distributions_https_enabled false positive
### Steps to Reproduce
Prowler reports cloudfront_distributions_https_enabled as failed when the cloudfront distribution has "Redirect HTTP to HTTPS" set in viewer protocol policy on behavior.
### Expected behavior
For the example attached, it should "PASS".
### Actual Result with Screenshots or Logs
```
{
"ETag": "XXXXXXXX",
"DistributionConfig": {
"CallerReference": "1561029664879",
"Aliases": {
"Quantity": 1,
"Items": [
xxxxxxxxx"
]
},
"DefaultRootObject": "",
"Origins": {
"Quantity": 1,
"Items": [
{
"Id": "xxxxxxxx",
"DomainName": "xxxxxxxx",
"OriginPath": "",
"CustomHeaders": {
"Quantity": 0
},
"S3OriginConfig": {
"OriginAccessIdentity": ""
},
"ConnectionAttempts": 3,
"ConnectionTimeout": 10,
"OriginShield": {
"Enabled": false
},
"OriginAccessControlId": ""
}
]
},
"OriginGroups": {
"Quantity": 0
},
"DefaultCacheBehavior": {
"TargetOriginId": "xxxxxxxx",
"TrustedSigners": {
"Enabled": false,
"Quantity": 0
},
"TrustedKeyGroups": {
"Enabled": false,
"Quantity": 0
},
"ViewerProtocolPolicy": "redirect-to-https",
"AllowedMethods": {
"Quantity": 2,
"Items": [
"HEAD",
"GET"
],
"CachedMethods": {
"Quantity": 2,
"Items": [
"HEAD",
"GET"
]
}
},
"SmoothStreaming": false,
"Compress": true,
"LambdaFunctionAssociations": {
"Quantity": 0
},
"FunctionAssociations": {
"Quantity": 0
},
"FieldLevelEncryptionId": "",
"ResponseHeadersPolicyId": "4dde66c4-bea6-48eb-9d5c-520b29617292",
"ForwardedValues": {
"QueryString": true,
"Cookies": {
"Forward": "none"
},
"Headers": {
"Quantity": 2,
"Items": [
"Origin",
"Referer"
]
},
"QueryStringCacheKeys": {
"Quantity": 0
}
},
"MinTTL": 0,
"DefaultTTL": 86400,
"MaxTTL": 31536000
},
"CacheBehaviors": {
"Quantity": 0
},
"CustomErrorResponses": {
"Quantity": 0
},
"Comment": "xxxxxxx",
"Logging": {
"Enabled": true,
"IncludeCookies": false,
"Bucket": "xxxxxxx",
"Prefix": "xxxxxxx"
},
"PriceClass": "PriceClass_100",
"Enabled": true,
"ViewerCertificate": {
"CloudFrontDefaultCertificate": false,
"ACMCertificateArn": "xxxxxxxx",
"SSLSupportMethod": "sni-only",
"MinimumProtocolVersion": "TLSv1.2_2021",
"Certificate": "xxxxxxxxxxxxx",
"CertificateSource": "acm"
},
"Restrictions": {
"GeoRestriction": {
"RestrictionType": "none",
"Quantity": 0
}
},
"WebACLId": "",
"HttpVersion": "http2and3",
"IsIPV6Enabled": true,
"ContinuousDeploymentPolicyId": "",
"Staging": false
}
}
```
### How did you install Prowler?
Docker (docker pull toniblyx/prowler)
### Environment Resource
7. Workstation
### OS used
5. Docker
### Prowler version
3.5.3
### Pip version
From official docker image
### Context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `prowler/providers/aws/services/cloudfront/cloudfront_service.py`
Content:
```
1 from dataclasses import dataclass
2 from enum import Enum
3 from typing import Optional
4
5 from pydantic import BaseModel
6
7 from prowler.lib.logger import logger
8 from prowler.lib.scan_filters.scan_filters import is_resource_filtered
9 from prowler.providers.aws.aws_provider import generate_regional_clients
10
11
12 ################## CloudFront
13 class CloudFront:
14 def __init__(self, audit_info):
15 self.service = "cloudfront"
16 self.session = audit_info.audit_session
17 self.audited_account = audit_info.audited_account
18 self.audit_resources = audit_info.audit_resources
19 global_client = generate_regional_clients(
20 self.service, audit_info, global_service=True
21 )
22 self.distributions = {}
23 if global_client:
24 self.client = list(global_client.values())[0]
25 self.region = self.client.region
26 self.__list_distributions__(self.client, self.region)
27 self.__get_distribution_config__(
28 self.client, self.distributions, self.region
29 )
30 self.__list_tags_for_resource__(
31 self.client, self.distributions, self.region
32 )
33
34 def __get_session__(self):
35 return self.session
36
37 def __list_distributions__(self, client, region) -> dict:
38 logger.info("CloudFront - Listing Distributions...")
39 try:
40 list_ditributions_paginator = client.get_paginator("list_distributions")
41 for page in list_ditributions_paginator.paginate():
42 if "Items" in page["DistributionList"]:
43 for item in page["DistributionList"]["Items"]:
44 if not self.audit_resources or (
45 is_resource_filtered(item["ARN"], self.audit_resources)
46 ):
47 distribution_id = item["Id"]
48 distribution_arn = item["ARN"]
49 origins = item["Origins"]["Items"]
50 distribution = Distribution(
51 arn=distribution_arn,
52 id=distribution_id,
53 origins=origins,
54 region=region,
55 )
56 self.distributions[distribution_id] = distribution
57
58 except Exception as error:
59 logger.error(
60 f"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
61 )
62
63 def __get_distribution_config__(self, client, distributions, region) -> dict:
64 logger.info("CloudFront - Getting Distributions...")
65 try:
66 for distribution_id in distributions.keys():
67 distribution_config = client.get_distribution_config(Id=distribution_id)
68 # Global Config
69 distributions[distribution_id].logging_enabled = distribution_config[
70 "DistributionConfig"
71 ]["Logging"]["Enabled"]
72 distributions[
73 distribution_id
74 ].geo_restriction_type = distribution_config["DistributionConfig"][
75 "Restrictions"
76 ][
77 "GeoRestriction"
78 ][
79 "RestrictionType"
80 ]
81 distributions[distribution_id].web_acl_id = distribution_config[
82 "DistributionConfig"
83 ]["WebACLId"]
84
85 # Default Cache Config
86 default_cache_config = DefaultCacheConfigBehaviour(
87 realtime_log_config_arn=distribution_config["DistributionConfig"][
88 "DefaultCacheBehavior"
89 ].get("RealtimeLogConfigArn"),
90 viewer_protocol_policy=distribution_config["DistributionConfig"][
91 "DefaultCacheBehavior"
92 ].get("ViewerProtocolPolicy"),
93 field_level_encryption_id=distribution_config["DistributionConfig"][
94 "DefaultCacheBehavior"
95 ].get("FieldLevelEncryptionId"),
96 )
97 distributions[
98 distribution_id
99 ].default_cache_config = default_cache_config
100
101 except Exception as error:
102 logger.error(
103 f"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
104 )
105
106 def __list_tags_for_resource__(self, client, distributions, region):
107 logger.info("CloudFront - List Tags...")
108 try:
109 for distribution in distributions.values():
110 response = client.list_tags_for_resource(Resource=distribution.arn)[
111 "Tags"
112 ]
113 distribution.tags = response.get("Items")
114 except Exception as error:
115 logger.error(
116 f"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
117 )
118
119
120 class OriginsSSLProtocols(Enum):
121 SSLv3 = "SSLv3"
122 TLSv1 = "TLSv1"
123 TLSv1_1 = "TLSv1.1"
124 TLSv1_2 = "TLSv1.2"
125
126
127 class ViewerProtocolPolicy(Enum):
128 """The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern"""
129
130 allow_all = "allow-all"
131 redirect_to_https = "redirect-to-https"
132 https_only = "https-only"
133
134
135 class GeoRestrictionType(Enum):
136 """Method types that you want to use to restrict distribution of your content by country"""
137
138 none = "none"
139 blacklist = "blacklist"
140 whitelist = "whitelist"
141
142
143 @dataclass
144 class DefaultCacheConfigBehaviour:
145 realtime_log_config_arn: str
146 viewer_protocol_policy: ViewerProtocolPolicy
147 field_level_encryption_id: str
148
149
150 class Distribution(BaseModel):
151 """Distribution holds a CloudFront Distribution with the required information to run the rela"""
152
153 arn: str
154 id: str
155 region: str
156 logging_enabled: bool = False
157 default_cache_config: Optional[DefaultCacheConfigBehaviour]
158 geo_restriction_type: Optional[GeoRestrictionType]
159 origins: list
160 web_acl_id: str = ""
161 tags: Optional[list] = []
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/prowler/providers/aws/services/cloudfront/cloudfront_service.py b/prowler/providers/aws/services/cloudfront/cloudfront_service.py
--- a/prowler/providers/aws/services/cloudfront/cloudfront_service.py
+++ b/prowler/providers/aws/services/cloudfront/cloudfront_service.py
@@ -1,4 +1,3 @@
-from dataclasses import dataclass
from enum import Enum
from typing import Optional
@@ -140,9 +139,8 @@
whitelist = "whitelist"
-@dataclass
-class DefaultCacheConfigBehaviour:
- realtime_log_config_arn: str
+class DefaultCacheConfigBehaviour(BaseModel):
+ realtime_log_config_arn: Optional[str]
viewer_protocol_policy: ViewerProtocolPolicy
field_level_encryption_id: str
|
{"golden_diff": "diff --git a/prowler/providers/aws/services/cloudfront/cloudfront_service.py b/prowler/providers/aws/services/cloudfront/cloudfront_service.py\n--- a/prowler/providers/aws/services/cloudfront/cloudfront_service.py\n+++ b/prowler/providers/aws/services/cloudfront/cloudfront_service.py\n@@ -1,4 +1,3 @@\n-from dataclasses import dataclass\n from enum import Enum\n from typing import Optional\n \n@@ -140,9 +139,8 @@\n whitelist = \"whitelist\"\n \n \n-@dataclass\n-class DefaultCacheConfigBehaviour:\n- realtime_log_config_arn: str\n+class DefaultCacheConfigBehaviour(BaseModel):\n+ realtime_log_config_arn: Optional[str]\n viewer_protocol_policy: ViewerProtocolPolicy\n field_level_encryption_id: str\n", "issue": "[Bug]: cloudfront_distributions_https_enabled false positive\n### Steps to Reproduce\n\nProwler reports cloudfront_distributions_https_enabled as failed when the cloudfront distribution has \"Redirect HTTP to HTTPS\" set in viewer protocol policy on behavior.\n\n### Expected behavior\n\nFor the example attached, it should \"PASS\".\n\n### Actual Result with Screenshots or Logs\n\n```\r\n{\r\n \"ETag\": \"XXXXXXXX\",\r\n \"DistributionConfig\": {\r\n \"CallerReference\": \"1561029664879\",\r\n \"Aliases\": {\r\n \"Quantity\": 1,\r\n \"Items\": [\r\n xxxxxxxxx\"\r\n ]\r\n },\r\n \"DefaultRootObject\": \"\",\r\n \"Origins\": {\r\n \"Quantity\": 1,\r\n \"Items\": [\r\n {\r\n \"Id\": \"xxxxxxxx\",\r\n \"DomainName\": \"xxxxxxxx\",\r\n \"OriginPath\": \"\",\r\n \"CustomHeaders\": {\r\n \"Quantity\": 0\r\n },\r\n \"S3OriginConfig\": {\r\n \"OriginAccessIdentity\": \"\"\r\n },\r\n \"ConnectionAttempts\": 3,\r\n \"ConnectionTimeout\": 10,\r\n \"OriginShield\": {\r\n \"Enabled\": false\r\n },\r\n \"OriginAccessControlId\": \"\"\r\n }\r\n ]\r\n },\r\n \"OriginGroups\": {\r\n \"Quantity\": 0\r\n },\r\n \"DefaultCacheBehavior\": {\r\n \"TargetOriginId\": \"xxxxxxxx\",\r\n \"TrustedSigners\": {\r\n \"Enabled\": false,\r\n \"Quantity\": 0\r\n },\r\n \"TrustedKeyGroups\": {\r\n \"Enabled\": false,\r\n \"Quantity\": 0\r\n },\r\n \"ViewerProtocolPolicy\": \"redirect-to-https\",\r\n \"AllowedMethods\": {\r\n \"Quantity\": 2,\r\n \"Items\": [\r\n \"HEAD\",\r\n \"GET\"\r\n ],\r\n \"CachedMethods\": {\r\n \"Quantity\": 2,\r\n \"Items\": [\r\n \"HEAD\",\r\n \"GET\"\r\n ]\r\n }\r\n },\r\n \"SmoothStreaming\": false,\r\n \"Compress\": true,\r\n \"LambdaFunctionAssociations\": {\r\n \"Quantity\": 0\r\n },\r\n \"FunctionAssociations\": {\r\n \"Quantity\": 0\r\n },\r\n \"FieldLevelEncryptionId\": \"\",\r\n \"ResponseHeadersPolicyId\": \"4dde66c4-bea6-48eb-9d5c-520b29617292\",\r\n \"ForwardedValues\": {\r\n \"QueryString\": true,\r\n \"Cookies\": {\r\n \"Forward\": \"none\"\r\n },\r\n \"Headers\": {\r\n \"Quantity\": 2,\r\n \"Items\": [\r\n \"Origin\",\r\n \"Referer\"\r\n ]\r\n },\r\n \"QueryStringCacheKeys\": {\r\n \"Quantity\": 0\r\n }\r\n },\r\n \"MinTTL\": 0,\r\n \"DefaultTTL\": 86400,\r\n \"MaxTTL\": 31536000\r\n },\r\n \"CacheBehaviors\": {\r\n \"Quantity\": 0\r\n },\r\n \"CustomErrorResponses\": {\r\n \"Quantity\": 0\r\n },\r\n \"Comment\": \"xxxxxxx\",\r\n \"Logging\": {\r\n \"Enabled\": true,\r\n \"IncludeCookies\": false,\r\n \"Bucket\": \"xxxxxxx\",\r\n \"Prefix\": \"xxxxxxx\"\r\n },\r\n \"PriceClass\": \"PriceClass_100\",\r\n \"Enabled\": true,\r\n \"ViewerCertificate\": {\r\n \"CloudFrontDefaultCertificate\": false,\r\n \"ACMCertificateArn\": \"xxxxxxxx\",\r\n \"SSLSupportMethod\": \"sni-only\",\r\n \"MinimumProtocolVersion\": \"TLSv1.2_2021\",\r\n \"Certificate\": 
\"xxxxxxxxxxxxx\",\r\n \"CertificateSource\": \"acm\"\r\n },\r\n \"Restrictions\": {\r\n \"GeoRestriction\": {\r\n \"RestrictionType\": \"none\",\r\n \"Quantity\": 0\r\n }\r\n },\r\n \"WebACLId\": \"\",\r\n \"HttpVersion\": \"http2and3\",\r\n \"IsIPV6Enabled\": true,\r\n \"ContinuousDeploymentPolicyId\": \"\",\r\n \"Staging\": false\r\n }\r\n}\r\n```\n\n### How did you install Prowler?\n\nDocker (docker pull toniblyx/prowler)\n\n### Environment Resource\n\n7. Workstation\n\n### OS used\n\n5. Docker\n\n### Prowler version\n\n3.5.3\n\n### Pip version\n\nFrom official docker image\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom prowler.lib.logger import logger\nfrom prowler.lib.scan_filters.scan_filters import is_resource_filtered\nfrom prowler.providers.aws.aws_provider import generate_regional_clients\n\n\n################## CloudFront\nclass CloudFront:\n def __init__(self, audit_info):\n self.service = \"cloudfront\"\n self.session = audit_info.audit_session\n self.audited_account = audit_info.audited_account\n self.audit_resources = audit_info.audit_resources\n global_client = generate_regional_clients(\n self.service, audit_info, global_service=True\n )\n self.distributions = {}\n if global_client:\n self.client = list(global_client.values())[0]\n self.region = self.client.region\n self.__list_distributions__(self.client, self.region)\n self.__get_distribution_config__(\n self.client, self.distributions, self.region\n )\n self.__list_tags_for_resource__(\n self.client, self.distributions, self.region\n )\n\n def __get_session__(self):\n return self.session\n\n def __list_distributions__(self, client, region) -> dict:\n logger.info(\"CloudFront - Listing Distributions...\")\n try:\n list_ditributions_paginator = client.get_paginator(\"list_distributions\")\n for page in list_ditributions_paginator.paginate():\n if \"Items\" in page[\"DistributionList\"]:\n for item in page[\"DistributionList\"][\"Items\"]:\n if not self.audit_resources or (\n is_resource_filtered(item[\"ARN\"], self.audit_resources)\n ):\n distribution_id = item[\"Id\"]\n distribution_arn = item[\"ARN\"]\n origins = item[\"Origins\"][\"Items\"]\n distribution = Distribution(\n arn=distribution_arn,\n id=distribution_id,\n origins=origins,\n region=region,\n )\n self.distributions[distribution_id] = distribution\n\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __get_distribution_config__(self, client, distributions, region) -> dict:\n logger.info(\"CloudFront - Getting Distributions...\")\n try:\n for distribution_id in distributions.keys():\n distribution_config = client.get_distribution_config(Id=distribution_id)\n # Global Config\n distributions[distribution_id].logging_enabled = distribution_config[\n \"DistributionConfig\"\n ][\"Logging\"][\"Enabled\"]\n distributions[\n distribution_id\n ].geo_restriction_type = distribution_config[\"DistributionConfig\"][\n \"Restrictions\"\n ][\n \"GeoRestriction\"\n ][\n \"RestrictionType\"\n ]\n distributions[distribution_id].web_acl_id = distribution_config[\n \"DistributionConfig\"\n ][\"WebACLId\"]\n\n # Default Cache Config\n default_cache_config = DefaultCacheConfigBehaviour(\n realtime_log_config_arn=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"RealtimeLogConfigArn\"),\n 
viewer_protocol_policy=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"ViewerProtocolPolicy\"),\n field_level_encryption_id=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"FieldLevelEncryptionId\"),\n )\n distributions[\n distribution_id\n ].default_cache_config = default_cache_config\n\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __list_tags_for_resource__(self, client, distributions, region):\n logger.info(\"CloudFront - List Tags...\")\n try:\n for distribution in distributions.values():\n response = client.list_tags_for_resource(Resource=distribution.arn)[\n \"Tags\"\n ]\n distribution.tags = response.get(\"Items\")\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\nclass OriginsSSLProtocols(Enum):\n SSLv3 = \"SSLv3\"\n TLSv1 = \"TLSv1\"\n TLSv1_1 = \"TLSv1.1\"\n TLSv1_2 = \"TLSv1.2\"\n\n\nclass ViewerProtocolPolicy(Enum):\n \"\"\"The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern\"\"\"\n\n allow_all = \"allow-all\"\n redirect_to_https = \"redirect-to-https\"\n https_only = \"https-only\"\n\n\nclass GeoRestrictionType(Enum):\n \"\"\"Method types that you want to use to restrict distribution of your content by country\"\"\"\n\n none = \"none\"\n blacklist = \"blacklist\"\n whitelist = \"whitelist\"\n\n\n@dataclass\nclass DefaultCacheConfigBehaviour:\n realtime_log_config_arn: str\n viewer_protocol_policy: ViewerProtocolPolicy\n field_level_encryption_id: str\n\n\nclass Distribution(BaseModel):\n \"\"\"Distribution holds a CloudFront Distribution with the required information to run the rela\"\"\"\n\n arn: str\n id: str\n region: str\n logging_enabled: bool = False\n default_cache_config: Optional[DefaultCacheConfigBehaviour]\n geo_restriction_type: Optional[GeoRestrictionType]\n origins: list\n web_acl_id: str = \"\"\n tags: Optional[list] = []\n", "path": "prowler/providers/aws/services/cloudfront/cloudfront_service.py"}], "after_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel\n\nfrom prowler.lib.logger import logger\nfrom prowler.lib.scan_filters.scan_filters import is_resource_filtered\nfrom prowler.providers.aws.aws_provider import generate_regional_clients\n\n\n################## CloudFront\nclass CloudFront:\n def __init__(self, audit_info):\n self.service = \"cloudfront\"\n self.session = audit_info.audit_session\n self.audited_account = audit_info.audited_account\n self.audit_resources = audit_info.audit_resources\n global_client = generate_regional_clients(\n self.service, audit_info, global_service=True\n )\n self.distributions = {}\n if global_client:\n self.client = list(global_client.values())[0]\n self.region = self.client.region\n self.__list_distributions__(self.client, self.region)\n self.__get_distribution_config__(\n self.client, self.distributions, self.region\n )\n self.__list_tags_for_resource__(\n self.client, self.distributions, self.region\n )\n\n def __get_session__(self):\n return self.session\n\n def __list_distributions__(self, client, region) -> dict:\n logger.info(\"CloudFront - Listing Distributions...\")\n try:\n list_ditributions_paginator = client.get_paginator(\"list_distributions\")\n for page in list_ditributions_paginator.paginate():\n if 
\"Items\" in page[\"DistributionList\"]:\n for item in page[\"DistributionList\"][\"Items\"]:\n if not self.audit_resources or (\n is_resource_filtered(item[\"ARN\"], self.audit_resources)\n ):\n distribution_id = item[\"Id\"]\n distribution_arn = item[\"ARN\"]\n origins = item[\"Origins\"][\"Items\"]\n distribution = Distribution(\n arn=distribution_arn,\n id=distribution_id,\n origins=origins,\n region=region,\n )\n self.distributions[distribution_id] = distribution\n\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __get_distribution_config__(self, client, distributions, region) -> dict:\n logger.info(\"CloudFront - Getting Distributions...\")\n try:\n for distribution_id in distributions.keys():\n distribution_config = client.get_distribution_config(Id=distribution_id)\n # Global Config\n distributions[distribution_id].logging_enabled = distribution_config[\n \"DistributionConfig\"\n ][\"Logging\"][\"Enabled\"]\n distributions[\n distribution_id\n ].geo_restriction_type = distribution_config[\"DistributionConfig\"][\n \"Restrictions\"\n ][\n \"GeoRestriction\"\n ][\n \"RestrictionType\"\n ]\n distributions[distribution_id].web_acl_id = distribution_config[\n \"DistributionConfig\"\n ][\"WebACLId\"]\n\n # Default Cache Config\n default_cache_config = DefaultCacheConfigBehaviour(\n realtime_log_config_arn=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"RealtimeLogConfigArn\"),\n viewer_protocol_policy=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"ViewerProtocolPolicy\"),\n field_level_encryption_id=distribution_config[\"DistributionConfig\"][\n \"DefaultCacheBehavior\"\n ].get(\"FieldLevelEncryptionId\"),\n )\n distributions[\n distribution_id\n ].default_cache_config = default_cache_config\n\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n def __list_tags_for_resource__(self, client, distributions, region):\n logger.info(\"CloudFront - List Tags...\")\n try:\n for distribution in distributions.values():\n response = client.list_tags_for_resource(Resource=distribution.arn)[\n \"Tags\"\n ]\n distribution.tags = response.get(\"Items\")\n except Exception as error:\n logger.error(\n f\"{region} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}\"\n )\n\n\nclass OriginsSSLProtocols(Enum):\n SSLv3 = \"SSLv3\"\n TLSv1 = \"TLSv1\"\n TLSv1_1 = \"TLSv1.1\"\n TLSv1_2 = \"TLSv1.2\"\n\n\nclass ViewerProtocolPolicy(Enum):\n \"\"\"The protocol that viewers can use to access the files in the origin specified by TargetOriginId when a request matches the path pattern in PathPattern\"\"\"\n\n allow_all = \"allow-all\"\n redirect_to_https = \"redirect-to-https\"\n https_only = \"https-only\"\n\n\nclass GeoRestrictionType(Enum):\n \"\"\"Method types that you want to use to restrict distribution of your content by country\"\"\"\n\n none = \"none\"\n blacklist = \"blacklist\"\n whitelist = \"whitelist\"\n\n\nclass DefaultCacheConfigBehaviour(BaseModel):\n realtime_log_config_arn: Optional[str]\n viewer_protocol_policy: ViewerProtocolPolicy\n field_level_encryption_id: str\n\n\nclass Distribution(BaseModel):\n \"\"\"Distribution holds a CloudFront Distribution with the required information to run the rela\"\"\"\n\n arn: str\n id: str\n region: str\n logging_enabled: bool = False\n default_cache_config: 
Optional[DefaultCacheConfigBehaviour]\n geo_restriction_type: Optional[GeoRestrictionType]\n origins: list\n web_acl_id: str = \"\"\n tags: Optional[list] = []\n", "path": "prowler/providers/aws/services/cloudfront/cloudfront_service.py"}]}
| 2,773 | 168 |
gh_patches_debug_22305
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-5990
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
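A minimal sketch of the preferred pattern (hypothetical numbers); note that the `__future__` import has to be the first statement of the module:
```python
# division_demo.py -- illustrative only.
# With the import, "/" is true division for ints under Python 2 as well,
# so call sites no longer need the 1.*x/y coercion trick.
from __future__ import division

x, y = 7, 2
print(x / y)        # 3.5 on both Python 2 and Python 3
print(x // y)       # 3 -- floor division stays available explicitly
print(1. * x / y)   # 3.5 -- the per-call-site workaround, shown only for contrast
```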
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py`
Content:
```
1 from typing import Any, Dict, List, Optional, Set
2
3 import numpy as np
4 import pandas as pd
5
6 import great_expectations.exceptions as ge_exceptions
7 from great_expectations.rule_based_profiler.config import ParameterBuilderConfig
8 from great_expectations.rule_based_profiler.domain import Domain
9 from great_expectations.rule_based_profiler.helpers.util import (
10 NP_EPSILON,
11 get_parameter_value_and_validate_return_type,
12 )
13 from great_expectations.rule_based_profiler.metric_computation_result import MetricValue
14 from great_expectations.rule_based_profiler.parameter_builder import (
15 MetricSingleBatchParameterBuilder,
16 )
17 from great_expectations.rule_based_profiler.parameter_container import (
18 DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
19 FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,
20 FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,
21 RAW_PARAMETER_KEY,
22 ParameterContainer,
23 ParameterNode,
24 )
25 from great_expectations.types.attributes import Attributes
26
27
28 class HistogramSingleBatchParameterBuilder(MetricSingleBatchParameterBuilder):
29 """
30 Compute histogram using specified metric for one Batch of data.
31 """
32
33 exclude_field_names: Set[
34 str
35 ] = MetricSingleBatchParameterBuilder.exclude_field_names | {
36 "column_partition_metric_single_batch_parameter_builder_config",
37 "metric_name",
38 "metric_domain_kwargs",
39 "metric_value_kwargs",
40 "enforce_numeric_metric",
41 "replace_nan_with_zero",
42 "reduce_scalar_metric",
43 }
44
45 def __init__(
46 self,
47 name: str,
48 bins: str = "uniform",
49 n_bins: int = 10,
50 allow_relative_error: bool = False,
51 evaluation_parameter_builder_configs: Optional[
52 List[ParameterBuilderConfig]
53 ] = None,
54 data_context: Optional["BaseDataContext"] = None, # noqa: F821
55 ) -> None:
56 """
57 Args:
58 name: the name of this parameter -- this is user-specified parameter name (from configuration);
59 it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
60 and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
61 bins: Partitioning strategy (one of "uniform", "ntile", "quantile", "percentile", or "auto"); please refer
62 to "ColumnPartition" (great_expectations/expectations/metrics/column_aggregate_metrics/column_partition.py).
63 n_bins: Number of bins for histogram computation (ignored and recomputed if "bins" argument is "auto").
64 allow_relative_error: Used for partitionong strategy values that involve quantiles (all except "uniform").
65 evaluation_parameter_builder_configs: ParameterBuilder configurations, executing and making whose respective
66 ParameterBuilder objects' outputs available (as fully-qualified parameter names) is pre-requisite.
67 These "ParameterBuilder" configurations help build parameters needed for this "ParameterBuilder".
68 data_context: BaseDataContext associated with this ParameterBuilder
69 """
70
71 self._column_partition_metric_single_batch_parameter_builder_config = (
72 ParameterBuilderConfig(
73 module_name="great_expectations.rule_based_profiler.parameter_builder",
74 class_name="MetricSingleBatchParameterBuilder",
75 name="column_partition_metric_single_batch_parameter_builder",
76 metric_name="column.partition",
77 metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
78 metric_value_kwargs={
79 "bins": bins,
80 "n_bins": n_bins,
81 "allow_relative_error": allow_relative_error,
82 },
83 enforce_numeric_metric=False,
84 replace_nan_with_zero=False,
85 reduce_scalar_metric=False,
86 evaluation_parameter_builder_configs=None,
87 )
88 )
89
90 if evaluation_parameter_builder_configs is None:
91 evaluation_parameter_builder_configs = [
92 self._column_partition_metric_single_batch_parameter_builder_config,
93 ]
94
95 super().__init__(
96 name=name,
97 metric_name=None,
98 metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
99 metric_value_kwargs=None,
100 enforce_numeric_metric=False,
101 replace_nan_with_zero=False,
102 reduce_scalar_metric=False,
103 evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,
104 data_context=data_context,
105 )
106
107 def _build_parameters(
108 self,
109 domain: Domain,
110 variables: Optional[ParameterContainer] = None,
111 parameters: Optional[Dict[str, ParameterContainer]] = None,
112 recompute_existing_parameter_values: bool = False,
113 ) -> Attributes:
114 """
115 Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and details.
116
117 Returns:
118 Attributes object, containing computed parameter values and parameter computation details metadata.
119 """
120 fully_qualified_column_partition_metric_single_batch_parameter_builder_name: str = f"{RAW_PARAMETER_KEY}{self._column_partition_metric_single_batch_parameter_builder_config.name}"
121 # Obtain "column.partition" from "rule state" (i.e., variables and parameters); from instance variable otherwise.
122 column_partition_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
123 domain=domain,
124 parameter_reference=fully_qualified_column_partition_metric_single_batch_parameter_builder_name,
125 expected_return_type=None,
126 variables=variables,
127 parameters=parameters,
128 )
129 bins: MetricValue = column_partition_parameter_node[
130 FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY
131 ]
132
133 element: Any
134 if bins is None or all(pd.isnull(element) for element in bins):
135 raise ge_exceptions.ProfilerExecutionError(
136 message=f"""Partitioning values for {self.__class__.__name__} by \
137 {self._column_partition_metric_single_batch_parameter_builder_config.name} into bins encountered empty or non-existent \
138 elements.
139 """
140 )
141
142 if not np.issubdtype(bins.dtype, np.number):
143 raise ge_exceptions.ProfilerExecutionError(
144 message=f"""Partitioning values for {self.__class__.__name__} by \
145 {self._column_partition_metric_single_batch_parameter_builder_config.name} did not yield bins of supported data type.
146 """
147 )
148
149 # Only unique "bins" are necessary (hence, "n_bins" is potentially lowered to fit data distribution).
150 bins = sorted(set(bins))
151
152 column_values_nonnull_count_metric_single_batch_parameter_builder = MetricSingleBatchParameterBuilder(
153 name="column_values_nonnull_count_metric_single_batch_parameter_builder",
154 metric_name="column_values.nonnull.count",
155 metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,
156 metric_value_kwargs=None,
157 enforce_numeric_metric=False,
158 replace_nan_with_zero=False,
159 reduce_scalar_metric=False,
160 evaluation_parameter_builder_configs=None,
161 data_context=self.data_context,
162 )
163 column_values_nonnull_count_metric_single_batch_parameter_builder.build_parameters(
164 domain=domain,
165 variables=variables,
166 parameters=parameters,
167 batch_list=self.batch_list,
168 batch_request=self.batch_request,
169 recompute_existing_parameter_values=recompute_existing_parameter_values,
170 )
171 # Obtain "column_values.nonnull.count" from "rule state" (i.e., variables and parameters); from instance variable otherwise.
172 column_values_nonnull_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
173 domain=domain,
174 parameter_reference=column_values_nonnull_count_metric_single_batch_parameter_builder.raw_fully_qualified_parameter_name,
175 expected_return_type=None,
176 variables=variables,
177 parameters=parameters,
178 )
179
180 self.metric_name = "column.histogram"
181 self.metric_value_kwargs = {
182 "bins": tuple(bins),
183 }
184
185 # Compute metric value for one Batch object.
186 super().build_parameters(
187 domain=domain,
188 variables=variables,
189 parameters=parameters,
190 parameter_computation_impl=super()._build_parameters,
191 recompute_existing_parameter_values=recompute_existing_parameter_values,
192 )
193
194 # Retrieve metric values for one Batch object.
195 parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(
196 domain=domain,
197 parameter_reference=self.raw_fully_qualified_parameter_name,
198 expected_return_type=None,
199 variables=variables,
200 parameters=parameters,
201 )
202
203 weights: np.ndarray = np.asarray(
204 parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY]
205 ) / (
206 column_values_nonnull_count_parameter_node[
207 FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY
208 ]
209 + NP_EPSILON
210 )
211 tail_weights: float = (1.0 - sum(weights)) / 2.0
212
213 partition_object: dict = {
214 "bins": bins,
215 "weights": weights.tolist(),
216 "tail_weights": [tail_weights, tail_weights],
217 }
218 details: dict = parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY]
219
220 return Attributes(
221 {
222 FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY: partition_object,
223 FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY: details,
224 }
225 )
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py b/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py
--- a/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py
+++ b/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py
@@ -1,7 +1,6 @@
-from typing import Any, Dict, List, Optional, Set
+from typing import Dict, List, Optional, Set
import numpy as np
-import pandas as pd
import great_expectations.exceptions as ge_exceptions
from great_expectations.rule_based_profiler.config import ParameterBuilderConfig
@@ -130,8 +129,7 @@
FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY
]
- element: Any
- if bins is None or all(pd.isnull(element) for element in bins):
+ if bins is None:
raise ge_exceptions.ProfilerExecutionError(
message=f"""Partitioning values for {self.__class__.__name__} by \
{self._column_partition_metric_single_batch_parameter_builder_config.name} into bins encountered empty or non-existent \
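For context, a rough sketch (made-up counts; `NP_EPSILON` is a stand-in for the library constant of the same name) of the partition object this builder assembles from the histogram and non-null count, mirroring the tail end of the listing above:
```python
import numpy as np

NP_EPSILON = 1e-9                # stand-in value; the real constant comes from helpers.util

bins = [1, 3, 5, 7]              # unique, sorted bin edges from column.partition
hist = np.asarray([2, 1, 1])     # per-bin counts from column.histogram
nonnull_count = 5                # column_values.nonnull.count

weights = hist / (nonnull_count + NP_EPSILON)
tail_weights = (1.0 - weights.sum()) / 2.0

partition_object = {
    "bins": bins,
    "weights": weights.tolist(),
    "tail_weights": [tail_weights, tail_weights],
}
print(partition_object)
```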
|
{"golden_diff": "diff --git a/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py b/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py\n--- a/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py\n+++ b/great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py\n@@ -1,7 +1,6 @@\n-from typing import Any, Dict, List, Optional, Set\n+from typing import Dict, List, Optional, Set\n \n import numpy as np\n-import pandas as pd\n \n import great_expectations.exceptions as ge_exceptions\n from great_expectations.rule_based_profiler.config import ParameterBuilderConfig\n@@ -130,8 +129,7 @@\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY\n ]\n \n- element: Any\n- if bins is None or all(pd.isnull(element) for element in bins):\n+ if bins is None:\n raise ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Partitioning values for {self.__class__.__name__} by \\\n {self._column_partition_metric_single_batch_parameter_builder_config.name} into bins encountered empty or non-existent \\\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Set\n\nimport numpy as np\nimport pandas as pd\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.rule_based_profiler.config import ParameterBuilderConfig\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.helpers.util import (\n NP_EPSILON,\n get_parameter_value_and_validate_return_type,\n)\nfrom great_expectations.rule_based_profiler.metric_computation_result import MetricValue\nfrom great_expectations.rule_based_profiler.parameter_builder import (\n MetricSingleBatchParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_container import (\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,\n RAW_PARAMETER_KEY,\n ParameterContainer,\n ParameterNode,\n)\nfrom great_expectations.types.attributes import Attributes\n\n\nclass HistogramSingleBatchParameterBuilder(MetricSingleBatchParameterBuilder):\n \"\"\"\n Compute histogram using specified metric for one Batch of data.\n \"\"\"\n\n exclude_field_names: Set[\n str\n ] = MetricSingleBatchParameterBuilder.exclude_field_names | {\n \"column_partition_metric_single_batch_parameter_builder_config\",\n \"metric_name\",\n \"metric_domain_kwargs\",\n \"metric_value_kwargs\",\n \"enforce_numeric_metric\",\n \"replace_nan_with_zero\",\n \"reduce_scalar_metric\",\n }\n\n def __init__(\n self,\n name: str,\n bins: str = \"uniform\",\n n_bins: int = 10,\n allow_relative_error: bool = False,\n evaluation_parameter_builder_configs: Optional[\n List[ParameterBuilderConfig]\n ] = None,\n data_context: Optional[\"BaseDataContext\"] = None, # noqa: F821\n ) -> None:\n \"\"\"\n Args:\n name: the name of this parameter -- this is user-specified parameter name (from configuration);\n it is not the fully-qualified parameter name; a fully-qualified parameter name must start with \"$parameter.\"\n and may contain one or more subsequent parts (e.g., \"$parameter.<my_param_from_config>.<metric_name>\").\n bins: Partitioning strategy (one of \"uniform\", \"ntile\", \"quantile\", \"percentile\", or \"auto\"); please 
refer\n to \"ColumnPartition\" (great_expectations/expectations/metrics/column_aggregate_metrics/column_partition.py).\n n_bins: Number of bins for histogram computation (ignored and recomputed if \"bins\" argument is \"auto\").\n allow_relative_error: Used for partitionong strategy values that involve quantiles (all except \"uniform\").\n evaluation_parameter_builder_configs: ParameterBuilder configurations, executing and making whose respective\n ParameterBuilder objects' outputs available (as fully-qualified parameter names) is pre-requisite.\n These \"ParameterBuilder\" configurations help build parameters needed for this \"ParameterBuilder\".\n data_context: BaseDataContext associated with this ParameterBuilder\n \"\"\"\n\n self._column_partition_metric_single_batch_parameter_builder_config = (\n ParameterBuilderConfig(\n module_name=\"great_expectations.rule_based_profiler.parameter_builder\",\n class_name=\"MetricSingleBatchParameterBuilder\",\n name=\"column_partition_metric_single_batch_parameter_builder\",\n metric_name=\"column.partition\",\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs={\n \"bins\": bins,\n \"n_bins\": n_bins,\n \"allow_relative_error\": allow_relative_error,\n },\n enforce_numeric_metric=False,\n replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=None,\n )\n )\n\n if evaluation_parameter_builder_configs is None:\n evaluation_parameter_builder_configs = [\n self._column_partition_metric_single_batch_parameter_builder_config,\n ]\n\n super().__init__(\n name=name,\n metric_name=None,\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs=None,\n enforce_numeric_metric=False,\n replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,\n data_context=data_context,\n )\n\n def _build_parameters(\n self,\n domain: Domain,\n variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n recompute_existing_parameter_values: bool = False,\n ) -> Attributes:\n \"\"\"\n Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and details.\n\n Returns:\n Attributes object, containing computed parameter values and parameter computation details metadata.\n \"\"\"\n fully_qualified_column_partition_metric_single_batch_parameter_builder_name: str = f\"{RAW_PARAMETER_KEY}{self._column_partition_metric_single_batch_parameter_builder_config.name}\"\n # Obtain \"column.partition\" from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n column_partition_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=fully_qualified_column_partition_metric_single_batch_parameter_builder_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n bins: MetricValue = column_partition_parameter_node[\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY\n ]\n\n element: Any\n if bins is None or all(pd.isnull(element) for element in bins):\n raise ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Partitioning values for {self.__class__.__name__} by \\\n{self._column_partition_metric_single_batch_parameter_builder_config.name} into bins encountered empty or non-existent \\\nelements.\n\"\"\"\n )\n\n if not np.issubdtype(bins.dtype, np.number):\n raise 
ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Partitioning values for {self.__class__.__name__} by \\\n{self._column_partition_metric_single_batch_parameter_builder_config.name} did not yield bins of supported data type.\n\"\"\"\n )\n\n # Only unique \"bins\" are necessary (hence, \"n_bins\" is potentially lowered to fit data distribution).\n bins = sorted(set(bins))\n\n column_values_nonnull_count_metric_single_batch_parameter_builder = MetricSingleBatchParameterBuilder(\n name=\"column_values_nonnull_count_metric_single_batch_parameter_builder\",\n metric_name=\"column_values.nonnull.count\",\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs=None,\n enforce_numeric_metric=False,\n replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=None,\n data_context=self.data_context,\n )\n column_values_nonnull_count_metric_single_batch_parameter_builder.build_parameters(\n domain=domain,\n variables=variables,\n parameters=parameters,\n batch_list=self.batch_list,\n batch_request=self.batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n # Obtain \"column_values.nonnull.count\" from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n column_values_nonnull_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=column_values_nonnull_count_metric_single_batch_parameter_builder.raw_fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n\n self.metric_name = \"column.histogram\"\n self.metric_value_kwargs = {\n \"bins\": tuple(bins),\n }\n\n # Compute metric value for one Batch object.\n super().build_parameters(\n domain=domain,\n variables=variables,\n parameters=parameters,\n parameter_computation_impl=super()._build_parameters,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n # Retrieve metric values for one Batch object.\n parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.raw_fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n\n weights: np.ndarray = np.asarray(\n parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY]\n ) / (\n column_values_nonnull_count_parameter_node[\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY\n ]\n + NP_EPSILON\n )\n tail_weights: float = (1.0 - sum(weights)) / 2.0\n\n partition_object: dict = {\n \"bins\": bins,\n \"weights\": weights.tolist(),\n \"tail_weights\": [tail_weights, tail_weights],\n }\n details: dict = parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY]\n\n return Attributes(\n {\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY: partition_object,\n FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY: details,\n }\n )\n", "path": "great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py"}], "after_files": [{"content": "from typing import Dict, List, Optional, Set\n\nimport numpy as np\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations.rule_based_profiler.config import ParameterBuilderConfig\nfrom great_expectations.rule_based_profiler.domain import Domain\nfrom great_expectations.rule_based_profiler.helpers.util import (\n NP_EPSILON,\n get_parameter_value_and_validate_return_type,\n)\nfrom 
great_expectations.rule_based_profiler.metric_computation_result import MetricValue\nfrom great_expectations.rule_based_profiler.parameter_builder import (\n MetricSingleBatchParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_container import (\n DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY,\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY,\n RAW_PARAMETER_KEY,\n ParameterContainer,\n ParameterNode,\n)\nfrom great_expectations.types.attributes import Attributes\n\n\nclass HistogramSingleBatchParameterBuilder(MetricSingleBatchParameterBuilder):\n \"\"\"\n Compute histogram using specified metric for one Batch of data.\n \"\"\"\n\n exclude_field_names: Set[\n str\n ] = MetricSingleBatchParameterBuilder.exclude_field_names | {\n \"column_partition_metric_single_batch_parameter_builder_config\",\n \"metric_name\",\n \"metric_domain_kwargs\",\n \"metric_value_kwargs\",\n \"enforce_numeric_metric\",\n \"replace_nan_with_zero\",\n \"reduce_scalar_metric\",\n }\n\n def __init__(\n self,\n name: str,\n bins: str = \"uniform\",\n n_bins: int = 10,\n allow_relative_error: bool = False,\n evaluation_parameter_builder_configs: Optional[\n List[ParameterBuilderConfig]\n ] = None,\n data_context: Optional[\"BaseDataContext\"] = None, # noqa: F821\n ) -> None:\n \"\"\"\n Args:\n name: the name of this parameter -- this is user-specified parameter name (from configuration);\n it is not the fully-qualified parameter name; a fully-qualified parameter name must start with \"$parameter.\"\n and may contain one or more subsequent parts (e.g., \"$parameter.<my_param_from_config>.<metric_name>\").\n bins: Partitioning strategy (one of \"uniform\", \"ntile\", \"quantile\", \"percentile\", or \"auto\"); please refer\n to \"ColumnPartition\" (great_expectations/expectations/metrics/column_aggregate_metrics/column_partition.py).\n n_bins: Number of bins for histogram computation (ignored and recomputed if \"bins\" argument is \"auto\").\n allow_relative_error: Used for partitionong strategy values that involve quantiles (all except \"uniform\").\n evaluation_parameter_builder_configs: ParameterBuilder configurations, executing and making whose respective\n ParameterBuilder objects' outputs available (as fully-qualified parameter names) is pre-requisite.\n These \"ParameterBuilder\" configurations help build parameters needed for this \"ParameterBuilder\".\n data_context: BaseDataContext associated with this ParameterBuilder\n \"\"\"\n\n self._column_partition_metric_single_batch_parameter_builder_config = (\n ParameterBuilderConfig(\n module_name=\"great_expectations.rule_based_profiler.parameter_builder\",\n class_name=\"MetricSingleBatchParameterBuilder\",\n name=\"column_partition_metric_single_batch_parameter_builder\",\n metric_name=\"column.partition\",\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs={\n \"bins\": bins,\n \"n_bins\": n_bins,\n \"allow_relative_error\": allow_relative_error,\n },\n enforce_numeric_metric=False,\n replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=None,\n )\n )\n\n if evaluation_parameter_builder_configs is None:\n evaluation_parameter_builder_configs = [\n self._column_partition_metric_single_batch_parameter_builder_config,\n ]\n\n super().__init__(\n name=name,\n metric_name=None,\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs=None,\n enforce_numeric_metric=False,\n 
replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=evaluation_parameter_builder_configs,\n data_context=data_context,\n )\n\n def _build_parameters(\n self,\n domain: Domain,\n variables: Optional[ParameterContainer] = None,\n parameters: Optional[Dict[str, ParameterContainer]] = None,\n recompute_existing_parameter_values: bool = False,\n ) -> Attributes:\n \"\"\"\n Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and details.\n\n Returns:\n Attributes object, containing computed parameter values and parameter computation details metadata.\n \"\"\"\n fully_qualified_column_partition_metric_single_batch_parameter_builder_name: str = f\"{RAW_PARAMETER_KEY}{self._column_partition_metric_single_batch_parameter_builder_config.name}\"\n # Obtain \"column.partition\" from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n column_partition_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=fully_qualified_column_partition_metric_single_batch_parameter_builder_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n bins: MetricValue = column_partition_parameter_node[\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY\n ]\n\n if bins is None:\n raise ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Partitioning values for {self.__class__.__name__} by \\\n{self._column_partition_metric_single_batch_parameter_builder_config.name} into bins encountered empty or non-existent \\\nelements.\n\"\"\"\n )\n\n if not np.issubdtype(bins.dtype, np.number):\n raise ge_exceptions.ProfilerExecutionError(\n message=f\"\"\"Partitioning values for {self.__class__.__name__} by \\\n{self._column_partition_metric_single_batch_parameter_builder_config.name} did not yield bins of supported data type.\n\"\"\"\n )\n\n # Only unique \"bins\" are necessary (hence, \"n_bins\" is potentially lowered to fit data distribution).\n bins = sorted(set(bins))\n\n column_values_nonnull_count_metric_single_batch_parameter_builder = MetricSingleBatchParameterBuilder(\n name=\"column_values_nonnull_count_metric_single_batch_parameter_builder\",\n metric_name=\"column_values.nonnull.count\",\n metric_domain_kwargs=DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME,\n metric_value_kwargs=None,\n enforce_numeric_metric=False,\n replace_nan_with_zero=False,\n reduce_scalar_metric=False,\n evaluation_parameter_builder_configs=None,\n data_context=self.data_context,\n )\n column_values_nonnull_count_metric_single_batch_parameter_builder.build_parameters(\n domain=domain,\n variables=variables,\n parameters=parameters,\n batch_list=self.batch_list,\n batch_request=self.batch_request,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n # Obtain \"column_values.nonnull.count\" from \"rule state\" (i.e., variables and parameters); from instance variable otherwise.\n column_values_nonnull_count_parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=column_values_nonnull_count_metric_single_batch_parameter_builder.raw_fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n\n self.metric_name = \"column.histogram\"\n self.metric_value_kwargs = {\n \"bins\": tuple(bins),\n }\n\n # Compute metric value for one Batch object.\n super().build_parameters(\n domain=domain,\n 
variables=variables,\n parameters=parameters,\n parameter_computation_impl=super()._build_parameters,\n recompute_existing_parameter_values=recompute_existing_parameter_values,\n )\n\n # Retrieve metric values for one Batch object.\n parameter_node: ParameterNode = get_parameter_value_and_validate_return_type(\n domain=domain,\n parameter_reference=self.raw_fully_qualified_parameter_name,\n expected_return_type=None,\n variables=variables,\n parameters=parameters,\n )\n\n weights: np.ndarray = np.asarray(\n parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY]\n ) / (\n column_values_nonnull_count_parameter_node[\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY\n ]\n + NP_EPSILON\n )\n tail_weights: float = (1.0 - sum(weights)) / 2.0\n\n partition_object: dict = {\n \"bins\": bins,\n \"weights\": weights.tolist(),\n \"tail_weights\": [tail_weights, tail_weights],\n }\n details: dict = parameter_node[FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY]\n\n return Attributes(\n {\n FULLY_QUALIFIED_PARAMETER_NAME_VALUE_KEY: partition_object,\n FULLY_QUALIFIED_PARAMETER_NAME_METADATA_KEY: details,\n }\n )\n", "path": "great_expectations/rule_based_profiler/parameter_builder/histogram_single_batch_parameter_builder.py"}]}
| 2,709 | 262 |
gh_patches_debug_1976
|
rasdani/github-patches
|
git_diff
|
spotify__luigi-1809
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Terminal Width affects Lock identification under Centos 6.5
The luigi.lock.getpcmd function will return a shorter command line if the terminal is smaller in width. This can result in locks being misidentified as identical and can be a significant problem if your binary/scripts are located in deep paths.
Presumably there is some option that can be passed to ps to resolve this.
edit: It looks like if the ps command is changed to `ps x -wwo pid,args` that should take care of it.
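A minimal sketch of the difference (the helper below is hypothetical; the two ps invocations are the real flags in question):
```python
import os

def pcmd(pid, cmd='ps x -wwo pid,args'):
    # -ww asks ps not to clip its output to the terminal width,
    # so deep binary/script paths come back in full.
    with os.popen(cmd, 'r') as p:
        p.readline()  # skip the column titles
        for line in p:
            spid, args = line.strip().split(' ', 1)
            if int(spid) == int(pid):
                return args

print(pcmd(os.getpid()))                      # full command line
print(pcmd(os.getpid(), 'ps -xo pid,args'))   # may be clipped on a narrow terminal
```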
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/lock.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Locking functionality when launching things from the command line.
19 Uses a pidfile.
20 This prevents multiple identical workflows to be launched simultaneously.
21 """
22 from __future__ import print_function
23
24 import hashlib
25 import os
26
27 from luigi import six
28
29
30 def getpcmd(pid):
31 """
32 Returns command of process.
33
34 :param pid:
35 """
36 if os.name == "nt":
37 # Use wmic command instead of ps on Windows.
38 cmd = 'wmic path win32_process where ProcessID=%s get Commandline' % (pid, )
39 with os.popen(cmd, 'r') as p:
40 lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
41 if lines:
42 _, val = lines
43 return val
44 else:
45 cmd = 'ps -xo pid,args'
46 with os.popen(cmd, 'r') as p:
47 # Skip the column titles
48 p.readline()
49 for line in p:
50 spid, scmd = line.strip().split(' ', 1)
51 if int(spid) == int(pid):
52 return scmd
53 # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
54 return '[PROCESS_WITH_PID={}]'.format(pid)
55
56
57 def get_info(pid_dir, my_pid=None):
58 # Check the name and pid of this process
59 if my_pid is None:
60 my_pid = os.getpid()
61
62 my_cmd = getpcmd(my_pid)
63
64 if six.PY3:
65 cmd_hash = my_cmd.encode('utf8')
66 else:
67 cmd_hash = my_cmd
68
69 pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
70
71 return my_pid, my_cmd, pid_file
72
73
74 def acquire_for(pid_dir, num_available=1, kill_signal=None):
75 """
76 Makes sure the process is only run once at the same time with the same name.
77
78 Notice that we since we check the process name, different parameters to the same
79 command can spawn multiple processes at the same time, i.e. running
80 "/usr/bin/my_process" does not prevent anyone from launching
81 "/usr/bin/my_process --foo bar".
82 """
83
84 my_pid, my_cmd, pid_file = get_info(pid_dir)
85
86 # Check if there is a pid file corresponding to this name
87 if not os.path.exists(pid_dir):
88 os.mkdir(pid_dir)
89 os.chmod(pid_dir, 0o777)
90
91 pids = set()
92 pid_cmds = {}
93 if os.path.exists(pid_file):
94 # There is such a file - read the pid and look up its process name
95 pids.update(filter(None, map(str.strip, open(pid_file))))
96 pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
97 matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
98
99 if kill_signal is not None:
100 for pid in map(int, matching_pids):
101 os.kill(pid, kill_signal)
102 elif len(matching_pids) >= num_available:
103 # We are already running under a different pid
104 print('Pid(s)', ', '.join(matching_pids), 'already running')
105 return False
106 else:
107 # The pid belongs to something else, we could
108 pass
109 pid_cmds[str(my_pid)] = my_cmd
110
111 # Write pids
112 pids.add(str(my_pid))
113 with open(pid_file, 'w') as f:
114 f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
115
116 # Make the file writable by all
117 if os.name == 'nt':
118 pass
119 else:
120 s = os.stat(pid_file)
121 if os.getuid() == s.st_uid:
122 os.chmod(pid_file, s.st_mode | 0o777)
123
124 return True
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/luigi/lock.py b/luigi/lock.py
--- a/luigi/lock.py
+++ b/luigi/lock.py
@@ -42,7 +42,7 @@
_, val = lines
return val
else:
- cmd = 'ps -xo pid,args'
+ cmd = 'ps x -wwo pid,args'
with os.popen(cmd, 'r') as p:
# Skip the column titles
p.readline()
|
{"golden_diff": "diff --git a/luigi/lock.py b/luigi/lock.py\n--- a/luigi/lock.py\n+++ b/luigi/lock.py\n@@ -42,7 +42,7 @@\n _, val = lines\n return val\n else:\n- cmd = 'ps -xo pid,args'\n+ cmd = 'ps x -wwo pid,args'\n with os.popen(cmd, 'r') as p:\n # Skip the column titles\n p.readline()\n", "issue": "Terminal Width affects Lock identification under Centos 6.5\nThe luigi.lock.getpcmd function will return a shorter command line if the terminal is smaller in width. This can result in locks being misidentified as identical and can be a significant problem if your binary/scripts are located in deep paths.\n\nPresumably there is some option that can be passed to ps to resolve this.\n\nedit: It looks like if the ps command is changed to `ps x -wwo pid,args` that should take care of it.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLocking functionality when launching things from the command line.\nUses a pidfile.\nThis prevents multiple identical workflows to be launched simultaneously.\n\"\"\"\nfrom __future__ import print_function\n\nimport hashlib\nimport os\n\nfrom luigi import six\n\n\ndef getpcmd(pid):\n \"\"\"\n Returns command of process.\n\n :param pid:\n \"\"\"\n if os.name == \"nt\":\n # Use wmic command instead of ps on Windows.\n cmd = 'wmic path win32_process where ProcessID=%s get Commandline' % (pid, )\n with os.popen(cmd, 'r') as p:\n lines = [line for line in p.readlines() if line.strip(\"\\r\\n \") != \"\"]\n if lines:\n _, val = lines\n return val\n else:\n cmd = 'ps -xo pid,args'\n with os.popen(cmd, 'r') as p:\n # Skip the column titles\n p.readline()\n for line in p:\n spid, scmd = line.strip().split(' ', 1)\n if int(spid) == int(pid):\n return scmd\n # Fallback instead of None, for e.g. Cygwin where -o is an \"unknown option\" for the ps command:\n return '[PROCESS_WITH_PID={}]'.format(pid)\n\n\ndef get_info(pid_dir, my_pid=None):\n # Check the name and pid of this process\n if my_pid is None:\n my_pid = os.getpid()\n\n my_cmd = getpcmd(my_pid)\n\n if six.PY3:\n cmd_hash = my_cmd.encode('utf8')\n else:\n cmd_hash = my_cmd\n\n pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'\n\n return my_pid, my_cmd, pid_file\n\n\ndef acquire_for(pid_dir, num_available=1, kill_signal=None):\n \"\"\"\n Makes sure the process is only run once at the same time with the same name.\n\n Notice that we since we check the process name, different parameters to the same\n command can spawn multiple processes at the same time, i.e. 
running\n \"/usr/bin/my_process\" does not prevent anyone from launching\n \"/usr/bin/my_process --foo bar\".\n \"\"\"\n\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n\n # Check if there is a pid file corresponding to this name\n if not os.path.exists(pid_dir):\n os.mkdir(pid_dir)\n os.chmod(pid_dir, 0o777)\n\n pids = set()\n pid_cmds = {}\n if os.path.exists(pid_file):\n # There is such a file - read the pid and look up its process name\n pids.update(filter(None, map(str.strip, open(pid_file))))\n pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)\n matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))\n\n if kill_signal is not None:\n for pid in map(int, matching_pids):\n os.kill(pid, kill_signal)\n elif len(matching_pids) >= num_available:\n # We are already running under a different pid\n print('Pid(s)', ', '.join(matching_pids), 'already running')\n return False\n else:\n # The pid belongs to something else, we could\n pass\n pid_cmds[str(my_pid)] = my_cmd\n\n # Write pids\n pids.add(str(my_pid))\n with open(pid_file, 'w') as f:\n f.writelines('%s\\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))\n\n # Make the file writable by all\n if os.name == 'nt':\n pass\n else:\n s = os.stat(pid_file)\n if os.getuid() == s.st_uid:\n os.chmod(pid_file, s.st_mode | 0o777)\n\n return True\n", "path": "luigi/lock.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLocking functionality when launching things from the command line.\nUses a pidfile.\nThis prevents multiple identical workflows to be launched simultaneously.\n\"\"\"\nfrom __future__ import print_function\n\nimport hashlib\nimport os\n\nfrom luigi import six\n\n\ndef getpcmd(pid):\n \"\"\"\n Returns command of process.\n\n :param pid:\n \"\"\"\n if os.name == \"nt\":\n # Use wmic command instead of ps on Windows.\n cmd = 'wmic path win32_process where ProcessID=%s get Commandline' % (pid, )\n with os.popen(cmd, 'r') as p:\n lines = [line for line in p.readlines() if line.strip(\"\\r\\n \") != \"\"]\n if lines:\n _, val = lines\n return val\n else:\n cmd = 'ps x -wwo pid,args'\n with os.popen(cmd, 'r') as p:\n # Skip the column titles\n p.readline()\n for line in p:\n spid, scmd = line.strip().split(' ', 1)\n if int(spid) == int(pid):\n return scmd\n # Fallback instead of None, for e.g. 
Cygwin where -o is an \"unknown option\" for the ps command:\n return '[PROCESS_WITH_PID={}]'.format(pid)\n\n\ndef get_info(pid_dir, my_pid=None):\n # Check the name and pid of this process\n if my_pid is None:\n my_pid = os.getpid()\n\n my_cmd = getpcmd(my_pid)\n\n if six.PY3:\n cmd_hash = my_cmd.encode('utf8')\n else:\n cmd_hash = my_cmd\n\n pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'\n\n return my_pid, my_cmd, pid_file\n\n\ndef acquire_for(pid_dir, num_available=1, kill_signal=None):\n \"\"\"\n Makes sure the process is only run once at the same time with the same name.\n\n Notice that we since we check the process name, different parameters to the same\n command can spawn multiple processes at the same time, i.e. running\n \"/usr/bin/my_process\" does not prevent anyone from launching\n \"/usr/bin/my_process --foo bar\".\n \"\"\"\n\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n\n # Check if there is a pid file corresponding to this name\n if not os.path.exists(pid_dir):\n os.mkdir(pid_dir)\n os.chmod(pid_dir, 0o777)\n\n pids = set()\n pid_cmds = {}\n if os.path.exists(pid_file):\n # There is such a file - read the pid and look up its process name\n pids.update(filter(None, map(str.strip, open(pid_file))))\n pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)\n matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))\n\n if kill_signal is not None:\n for pid in map(int, matching_pids):\n os.kill(pid, kill_signal)\n elif len(matching_pids) >= num_available:\n # We are already running under a different pid\n print('Pid(s)', ', '.join(matching_pids), 'already running')\n return False\n else:\n # The pid belongs to something else, we could\n pass\n pid_cmds[str(my_pid)] = my_cmd\n\n # Write pids\n pids.add(str(my_pid))\n with open(pid_file, 'w') as f:\n f.writelines('%s\\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))\n\n # Make the file writable by all\n if os.name == 'nt':\n pass\n else:\n s = os.stat(pid_file)\n if os.getuid() == s.st_uid:\n os.chmod(pid_file, s.st_mode | 0o777)\n\n return True\n", "path": "luigi/lock.py"}]}
| 1,641 | 109 |
gh_patches_debug_22324
|
rasdani/github-patches
|
git_diff
|
encode__starlette-597
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Session cookie triggers failure on websocket connection upgrade
Hi,
When enabling the session cookie on an app, if a websocket connection upgrade is made, then we seem to fall into an assertion failure:
```
Traceback (most recent call last):
File ".venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py", line 146, in run_asgi
asgi = self.app(self.scope)
File ".venv/lib/python3.7/site-packages/starlette/applications.py", line 134, in __call__
return self.error_middleware(scope)
File ".venv/lib/python3.7/site-packages/starlette/middleware/errors.py", line 88, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 37, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/middleware/base.py", line 22, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/middleware/base.py", line 22, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/exceptions.py", line 49, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/routing.py", line 587, in __call__
return route(scope)
File ".venv/lib/python3.7/site-packages/starlette/routing.py", line 356, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/applications.py", line 134, in __call__
return self.error_middleware(scope)
File ".venv/lib/python3.7/site-packages/starlette/middleware/errors.py", line 88, in __call__
return self.app(scope)
File ".venv/lib/python3.7/site-packages/starlette/middleware/sessions.py", line 34, in __call__
request = Request(scope)
File ".venv/lib/python3.7/site-packages/starlette/requests.py", line 128, in __init__
assert scope["type"] == "http"
```
I think the issue is that the session middleware doesn't complain when the request type is either http or websocket: https://github.com/encode/starlette/blob/master/starlette/middleware/sessions.py#L32
But right after that, it creates a Request from the scope, and Request itself asserts that the type is http only: https://github.com/encode/starlette/blob/master/starlette/requests.py#L128
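A minimal sketch of the direction a fix could take (the middleware below is a stripped-down stand-in, not the real implementation; `HTTPConnection` is Starlette's scope wrapper shared by HTTP and WebSocket requests):
```python
from starlette.requests import HTTPConnection

class CookieAwareMiddleware:
    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        if scope["type"] not in ("http", "websocket"):
            await self.app(scope, receive, send)
            return
        # HTTPConnection accepts both "http" and "websocket" scopes,
        # unlike Request, which asserts scope["type"] == "http".
        connection = HTTPConnection(scope)
        session_cookie = connection.cookies.get("session")  # works for ws upgrades too
        await self.app(scope, receive, send)
```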
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/sessions.py`
Content:
```
1 import json
2 import typing
3 from base64 import b64decode, b64encode
4
5 import itsdangerous
6 from itsdangerous.exc import BadTimeSignature, SignatureExpired
7
8 from starlette.datastructures import MutableHeaders, Secret
9 from starlette.requests import Request
10 from starlette.types import ASGIApp, Message, Receive, Scope, Send
11
12
13 class SessionMiddleware:
14 def __init__(
15 self,
16 app: ASGIApp,
17 secret_key: typing.Union[str, Secret],
18 session_cookie: str = "session",
19 max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds
20 same_site: str = "lax",
21 https_only: bool = False,
22 ) -> None:
23 self.app = app
24 self.signer = itsdangerous.TimestampSigner(str(secret_key))
25 self.session_cookie = session_cookie
26 self.max_age = max_age
27 self.security_flags = "httponly; samesite=" + same_site
28 if https_only: # Secure flag can be used with HTTPS only
29 self.security_flags += "; secure"
30
31 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
32 if scope["type"] not in ("http", "websocket"): # pragma: no cover
33 await self.app(scope, receive, send)
34 return
35
36 request = Request(scope)
37 initial_session_was_empty = True
38
39 if self.session_cookie in request.cookies:
40 data = request.cookies[self.session_cookie].encode("utf-8")
41 try:
42 data = self.signer.unsign(data, max_age=self.max_age)
43 scope["session"] = json.loads(b64decode(data))
44 initial_session_was_empty = False
45 except (BadTimeSignature, SignatureExpired):
46 scope["session"] = {}
47 else:
48 scope["session"] = {}
49
50 async def send_wrapper(message: Message) -> None:
51 if message["type"] == "http.response.start":
52 if scope["session"]:
53 # We have session data to persist.
54 data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
55 data = self.signer.sign(data)
56 headers = MutableHeaders(scope=message)
57 header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
58 self.session_cookie,
59 data.decode("utf-8"),
60 self.max_age,
61 self.security_flags,
62 )
63 headers.append("Set-Cookie", header_value)
64 elif not initial_session_was_empty:
65 # The session has been cleared.
66 headers = MutableHeaders(scope=message)
67 header_value = "%s=%s; %s" % (
68 self.session_cookie,
69 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
70 self.security_flags,
71 )
72 headers.append("Set-Cookie", header_value)
73 await send(message)
74
75 await self.app(scope, receive, send_wrapper)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -6,7 +6,7 @@
from itsdangerous.exc import BadTimeSignature, SignatureExpired
from starlette.datastructures import MutableHeaders, Secret
-from starlette.requests import Request
+from starlette.requests import HTTPConnection
from starlette.types import ASGIApp, Message, Receive, Scope, Send
@@ -33,11 +33,11 @@
await self.app(scope, receive, send)
return
- request = Request(scope)
+ connection = HTTPConnection(scope)
initial_session_was_empty = True
- if self.session_cookie in request.cookies:
- data = request.cookies[self.session_cookie].encode("utf-8")
+ if self.session_cookie in connection.cookies:
+ data = connection.cookies[self.session_cookie].encode("utf-8")
try:
data = self.signer.unsign(data, max_age=self.max_age)
scope["session"] = json.loads(b64decode(data))
|
{"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -6,7 +6,7 @@\n from itsdangerous.exc import BadTimeSignature, SignatureExpired\n \n from starlette.datastructures import MutableHeaders, Secret\n-from starlette.requests import Request\n+from starlette.requests import HTTPConnection\n from starlette.types import ASGIApp, Message, Receive, Scope, Send\n \n \n@@ -33,11 +33,11 @@\n await self.app(scope, receive, send)\n return\n \n- request = Request(scope)\n+ connection = HTTPConnection(scope)\n initial_session_was_empty = True\n \n- if self.session_cookie in request.cookies:\n- data = request.cookies[self.session_cookie].encode(\"utf-8\")\n+ if self.session_cookie in connection.cookies:\n+ data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n", "issue": "Session cookie triggers failure on websocket connection upgrade\nHi,\r\n\r\nWhen enabling the session cookie on an app, if a websocket connection upgrade is made, then we seem to fall into a an assertion failuire:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py\", line 146, in run_asgi\r\n asgi = self.app(self.scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/applications.py\", line 134, in __call__\r\n return self.error_middleware(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/errors.py\", line 88, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py\", line 37, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/base.py\", line 22, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/base.py\", line 22, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/exceptions.py\", line 49, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/routing.py\", line 587, in __call__\r\n return route(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/routing.py\", line 356, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/applications.py\", line 134, in __call__\r\n return self.error_middleware(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/errors.py\", line 88, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/sessions.py\", line 34, in __call__\r\n request = Request(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/requests.py\", line 128, in __init__\r\n assert scope[\"type\"] == \"http\"\r\n```\r\n\r\nI think the issue is that the session middleware doesn't coimplain when the request type is both http and websocket: https://github.com/encode/starlette/blob/master/starlette/middleware/sessions.py#L32\r\n\r\nBut right after, creates a Request from the scope when, itself assert only http: https://github.com/encode/starlette/blob/master/starlette/requests.py#L128\r\n\r\n\nSession cookie triggers failure on websocket connection upgrade\nHi,\r\n\r\nWhen enabling the session cookie on an app, if a websocket connection upgrade is made, then we seem to fall into a an assertion 
failuire:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".venv/lib/python3.7/site-packages/uvicorn/protocols/websockets/websockets_impl.py\", line 146, in run_asgi\r\n asgi = self.app(self.scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/applications.py\", line 134, in __call__\r\n return self.error_middleware(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/errors.py\", line 88, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py\", line 37, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/base.py\", line 22, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/base.py\", line 22, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/exceptions.py\", line 49, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/routing.py\", line 587, in __call__\r\n return route(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/routing.py\", line 356, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/applications.py\", line 134, in __call__\r\n return self.error_middleware(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/errors.py\", line 88, in __call__\r\n return self.app(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/middleware/sessions.py\", line 34, in __call__\r\n request = Request(scope)\r\n File \".venv/lib/python3.7/site-packages/starlette/requests.py\", line 128, in __init__\r\n assert scope[\"type\"] == \"http\"\r\n```\r\n\r\nI think the issue is that the session middleware doesn't coimplain when the request type is both http and websocket: https://github.com/encode/starlette/blob/master/starlette/middleware/sessions.py#L32\r\n\r\nBut right after, creates a Request from the scope when, itself assert only http: https://github.com/encode/starlette/blob/master/starlette/requests.py#L128\r\n\r\n\n", "before_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import Request\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n request = Request(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in request.cookies:\n data = request.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n 
initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; %s\" % (\n self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": "starlette/middleware/sessions.py"}], "after_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import HTTPConnection\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n connection = HTTPConnection(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in connection.cookies:\n data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; %s\" % (\n self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": 
"starlette/middleware/sessions.py"}]}
| 2,264 | 243 |
gh_patches_debug_3084
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-2837
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PySafetyBear: Add asciinema
The content of the file on which the Bear performs its action also has to be shown in the asciinema recording.
difficulty/newcomer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/python/requirements/PySafetyBear.py`
Content:
```
1 import os
2 from collections import namedtuple
3 import pkg_resources
4 import re
5
6 from safety import safety
7
8 from coalib.bears.LocalBear import LocalBear
9 from dependency_management.requirements.PipRequirement import PipRequirement
10 from coalib.results.Result import Result
11 from coalib.settings.Setting import path
12 from coalib.results.SourceRange import SourceRange
13 from coalib.settings.Setting import typed_list
14
15
16 # It was for old versions of safety and those versions will be allow in future.
17 def cve_key_checker(vulnerability):
18 if 'cve' in vulnerability.data:
19 if vulnerability.data['cve'] is None:
20 return None
21 else:
22 return True
23 else:
24 return None
25
26
27 # the safety module expects an object that looks like this
28 # (not importing it from there because it's in a private-ish location)
29 Package = namedtuple('Package', ('key', 'version'))
30
31 safety_get_vulnerabilities = safety.get_vulnerabilities
32 _insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'
33 'safety-db/master/data/insecure_full.json')
34
35 _insecure_json_url = ('https://raw.githubusercontent.com/'
36 'pyupio/safety-db/master/data/insecure.json')
37
38
39 def _get_vulnerabilities(pkg, spec, db):
40 for entry in safety_get_vulnerabilities(pkg, spec, db):
41 entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']
42 entry['id'] = entry['cve']
43 yield entry
44
45
46 safety.get_vulnerabilities = _get_vulnerabilities
47
48
49 class PySafetyBear(LocalBear):
50 """
51 Checks if any of your Python dependencies have known security issues.
52
53 Data is taken from pyup.io's vulnerability database hosted at
54 https://github.com/pyupio/safety.
55 """
56
57 LANGUAGES = {
58 'Python Requirements',
59 'Python 2 Requirements',
60 'Python 3 Requirements',
61 }
62 AUTHORS = {'Bence Nagy'}
63 REQUIREMENTS = {PipRequirement('safety', '1.8.2')}
64 AUTHORS_EMAILS = {'[email protected]'}
65 LICENSE = 'AGPL'
66 CAN_DETECT = {'Security'}
67
68 def setup_dependencies(self):
69 file = self.download_cached_file(_insecure_full_json_url,
70 'insecure_full.json')
71 self.download_cached_file(_insecure_json_url,
72 'insecure.json')
73 type(self).db_path = os.path.dirname(file)
74
75 def run(self, filename, file,
76 db_path: path = '',
77 cve_ignore: typed_list(str) = []):
78 """
79 Checks for vulnerable package versions in requirements files.
80
81 :param db_path: Path to a local vulnerability database.
82 :param cve_ignore: A list of CVE number to be ignore.
83 """
84 db_path = self.db_path if not db_path else db_path
85 packages = list(
86 Package(key=req.key, version=req.specs[0][1])
87 for req in self.try_parse_requirements(file)
88 if len(req.specs) == 1 and req.specs[0][0] == '=='
89 )
90
91 if not packages:
92 return
93
94 for vulnerability in safety.check(packages, key=None,
95 db_mirror=db_path, cached=False,
96 ignore_ids=cve_ignore):
97 if 'cve' in vulnerability.vuln_id.strip().lower():
98 message_template = (
99 '{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '
100 'and your project is using {vuln.version}.'
101 )
102 else:
103 message_template = (
104 '{vuln.name}{vuln.spec} is vulnerable to '
105 'pyup.io-{vuln.vuln_id} and your project is using '
106 '{vuln.version}.'
107 )
108
109 # StopIteration should not ever happen so skipping its branch
110 line_number, line = next( # pragma: no branch
111 (index, line) for index, line in enumerate(file, start=1)
112 if vulnerability.name in line
113 )
114 version_spec_match = re.search(r'[=<>]+(\S+?)(?:$|\s|#)', line)
115 source_range = SourceRange.from_values(
116 filename,
117 line_number,
118 version_spec_match.start(1) + 1,
119 line_number,
120 version_spec_match.end(1) + 1,
121 )
122
123 yield Result(
124 self,
125 message_template.format(vuln=vulnerability),
126 additional_info=vulnerability.advisory,
127 affected_code=(source_range, ),
128 )
129
130 @staticmethod
131 def try_parse_requirements(lines: typed_list(str)):
132 """
133 Yields all package requirements parsable from the given lines.
134
135 :param lines: An iterable of lines from a requirements file.
136 """
137 for line in lines:
138 try:
139 yield from pkg_resources.parse_requirements(line)
140 except pkg_resources.RequirementParseError:
141 # unsupported requirement specification
142 pass
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bears/python/requirements/PySafetyBear.py b/bears/python/requirements/PySafetyBear.py
--- a/bears/python/requirements/PySafetyBear.py
+++ b/bears/python/requirements/PySafetyBear.py
@@ -64,6 +64,7 @@
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL'
CAN_DETECT = {'Security'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/221386'
def setup_dependencies(self):
file = self.download_cached_file(_insecure_full_json_url,
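A quick, hypothetical check that the attribute added by the patch is in place, assuming the bear is importable from the repository layout shown in the file path above:

```python
# Hypothetical check; assumes the repository layout shown above.
from bears.python.requirements.PySafetyBear import PySafetyBear

# The patch adds a link that coala can surface as a usage recording for the bear.
assert PySafetyBear.ASCIINEMA_URL == 'https://asciinema.org/a/221386'
```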
|
{"golden_diff": "diff --git a/bears/python/requirements/PySafetyBear.py b/bears/python/requirements/PySafetyBear.py\n--- a/bears/python/requirements/PySafetyBear.py\n+++ b/bears/python/requirements/PySafetyBear.py\n@@ -64,6 +64,7 @@\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL'\n CAN_DETECT = {'Security'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/221386'\n \n def setup_dependencies(self):\n file = self.download_cached_file(_insecure_full_json_url,\n", "issue": "PySafetyBear: Add asciinema \nContent of file on Bear will perform it's action also have to shown in asciinema.\r\n\r\ndifficulty/newcomer\n", "before_files": [{"content": "import os\nfrom collections import namedtuple\nimport pkg_resources\nimport re\n\nfrom safety import safety\n\nfrom coalib.bears.LocalBear import LocalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.results.Result import Result\nfrom coalib.settings.Setting import path\nfrom coalib.results.SourceRange import SourceRange\nfrom coalib.settings.Setting import typed_list\n\n\n# It was for old versions of safety and those versions will be allow in future.\ndef cve_key_checker(vulnerability):\n if 'cve' in vulnerability.data:\n if vulnerability.data['cve'] is None:\n return None\n else:\n return True\n else:\n return None\n\n\n# the safety module expects an object that looks like this\n# (not importing it from there because it's in a private-ish location)\nPackage = namedtuple('Package', ('key', 'version'))\n\nsafety_get_vulnerabilities = safety.get_vulnerabilities\n_insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'\n 'safety-db/master/data/insecure_full.json')\n\n_insecure_json_url = ('https://raw.githubusercontent.com/'\n 'pyupio/safety-db/master/data/insecure.json')\n\n\ndef _get_vulnerabilities(pkg, spec, db):\n for entry in safety_get_vulnerabilities(pkg, spec, db):\n entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']\n entry['id'] = entry['cve']\n yield entry\n\n\nsafety.get_vulnerabilities = _get_vulnerabilities\n\n\nclass PySafetyBear(LocalBear):\n \"\"\"\n Checks if any of your Python dependencies have known security issues.\n\n Data is taken from pyup.io's vulnerability database hosted at\n https://github.com/pyupio/safety.\n \"\"\"\n\n LANGUAGES = {\n 'Python Requirements',\n 'Python 2 Requirements',\n 'Python 3 Requirements',\n }\n AUTHORS = {'Bence Nagy'}\n REQUIREMENTS = {PipRequirement('safety', '1.8.2')}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL'\n CAN_DETECT = {'Security'}\n\n def setup_dependencies(self):\n file = self.download_cached_file(_insecure_full_json_url,\n 'insecure_full.json')\n self.download_cached_file(_insecure_json_url,\n 'insecure.json')\n type(self).db_path = os.path.dirname(file)\n\n def run(self, filename, file,\n db_path: path = '',\n cve_ignore: typed_list(str) = []):\n \"\"\"\n Checks for vulnerable package versions in requirements files.\n\n :param db_path: Path to a local vulnerability database.\n :param cve_ignore: A list of CVE number to be ignore.\n \"\"\"\n db_path = self.db_path if not db_path else db_path\n packages = list(\n Package(key=req.key, version=req.specs[0][1])\n for req in self.try_parse_requirements(file)\n if len(req.specs) == 1 and req.specs[0][0] == '=='\n )\n\n if not packages:\n return\n\n for vulnerability in safety.check(packages, key=None,\n db_mirror=db_path, cached=False,\n ignore_ids=cve_ignore):\n if 'cve' in vulnerability.vuln_id.strip().lower():\n message_template = (\n 
'{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '\n 'and your project is using {vuln.version}.'\n )\n else:\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to '\n 'pyup.io-{vuln.vuln_id} and your project is using '\n '{vuln.version}.'\n )\n\n # StopIteration should not ever happen so skipping its branch\n line_number, line = next( # pragma: no branch\n (index, line) for index, line in enumerate(file, start=1)\n if vulnerability.name in line\n )\n version_spec_match = re.search(r'[=<>]+(\\S+?)(?:$|\\s|#)', line)\n source_range = SourceRange.from_values(\n filename,\n line_number,\n version_spec_match.start(1) + 1,\n line_number,\n version_spec_match.end(1) + 1,\n )\n\n yield Result(\n self,\n message_template.format(vuln=vulnerability),\n additional_info=vulnerability.advisory,\n affected_code=(source_range, ),\n )\n\n @staticmethod\n def try_parse_requirements(lines: typed_list(str)):\n \"\"\"\n Yields all package requirements parsable from the given lines.\n\n :param lines: An iterable of lines from a requirements file.\n \"\"\"\n for line in lines:\n try:\n yield from pkg_resources.parse_requirements(line)\n except pkg_resources.RequirementParseError:\n # unsupported requirement specification\n pass\n", "path": "bears/python/requirements/PySafetyBear.py"}], "after_files": [{"content": "import os\nfrom collections import namedtuple\nimport pkg_resources\nimport re\n\nfrom safety import safety\n\nfrom coalib.bears.LocalBear import LocalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.results.Result import Result\nfrom coalib.settings.Setting import path\nfrom coalib.results.SourceRange import SourceRange\nfrom coalib.settings.Setting import typed_list\n\n\n# It was for old versions of safety and those versions will be allow in future.\ndef cve_key_checker(vulnerability):\n if 'cve' in vulnerability.data:\n if vulnerability.data['cve'] is None:\n return None\n else:\n return True\n else:\n return None\n\n\n# the safety module expects an object that looks like this\n# (not importing it from there because it's in a private-ish location)\nPackage = namedtuple('Package', ('key', 'version'))\n\nsafety_get_vulnerabilities = safety.get_vulnerabilities\n_insecure_full_json_url = ('https://raw.githubusercontent.com/pyupio/'\n 'safety-db/master/data/insecure_full.json')\n\n_insecure_json_url = ('https://raw.githubusercontent.com/'\n 'pyupio/safety-db/master/data/insecure.json')\n\n\ndef _get_vulnerabilities(pkg, spec, db):\n for entry in safety_get_vulnerabilities(pkg, spec, db):\n entry['cve'] = entry['id'] if entry['cve'] is None else entry['cve']\n entry['id'] = entry['cve']\n yield entry\n\n\nsafety.get_vulnerabilities = _get_vulnerabilities\n\n\nclass PySafetyBear(LocalBear):\n \"\"\"\n Checks if any of your Python dependencies have known security issues.\n\n Data is taken from pyup.io's vulnerability database hosted at\n https://github.com/pyupio/safety.\n \"\"\"\n\n LANGUAGES = {\n 'Python Requirements',\n 'Python 2 Requirements',\n 'Python 3 Requirements',\n }\n AUTHORS = {'Bence Nagy'}\n REQUIREMENTS = {PipRequirement('safety', '1.8.2')}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL'\n CAN_DETECT = {'Security'}\n ASCIINEMA_URL = 'https://asciinema.org/a/221386'\n\n def setup_dependencies(self):\n file = self.download_cached_file(_insecure_full_json_url,\n 'insecure_full.json')\n self.download_cached_file(_insecure_json_url,\n 'insecure.json')\n type(self).db_path = os.path.dirname(file)\n\n def run(self, 
filename, file,\n db_path: path = '',\n cve_ignore: typed_list(str) = []):\n \"\"\"\n Checks for vulnerable package versions in requirements files.\n\n :param db_path: Path to a local vulnerability database.\n :param cve_ignore: A list of CVE number to be ignore.\n \"\"\"\n db_path = self.db_path if not db_path else db_path\n packages = list(\n Package(key=req.key, version=req.specs[0][1])\n for req in self.try_parse_requirements(file)\n if len(req.specs) == 1 and req.specs[0][0] == '=='\n )\n\n if not packages:\n return\n\n for vulnerability in safety.check(packages, key=None,\n db_mirror=db_path, cached=False,\n ignore_ids=cve_ignore):\n if 'cve' in vulnerability.vuln_id.strip().lower():\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to {vuln.vuln_id} '\n 'and your project is using {vuln.version}.'\n )\n else:\n message_template = (\n '{vuln.name}{vuln.spec} is vulnerable to '\n 'pyup.io-{vuln.vuln_id} and your project is using '\n '{vuln.version}.'\n )\n\n # StopIteration should not ever happen so skipping its branch\n line_number, line = next( # pragma: no branch\n (index, line) for index, line in enumerate(file, start=1)\n if vulnerability.name in line\n )\n version_spec_match = re.search(r'[=<>]+(\\S+?)(?:$|\\s|#)', line)\n source_range = SourceRange.from_values(\n filename,\n line_number,\n version_spec_match.start(1) + 1,\n line_number,\n version_spec_match.end(1) + 1,\n )\n\n yield Result(\n self,\n message_template.format(vuln=vulnerability),\n additional_info=vulnerability.advisory,\n affected_code=(source_range, ),\n )\n\n @staticmethod\n def try_parse_requirements(lines: typed_list(str)):\n \"\"\"\n Yields all package requirements parsable from the given lines.\n\n :param lines: An iterable of lines from a requirements file.\n \"\"\"\n for line in lines:\n try:\n yield from pkg_resources.parse_requirements(line)\n except pkg_resources.RequirementParseError:\n # unsupported requirement specification\n pass\n", "path": "bears/python/requirements/PySafetyBear.py"}]}
| 1,704 | 137 |
gh_patches_debug_13759
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1721
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Groups miss description in API v2
### Describe the bug
The `description` field is missing in the `activemembers/groups` serializers.
### How to reproduce
Look at swagger.
### Expected behaviour
An extra field for the description, at least in the detail endpoints.
### Additional context
Also something to double-check: groups have a field `display_members`. This is not included in the serializers, so are the members not shown if that field is true?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/activemembers/api/v2/serializers/member_group.py`
Content:
```
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group_membership import (
4 MemberGroupMembershipSerializer,
5 )
6 from activemembers.models import MemberGroup
7 from thaliawebsite.api.v2.serializers import ThumbnailSerializer
8
9
10 class MemberGroupSerializer(serializers.ModelSerializer):
11 """API serializer for member groups."""
12
13 def __init__(self, *args, **kwargs):
14 super().__init__(*args, **kwargs)
15
16 if "get_memberships" not in self.context and "members" in self.fields:
17 self.fields.pop("members")
18
19 class Meta:
20 """Meta class for the serializer."""
21
22 model = MemberGroup
23 fields = (
24 "pk",
25 "name",
26 "type",
27 "since",
28 "until",
29 "contact_address",
30 "photo",
31 "members",
32 )
33
34 members = serializers.SerializerMethodField("_members")
35 type = serializers.SerializerMethodField("_type")
36 photo = ThumbnailSerializer(placeholder="activemembers/images/placeholder.png")
37
38 def _members(self, instance):
39 memberships = self.context["get_memberships"](instance).prefetch_related(
40 "member__membergroupmembership_set"
41 )
42 return MemberGroupMembershipSerializer(
43 many=True, context=self.context
44 ).to_representation(memberships)
45
46 def _type(self, instance):
47 if hasattr(instance, "board"):
48 return "board"
49 if hasattr(instance, "committee"):
50 return "committee"
51 if hasattr(instance, "society"):
52 return "society"
53 return None
54
55
56 class MemberGroupListSerializer(MemberGroupSerializer):
57 class Meta:
58 """Meta class for the serializer."""
59
60 model = MemberGroup
61 fields = ("pk", "name", "type", "since", "until", "contact_address", "photo")
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/activemembers/api/v2/serializers/member_group.py b/website/activemembers/api/v2/serializers/member_group.py
--- a/website/activemembers/api/v2/serializers/member_group.py
+++ b/website/activemembers/api/v2/serializers/member_group.py
@@ -24,6 +24,7 @@
"pk",
"name",
"type",
+ "description",
"since",
"until",
"contact_address",
@@ -58,4 +59,13 @@
"""Meta class for the serializer."""
model = MemberGroup
- fields = ("pk", "name", "type", "since", "until", "contact_address", "photo")
+ fields = (
+ "pk",
+ "name",
+ "type",
+ "description",
+ "since",
+ "until",
+ "contact_address",
+ "photo",
+ )
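A hedged usage sketch of the serializer after the patch, assuming a configured Django environment and an existing `MemberGroup` instance named `group` (for example in a shell or test); neither is part of the original record.

```python
# Hypothetical usage; assumes Django settings are configured and `group` exists.
from activemembers.api.v2.serializers.member_group import MemberGroupListSerializer

data = MemberGroupListSerializer(group).data
assert "description" in data  # the field the patch adds to both serializers
```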
|
{"golden_diff": "diff --git a/website/activemembers/api/v2/serializers/member_group.py b/website/activemembers/api/v2/serializers/member_group.py\n--- a/website/activemembers/api/v2/serializers/member_group.py\n+++ b/website/activemembers/api/v2/serializers/member_group.py\n@@ -24,6 +24,7 @@\n \"pk\",\n \"name\",\n \"type\",\n+ \"description\",\n \"since\",\n \"until\",\n \"contact_address\",\n@@ -58,4 +59,13 @@\n \"\"\"Meta class for the serializer.\"\"\"\n \n model = MemberGroup\n- fields = (\"pk\", \"name\", \"type\", \"since\", \"until\", \"contact_address\", \"photo\")\n+ fields = (\n+ \"pk\",\n+ \"name\",\n+ \"type\",\n+ \"description\",\n+ \"since\",\n+ \"until\",\n+ \"contact_address\",\n+ \"photo\",\n+ )\n", "issue": "Groups miss description in API v2\n### Describe the bug\r\nThe `description` field is missing in the `activemembers/groups` serializers. \r\n\r\n### How to reproduce\r\nLook at swagger.\r\n\r\n### Expected behaviour\r\nAn extra field for the description, at least in the detail endpoints.\r\n\r\n### Additional context\r\nAlso something to double-check: groups have a field `display_members`. This is not included in the serializers, so are the members not shown if that field is true?\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group_membership import (\n MemberGroupMembershipSerializer,\n)\nfrom activemembers.models import MemberGroup\nfrom thaliawebsite.api.v2.serializers import ThumbnailSerializer\n\n\nclass MemberGroupSerializer(serializers.ModelSerializer):\n \"\"\"API serializer for member groups.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if \"get_memberships\" not in self.context and \"members\" in self.fields:\n self.fields.pop(\"members\")\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\n \"pk\",\n \"name\",\n \"type\",\n \"since\",\n \"until\",\n \"contact_address\",\n \"photo\",\n \"members\",\n )\n\n members = serializers.SerializerMethodField(\"_members\")\n type = serializers.SerializerMethodField(\"_type\")\n photo = ThumbnailSerializer(placeholder=\"activemembers/images/placeholder.png\")\n\n def _members(self, instance):\n memberships = self.context[\"get_memberships\"](instance).prefetch_related(\n \"member__membergroupmembership_set\"\n )\n return MemberGroupMembershipSerializer(\n many=True, context=self.context\n ).to_representation(memberships)\n\n def _type(self, instance):\n if hasattr(instance, \"board\"):\n return \"board\"\n if hasattr(instance, \"committee\"):\n return \"committee\"\n if hasattr(instance, \"society\"):\n return \"society\"\n return None\n\n\nclass MemberGroupListSerializer(MemberGroupSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\"pk\", \"name\", \"type\", \"since\", \"until\", \"contact_address\", \"photo\")\n", "path": "website/activemembers/api/v2/serializers/member_group.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group_membership import (\n MemberGroupMembershipSerializer,\n)\nfrom activemembers.models import MemberGroup\nfrom thaliawebsite.api.v2.serializers import ThumbnailSerializer\n\n\nclass MemberGroupSerializer(serializers.ModelSerializer):\n \"\"\"API serializer for member groups.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if \"get_memberships\" not in 
self.context and \"members\" in self.fields:\n self.fields.pop(\"members\")\n\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\n \"pk\",\n \"name\",\n \"type\",\n \"description\",\n \"since\",\n \"until\",\n \"contact_address\",\n \"photo\",\n \"members\",\n )\n\n members = serializers.SerializerMethodField(\"_members\")\n type = serializers.SerializerMethodField(\"_type\")\n photo = ThumbnailSerializer(placeholder=\"activemembers/images/placeholder.png\")\n\n def _members(self, instance):\n memberships = self.context[\"get_memberships\"](instance).prefetch_related(\n \"member__membergroupmembership_set\"\n )\n return MemberGroupMembershipSerializer(\n many=True, context=self.context\n ).to_representation(memberships)\n\n def _type(self, instance):\n if hasattr(instance, \"board\"):\n return \"board\"\n if hasattr(instance, \"committee\"):\n return \"committee\"\n if hasattr(instance, \"society\"):\n return \"society\"\n return None\n\n\nclass MemberGroupListSerializer(MemberGroupSerializer):\n class Meta:\n \"\"\"Meta class for the serializer.\"\"\"\n\n model = MemberGroup\n fields = (\n \"pk\",\n \"name\",\n \"type\",\n \"description\",\n \"since\",\n \"until\",\n \"contact_address\",\n \"photo\",\n )\n", "path": "website/activemembers/api/v2/serializers/member_group.py"}]}
| 864 | 217 |
gh_patches_debug_33803
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2984
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow for empty CSS variable definitions
Consider the following CSS snippets:
1.
```sass
$x:
* {
background: red;
}
```
2.
```sass
$x:
* {
background: $x;
}
```
3.
```sass
* {
background: red;
}
$x:
```
All three should work with the following behaviour:
- an empty variable definition is allowed; and
- setting a style to an empty variable should unset that style.
For example, 2. should unset the background of all widgets.
As of now, 2. and 3. can't be parsed.
2. raises an error regarding an undefined variable and 3. raises an `AttributeError` inside `substitute_references` in `parse.py`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/css/parse.py`
Content:
```
1 from __future__ import annotations
2
3 from functools import lru_cache
4 from pathlib import PurePath
5 from typing import Iterable, Iterator, NoReturn
6
7 from ..suggestions import get_suggestion
8 from ._help_renderables import HelpText
9 from ._styles_builder import DeclarationError, StylesBuilder
10 from .errors import UnresolvedVariableError
11 from .model import (
12 CombinatorType,
13 Declaration,
14 RuleSet,
15 Selector,
16 SelectorSet,
17 SelectorType,
18 )
19 from .styles import Styles
20 from .tokenize import Token, tokenize, tokenize_declarations, tokenize_values
21 from .tokenizer import EOFError, ReferencedBy
22 from .types import Specificity3
23
24 SELECTOR_MAP: dict[str, tuple[SelectorType, Specificity3]] = {
25 "selector": (SelectorType.TYPE, (0, 0, 1)),
26 "selector_start": (SelectorType.TYPE, (0, 0, 1)),
27 "selector_class": (SelectorType.CLASS, (0, 1, 0)),
28 "selector_start_class": (SelectorType.CLASS, (0, 1, 0)),
29 "selector_id": (SelectorType.ID, (1, 0, 0)),
30 "selector_start_id": (SelectorType.ID, (1, 0, 0)),
31 "selector_universal": (SelectorType.UNIVERSAL, (0, 0, 0)),
32 "selector_start_universal": (SelectorType.UNIVERSAL, (0, 0, 0)),
33 }
34
35
36 @lru_cache(maxsize=1024)
37 def parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:
38 if not css_selectors.strip():
39 return ()
40
41 tokens = iter(tokenize(css_selectors, ""))
42
43 get_selector = SELECTOR_MAP.get
44 combinator: CombinatorType | None = CombinatorType.DESCENDENT
45 selectors: list[Selector] = []
46 rule_selectors: list[list[Selector]] = []
47
48 while True:
49 try:
50 token = next(tokens)
51 except EOFError:
52 break
53 token_name = token.name
54 if token_name == "pseudo_class":
55 selectors[-1]._add_pseudo_class(token.value.lstrip(":"))
56 elif token_name == "whitespace":
57 if combinator is None or combinator == CombinatorType.SAME:
58 combinator = CombinatorType.DESCENDENT
59 elif token_name == "new_selector":
60 rule_selectors.append(selectors[:])
61 selectors.clear()
62 combinator = None
63 elif token_name == "declaration_set_start":
64 break
65 elif token_name == "combinator_child":
66 combinator = CombinatorType.CHILD
67 else:
68 _selector, specificity = get_selector(
69 token_name, (SelectorType.TYPE, (0, 0, 0))
70 )
71 selectors.append(
72 Selector(
73 name=token.value.lstrip(".#"),
74 combinator=combinator or CombinatorType.DESCENDENT,
75 type=_selector,
76 specificity=specificity,
77 )
78 )
79 combinator = CombinatorType.SAME
80 if selectors:
81 rule_selectors.append(selectors[:])
82
83 selector_set = tuple(SelectorSet.from_selectors(rule_selectors))
84 return selector_set
85
86
87 def parse_rule_set(
88 tokens: Iterator[Token],
89 token: Token,
90 is_default_rules: bool = False,
91 tie_breaker: int = 0,
92 ) -> Iterable[RuleSet]:
93 get_selector = SELECTOR_MAP.get
94 combinator: CombinatorType | None = CombinatorType.DESCENDENT
95 selectors: list[Selector] = []
96 rule_selectors: list[list[Selector]] = []
97 styles_builder = StylesBuilder()
98
99 while True:
100 if token.name == "pseudo_class":
101 selectors[-1]._add_pseudo_class(token.value.lstrip(":"))
102 elif token.name == "whitespace":
103 if combinator is None or combinator == CombinatorType.SAME:
104 combinator = CombinatorType.DESCENDENT
105 elif token.name == "new_selector":
106 rule_selectors.append(selectors[:])
107 selectors.clear()
108 combinator = None
109 elif token.name == "declaration_set_start":
110 break
111 elif token.name == "combinator_child":
112 combinator = CombinatorType.CHILD
113 else:
114 _selector, specificity = get_selector(
115 token.name, (SelectorType.TYPE, (0, 0, 0))
116 )
117 selectors.append(
118 Selector(
119 name=token.value.lstrip(".#"),
120 combinator=combinator or CombinatorType.DESCENDENT,
121 type=_selector,
122 specificity=specificity,
123 )
124 )
125 combinator = CombinatorType.SAME
126
127 token = next(tokens)
128
129 if selectors:
130 rule_selectors.append(selectors[:])
131
132 declaration = Declaration(token, "")
133
134 errors: list[tuple[Token, str | HelpText]] = []
135
136 while True:
137 token = next(tokens)
138 token_name = token.name
139 if token_name in ("whitespace", "declaration_end"):
140 continue
141 if token_name == "declaration_name":
142 if declaration.tokens:
143 try:
144 styles_builder.add_declaration(declaration)
145 except DeclarationError as error:
146 errors.append((error.token, error.message))
147 declaration = Declaration(token, "")
148 declaration.name = token.value.rstrip(":")
149 elif token_name == "declaration_set_end":
150 break
151 else:
152 declaration.tokens.append(token)
153
154 if declaration.tokens:
155 try:
156 styles_builder.add_declaration(declaration)
157 except DeclarationError as error:
158 errors.append((error.token, error.message))
159
160 rule_set = RuleSet(
161 list(SelectorSet.from_selectors(rule_selectors)),
162 styles_builder.styles,
163 errors,
164 is_default_rules=is_default_rules,
165 tie_breaker=tie_breaker,
166 )
167 rule_set._post_parse()
168 yield rule_set
169
170
171 def parse_declarations(css: str, path: str) -> Styles:
172 """Parse declarations and return a Styles object.
173
174 Args:
175 css: String containing CSS.
176 path: Path to the CSS, or something else to identify the location.
177
178 Returns:
179 A styles object.
180 """
181
182 tokens = iter(tokenize_declarations(css, path))
183 styles_builder = StylesBuilder()
184
185 declaration: Declaration | None = None
186 errors: list[tuple[Token, str | HelpText]] = []
187
188 while True:
189 token = next(tokens, None)
190 if token is None:
191 break
192 token_name = token.name
193 if token_name in ("whitespace", "declaration_end", "eof"):
194 continue
195 if token_name == "declaration_name":
196 if declaration and declaration.tokens:
197 try:
198 styles_builder.add_declaration(declaration)
199 except DeclarationError as error:
200 errors.append((error.token, error.message))
201 raise
202 declaration = Declaration(token, "")
203 declaration.name = token.value.rstrip(":")
204 elif token_name == "declaration_set_end":
205 break
206 else:
207 if declaration:
208 declaration.tokens.append(token)
209
210 if declaration and declaration.tokens:
211 try:
212 styles_builder.add_declaration(declaration)
213 except DeclarationError as error:
214 errors.append((error.token, error.message))
215 raise
216
217 return styles_builder.styles
218
219
220 def _unresolved(variable_name: str, variables: Iterable[str], token: Token) -> NoReturn:
221 """Raise a TokenError regarding an unresolved variable.
222
223 Args:
224 variable_name: A variable name.
225 variables: Possible choices used to generate suggestion.
226 token: The Token.
227
228 Raises:
229 UnresolvedVariableError: Always raises a TokenError.
230 """
231 message = f"reference to undefined variable '${variable_name}'"
232 suggested_variable = get_suggestion(variable_name, list(variables))
233 if suggested_variable:
234 message += f"; did you mean '${suggested_variable}'?"
235
236 raise UnresolvedVariableError(
237 token.path,
238 token.code,
239 token.start,
240 message,
241 end=token.end,
242 )
243
244
245 def substitute_references(
246 tokens: Iterable[Token], css_variables: dict[str, list[Token]] | None = None
247 ) -> Iterable[Token]:
248 """Replace variable references with values by substituting variable reference
249 tokens with the tokens representing their values.
250
251 Args:
252 tokens: Iterator of Tokens which may contain tokens
253 with the name "variable_ref".
254
255 Returns:
256 Yields Tokens such that any variable references (tokens where
257 token.name == "variable_ref") have been replaced with the tokens representing
258 the value. In other words, an Iterable of Tokens similar to the original input,
259 but with variables resolved. Substituted tokens will have their referenced_by
260 attribute populated with information about where the tokens are being substituted to.
261 """
262 variables: dict[str, list[Token]] = css_variables.copy() if css_variables else {}
263
264 iter_tokens = iter(tokens)
265
266 while True:
267 token = next(iter_tokens, None)
268 if token is None:
269 break
270 if token.name == "variable_name":
271 variable_name = token.value[1:-1] # Trim the $ and the :, i.e. "$x:" -> "x"
272 yield token
273
274 while True:
275 token = next(iter_tokens, None)
276 if token is not None and token.name == "whitespace":
277 yield token
278 else:
279 break
280
281 # Store the tokens for any variable definitions, and substitute
282 # any variable references we encounter with them.
283 while True:
284 if not token:
285 break
286 elif token.name == "whitespace":
287 variables.setdefault(variable_name, []).append(token)
288 yield token
289 elif token.name == "variable_value_end":
290 yield token
291 break
292 # For variables referring to other variables
293 elif token.name == "variable_ref":
294 ref_name = token.value[1:]
295 if ref_name in variables:
296 variable_tokens = variables.setdefault(variable_name, [])
297 reference_tokens = variables[ref_name]
298 variable_tokens.extend(reference_tokens)
299 ref_location = token.location
300 ref_length = len(token.value)
301 for _token in reference_tokens:
302 yield _token.with_reference(
303 ReferencedBy(
304 ref_name, ref_location, ref_length, token.code
305 )
306 )
307 else:
308 _unresolved(ref_name, variables.keys(), token)
309 else:
310 variables.setdefault(variable_name, []).append(token)
311 yield token
312 token = next(iter_tokens, None)
313 elif token.name == "variable_ref":
314 variable_name = token.value[1:] # Trim the $, so $x -> x
315 if variable_name in variables:
316 variable_tokens = variables[variable_name]
317 ref_location = token.location
318 ref_length = len(token.value)
319 ref_code = token.code
320 for _token in variable_tokens:
321 yield _token.with_reference(
322 ReferencedBy(variable_name, ref_location, ref_length, ref_code)
323 )
324 else:
325 _unresolved(variable_name, variables.keys(), token)
326 else:
327 yield token
328
329
330 def parse(
331 css: str,
332 path: str | PurePath,
333 variables: dict[str, str] | None = None,
334 variable_tokens: dict[str, list[Token]] | None = None,
335 is_default_rules: bool = False,
336 tie_breaker: int = 0,
337 ) -> Iterable[RuleSet]:
338 """Parse CSS by tokenizing it, performing variable substitution,
339 and generating rule sets from it.
340
341 Args:
342 css: The input CSS
343 path: Path to the CSS
344 variables: Substitution variables to substitute tokens for.
345 is_default_rules: True if the rules we're extracting are
346 default (i.e. in Widget.DEFAULT_CSS) rules. False if they're from user defined CSS.
347 """
348
349 reference_tokens = tokenize_values(variables) if variables is not None else {}
350 if variable_tokens:
351 reference_tokens.update(variable_tokens)
352
353 tokens = iter(substitute_references(tokenize(css, path), variable_tokens))
354 while True:
355 token = next(tokens, None)
356 if token is None:
357 break
358 if token.name.startswith("selector_start"):
359 yield from parse_rule_set(
360 tokens,
361 token,
362 is_default_rules=is_default_rules,
363 tie_breaker=tie_breaker,
364 )
365
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/css/parse.py b/src/textual/css/parse.py
--- a/src/textual/css/parse.py
+++ b/src/textual/css/parse.py
@@ -269,6 +269,7 @@
break
if token.name == "variable_name":
variable_name = token.value[1:-1] # Trim the $ and the :, i.e. "$x:" -> "x"
+ variable_tokens = variables.setdefault(variable_name, [])
yield token
while True:
@@ -284,7 +285,7 @@
if not token:
break
elif token.name == "whitespace":
- variables.setdefault(variable_name, []).append(token)
+ variable_tokens.append(token)
yield token
elif token.name == "variable_value_end":
yield token
@@ -293,7 +294,6 @@
elif token.name == "variable_ref":
ref_name = token.value[1:]
if ref_name in variables:
- variable_tokens = variables.setdefault(variable_name, [])
reference_tokens = variables[ref_name]
variable_tokens.extend(reference_tokens)
ref_location = token.location
@@ -307,7 +307,7 @@
else:
_unresolved(ref_name, variables.keys(), token)
else:
- variables.setdefault(variable_name, []).append(token)
+ variable_tokens.append(token)
yield token
token = next(iter_tokens, None)
elif token.name == "variable_ref":
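A small sketch of the behaviour the issue asks for, using the `parse` signature shown in the file above; the file name is illustrative and the exact rule-set contents depend on the rest of the stylesheet machinery.

```python
# Hypothetical check; assumes the patched textual.css.parse module.
from textual.css.parse import parse

css = "$x:\n* {\n    background: $x;\n}\n"
# With the patch, the empty `$x:` definition is registered up front, so the
# reference resolves to no tokens and `background` is simply left unset.
rule_sets = list(parse(css, "example.tcss"))
print(rule_sets)
```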
|
{"golden_diff": "diff --git a/src/textual/css/parse.py b/src/textual/css/parse.py\n--- a/src/textual/css/parse.py\n+++ b/src/textual/css/parse.py\n@@ -269,6 +269,7 @@\n break\n if token.name == \"variable_name\":\n variable_name = token.value[1:-1] # Trim the $ and the :, i.e. \"$x:\" -> \"x\"\n+ variable_tokens = variables.setdefault(variable_name, [])\n yield token\n \n while True:\n@@ -284,7 +285,7 @@\n if not token:\n break\n elif token.name == \"whitespace\":\n- variables.setdefault(variable_name, []).append(token)\n+ variable_tokens.append(token)\n yield token\n elif token.name == \"variable_value_end\":\n yield token\n@@ -293,7 +294,6 @@\n elif token.name == \"variable_ref\":\n ref_name = token.value[1:]\n if ref_name in variables:\n- variable_tokens = variables.setdefault(variable_name, [])\n reference_tokens = variables[ref_name]\n variable_tokens.extend(reference_tokens)\n ref_location = token.location\n@@ -307,7 +307,7 @@\n else:\n _unresolved(ref_name, variables.keys(), token)\n else:\n- variables.setdefault(variable_name, []).append(token)\n+ variable_tokens.append(token)\n yield token\n token = next(iter_tokens, None)\n elif token.name == \"variable_ref\":\n", "issue": "Allow for empty CSS variable definitions\nConsider the following CSS snippets:\r\n\r\n 1.\r\n```sass\r\n$x: \r\n* {\r\n background: red;\r\n}\r\n```\r\n\r\n2.\r\n```sass\r\n$x:\r\n* {\r\n background: $x;\r\n}\r\n```\r\n\r\n3.\r\n```sass\r\n* {\r\n background: red;\r\n}\r\n$x:\r\n```\r\n\r\nAll three should work with the following behaviour:\r\n - an empty variable definition is allowed; and\r\n - setting a style to an empty variable should unset that style.\r\n\r\nFor example, 2. should unset the background of all widgets.\r\n\r\nAs of now, 2. and 3. can't be parsed.\r\n2. raises an error regarding an undefined variable and 3. 
raises an `AttributeError` inside `substitute_references` in `parse.py`.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import lru_cache\nfrom pathlib import PurePath\nfrom typing import Iterable, Iterator, NoReturn\n\nfrom ..suggestions import get_suggestion\nfrom ._help_renderables import HelpText\nfrom ._styles_builder import DeclarationError, StylesBuilder\nfrom .errors import UnresolvedVariableError\nfrom .model import (\n CombinatorType,\n Declaration,\n RuleSet,\n Selector,\n SelectorSet,\n SelectorType,\n)\nfrom .styles import Styles\nfrom .tokenize import Token, tokenize, tokenize_declarations, tokenize_values\nfrom .tokenizer import EOFError, ReferencedBy\nfrom .types import Specificity3\n\nSELECTOR_MAP: dict[str, tuple[SelectorType, Specificity3]] = {\n \"selector\": (SelectorType.TYPE, (0, 0, 1)),\n \"selector_start\": (SelectorType.TYPE, (0, 0, 1)),\n \"selector_class\": (SelectorType.CLASS, (0, 1, 0)),\n \"selector_start_class\": (SelectorType.CLASS, (0, 1, 0)),\n \"selector_id\": (SelectorType.ID, (1, 0, 0)),\n \"selector_start_id\": (SelectorType.ID, (1, 0, 0)),\n \"selector_universal\": (SelectorType.UNIVERSAL, (0, 0, 0)),\n \"selector_start_universal\": (SelectorType.UNIVERSAL, (0, 0, 0)),\n}\n\n\n@lru_cache(maxsize=1024)\ndef parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:\n if not css_selectors.strip():\n return ()\n\n tokens = iter(tokenize(css_selectors, \"\"))\n\n get_selector = SELECTOR_MAP.get\n combinator: CombinatorType | None = CombinatorType.DESCENDENT\n selectors: list[Selector] = []\n rule_selectors: list[list[Selector]] = []\n\n while True:\n try:\n token = next(tokens)\n except EOFError:\n break\n token_name = token.name\n if token_name == \"pseudo_class\":\n selectors[-1]._add_pseudo_class(token.value.lstrip(\":\"))\n elif token_name == \"whitespace\":\n if combinator is None or combinator == CombinatorType.SAME:\n combinator = CombinatorType.DESCENDENT\n elif token_name == \"new_selector\":\n rule_selectors.append(selectors[:])\n selectors.clear()\n combinator = None\n elif token_name == \"declaration_set_start\":\n break\n elif token_name == \"combinator_child\":\n combinator = CombinatorType.CHILD\n else:\n _selector, specificity = get_selector(\n token_name, (SelectorType.TYPE, (0, 0, 0))\n )\n selectors.append(\n Selector(\n name=token.value.lstrip(\".#\"),\n combinator=combinator or CombinatorType.DESCENDENT,\n type=_selector,\n specificity=specificity,\n )\n )\n combinator = CombinatorType.SAME\n if selectors:\n rule_selectors.append(selectors[:])\n\n selector_set = tuple(SelectorSet.from_selectors(rule_selectors))\n return selector_set\n\n\ndef parse_rule_set(\n tokens: Iterator[Token],\n token: Token,\n is_default_rules: bool = False,\n tie_breaker: int = 0,\n) -> Iterable[RuleSet]:\n get_selector = SELECTOR_MAP.get\n combinator: CombinatorType | None = CombinatorType.DESCENDENT\n selectors: list[Selector] = []\n rule_selectors: list[list[Selector]] = []\n styles_builder = StylesBuilder()\n\n while True:\n if token.name == \"pseudo_class\":\n selectors[-1]._add_pseudo_class(token.value.lstrip(\":\"))\n elif token.name == \"whitespace\":\n if combinator is None or combinator == CombinatorType.SAME:\n combinator = CombinatorType.DESCENDENT\n elif token.name == \"new_selector\":\n rule_selectors.append(selectors[:])\n selectors.clear()\n combinator = None\n elif token.name == \"declaration_set_start\":\n break\n elif token.name == \"combinator_child\":\n combinator = CombinatorType.CHILD\n 
else:\n _selector, specificity = get_selector(\n token.name, (SelectorType.TYPE, (0, 0, 0))\n )\n selectors.append(\n Selector(\n name=token.value.lstrip(\".#\"),\n combinator=combinator or CombinatorType.DESCENDENT,\n type=_selector,\n specificity=specificity,\n )\n )\n combinator = CombinatorType.SAME\n\n token = next(tokens)\n\n if selectors:\n rule_selectors.append(selectors[:])\n\n declaration = Declaration(token, \"\")\n\n errors: list[tuple[Token, str | HelpText]] = []\n\n while True:\n token = next(tokens)\n token_name = token.name\n if token_name in (\"whitespace\", \"declaration_end\"):\n continue\n if token_name == \"declaration_name\":\n if declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n declaration = Declaration(token, \"\")\n declaration.name = token.value.rstrip(\":\")\n elif token_name == \"declaration_set_end\":\n break\n else:\n declaration.tokens.append(token)\n\n if declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n\n rule_set = RuleSet(\n list(SelectorSet.from_selectors(rule_selectors)),\n styles_builder.styles,\n errors,\n is_default_rules=is_default_rules,\n tie_breaker=tie_breaker,\n )\n rule_set._post_parse()\n yield rule_set\n\n\ndef parse_declarations(css: str, path: str) -> Styles:\n \"\"\"Parse declarations and return a Styles object.\n\n Args:\n css: String containing CSS.\n path: Path to the CSS, or something else to identify the location.\n\n Returns:\n A styles object.\n \"\"\"\n\n tokens = iter(tokenize_declarations(css, path))\n styles_builder = StylesBuilder()\n\n declaration: Declaration | None = None\n errors: list[tuple[Token, str | HelpText]] = []\n\n while True:\n token = next(tokens, None)\n if token is None:\n break\n token_name = token.name\n if token_name in (\"whitespace\", \"declaration_end\", \"eof\"):\n continue\n if token_name == \"declaration_name\":\n if declaration and declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n raise\n declaration = Declaration(token, \"\")\n declaration.name = token.value.rstrip(\":\")\n elif token_name == \"declaration_set_end\":\n break\n else:\n if declaration:\n declaration.tokens.append(token)\n\n if declaration and declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n raise\n\n return styles_builder.styles\n\n\ndef _unresolved(variable_name: str, variables: Iterable[str], token: Token) -> NoReturn:\n \"\"\"Raise a TokenError regarding an unresolved variable.\n\n Args:\n variable_name: A variable name.\n variables: Possible choices used to generate suggestion.\n token: The Token.\n\n Raises:\n UnresolvedVariableError: Always raises a TokenError.\n \"\"\"\n message = f\"reference to undefined variable '${variable_name}'\"\n suggested_variable = get_suggestion(variable_name, list(variables))\n if suggested_variable:\n message += f\"; did you mean '${suggested_variable}'?\"\n\n raise UnresolvedVariableError(\n token.path,\n token.code,\n token.start,\n message,\n end=token.end,\n )\n\n\ndef substitute_references(\n tokens: Iterable[Token], css_variables: dict[str, list[Token]] | None = None\n) -> Iterable[Token]:\n \"\"\"Replace variable references with values by substituting variable 
reference\n tokens with the tokens representing their values.\n\n Args:\n tokens: Iterator of Tokens which may contain tokens\n with the name \"variable_ref\".\n\n Returns:\n Yields Tokens such that any variable references (tokens where\n token.name == \"variable_ref\") have been replaced with the tokens representing\n the value. In other words, an Iterable of Tokens similar to the original input,\n but with variables resolved. Substituted tokens will have their referenced_by\n attribute populated with information about where the tokens are being substituted to.\n \"\"\"\n variables: dict[str, list[Token]] = css_variables.copy() if css_variables else {}\n\n iter_tokens = iter(tokens)\n\n while True:\n token = next(iter_tokens, None)\n if token is None:\n break\n if token.name == \"variable_name\":\n variable_name = token.value[1:-1] # Trim the $ and the :, i.e. \"$x:\" -> \"x\"\n yield token\n\n while True:\n token = next(iter_tokens, None)\n if token is not None and token.name == \"whitespace\":\n yield token\n else:\n break\n\n # Store the tokens for any variable definitions, and substitute\n # any variable references we encounter with them.\n while True:\n if not token:\n break\n elif token.name == \"whitespace\":\n variables.setdefault(variable_name, []).append(token)\n yield token\n elif token.name == \"variable_value_end\":\n yield token\n break\n # For variables referring to other variables\n elif token.name == \"variable_ref\":\n ref_name = token.value[1:]\n if ref_name in variables:\n variable_tokens = variables.setdefault(variable_name, [])\n reference_tokens = variables[ref_name]\n variable_tokens.extend(reference_tokens)\n ref_location = token.location\n ref_length = len(token.value)\n for _token in reference_tokens:\n yield _token.with_reference(\n ReferencedBy(\n ref_name, ref_location, ref_length, token.code\n )\n )\n else:\n _unresolved(ref_name, variables.keys(), token)\n else:\n variables.setdefault(variable_name, []).append(token)\n yield token\n token = next(iter_tokens, None)\n elif token.name == \"variable_ref\":\n variable_name = token.value[1:] # Trim the $, so $x -> x\n if variable_name in variables:\n variable_tokens = variables[variable_name]\n ref_location = token.location\n ref_length = len(token.value)\n ref_code = token.code\n for _token in variable_tokens:\n yield _token.with_reference(\n ReferencedBy(variable_name, ref_location, ref_length, ref_code)\n )\n else:\n _unresolved(variable_name, variables.keys(), token)\n else:\n yield token\n\n\ndef parse(\n css: str,\n path: str | PurePath,\n variables: dict[str, str] | None = None,\n variable_tokens: dict[str, list[Token]] | None = None,\n is_default_rules: bool = False,\n tie_breaker: int = 0,\n) -> Iterable[RuleSet]:\n \"\"\"Parse CSS by tokenizing it, performing variable substitution,\n and generating rule sets from it.\n\n Args:\n css: The input CSS\n path: Path to the CSS\n variables: Substitution variables to substitute tokens for.\n is_default_rules: True if the rules we're extracting are\n default (i.e. in Widget.DEFAULT_CSS) rules. 
False if they're from user defined CSS.\n \"\"\"\n\n reference_tokens = tokenize_values(variables) if variables is not None else {}\n if variable_tokens:\n reference_tokens.update(variable_tokens)\n\n tokens = iter(substitute_references(tokenize(css, path), variable_tokens))\n while True:\n token = next(tokens, None)\n if token is None:\n break\n if token.name.startswith(\"selector_start\"):\n yield from parse_rule_set(\n tokens,\n token,\n is_default_rules=is_default_rules,\n tie_breaker=tie_breaker,\n )\n", "path": "src/textual/css/parse.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom functools import lru_cache\nfrom pathlib import PurePath\nfrom typing import Iterable, Iterator, NoReturn\n\nfrom ..suggestions import get_suggestion\nfrom ._help_renderables import HelpText\nfrom ._styles_builder import DeclarationError, StylesBuilder\nfrom .errors import UnresolvedVariableError\nfrom .model import (\n CombinatorType,\n Declaration,\n RuleSet,\n Selector,\n SelectorSet,\n SelectorType,\n)\nfrom .styles import Styles\nfrom .tokenize import Token, tokenize, tokenize_declarations, tokenize_values\nfrom .tokenizer import EOFError, ReferencedBy\nfrom .types import Specificity3\n\nSELECTOR_MAP: dict[str, tuple[SelectorType, Specificity3]] = {\n \"selector\": (SelectorType.TYPE, (0, 0, 1)),\n \"selector_start\": (SelectorType.TYPE, (0, 0, 1)),\n \"selector_class\": (SelectorType.CLASS, (0, 1, 0)),\n \"selector_start_class\": (SelectorType.CLASS, (0, 1, 0)),\n \"selector_id\": (SelectorType.ID, (1, 0, 0)),\n \"selector_start_id\": (SelectorType.ID, (1, 0, 0)),\n \"selector_universal\": (SelectorType.UNIVERSAL, (0, 0, 0)),\n \"selector_start_universal\": (SelectorType.UNIVERSAL, (0, 0, 0)),\n}\n\n\n@lru_cache(maxsize=1024)\ndef parse_selectors(css_selectors: str) -> tuple[SelectorSet, ...]:\n if not css_selectors.strip():\n return ()\n\n tokens = iter(tokenize(css_selectors, \"\"))\n\n get_selector = SELECTOR_MAP.get\n combinator: CombinatorType | None = CombinatorType.DESCENDENT\n selectors: list[Selector] = []\n rule_selectors: list[list[Selector]] = []\n\n while True:\n try:\n token = next(tokens)\n except EOFError:\n break\n token_name = token.name\n if token_name == \"pseudo_class\":\n selectors[-1]._add_pseudo_class(token.value.lstrip(\":\"))\n elif token_name == \"whitespace\":\n if combinator is None or combinator == CombinatorType.SAME:\n combinator = CombinatorType.DESCENDENT\n elif token_name == \"new_selector\":\n rule_selectors.append(selectors[:])\n selectors.clear()\n combinator = None\n elif token_name == \"declaration_set_start\":\n break\n elif token_name == \"combinator_child\":\n combinator = CombinatorType.CHILD\n else:\n _selector, specificity = get_selector(\n token_name, (SelectorType.TYPE, (0, 0, 0))\n )\n selectors.append(\n Selector(\n name=token.value.lstrip(\".#\"),\n combinator=combinator or CombinatorType.DESCENDENT,\n type=_selector,\n specificity=specificity,\n )\n )\n combinator = CombinatorType.SAME\n if selectors:\n rule_selectors.append(selectors[:])\n\n selector_set = tuple(SelectorSet.from_selectors(rule_selectors))\n return selector_set\n\n\ndef parse_rule_set(\n tokens: Iterator[Token],\n token: Token,\n is_default_rules: bool = False,\n tie_breaker: int = 0,\n) -> Iterable[RuleSet]:\n get_selector = SELECTOR_MAP.get\n combinator: CombinatorType | None = CombinatorType.DESCENDENT\n selectors: list[Selector] = []\n rule_selectors: list[list[Selector]] = []\n styles_builder = StylesBuilder()\n\n while True:\n if token.name == 
\"pseudo_class\":\n selectors[-1]._add_pseudo_class(token.value.lstrip(\":\"))\n elif token.name == \"whitespace\":\n if combinator is None or combinator == CombinatorType.SAME:\n combinator = CombinatorType.DESCENDENT\n elif token.name == \"new_selector\":\n rule_selectors.append(selectors[:])\n selectors.clear()\n combinator = None\n elif token.name == \"declaration_set_start\":\n break\n elif token.name == \"combinator_child\":\n combinator = CombinatorType.CHILD\n else:\n _selector, specificity = get_selector(\n token.name, (SelectorType.TYPE, (0, 0, 0))\n )\n selectors.append(\n Selector(\n name=token.value.lstrip(\".#\"),\n combinator=combinator or CombinatorType.DESCENDENT,\n type=_selector,\n specificity=specificity,\n )\n )\n combinator = CombinatorType.SAME\n\n token = next(tokens)\n\n if selectors:\n rule_selectors.append(selectors[:])\n\n declaration = Declaration(token, \"\")\n\n errors: list[tuple[Token, str | HelpText]] = []\n\n while True:\n token = next(tokens)\n token_name = token.name\n if token_name in (\"whitespace\", \"declaration_end\"):\n continue\n if token_name == \"declaration_name\":\n if declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n declaration = Declaration(token, \"\")\n declaration.name = token.value.rstrip(\":\")\n elif token_name == \"declaration_set_end\":\n break\n else:\n declaration.tokens.append(token)\n\n if declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n\n rule_set = RuleSet(\n list(SelectorSet.from_selectors(rule_selectors)),\n styles_builder.styles,\n errors,\n is_default_rules=is_default_rules,\n tie_breaker=tie_breaker,\n )\n rule_set._post_parse()\n yield rule_set\n\n\ndef parse_declarations(css: str, path: str) -> Styles:\n \"\"\"Parse declarations and return a Styles object.\n\n Args:\n css: String containing CSS.\n path: Path to the CSS, or something else to identify the location.\n\n Returns:\n A styles object.\n \"\"\"\n\n tokens = iter(tokenize_declarations(css, path))\n styles_builder = StylesBuilder()\n\n declaration: Declaration | None = None\n errors: list[tuple[Token, str | HelpText]] = []\n\n while True:\n token = next(tokens, None)\n if token is None:\n break\n token_name = token.name\n if token_name in (\"whitespace\", \"declaration_end\", \"eof\"):\n continue\n if token_name == \"declaration_name\":\n if declaration and declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n raise\n declaration = Declaration(token, \"\")\n declaration.name = token.value.rstrip(\":\")\n elif token_name == \"declaration_set_end\":\n break\n else:\n if declaration:\n declaration.tokens.append(token)\n\n if declaration and declaration.tokens:\n try:\n styles_builder.add_declaration(declaration)\n except DeclarationError as error:\n errors.append((error.token, error.message))\n raise\n\n return styles_builder.styles\n\n\ndef _unresolved(variable_name: str, variables: Iterable[str], token: Token) -> NoReturn:\n \"\"\"Raise a TokenError regarding an unresolved variable.\n\n Args:\n variable_name: A variable name.\n variables: Possible choices used to generate suggestion.\n token: The Token.\n\n Raises:\n UnresolvedVariableError: Always raises a TokenError.\n \"\"\"\n message = f\"reference to undefined variable '${variable_name}'\"\n 
suggested_variable = get_suggestion(variable_name, list(variables))\n if suggested_variable:\n message += f\"; did you mean '${suggested_variable}'?\"\n\n raise UnresolvedVariableError(\n token.path,\n token.code,\n token.start,\n message,\n end=token.end,\n )\n\n\ndef substitute_references(\n tokens: Iterable[Token], css_variables: dict[str, list[Token]] | None = None\n) -> Iterable[Token]:\n \"\"\"Replace variable references with values by substituting variable reference\n tokens with the tokens representing their values.\n\n Args:\n tokens: Iterator of Tokens which may contain tokens\n with the name \"variable_ref\".\n\n Returns:\n Yields Tokens such that any variable references (tokens where\n token.name == \"variable_ref\") have been replaced with the tokens representing\n the value. In other words, an Iterable of Tokens similar to the original input,\n but with variables resolved. Substituted tokens will have their referenced_by\n attribute populated with information about where the tokens are being substituted to.\n \"\"\"\n variables: dict[str, list[Token]] = css_variables.copy() if css_variables else {}\n\n iter_tokens = iter(tokens)\n\n while True:\n token = next(iter_tokens, None)\n if token is None:\n break\n if token.name == \"variable_name\":\n variable_name = token.value[1:-1] # Trim the $ and the :, i.e. \"$x:\" -> \"x\"\n variable_tokens = variables.setdefault(variable_name, [])\n yield token\n\n while True:\n token = next(iter_tokens, None)\n if token is not None and token.name == \"whitespace\":\n yield token\n else:\n break\n\n # Store the tokens for any variable definitions, and substitute\n # any variable references we encounter with them.\n while True:\n if not token:\n break\n elif token.name == \"whitespace\":\n variable_tokens.append(token)\n yield token\n elif token.name == \"variable_value_end\":\n yield token\n break\n # For variables referring to other variables\n elif token.name == \"variable_ref\":\n ref_name = token.value[1:]\n if ref_name in variables:\n reference_tokens = variables[ref_name]\n variable_tokens.extend(reference_tokens)\n ref_location = token.location\n ref_length = len(token.value)\n for _token in reference_tokens:\n yield _token.with_reference(\n ReferencedBy(\n ref_name, ref_location, ref_length, token.code\n )\n )\n else:\n _unresolved(ref_name, variables.keys(), token)\n else:\n variable_tokens.append(token)\n yield token\n token = next(iter_tokens, None)\n elif token.name == \"variable_ref\":\n variable_name = token.value[1:] # Trim the $, so $x -> x\n if variable_name in variables:\n variable_tokens = variables[variable_name]\n ref_location = token.location\n ref_length = len(token.value)\n ref_code = token.code\n for _token in variable_tokens:\n yield _token.with_reference(\n ReferencedBy(variable_name, ref_location, ref_length, ref_code)\n )\n else:\n _unresolved(variable_name, variables.keys(), token)\n else:\n yield token\n\n\ndef parse(\n css: str,\n path: str | PurePath,\n variables: dict[str, str] | None = None,\n variable_tokens: dict[str, list[Token]] | None = None,\n is_default_rules: bool = False,\n tie_breaker: int = 0,\n) -> Iterable[RuleSet]:\n \"\"\"Parse CSS by tokenizing it, performing variable substitution,\n and generating rule sets from it.\n\n Args:\n css: The input CSS\n path: Path to the CSS\n variables: Substitution variables to substitute tokens for.\n is_default_rules: True if the rules we're extracting are\n default (i.e. in Widget.DEFAULT_CSS) rules. 
False if they're from user defined CSS.\n \"\"\"\n\n reference_tokens = tokenize_values(variables) if variables is not None else {}\n if variable_tokens:\n reference_tokens.update(variable_tokens)\n\n tokens = iter(substitute_references(tokenize(css, path), variable_tokens))\n while True:\n token = next(tokens, None)\n if token is None:\n break\n if token.name.startswith(\"selector_start\"):\n yield from parse_rule_set(\n tokens,\n token,\n is_default_rules=is_default_rules,\n tie_breaker=tie_breaker,\n )\n", "path": "src/textual/css/parse.py"}]}
| 4,053 | 326 |
gh_patches_debug_28950
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-234
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add an rst file for useragents.
There is no rst file for the useragents module, so there is no documentation on readthedocs either.
Found in #211.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/util/web.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os, tempfile, logging
3 from .misc import size
4 log = logging.getLogger(__name__)
5
6 def wget(url, save=None, timeout=5, **kwargs):
7 """wget(url, save=None, timeout=5) -> str
8
9 Downloads a file via HTTP/HTTPS.
10
11 Args:
12 url (str): URL to download
13 save (str or bool): Name to save as. Any truthy value
14 will auto-generate a name based on the URL.
15 timeout (int): Timeout, in seconds
16
17 Example:
18
19 >>> url = 'http://httpbin.org/robots.txt'
20 >>> with context.local(log_level='ERROR'): result = wget(url)
21 >>> result
22 'User-agent: *\nDisallow: /deny\n'
23 >>> with context.local(log_level='ERROR'): wget(url, True)
24 >>> result == file('robots.txt').read()
25 True
26 """
27 import requests
28
29 with log.progress("Downloading '%s'" % url) as w:
30 w.status("Making request...")
31
32 response = requests.get(url, stream=True, **kwargs)
33
34 if not response.ok:
35 w.failure("Got code %s" % response.status_code)
36 return
37
38 total_size = int(response.headers.get('content-length',0))
39
40 w.status('0 / %s' % size(total_size))
41
42 # Find out the next largest size we can represent as
43 chunk_size = 1
44 while chunk_size < (total_size/10):
45 chunk_size *= 1000
46
47 # Count chunks as they're received
48 total_data = ''
49
50 # Loop until we have all of the data
51 for chunk in response.iter_content(chunk_size = 2**10):
52 total_data += chunk
53 if total_size:
54 w.status('%s / %s' % (size(total_data), size(total_size)))
55 else:
56 w.status('%s' % size(total_data))
57
58 # Save to the target file if provided
59 if save:
60 if not isinstance(save, (str, unicode)):
61 save = os.path.basename(url)
62 save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name
63 with file(save,'wb+') as f:
64 f.write(total_data)
65 w.success('Saved %r (%s)' % (f.name, size(total_data)))
66 else:
67 w.success('%s' % size(total_data))
68
69 return total_data
70
71
```
Path: `pwnlib/util/hashes.py`
Content:
```
1 import hashlib
2
3 for _algo in hashlib.algorithms:
4 def _closure():
5 hash = hashlib.__dict__[_algo]
6 def file(p):
7 h = hash()
8 fd = open(p)
9 while True:
10 s = fd.read(4096)
11 if not s:
12 break
13 h.update(s)
14 fd.close()
15 return h
16 def sum(s):
17 return hash(s)
18 filef = lambda x: file(x).digest()
19 filef.__doc__ = 'Calculates the %s sum of a file' % _algo
20 sumf = lambda x: sum(x).digest()
21 sumf.__doc__ = 'Calculates the %s sum of a string' % _algo
22 fileh = lambda x: file(x).hexdigest()
23 fileh.__doc__ = 'Calculates the %s sum of a file; returns hex-encoded' % _algo
24 sumh = lambda x: sum(x).hexdigest()
25 sumh.__doc__ = 'Calculates the %s sum of a string; returns hex-encoded' % _algo
26 return filef, sumf, fileh, sumh
27 (globals()[_algo + 'file'],
28 globals()[_algo + 'sum'],
29 globals()[_algo + 'filehex'],
30 globals()[_algo + 'sumhex']) = _closure()
31
```
Path: `pwnlib/useragents.py`
Content:
```
1 """
2 Database of 22,000 user agent strings
3 """
4 __all__ = ['getall', 'random']
5
6 import os, random as randommod
7
8 _cache = None
9
10 def _load():
11 global _cache
12 if _cache is None:
13 _cache = set()
14 with open(os.path.join(os.path.dirname(__file__),
15 'data/useragents/useragents.txt'
16 ), 'r') as fd:
17 for line in fd:
18 if line:
19 _cache.add(line.strip())
20 return _cache
21
22 def getall():
23 """getall() -> str set
24
25 Get all the user agents that we know about.
26
27 Args:
28 None
29
30 Returns:
31 A set of user agent strings.
32
33 Examples:
34 >>> 'libcurl-agent/1.0' in getall()
35 True
36 >>> 'wget' in getall()
37 True
38 """
39 return _load().copy()
40
41 def random():
42 """random() -> str
43
44 Get a random user agent string.
45
46 Args:
47 None
48
49 Returns:
50 A random user agent string selected from :func:`getall`.
51
52 >>> import random as randommod
53 >>> randommod.seed(1)
54 >>> random()
55 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; FDM)'
56 """
57 return randommod.choice(list(_load()))
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/useragents.py b/pwnlib/useragents.py
--- a/pwnlib/useragents.py
+++ b/pwnlib/useragents.py
@@ -1,5 +1,5 @@
"""
-Database of 22,000 user agent strings
+Database of >22,000 user agent strings
"""
__all__ = ['getall', 'random']
@@ -52,6 +52,6 @@
>>> import random as randommod
>>> randommod.seed(1)
>>> random()
- 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; FDM)'
+ 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; FunWebProducts-MyTotalSearch; iebar)'
"""
return randommod.choice(list(_load()))
diff --git a/pwnlib/util/hashes.py b/pwnlib/util/hashes.py
--- a/pwnlib/util/hashes.py
+++ b/pwnlib/util/hashes.py
@@ -1,3 +1,6 @@
+"""
+Functions for computing various hashes of files and strings.
+"""
import hashlib
for _algo in hashlib.algorithms:
diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py
--- a/pwnlib/util/web.py
+++ b/pwnlib/util/web.py
@@ -17,10 +17,12 @@
Example:
>>> url = 'http://httpbin.org/robots.txt'
- >>> with context.local(log_level='ERROR'): result = wget(url)
+ >>> with context.local(log_level='ERROR'):
+ ... result = wget(url)
>>> result
- 'User-agent: *\nDisallow: /deny\n'
- >>> with context.local(log_level='ERROR'): wget(url, True)
+ 'User-agent: *\\nDisallow: /deny\\n'
+ >>> with context.local(log_level='ERROR'):
+ ... _ = wget(url, True)
>>> result == file('robots.txt').read()
True
"""
@@ -67,4 +69,3 @@
w.success('%s' % size(total_data))
return total_data
-
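A note on the escaping change in the hunk above: doctest compares the expected output text against the repr of the result, and in a non-raw docstring a `\n` sequence becomes a real newline before doctest ever parses it, so the expected value has to be written with doubled backslashes. A small standalone illustration (plain Python, not part of the patch):
```python
# In source code, '\n' inside the literal is a real newline; repr() renders it
# back as the two characters backslash + n, which is what the doctest must show.
s = 'User-agent: *\nDisallow: /deny\n'
print(repr(s))  # -> 'User-agent: *\nDisallow: /deny\n'
```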
|
{"golden_diff": "diff --git a/pwnlib/useragents.py b/pwnlib/useragents.py\n--- a/pwnlib/useragents.py\n+++ b/pwnlib/useragents.py\n@@ -1,5 +1,5 @@\n \"\"\"\n-Database of 22,000 user agent strings\n+Database of >22,000 user agent strings\n \"\"\"\n __all__ = ['getall', 'random']\n \n@@ -52,6 +52,6 @@\n >>> import random as randommod\n >>> randommod.seed(1)\n >>> random()\n- 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; FDM)'\n+ 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; FunWebProducts-MyTotalSearch; iebar)'\n \"\"\"\n return randommod.choice(list(_load()))\ndiff --git a/pwnlib/util/hashes.py b/pwnlib/util/hashes.py\n--- a/pwnlib/util/hashes.py\n+++ b/pwnlib/util/hashes.py\n@@ -1,3 +1,6 @@\n+\"\"\"\n+Functions for computing various hashes of files and strings.\n+\"\"\"\n import hashlib\n \n for _algo in hashlib.algorithms:\ndiff --git a/pwnlib/util/web.py b/pwnlib/util/web.py\n--- a/pwnlib/util/web.py\n+++ b/pwnlib/util/web.py\n@@ -17,10 +17,12 @@\n Example:\n \n >>> url = 'http://httpbin.org/robots.txt'\n- >>> with context.local(log_level='ERROR'): result = wget(url)\n+ >>> with context.local(log_level='ERROR'):\n+ ... result = wget(url)\n >>> result\n- 'User-agent: *\\nDisallow: /deny\\n'\n- >>> with context.local(log_level='ERROR'): wget(url, True)\n+ 'User-agent: *\\\\nDisallow: /deny\\\\n'\n+ >>> with context.local(log_level='ERROR'):\n+ ... _ = wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n@@ -67,4 +69,3 @@\n w.success('%s' % size(total_data))\n \n return total_data\n-\n", "issue": "Add a rst file for useragents.\nThere is no rst file for the useragents module, so there is no documentation on readthedocs either.\n\nFound in #211.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. 
Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n import requests\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = requests.get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}, {"content": "import hashlib\n\nfor _algo in hashlib.algorithms:\n def _closure():\n hash = hashlib.__dict__[_algo]\n def file(p):\n h = hash()\n fd = open(p)\n while True:\n s = fd.read(4096)\n if not s:\n break\n h.update(s)\n fd.close()\n return h\n def sum(s):\n return hash(s)\n filef = lambda x: file(x).digest()\n filef.__doc__ = 'Calculates the %s sum of a file' % _algo\n sumf = lambda x: sum(x).digest()\n sumf.__doc__ = 'Calculates the %s sum of a string' % _algo\n fileh = lambda x: file(x).hexdigest()\n fileh.__doc__ = 'Calculates the %s sum of a file; returns hex-encoded' % _algo\n sumh = lambda x: sum(x).hexdigest()\n sumh.__doc__ = 'Calculates the %s sum of a string; returns hex-encoded' % _algo\n return filef, sumf, fileh, sumh\n (globals()[_algo + 'file'],\n globals()[_algo + 'sum'],\n globals()[_algo + 'filehex'],\n globals()[_algo + 'sumhex']) = _closure()\n", "path": "pwnlib/util/hashes.py"}, {"content": "\"\"\"\nDatabase of 22,000 user agent strings\n\"\"\"\n__all__ = ['getall', 'random']\n\nimport os, random as randommod\n\n_cache = None\n\ndef _load():\n global _cache\n if _cache is None:\n _cache = set()\n with open(os.path.join(os.path.dirname(__file__),\n 'data/useragents/useragents.txt'\n ), 'r') as fd:\n for line in fd:\n if line:\n _cache.add(line.strip())\n return _cache\n\ndef getall():\n \"\"\"getall() -> str set\n\n Get all the user agents that we know about.\n\n Args:\n None\n\n Returns:\n A set of user agent strings.\n\n Examples:\n >>> 'libcurl-agent/1.0' in getall()\n True\n >>> 'wget' in getall()\n True\n \"\"\"\n return _load().copy()\n\ndef random():\n \"\"\"random() -> str\n\n Get a random user agent string.\n\n Args:\n None\n\n Returns:\n A random user agent string selected from :func:`getall`.\n\n >>> import random as randommod\n >>> randommod.seed(1)\n >>> random()\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; FDM)'\n \"\"\"\n return 
randommod.choice(list(_load()))\n", "path": "pwnlib/useragents.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'):\n ... result = wget(url)\n >>> result\n 'User-agent: *\\\\nDisallow: /deny\\\\n'\n >>> with context.local(log_level='ERROR'):\n ... _ = wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n import requests\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = requests.get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or tempfile.NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n", "path": "pwnlib/util/web.py"}, {"content": "\"\"\"\nFunctions for computing various hashes of files and strings.\n\"\"\"\nimport hashlib\n\nfor _algo in hashlib.algorithms:\n def _closure():\n hash = hashlib.__dict__[_algo]\n def file(p):\n h = hash()\n fd = open(p)\n while True:\n s = fd.read(4096)\n if not s:\n break\n h.update(s)\n fd.close()\n return h\n def sum(s):\n return hash(s)\n filef = lambda x: file(x).digest()\n filef.__doc__ = 'Calculates the %s sum of a file' % _algo\n sumf = lambda x: sum(x).digest()\n sumf.__doc__ = 'Calculates the %s sum of a string' % _algo\n fileh = lambda x: file(x).hexdigest()\n fileh.__doc__ = 'Calculates the %s sum of a file; returns hex-encoded' % _algo\n sumh = lambda x: sum(x).hexdigest()\n sumh.__doc__ = 'Calculates the %s sum of a string; returns hex-encoded' % _algo\n return filef, sumf, fileh, sumh\n (globals()[_algo + 'file'],\n globals()[_algo + 'sum'],\n globals()[_algo + 'filehex'],\n globals()[_algo + 'sumhex']) = _closure()\n", "path": "pwnlib/util/hashes.py"}, {"content": "\"\"\"\nDatabase of >22,000 user agent strings\n\"\"\"\n__all__ = ['getall', 'random']\n\nimport os, random as randommod\n\n_cache = None\n\ndef _load():\n global _cache\n if _cache is None:\n _cache = set()\n with open(os.path.join(os.path.dirname(__file__),\n 'data/useragents/useragents.txt'\n ), 'r') as fd:\n for line in fd:\n if line:\n _cache.add(line.strip())\n return _cache\n\ndef getall():\n \"\"\"getall() -> str set\n\n Get all the user agents that we know about.\n\n Args:\n None\n\n 
Returns:\n A set of user agent strings.\n\n Examples:\n >>> 'libcurl-agent/1.0' in getall()\n True\n >>> 'wget' in getall()\n True\n \"\"\"\n return _load().copy()\n\ndef random():\n \"\"\"random() -> str\n\n Get a random user agent string.\n\n Args:\n None\n\n Returns:\n A random user agent string selected from :func:`getall`.\n\n >>> import random as randommod\n >>> randommod.seed(1)\n >>> random()\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; FunWebProducts; FunWebProducts-MyTotalSearch; iebar)'\n \"\"\"\n return randommod.choice(list(_load()))\n", "path": "pwnlib/useragents.py"}]}
| 1,791 | 528 |
gh_patches_debug_1872
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-275
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Base media skill is loaded as a normal skill
At the moment, the load_skills function tries to load the media skill as if it is a normal skill.
--- END ISSUE ---
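For orientation before the code listing: the patch recorded later for this issue simply has the loader skip the media base skill by name, the same way other blacklisted skills are skipped. A minimal, self-contained sketch of that idea (illustrative names only, not the project's actual loader):
```python
# Illustrative sketch: decide whether a skill folder should be loaded normally.
PRIMARY_SKILLS = ['intent', 'wake']
BLACKLISTED_SKILLS = ['send_sms', 'media']  # 'media' added so the base skill is skipped

def loadable(skill_name):
    return skill_name not in PRIMARY_SKILLS and skill_name not in BLACKLISTED_SKILLS

print(loadable('media'))    # False -> the media base skill is not loaded as a normal skill
print(loadable('weather'))  # True
```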
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/core.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import imp
20 import time
21
22 import abc
23 import os.path
24 import re
25 from adapt.intent import Intent
26 from os.path import join, dirname, splitext, isdir
27
28 from mycroft.client.enclosure.api import EnclosureAPI
29 from mycroft.configuration import ConfigurationManager
30 from mycroft.dialog import DialogLoader
31 from mycroft.filesystem import FileSystemAccess
32 from mycroft.messagebus.message import Message
33 from mycroft.util.log import getLogger
34
35 __author__ = 'seanfitz'
36
37 PRIMARY_SKILLS = ['intent', 'wake']
38 BLACKLISTED_SKILLS = ["send_sms"]
39 SKILLS_BASEDIR = dirname(__file__)
40 THIRD_PARTY_SKILLS_DIR = "/opt/mycroft/third_party"
41
42 MainModule = '__init__'
43
44 logger = getLogger(__name__)
45
46
47 def load_vocab_from_file(path, vocab_type, emitter):
48 with open(path, 'r') as voc_file:
49 for line in voc_file.readlines():
50 parts = line.strip().split("|")
51 entity = parts[0]
52
53 emitter.emit(
54 Message("register_vocab",
55 metadata={'start': entity, 'end': vocab_type}))
56 for alias in parts[1:]:
57 emitter.emit(
58 Message("register_vocab",
59 metadata={'start': alias, 'end': vocab_type,
60 'alias_of': entity}))
61
62
63 def load_regex_from_file(path, emitter):
64 if(path.endswith('.rx')):
65 with open(path, 'r') as reg_file:
66 for line in reg_file.readlines():
67 re.compile(line.strip())
68 emitter.emit(
69 Message("register_vocab",
70 metadata={'regex': line.strip()}))
71
72
73 def load_vocabulary(basedir, emitter):
74 for vocab_type in os.listdir(basedir):
75 load_vocab_from_file(
76 join(basedir, vocab_type), splitext(vocab_type)[0], emitter)
77
78
79 def load_regex(basedir, emitter):
80 for regex_type in os.listdir(basedir):
81 if regex_type.endswith(".rx"):
82 load_regex_from_file(
83 join(basedir, regex_type), emitter)
84
85
86 def create_intent_envelope(intent):
87 return Message(None, metadata=intent.__dict__, context={})
88
89
90 def open_intent_envelope(message):
91 intent_dict = message.metadata
92 return Intent(intent_dict.get('name'),
93 intent_dict.get('requires'),
94 intent_dict.get('at_least_one'),
95 intent_dict.get('optional'))
96
97
98 def load_skill(skill_descriptor, emitter):
99 try:
100 skill_module = imp.load_module(
101 skill_descriptor["name"] + MainModule, *skill_descriptor["info"])
102 if (hasattr(skill_module, 'create_skill') and
103 callable(skill_module.create_skill)):
104 # v2 skills framework
105 skill = skill_module.create_skill()
106 skill.bind(emitter)
107 skill.initialize()
108 return skill
109 else:
110 logger.warn(
111 "Module %s does not appear to be skill" % (
112 skill_descriptor["name"]))
113 except:
114 logger.error(
115 "Failed to load skill: " + skill_descriptor["name"], exc_info=True)
116 return None
117
118
119 def get_skills(skills_folder):
120 skills = []
121 possible_skills = os.listdir(skills_folder)
122 for i in possible_skills:
123 location = join(skills_folder, i)
124 if (not isdir(location) or
125 not MainModule + ".py" in os.listdir(location)):
126 continue
127
128 skills.append(create_skill_descriptor(location))
129 skills = sorted(skills, key=lambda p: p.get('name'))
130 return skills
131
132
133 def create_skill_descriptor(skill_folder):
134 info = imp.find_module(MainModule, [skill_folder])
135 return {"name": os.path.basename(skill_folder), "info": info}
136
137
138 def load_skills(emitter, skills_root=SKILLS_BASEDIR):
139 skills = get_skills(skills_root)
140 for skill in skills:
141 if skill['name'] in PRIMARY_SKILLS:
142 load_skill(skill, emitter)
143
144 for skill in skills:
145 if (skill['name'] not in PRIMARY_SKILLS and
146 skill['name'] not in BLACKLISTED_SKILLS):
147 load_skill(skill, emitter)
148
149
150 class MycroftSkill(object):
151 """
152 Abstract base class which provides common behaviour and parameters to all
153 Skills implementation.
154 """
155
156 def __init__(self, name, emitter=None):
157 self.name = name
158 self.bind(emitter)
159 config = ConfigurationManager.get()
160 self.config = config.get(name)
161 self.config_core = config.get('core')
162 self.dialog_renderer = None
163 self.file_system = FileSystemAccess(join('skills', name))
164 self.registered_intents = []
165
166 @property
167 def location(self):
168 return self.config_core.get('location')
169
170 @property
171 def lang(self):
172 return self.config_core.get('lang')
173
174 def bind(self, emitter):
175 if emitter:
176 self.emitter = emitter
177 self.enclosure = EnclosureAPI(emitter)
178 self.__register_stop()
179
180 def __register_stop(self):
181 self.stop_time = time.time()
182 self.stop_threshold = self.config_core.get('stop_threshold')
183 self.emitter.on('mycroft.stop', self.__handle_stop)
184
185 def detach(self):
186 for name in self.registered_intents:
187 self.emitter.emit(
188 Message("detach_intent", metadata={"intent_name": name}))
189
190 def initialize(self):
191 """
192 Initialization function to be implemented by all Skills.
193
194 Usually used to create intents rules and register them.
195 """
196 raise Exception("Initialize not implemented for skill: " + self.name)
197
198 def register_intent(self, intent_parser, handler):
199 intent_message = create_intent_envelope(intent_parser)
200 intent_message.message_type = "register_intent"
201 self.emitter.emit(intent_message)
202 self.registered_intents.append(intent_parser.name)
203
204 def receive_handler(message):
205 try:
206 handler(message)
207 except:
208 # TODO: Localize
209 self.speak(
210 "An error occurred while processing a request in " +
211 self.name)
212 logger.error(
213 "An error occurred while processing a request in " +
214 self.name, exc_info=True)
215
216 self.emitter.on(intent_parser.name, receive_handler)
217
218 def register_vocabulary(self, entity, entity_type):
219 self.emitter.emit(
220 Message('register_vocab',
221 metadata={'start': entity, 'end': entity_type}))
222
223 def register_regex(self, regex_str):
224 re.compile(regex_str) # validate regex
225 self.emitter.emit(
226 Message('register_vocab', metadata={'regex': regex_str}))
227
228 def speak(self, utterance):
229 self.emitter.emit(Message("speak", metadata={'utterance': utterance}))
230
231 def speak_dialog(self, key, data={}):
232 self.speak(self.dialog_renderer.render(key, data))
233
234 def init_dialog(self, root_directory):
235 self.dialog_renderer = DialogLoader().load(
236 join(root_directory, 'dialog', self.lang))
237
238 def load_data_files(self, root_directory):
239 self.init_dialog(root_directory)
240 self.load_vocab_files(join(root_directory, 'vocab', self.lang))
241
242 def load_vocab_files(self, vocab_dir):
243 load_vocabulary(vocab_dir, self.emitter)
244
245 def load_regex_files(self, regex_dir):
246 load_regex(regex_dir, self.emitter)
247
248 def __handle_stop(self, event):
249 self.stop_time = time.time()
250 self.stop()
251
252 @abc.abstractmethod
253 def stop(self):
254 pass
255
256 def is_stop(self):
257 passed_time = time.time() - self.stop_time
258 return passed_time < self.stop_threshold
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py
--- a/mycroft/skills/core.py
+++ b/mycroft/skills/core.py
@@ -35,7 +35,7 @@
__author__ = 'seanfitz'
PRIMARY_SKILLS = ['intent', 'wake']
-BLACKLISTED_SKILLS = ["send_sms"]
+BLACKLISTED_SKILLS = ["send_sms", "media"]
SKILLS_BASEDIR = dirname(__file__)
THIRD_PARTY_SKILLS_DIR = "/opt/mycroft/third_party"
|
{"golden_diff": "diff --git a/mycroft/skills/core.py b/mycroft/skills/core.py\n--- a/mycroft/skills/core.py\n+++ b/mycroft/skills/core.py\n@@ -35,7 +35,7 @@\n __author__ = 'seanfitz'\n \n PRIMARY_SKILLS = ['intent', 'wake']\n-BLACKLISTED_SKILLS = [\"send_sms\"]\n+BLACKLISTED_SKILLS = [\"send_sms\", \"media\"]\n SKILLS_BASEDIR = dirname(__file__)\n THIRD_PARTY_SKILLS_DIR = \"/opt/mycroft/third_party\"\n", "issue": "Base media skill is loaded as a normal skill\nAt the moment, the load_skills function tries to load the media skill as if it is a normal skill. \n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport imp\nimport time\n\nimport abc\nimport os.path\nimport re\nfrom adapt.intent import Intent\nfrom os.path import join, dirname, splitext, isdir\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nPRIMARY_SKILLS = ['intent', 'wake']\nBLACKLISTED_SKILLS = [\"send_sms\"]\nSKILLS_BASEDIR = dirname(__file__)\nTHIRD_PARTY_SKILLS_DIR = \"/opt/mycroft/third_party\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': entity, 'end': vocab_type}))\n for alias in parts[1:]:\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': alias, 'end': vocab_type,\n 'alias_of': entity}))\n\n\ndef load_regex_from_file(path, emitter):\n if(path.endswith('.rx')):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef create_intent_envelope(intent):\n return Message(None, metadata=intent.__dict__, context={})\n\n\ndef open_intent_envelope(message):\n intent_dict = message.metadata\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 
'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill.initialize()\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (not isdir(location) or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_BASEDIR):\n skills = get_skills(skills_root)\n for skill in skills:\n if skill['name'] in PRIMARY_SKILLS:\n load_skill(skill, emitter)\n\n for skill in skills:\n if (skill['name'] not in PRIMARY_SKILLS and\n skill['name'] not in BLACKLISTED_SKILLS):\n load_skill(skill, emitter)\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n config = ConfigurationManager.get()\n self.config = config.get(name)\n self.config_core = config.get('core')\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n\n @property\n def location(self):\n return self.config_core.get('location')\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get('stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for name in self.registered_intents:\n self.emitter.emit(\n Message(\"detach_intent\", metadata={\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n intent_message = create_intent_envelope(intent_parser)\n intent_message.message_type = \"register_intent\"\n self.emitter.emit(intent_message)\n self.registered_intents.append(intent_parser.name)\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n self.emitter.on(intent_parser.name, receive_handler)\n\n def register_vocabulary(self, entity, entity_type):\n self.emitter.emit(\n Message('register_vocab',\n metadata={'start': entity, 'end': entity_type}))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(\n Message('register_vocab', metadata={'regex': regex_str}))\n\n def speak(self, utterance):\n 
self.emitter.emit(Message(\"speak\", metadata={'utterance': utterance}))\n\n def speak_dialog(self, key, data={}):\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n self.dialog_renderer = DialogLoader().load(\n join(root_directory, 'dialog', self.lang))\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n\n def load_vocab_files(self, vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n\n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n", "path": "mycroft/skills/core.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport imp\nimport time\n\nimport abc\nimport os.path\nimport re\nfrom adapt.intent import Intent\nfrom os.path import join, dirname, splitext, isdir\n\nfrom mycroft.client.enclosure.api import EnclosureAPI\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.dialog import DialogLoader\nfrom mycroft.filesystem import FileSystemAccess\nfrom mycroft.messagebus.message import Message\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nPRIMARY_SKILLS = ['intent', 'wake']\nBLACKLISTED_SKILLS = [\"send_sms\", \"media\"]\nSKILLS_BASEDIR = dirname(__file__)\nTHIRD_PARTY_SKILLS_DIR = \"/opt/mycroft/third_party\"\n\nMainModule = '__init__'\n\nlogger = getLogger(__name__)\n\n\ndef load_vocab_from_file(path, vocab_type, emitter):\n with open(path, 'r') as voc_file:\n for line in voc_file.readlines():\n parts = line.strip().split(\"|\")\n entity = parts[0]\n\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': entity, 'end': vocab_type}))\n for alias in parts[1:]:\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'start': alias, 'end': vocab_type,\n 'alias_of': entity}))\n\n\ndef load_regex_from_file(path, emitter):\n if(path.endswith('.rx')):\n with open(path, 'r') as reg_file:\n for line in reg_file.readlines():\n re.compile(line.strip())\n emitter.emit(\n Message(\"register_vocab\",\n metadata={'regex': line.strip()}))\n\n\ndef load_vocabulary(basedir, emitter):\n for vocab_type in os.listdir(basedir):\n load_vocab_from_file(\n join(basedir, vocab_type), splitext(vocab_type)[0], emitter)\n\n\ndef load_regex(basedir, emitter):\n for regex_type in os.listdir(basedir):\n if regex_type.endswith(\".rx\"):\n load_regex_from_file(\n join(basedir, regex_type), emitter)\n\n\ndef create_intent_envelope(intent):\n return Message(None, metadata=intent.__dict__, context={})\n\n\ndef open_intent_envelope(message):\n intent_dict = 
message.metadata\n return Intent(intent_dict.get('name'),\n intent_dict.get('requires'),\n intent_dict.get('at_least_one'),\n intent_dict.get('optional'))\n\n\ndef load_skill(skill_descriptor, emitter):\n try:\n skill_module = imp.load_module(\n skill_descriptor[\"name\"] + MainModule, *skill_descriptor[\"info\"])\n if (hasattr(skill_module, 'create_skill') and\n callable(skill_module.create_skill)):\n # v2 skills framework\n skill = skill_module.create_skill()\n skill.bind(emitter)\n skill.initialize()\n return skill\n else:\n logger.warn(\n \"Module %s does not appear to be skill\" % (\n skill_descriptor[\"name\"]))\n except:\n logger.error(\n \"Failed to load skill: \" + skill_descriptor[\"name\"], exc_info=True)\n return None\n\n\ndef get_skills(skills_folder):\n skills = []\n possible_skills = os.listdir(skills_folder)\n for i in possible_skills:\n location = join(skills_folder, i)\n if (not isdir(location) or\n not MainModule + \".py\" in os.listdir(location)):\n continue\n\n skills.append(create_skill_descriptor(location))\n skills = sorted(skills, key=lambda p: p.get('name'))\n return skills\n\n\ndef create_skill_descriptor(skill_folder):\n info = imp.find_module(MainModule, [skill_folder])\n return {\"name\": os.path.basename(skill_folder), \"info\": info}\n\n\ndef load_skills(emitter, skills_root=SKILLS_BASEDIR):\n skills = get_skills(skills_root)\n for skill in skills:\n if skill['name'] in PRIMARY_SKILLS:\n load_skill(skill, emitter)\n\n for skill in skills:\n if (skill['name'] not in PRIMARY_SKILLS and\n skill['name'] not in BLACKLISTED_SKILLS):\n load_skill(skill, emitter)\n\n\nclass MycroftSkill(object):\n \"\"\"\n Abstract base class which provides common behaviour and parameters to all\n Skills implementation.\n \"\"\"\n\n def __init__(self, name, emitter=None):\n self.name = name\n self.bind(emitter)\n config = ConfigurationManager.get()\n self.config = config.get(name)\n self.config_core = config.get('core')\n self.dialog_renderer = None\n self.file_system = FileSystemAccess(join('skills', name))\n self.registered_intents = []\n\n @property\n def location(self):\n return self.config_core.get('location')\n\n @property\n def lang(self):\n return self.config_core.get('lang')\n\n def bind(self, emitter):\n if emitter:\n self.emitter = emitter\n self.enclosure = EnclosureAPI(emitter)\n self.__register_stop()\n\n def __register_stop(self):\n self.stop_time = time.time()\n self.stop_threshold = self.config_core.get('stop_threshold')\n self.emitter.on('mycroft.stop', self.__handle_stop)\n\n def detach(self):\n for name in self.registered_intents:\n self.emitter.emit(\n Message(\"detach_intent\", metadata={\"intent_name\": name}))\n\n def initialize(self):\n \"\"\"\n Initialization function to be implemented by all Skills.\n\n Usually used to create intents rules and register them.\n \"\"\"\n raise Exception(\"Initialize not implemented for skill: \" + self.name)\n\n def register_intent(self, intent_parser, handler):\n intent_message = create_intent_envelope(intent_parser)\n intent_message.message_type = \"register_intent\"\n self.emitter.emit(intent_message)\n self.registered_intents.append(intent_parser.name)\n\n def receive_handler(message):\n try:\n handler(message)\n except:\n # TODO: Localize\n self.speak(\n \"An error occurred while processing a request in \" +\n self.name)\n logger.error(\n \"An error occurred while processing a request in \" +\n self.name, exc_info=True)\n\n self.emitter.on(intent_parser.name, receive_handler)\n\n def register_vocabulary(self, entity, 
entity_type):\n self.emitter.emit(\n Message('register_vocab',\n metadata={'start': entity, 'end': entity_type}))\n\n def register_regex(self, regex_str):\n re.compile(regex_str) # validate regex\n self.emitter.emit(\n Message('register_vocab', metadata={'regex': regex_str}))\n\n def speak(self, utterance):\n self.emitter.emit(Message(\"speak\", metadata={'utterance': utterance}))\n\n def speak_dialog(self, key, data={}):\n self.speak(self.dialog_renderer.render(key, data))\n\n def init_dialog(self, root_directory):\n self.dialog_renderer = DialogLoader().load(\n join(root_directory, 'dialog', self.lang))\n\n def load_data_files(self, root_directory):\n self.init_dialog(root_directory)\n self.load_vocab_files(join(root_directory, 'vocab', self.lang))\n\n def load_vocab_files(self, vocab_dir):\n load_vocabulary(vocab_dir, self.emitter)\n\n def load_regex_files(self, regex_dir):\n load_regex(regex_dir, self.emitter)\n\n def __handle_stop(self, event):\n self.stop_time = time.time()\n self.stop()\n\n @abc.abstractmethod\n def stop(self):\n pass\n\n def is_stop(self):\n passed_time = time.time() - self.stop_time\n return passed_time < self.stop_threshold\n", "path": "mycroft/skills/core.py"}]}
| 2,743 | 119 |
gh_patches_debug_12273
|
rasdani/github-patches
|
git_diff
|
praw-dev__praw-1422
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Timeout not converted to int
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
No request can be made with the current code. The timeout specified has to be an int, but the config stores it as a string.
**To Reproduce**
Steps to reproduce the behavior:
```python
reddit.request("tesT")
```
**Expected behavior**
Timeout works and the request goes through.
**Code/Logs**
```
In [4]: reddit.subreddit("The_Donald").subscribers
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)
139 try:
--> 140 if value <= 0:
141 raise ValueError(
TypeError: '<=' not supported between instances of 'str' and 'int'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)
52 try:
---> 53 return self._http.request(*args, timeout=timeout, **kwargs)
54 except Exception as exc:
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)
529 send_kwargs.update(settings)
--> 530 resp = self.send(prep, **send_kwargs)
531
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)
642 # Send the request
--> 643 r = adapter.send(request, **kwargs)
644
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)
434 else:
--> 435 timeout = TimeoutSauce(connect=timeout, read=timeout)
436
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in __init__(self, total, connect, read)
95 def __init__(self, total=None, connect=_Default, read=_Default):
---> 96 self._connect = self._validate_timeout(connect, "connect")
97 self._read = self._validate_timeout(read, "read")
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)
149 "Timeout value %s was %s, but it must be an "
--> 150 "int, float or None." % (name, value)
151 )
ValueError: Timeout value connect was 16, but it must be an int, float or None.
During handling of the above exception, another exception occurred:
RequestException Traceback (most recent call last)
<ipython-input-4-a5aa025940f1> in <module>
----> 1 reddit.subreddit("The_Donald").subscribers
~/PycharmProjects/praw/praw/models/reddit/base.py in __getattr__(self, attribute)
31 """Return the value of `attribute`."""
32 if not attribute.startswith("_") and not self._fetched:
---> 33 self._fetch()
34 return getattr(self, attribute)
35 raise AttributeError(
~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch(self)
514
515 def _fetch(self):
--> 516 data = self._fetch_data()
517 data = data["data"]
518 other = type(self)(self._reddit, _data=data)
~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch_data(self)
511 name, fields, params = self._fetch_info()
512 path = API_PATH[name].format(**fields)
--> 513 return self._reddit.request("GET", path, params)
514
515 def _fetch(self):
~/PycharmProjects/praw/praw/reddit.py in request(self, method, path, params, data, files)
661 files=files,
662 params=params,
--> 663 timeout=self.config.timeout,
664 )
665 except BadRequest as exception:
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in request(self, method, path, data, files, json, params, timeout)
334 params=params,
335 timeout=timeout,
--> 336 url=url,
337 )
338
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _request_with_retries(self, data, files, json, method, params, timeout, url, retry_strategy_state)
233 retry_strategy_state,
234 timeout,
--> 235 url,
236 )
237
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _make_request(self, data, files, json, method, params, retry_strategy_state, timeout, url)
193 json=json,
194 params=params,
--> 195 timeout=timeout,
196 )
197 log.debug(
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/rate_limit.py in call(self, request_function, set_header_callback, *args, **kwargs)
34 self.delay()
35 kwargs["headers"] = set_header_callback()
---> 36 response = request_function(*args, **kwargs)
37 self.update(response.headers)
38 return response
~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)
53 return self._http.request(*args, timeout=timeout, **kwargs)
54 except Exception as exc:
---> 55 raise RequestException(exc, args, kwargs)
RequestException: error with request Timeout value connect was 16, but it must be an int, float or None.
```
**System Info**
- OS: MacOS 10.14.6
- Python: 3.7.7
- PRAW Version: 7.0.0dev0
--- END ISSUE ---
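For orientation, the failure in the traceback above is reproducible without PRAW at all: `requests` constructs its timeout object before the request is ever sent, so a string value read from an INI file trips urllib3's validation immediately. A minimal sketch, assuming only that the `requests` package is installed (the URL is a placeholder; no response is needed because, as the traceback shows, the ValueError is raised while the timeout object is being built):

```python
import requests

try:
    # Simulates passing a timeout that was read from praw.ini as a string.
    requests.get("https://example.com", timeout="16")
except ValueError as exc:
    # With the requests/urllib3 versions shown in the traceback this prints:
    # Timeout value connect was 16, but it must be an int, float or None.
    print(exc)
```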
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `praw/config.py`
Content:
```
1 """Provides the code to load PRAW's configuration file `praw.ini`."""
2 import configparser
3 import os
4 import sys
5 from threading import Lock
6 from typing import Optional
7
8 from .exceptions import ClientException
9
10
11 class _NotSet:
12 def __bool__(self):
13 return False
14
15 __nonzero__ = __bool__
16
17 def __str__(self):
18 return "NotSet"
19
20
21 class Config:
22 """A class containing the configuration for a reddit site."""
23
24 CONFIG = None
25 CONFIG_NOT_SET = _NotSet() # Represents a config value that is not set.
26 LOCK = Lock()
27 INTERPOLATION_LEVEL = {
28 "basic": configparser.BasicInterpolation,
29 "extended": configparser.ExtendedInterpolation,
30 }
31
32 @staticmethod
33 def _config_boolean(item):
34 if isinstance(item, bool):
35 return item
36 return item.lower() in {"1", "yes", "true", "on"}
37
38 @classmethod
39 def _load_config(cls, config_interpolation: Optional[str] = None):
40 """Attempt to load settings from various praw.ini files."""
41 if config_interpolation is not None:
42 interpolator_class = cls.INTERPOLATION_LEVEL[
43 config_interpolation
44 ]()
45 else:
46 interpolator_class = None
47 config = configparser.ConfigParser(interpolation=interpolator_class)
48 module_dir = os.path.dirname(sys.modules[__name__].__file__)
49 if "APPDATA" in os.environ: # Windows
50 os_config_path = os.environ["APPDATA"]
51 elif "XDG_CONFIG_HOME" in os.environ: # Modern Linux
52 os_config_path = os.environ["XDG_CONFIG_HOME"]
53 elif "HOME" in os.environ: # Legacy Linux
54 os_config_path = os.path.join(os.environ["HOME"], ".config")
55 else:
56 os_config_path = None
57 locations = [os.path.join(module_dir, "praw.ini"), "praw.ini"]
58 if os_config_path is not None:
59 locations.insert(1, os.path.join(os_config_path, "praw.ini"))
60 config.read(locations)
61 cls.CONFIG = config
62
63 @property
64 def short_url(self) -> str:
65 """Return the short url or raise a ClientException when not set."""
66 if self._short_url is self.CONFIG_NOT_SET:
67 raise ClientException("No short domain specified.")
68 return self._short_url
69
70 def __init__(
71 self,
72 site_name: str,
73 config_interpolation: Optional[str] = None,
74 **settings: str
75 ):
76 """Initialize a Config instance."""
77 with Config.LOCK:
78 if Config.CONFIG is None:
79 self._load_config(config_interpolation)
80
81 self._settings = settings
82 self.custom = dict(Config.CONFIG.items(site_name), **settings)
83
84 self.client_id = self.client_secret = self.oauth_url = None
85 self.reddit_url = self.refresh_token = self.redirect_uri = None
86 self.password = self.user_agent = self.username = None
87
88 self._initialize_attributes()
89
90 def _fetch(self, key):
91 value = self.custom[key]
92 del self.custom[key]
93 return value
94
95 def _fetch_default(self, key, default=None):
96 if key not in self.custom:
97 return default
98 return self._fetch(key)
99
100 def _fetch_or_not_set(self, key):
101 if key in self._settings: # Passed in values have the highest priority
102 return self._fetch(key)
103
104 env_value = os.getenv("praw_{}".format(key))
105 ini_value = self._fetch_default(key) # Needed to remove from custom
106
107 # Environment variables have higher priority than praw.ini settings
108 return env_value or ini_value or self.CONFIG_NOT_SET
109
110 def _initialize_attributes(self):
111 self._short_url = (
112 self._fetch_default("short_url") or self.CONFIG_NOT_SET
113 )
114 self.check_for_updates = self._config_boolean(
115 self._fetch_or_not_set("check_for_updates")
116 )
117 self.kinds = {
118 x: self._fetch("{}_kind".format(x))
119 for x in [
120 "comment",
121 "message",
122 "redditor",
123 "submission",
124 "subreddit",
125 "trophy",
126 ]
127 }
128
129 for attribute in (
130 "client_id",
131 "client_secret",
132 "redirect_uri",
133 "refresh_token",
134 "password",
135 "user_agent",
136 "username",
137 ):
138 setattr(self, attribute, self._fetch_or_not_set(attribute))
139
140 for required_attribute in ("oauth_url", "reddit_url", "timeout"):
141 setattr(self, required_attribute, self._fetch(required_attribute))
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/praw/config.py b/praw/config.py
--- a/praw/config.py
+++ b/praw/config.py
@@ -139,3 +139,16 @@
for required_attribute in ("oauth_url", "reddit_url", "timeout"):
setattr(self, required_attribute, self._fetch(required_attribute))
+
+ for attribute, conversion in {"timeout": int}.items():
+ try:
+ setattr(self, attribute, conversion(getattr(self, attribute)))
+ except ValueError:
+ raise ValueError(
+ "An incorrect config type was given for option {}. The "
+ "expected type is {}, but the given value is {}.".format(
+ attribute,
+ conversion.__name__,
+ getattr(self, attribute),
+ )
+ )
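The core of the fix is coercing the string-typed `timeout` setting into an `int` as soon as the config is initialized, and raising a descriptive error when the value cannot be converted. A standalone sketch of the same pattern, using illustrative names rather than PRAW's actual `Config` class:

```python
def coerce_settings(settings, conversions):
    """Convert string-typed config values (e.g. read from an INI file) in place."""
    for key, conversion in conversions.items():
        try:
            settings[key] = conversion(settings[key])
        except ValueError:
            raise ValueError(
                "An incorrect config type was given for option {}. The "
                "expected type is {}, but the given value is {}.".format(
                    key, conversion.__name__, settings[key]
                )
            )
    return settings


settings = coerce_settings({"timeout": "16"}, {"timeout": int})
assert settings["timeout"] == 16  # "16" -> 16

try:
    coerce_settings({"timeout": "sixteen"}, {"timeout": int})
except ValueError as exc:
    print(exc)  # An incorrect config type was given for option timeout. ...
```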
|
{"golden_diff": "diff --git a/praw/config.py b/praw/config.py\n--- a/praw/config.py\n+++ b/praw/config.py\n@@ -139,3 +139,16 @@\n \n for required_attribute in (\"oauth_url\", \"reddit_url\", \"timeout\"):\n setattr(self, required_attribute, self._fetch(required_attribute))\n+\n+ for attribute, conversion in {\"timeout\": int}.items():\n+ try:\n+ setattr(self, attribute, conversion(getattr(self, attribute)))\n+ except ValueError:\n+ raise ValueError(\n+ \"An incorrect config type was given for option {}. The \"\n+ \"expected type is {}, but the given value is {}.\".format(\n+ attribute,\n+ conversion.__name__,\n+ getattr(self, attribute),\n+ )\n+ )\n", "issue": "Timeout not converted to int\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nNo request can be made with the current code. The timeout specified has to be an int, but the config stores it as a string.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n```python\r\nreddit.request(\"tesT\")\r\n```\r\n\r\n**Expected behavior**\r\n\r\nTimeout works and the request goes through.\r\n\r\n**Code/Logs**\r\n\r\n```\r\nIn [4]: reddit.subreddit(\"The_Donald\").subscribers \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)\r\n 139 try:\r\n--> 140 if value <= 0:\r\n 141 raise ValueError(\r\n\r\nTypeError: '<=' not supported between instances of 'str' and 'int'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nValueError Traceback (most recent call last)\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)\r\n 52 try:\r\n---> 53 return self._http.request(*args, timeout=timeout, **kwargs)\r\n 54 except Exception as exc:\r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in request(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\r\n 529 send_kwargs.update(settings)\r\n--> 530 resp = self.send(prep, **send_kwargs)\r\n 531 \r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/sessions.py in send(self, request, **kwargs)\r\n 642 # Send the request\r\n--> 643 r = adapter.send(request, **kwargs)\r\n 644 \r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/requests/adapters.py in send(self, request, stream, timeout, verify, cert, proxies)\r\n 434 else:\r\n--> 435 timeout = TimeoutSauce(connect=timeout, read=timeout)\r\n 436 \r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in __init__(self, total, connect, read)\r\n 95 def __init__(self, total=None, connect=_Default, read=_Default):\r\n---> 96 self._connect = self._validate_timeout(connect, \"connect\")\r\n 97 self._read = self._validate_timeout(read, \"read\")\r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/urllib3/util/timeout.py in _validate_timeout(cls, value, name)\r\n 149 \"Timeout value %s was %s, but it must be an \"\r\n--> 150 \"int, float or None.\" % (name, value)\r\n 151 )\r\n\r\nValueError: Timeout value connect was 16, but it must be an int, float or None.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nRequestException Traceback (most recent call last)\r\n<ipython-input-4-a5aa025940f1> in <module>\r\n----> 1 
reddit.subreddit(\"The_Donald\").subscribers\r\n\r\n~/PycharmProjects/praw/praw/models/reddit/base.py in __getattr__(self, attribute)\r\n 31 \"\"\"Return the value of `attribute`.\"\"\"\r\n 32 if not attribute.startswith(\"_\") and not self._fetched:\r\n---> 33 self._fetch()\r\n 34 return getattr(self, attribute)\r\n 35 raise AttributeError(\r\n\r\n~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch(self)\r\n 514 \r\n 515 def _fetch(self):\r\n--> 516 data = self._fetch_data()\r\n 517 data = data[\"data\"]\r\n 518 other = type(self)(self._reddit, _data=data)\r\n\r\n~/PycharmProjects/praw/praw/models/reddit/subreddit.py in _fetch_data(self)\r\n 511 name, fields, params = self._fetch_info()\r\n 512 path = API_PATH[name].format(**fields)\r\n--> 513 return self._reddit.request(\"GET\", path, params)\r\n 514 \r\n 515 def _fetch(self):\r\n\r\n~/PycharmProjects/praw/praw/reddit.py in request(self, method, path, params, data, files)\r\n 661 files=files,\r\n 662 params=params,\r\n--> 663 timeout=self.config.timeout,\r\n 664 )\r\n 665 except BadRequest as exception:\r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in request(self, method, path, data, files, json, params, timeout)\r\n 334 params=params,\r\n 335 timeout=timeout,\r\n--> 336 url=url,\r\n 337 )\r\n 338 \r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _request_with_retries(self, data, files, json, method, params, timeout, url, retry_strategy_state)\r\n 233 retry_strategy_state,\r\n 234 timeout,\r\n--> 235 url,\r\n 236 )\r\n 237 \r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/sessions.py in _make_request(self, data, files, json, method, params, retry_strategy_state, timeout, url)\r\n 193 json=json,\r\n 194 params=params,\r\n--> 195 timeout=timeout,\r\n 196 )\r\n 197 log.debug(\r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/rate_limit.py in call(self, request_function, set_header_callback, *args, **kwargs)\r\n 34 self.delay()\r\n 35 kwargs[\"headers\"] = set_header_callback()\r\n---> 36 response = request_function(*args, **kwargs)\r\n 37 self.update(response.headers)\r\n 38 return response\r\n\r\n~/.virtualenvs/praw/venv/lib/python3.7/site-packages/prawcore/requestor.py in request(self, timeout, *args, **kwargs)\r\n 53 return self._http.request(*args, timeout=timeout, **kwargs)\r\n 54 except Exception as exc:\r\n---> 55 raise RequestException(exc, args, kwargs)\r\n\r\nRequestException: error with request Timeout value connect was 16, but it must be an int, float or None.\r\n```\r\n\r\n**System Info**\r\n - OS: MacOS 10.14.6\r\n - Python: 3.7.7\r\n - PRAW Version: 7.0.0dev0\r\n\n", "before_files": [{"content": "\"\"\"Provides the code to load PRAW's configuration file `praw.ini`.\"\"\"\nimport configparser\nimport os\nimport sys\nfrom threading import Lock\nfrom typing import Optional\n\nfrom .exceptions import ClientException\n\n\nclass _NotSet:\n def __bool__(self):\n return False\n\n __nonzero__ = __bool__\n\n def __str__(self):\n return \"NotSet\"\n\n\nclass Config:\n \"\"\"A class containing the configuration for a reddit site.\"\"\"\n\n CONFIG = None\n CONFIG_NOT_SET = _NotSet() # Represents a config value that is not set.\n LOCK = Lock()\n INTERPOLATION_LEVEL = {\n \"basic\": configparser.BasicInterpolation,\n \"extended\": configparser.ExtendedInterpolation,\n }\n\n @staticmethod\n def _config_boolean(item):\n if isinstance(item, bool):\n return item\n return item.lower() in {\"1\", \"yes\", \"true\", 
\"on\"}\n\n @classmethod\n def _load_config(cls, config_interpolation: Optional[str] = None):\n \"\"\"Attempt to load settings from various praw.ini files.\"\"\"\n if config_interpolation is not None:\n interpolator_class = cls.INTERPOLATION_LEVEL[\n config_interpolation\n ]()\n else:\n interpolator_class = None\n config = configparser.ConfigParser(interpolation=interpolator_class)\n module_dir = os.path.dirname(sys.modules[__name__].__file__)\n if \"APPDATA\" in os.environ: # Windows\n os_config_path = os.environ[\"APPDATA\"]\n elif \"XDG_CONFIG_HOME\" in os.environ: # Modern Linux\n os_config_path = os.environ[\"XDG_CONFIG_HOME\"]\n elif \"HOME\" in os.environ: # Legacy Linux\n os_config_path = os.path.join(os.environ[\"HOME\"], \".config\")\n else:\n os_config_path = None\n locations = [os.path.join(module_dir, \"praw.ini\"), \"praw.ini\"]\n if os_config_path is not None:\n locations.insert(1, os.path.join(os_config_path, \"praw.ini\"))\n config.read(locations)\n cls.CONFIG = config\n\n @property\n def short_url(self) -> str:\n \"\"\"Return the short url or raise a ClientException when not set.\"\"\"\n if self._short_url is self.CONFIG_NOT_SET:\n raise ClientException(\"No short domain specified.\")\n return self._short_url\n\n def __init__(\n self,\n site_name: str,\n config_interpolation: Optional[str] = None,\n **settings: str\n ):\n \"\"\"Initialize a Config instance.\"\"\"\n with Config.LOCK:\n if Config.CONFIG is None:\n self._load_config(config_interpolation)\n\n self._settings = settings\n self.custom = dict(Config.CONFIG.items(site_name), **settings)\n\n self.client_id = self.client_secret = self.oauth_url = None\n self.reddit_url = self.refresh_token = self.redirect_uri = None\n self.password = self.user_agent = self.username = None\n\n self._initialize_attributes()\n\n def _fetch(self, key):\n value = self.custom[key]\n del self.custom[key]\n return value\n\n def _fetch_default(self, key, default=None):\n if key not in self.custom:\n return default\n return self._fetch(key)\n\n def _fetch_or_not_set(self, key):\n if key in self._settings: # Passed in values have the highest priority\n return self._fetch(key)\n\n env_value = os.getenv(\"praw_{}\".format(key))\n ini_value = self._fetch_default(key) # Needed to remove from custom\n\n # Environment variables have higher priority than praw.ini settings\n return env_value or ini_value or self.CONFIG_NOT_SET\n\n def _initialize_attributes(self):\n self._short_url = (\n self._fetch_default(\"short_url\") or self.CONFIG_NOT_SET\n )\n self.check_for_updates = self._config_boolean(\n self._fetch_or_not_set(\"check_for_updates\")\n )\n self.kinds = {\n x: self._fetch(\"{}_kind\".format(x))\n for x in [\n \"comment\",\n \"message\",\n \"redditor\",\n \"submission\",\n \"subreddit\",\n \"trophy\",\n ]\n }\n\n for attribute in (\n \"client_id\",\n \"client_secret\",\n \"redirect_uri\",\n \"refresh_token\",\n \"password\",\n \"user_agent\",\n \"username\",\n ):\n setattr(self, attribute, self._fetch_or_not_set(attribute))\n\n for required_attribute in (\"oauth_url\", \"reddit_url\", \"timeout\"):\n setattr(self, required_attribute, self._fetch(required_attribute))\n", "path": "praw/config.py"}], "after_files": [{"content": "\"\"\"Provides the code to load PRAW's configuration file `praw.ini`.\"\"\"\nimport configparser\nimport os\nimport sys\nfrom threading import Lock\nfrom typing import Optional\n\nfrom .exceptions import ClientException\n\n\nclass _NotSet:\n def __bool__(self):\n return False\n\n __nonzero__ = __bool__\n\n def 
__str__(self):\n return \"NotSet\"\n\n\nclass Config:\n \"\"\"A class containing the configuration for a reddit site.\"\"\"\n\n CONFIG = None\n CONFIG_NOT_SET = _NotSet() # Represents a config value that is not set.\n LOCK = Lock()\n INTERPOLATION_LEVEL = {\n \"basic\": configparser.BasicInterpolation,\n \"extended\": configparser.ExtendedInterpolation,\n }\n\n @staticmethod\n def _config_boolean(item):\n if isinstance(item, bool):\n return item\n return item.lower() in {\"1\", \"yes\", \"true\", \"on\"}\n\n @classmethod\n def _load_config(cls, config_interpolation: Optional[str] = None):\n \"\"\"Attempt to load settings from various praw.ini files.\"\"\"\n if config_interpolation is not None:\n interpolator_class = cls.INTERPOLATION_LEVEL[\n config_interpolation\n ]()\n else:\n interpolator_class = None\n config = configparser.ConfigParser(interpolation=interpolator_class)\n module_dir = os.path.dirname(sys.modules[__name__].__file__)\n if \"APPDATA\" in os.environ: # Windows\n os_config_path = os.environ[\"APPDATA\"]\n elif \"XDG_CONFIG_HOME\" in os.environ: # Modern Linux\n os_config_path = os.environ[\"XDG_CONFIG_HOME\"]\n elif \"HOME\" in os.environ: # Legacy Linux\n os_config_path = os.path.join(os.environ[\"HOME\"], \".config\")\n else:\n os_config_path = None\n locations = [os.path.join(module_dir, \"praw.ini\"), \"praw.ini\"]\n if os_config_path is not None:\n locations.insert(1, os.path.join(os_config_path, \"praw.ini\"))\n config.read(locations)\n cls.CONFIG = config\n\n @property\n def short_url(self) -> str:\n \"\"\"Return the short url or raise a ClientException when not set.\"\"\"\n if self._short_url is self.CONFIG_NOT_SET:\n raise ClientException(\"No short domain specified.\")\n return self._short_url\n\n def __init__(\n self,\n site_name: str,\n config_interpolation: Optional[str] = None,\n **settings: str\n ):\n \"\"\"Initialize a Config instance.\"\"\"\n with Config.LOCK:\n if Config.CONFIG is None:\n self._load_config(config_interpolation)\n\n self._settings = settings\n self.custom = dict(Config.CONFIG.items(site_name), **settings)\n\n self.client_id = self.client_secret = self.oauth_url = None\n self.reddit_url = self.refresh_token = self.redirect_uri = None\n self.password = self.user_agent = self.username = None\n\n self._initialize_attributes()\n\n def _fetch(self, key):\n value = self.custom[key]\n del self.custom[key]\n return value\n\n def _fetch_default(self, key, default=None):\n if key not in self.custom:\n return default\n return self._fetch(key)\n\n def _fetch_or_not_set(self, key):\n if key in self._settings: # Passed in values have the highest priority\n return self._fetch(key)\n\n env_value = os.getenv(\"praw_{}\".format(key))\n ini_value = self._fetch_default(key) # Needed to remove from custom\n\n # Environment variables have higher priority than praw.ini settings\n return env_value or ini_value or self.CONFIG_NOT_SET\n\n def _initialize_attributes(self):\n self._short_url = (\n self._fetch_default(\"short_url\") or self.CONFIG_NOT_SET\n )\n self.check_for_updates = self._config_boolean(\n self._fetch_or_not_set(\"check_for_updates\")\n )\n self.kinds = {\n x: self._fetch(\"{}_kind\".format(x))\n for x in [\n \"comment\",\n \"message\",\n \"redditor\",\n \"submission\",\n \"subreddit\",\n \"trophy\",\n ]\n }\n\n for attribute in (\n \"client_id\",\n \"client_secret\",\n \"redirect_uri\",\n \"refresh_token\",\n \"password\",\n \"user_agent\",\n \"username\",\n ):\n setattr(self, attribute, self._fetch_or_not_set(attribute))\n\n for required_attribute 
in (\"oauth_url\", \"reddit_url\", \"timeout\"):\n setattr(self, required_attribute, self._fetch(required_attribute))\n\n for attribute, conversion in {\"timeout\": int}.items():\n try:\n setattr(self, attribute, conversion(getattr(self, attribute)))\n except ValueError:\n raise ValueError(\n \"An incorrect config type was given for option {}. The \"\n \"expected type is {}, but the given value is {}.\".format(\n attribute,\n conversion.__name__,\n getattr(self, attribute),\n )\n )\n", "path": "praw/config.py"}]}
| 3,197 | 169 |
gh_patches_debug_5798
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
wt.google_drive.Drive.download() calls `self._list_folder()` when it should call `self.list_folder()`
See the offending [line](https://github.com/wright-group/WrightTools/blob/039507d95561fd237dc7ba66b4d4f37ce6148397/WrightTools/google_drive.py#L248).
[Line](https://github.com/wright-group/WrightTools/blob/039507d95561fd237dc7ba66b4d4f37ce6148397/WrightTools/google_drive.py#L281) where the correct method is defined.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/google_drive.py`
Content:
```
1 """Interact with google drive using the pydrive package."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 from __future__ import absolute_import, division, print_function, unicode_literals
8
9 import os
10 import time
11 import datetime
12 import tempfile
13 import appdirs
14 from glob import glob
15
16
17 # --- define --------------------------------------------------------------------------------------
18
19
20 directory = os.path.dirname(os.path.abspath(__file__))
21
22
23 # --- helper methods ------------------------------------------------------------------------------
24
25
26 def id_to_url(driveid):
27 """Generate a url from a Google Drive id.
28
29 Parameters
30 ----------
31 id : string
32 ID.
33
34 Returns
35 -------
36 string
37 url.
38 """
39 return 'https://drive.google.com/open?id=' + driveid
40
41
42 # --- drive class ---------------------------------------------------------------------------------
43
44
45 class Drive:
46 """Google Drive class."""
47
48 def __init__(self, account_id='default'):
49 """init."""
50 # Define the temp directory and file name format
51 configDir = appdirs.user_data_dir('WrightTools', 'WrightGroup')
52 if not os.path.isdir(configDir):
53 os.makedirs(configDir)
54 prefix = 'google-drive-'
55 suffix = '-' + account_id + '.txt'
56 # Check for existing file
57 lis = glob(os.path.join(configDir, prefix + "*" + suffix))
58 self.mycreds_path = ''
59 if len(lis) > 0:
60 for f in lis:
61 # Check that for read and write access (or is bitwise, checking both)
62 # Note this check is probably not needed with appdirs, but is not
63 # harmful and provides additional insurance against crashes.
64 if os.access(f, os.W_OK | os.R_OK):
65 self.mycreds_path = f
66 break
67 # Make a new file if one does not exist with sufficent permissions
68 if self.mycreds_path == '':
69 self.mycreds_path = tempfile.mkstemp(prefix=prefix,
70 suffix=suffix,
71 text=True,
72 dir=configDir)[1]
73 self._authenticate()
74
75 def _authenticate(self):
76 """Authenticate the user via a web browser.
77
78 This function, once run, will open up a login window in a web browser.
79 The user must then athenticate via email and password to authorize the
80 API for usage with that particular account. Note that 'mycreds.txt' may
81 just be an empty text file. This function will create the correct
82 dictionary structure in the file upon completion.
83 """
84 # This function requires a Client_secrets.json file to be in the
85 # working directory.
86 old_cwd = os.getcwd()
87 os.chdir(directory)
88 # import
89 from pydrive.auth import GoogleAuth
90 from pydrive.drive import GoogleDrive
91 # load
92 self.gauth = GoogleAuth()
93 self.gauth.LoadCredentialsFile(self.mycreds_path)
94 if self.gauth.credentials is None:
95 # authenticate if credentials are not found
96 self.gauth.LocalWebserverAuth()
97 elif self.gauth.access_token_expired:
98 # refresh credentials if they are expired
99 self.gauth.Refresh()
100 else:
101 # initialize the saved credentials
102 self.gauth.Authorize()
103 # finish
104 self.gauth.SaveCredentialsFile(self.mycreds_path)
105 self.api = GoogleDrive(self.gauth)
106 os.chdir(old_cwd)
107
108 def _upload_file(self, filepath, parentid, overwrite=False,
109 delete_local=False, verbose=True):
110 """Upload file.
111
112 Parameters
113 ----------
114 filepath : string
115 Filepath.
116 parentid : string
117 Parent ID.
118 overwrite : boolean (optional)
119 Toggle remote overwrite. Default is False.
120 delete_local : boolean (optional).
121 Toggle local deletion after upload. Default is False.
122 verbose : boolean (optional)
123 Toggle talkback. Default is True.
124 """
125 self._authenticate()
126 title = filepath.split(os.path.sep)[-1]
127 # check if remote file already exists
128 q = {'q': "'{}' in parents and trashed=false".format(parentid)}
129 fs = self.api.ListFile(q).GetList()
130 f = None
131 for fi in fs:
132 # dont want to look at folders
133 if 'folder' in fi['mimeType']:
134 continue
135 if fi['title'] == title:
136 print(title, 'found in upload file')
137 f = fi
138 if f is not None:
139 remove = False
140 statinfo = os.stat(filepath)
141 # filesize different
142 if not int(statinfo.st_size) == int(f['fileSize']):
143 remove = True
144 # modified since creation
145 remote_stamp = f['modifiedDate'].split('.')[0] # UTC
146 remote_stamp = time.mktime(datetime.datetime.strptime(
147 remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())
148 local_stamp = os.path.getmtime(filepath) # local
149 local_stamp += time.timezone # UTC
150 if local_stamp > remote_stamp:
151 remove = True
152 # overwrite toggle
153 if overwrite:
154 remove = True
155 # remove
156 if remove:
157 f.Trash()
158 f = None
159 # upload
160 if f is None:
161 f = self.api.CreateFile({'title': title,
162 'parents': [{"id": parentid}]})
163 f.SetContentFile(filepath)
164 f.Upload()
165 f.content.close()
166 if verbose:
167 print('file uploaded from {}'.format(filepath))
168 # delete local
169 if delete_local:
170 os.remove(filepath)
171 # finish
172 return f['id']
173
174 def create_folder(self, name, parentid):
175 """Create a new folder in Google Drive.
176
177 Attributes
178 ----------
179 name : string or list of string
180 Name of new folder to be created or list of new folders and
181 subfolders.
182 parentID : string
183 Google Drive ID of folder that is to be the parent of new folder.
184
185 Returns
186 -------
187 string
188 The unique Google Drive ID of the bottom-most newly created folder.
189 """
190 import time
191 t = time.time()
192 self._authenticate()
193 print(time.time() - t, "Authenticate")
194 t = time.time()
195 # clean inputs
196 if isinstance(name, str):
197 name = [name]
198 # create
199 parent = parentid
200 for n in name:
201 # check if folder with that name already exists
202 q = {
203 'q': "'{}' in parents and trashed=false and mimeType contains \'folder\'".format(
204 parent)}
205 fs = self.api.ListFile(q).GetList()
206 found = False
207 for f in fs:
208 if f['title'] == n:
209 found = True
210 parent = f['id']
211 continue
212 if found:
213 continue
214 # if no folder was found, create one
215 f = self.api.CreateFile({'title': n,
216 "parents": [{"id": parent}],
217 "mimeType": "application/vnd.google-apps.folder"})
218 f.Upload()
219 parent = f['id']
220 print(time.time() - t, "created", n)
221 t = time.time()
222 return parent
223
224 def download(self, fileid, directory='cwd', overwrite=False, verbose=True):
225 """Recursively download from Google Drive into a local directory.
226
227 By default, will not re-download if file passes following checks:
228
229 1. same size as remote file
230
231 2. local file last modified after remote file
232
233 Parameters
234 ----------
235 fileid : str
236 Google drive id for file or folder.
237 directory : str (optional)
238 Local directory to save content into. By default saves to cwd.
239 overwrite : bool (optional)
240 Toggle forcing file overwrites. Default is False.
241 verbose : bool (optional)s
242 Toggle talkback. Default is True.
243
244 Returns
245 -------
246 pydrive.files.GoogleDriveFile
247 """
248 self._authenticate()
249 # get directory
250 if directory == 'cwd':
251 directory = os.getcwd()
252 # get file object
253 f = self.api.CreateFile({'id': fileid})
254 f_path = os.path.join(directory, f['title'])
255 if f['mimeType'].split('.')[-1] == 'folder': # folder
256 # create folder
257 if not os.path.isdir(f_path):
258 os.mkdir(f_path)
259 # fill contents
260 for child_id in self._list_folder(fileid):
261 self.download(child_id, directory=f_path)
262 else: # single file
263 # check if file exists
264 if os.path.isfile(f_path):
265 remove = False
266 statinfo = os.stat(f_path)
267 # filesize different
268 if not int(statinfo.st_size) == int(f['fileSize']):
269 remove = True
270 # modified since creation
271 remote_stamp = f['modifiedDate'].split('.')[0] # UTC
272 remote_stamp = time.mktime(datetime.datetime.strptime(
273 remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())
274 local_stamp = os.path.getmtime(f_path) # local
275 local_stamp += time.timezone # UTC
276 if local_stamp < remote_stamp:
277 remove = True
278 # overwrite toggle
279 if overwrite:
280 remove = True
281 # remove
282 if remove:
283 os.remove(f_path)
284 else:
285 return f
286 # download
287 f.GetContentFile(f_path)
288 if verbose:
289 print('file downloaded to {}'.format(f_path))
290 # finish
291 return f
292
293 def list_folder(self, folderid):
294 """List contents of a remote folder.
295
296 Parameters
297 ----------
298 folderid : string
299 Folder ID.
300
301 Returns
302 -------
303 list of strings
304 List of contained IDs.
305 """
306 # adapted from https://github.com/googledrive/PyDrive/issues/37
307 # folder_id: GoogleDriveFile['id']
308 self._authenticate()
309 q = {'q': "'{}' in parents and trashed=false".format(folderid)}
310 raw_sub_contents = self.api.ListFile(q).GetList()
311 return [i['id'] for i in raw_sub_contents]
312
313 def upload(self, path, parentid, overwrite=False, delete_local=False,
314 verbose=True):
315 """Upload local file(s) to Google Drive.
316
317 Parameters
318 ----------
319 path : str
320 Path to local file or folder.
321 parentid : str
322 Google Drive ID of remote folder.
323 overwrite : bool (optional)
324 Toggle forcing overwrite of remote files. Default is False.
325 delete_local : bool (optional)
326 Toggle deleting local files and folders once uploaded. Default is
327 False.
328 verbose : bool (optional)
329 Toggle talkback. Default is True.
330
331 Returns
332 -------
333 driveid : str
334 Google Drive ID of folder or file uploaded
335 """
336 self._authenticate()
337 if os.path.isfile(path):
338 return self._upload_file(path, parentid, overwrite=overwrite,
339 delete_local=delete_local,
340 verbose=verbose)
341 elif os.path.isdir(path):
342 top_path_length = len(path.split(os.path.sep))
343 for tup in os.walk(path, topdown=False):
344 self._authenticate()
345 folder_path, _, file_names = tup
346 print(folder_path)
347 # create folder on google drive
348 name = folder_path.split(os.path.sep)[top_path_length - 1:]
349 folderid = self.create_folder(name, parentid)
350 # upload files
351 for file_name in file_names:
352 p = os.path.join(folder_path, file_name)
353 self._upload_file(p, folderid, overwrite=overwrite,
354 delete_local=delete_local,
355 verbose=verbose)
356 # remove folder
357 if delete_local:
358 os.rmdir(folder_path)
359 # finish
360 return folderid
361 else:
362 raise Exception('path {0} not valid in Drive.upload'.format(path))
363
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/WrightTools/google_drive.py b/WrightTools/google_drive.py
--- a/WrightTools/google_drive.py
+++ b/WrightTools/google_drive.py
@@ -245,7 +245,7 @@
if not os.path.isdir(f_path):
os.mkdir(f_path)
# fill contents
- for child_id in self._list_folder(fileid):
+ for child_id in self.list_folder(fileid):
self.download(child_id, directory=f_path)
else: # single file
# check if file exists
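For completeness, the bug above is a pure method-name mix-up, so it surfaces as an `AttributeError` the first time a folder is downloaded. A toy reproduction of the same mistake (simplified classes, not the real pydrive-backed `Drive`):

```python
class ToyDrive:
    def list_folder(self, folderid):
        # Stand-in for the real API call that lists a folder's children.
        return ["child-a", "child-b"]

    def download(self, fileid):
        # Bug: refers to a method name that was never defined on the class.
        return [child for child in self._list_folder(fileid)]


try:
    ToyDrive().download("root-folder-id")
except AttributeError as exc:
    print(exc)  # 'ToyDrive' object has no attribute '_list_folder'
```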
|
{"golden_diff": "diff --git a/WrightTools/google_drive.py b/WrightTools/google_drive.py\n--- a/WrightTools/google_drive.py\n+++ b/WrightTools/google_drive.py\n@@ -245,7 +245,7 @@\n if not os.path.isdir(f_path):\n os.mkdir(f_path)\n # fill contents\n- for child_id in self._list_folder(fileid):\n+ for child_id in self.list_folder(fileid):\n self.download(child_id, directory=f_path)\n else: # single file\n # check if file exists\n", "issue": "wt.google_drive.Drive.download() calls `self._list_folder()` when it should call `self.list_folder()`\nSee offending [line](https://github.com/wright-group/WrightTools/blob/039507d95561fd237dc7ba66b4d4f37ce6148397/WrightTools/google_drive.py#L248).\r\n\r\n[Line](https://github.com/wright-group/WrightTools/blob/039507d95561fd237dc7ba66b4d4f37ce6148397/WrightTools/google_drive.py#L281) where correct method is defined. \r\n\n", "before_files": [{"content": "\"\"\"Interact with google drive using the pydrive package.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport time\nimport datetime\nimport tempfile\nimport appdirs\nfrom glob import glob\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\ndirectory = os.path.dirname(os.path.abspath(__file__))\n\n\n# --- helper methods ------------------------------------------------------------------------------\n\n\ndef id_to_url(driveid):\n \"\"\"Generate a url from a Google Drive id.\n\n Parameters\n ----------\n id : string\n ID.\n\n Returns\n -------\n string\n url.\n \"\"\"\n return 'https://drive.google.com/open?id=' + driveid\n\n\n# --- drive class ---------------------------------------------------------------------------------\n\n\nclass Drive:\n \"\"\"Google Drive class.\"\"\"\n\n def __init__(self, account_id='default'):\n \"\"\"init.\"\"\"\n # Define the temp directory and file name format\n configDir = appdirs.user_data_dir('WrightTools', 'WrightGroup')\n if not os.path.isdir(configDir):\n os.makedirs(configDir)\n prefix = 'google-drive-'\n suffix = '-' + account_id + '.txt'\n # Check for existing file\n lis = glob(os.path.join(configDir, prefix + \"*\" + suffix))\n self.mycreds_path = ''\n if len(lis) > 0:\n for f in lis:\n # Check that for read and write access (or is bitwise, checking both)\n # Note this check is probably not needed with appdirs, but is not\n # harmful and provides additional insurance against crashes.\n if os.access(f, os.W_OK | os.R_OK):\n self.mycreds_path = f\n break\n # Make a new file if one does not exist with sufficent permissions\n if self.mycreds_path == '':\n self.mycreds_path = tempfile.mkstemp(prefix=prefix,\n suffix=suffix,\n text=True,\n dir=configDir)[1]\n self._authenticate()\n\n def _authenticate(self):\n \"\"\"Authenticate the user via a web browser.\n\n This function, once run, will open up a login window in a web browser.\n The user must then athenticate via email and password to authorize the\n API for usage with that particular account. Note that 'mycreds.txt' may\n just be an empty text file. 
This function will create the correct\n dictionary structure in the file upon completion.\n \"\"\"\n # This function requires a Client_secrets.json file to be in the\n # working directory.\n old_cwd = os.getcwd()\n os.chdir(directory)\n # import\n from pydrive.auth import GoogleAuth\n from pydrive.drive import GoogleDrive\n # load\n self.gauth = GoogleAuth()\n self.gauth.LoadCredentialsFile(self.mycreds_path)\n if self.gauth.credentials is None:\n # authenticate if credentials are not found\n self.gauth.LocalWebserverAuth()\n elif self.gauth.access_token_expired:\n # refresh credentials if they are expired\n self.gauth.Refresh()\n else:\n # initialize the saved credentials\n self.gauth.Authorize()\n # finish\n self.gauth.SaveCredentialsFile(self.mycreds_path)\n self.api = GoogleDrive(self.gauth)\n os.chdir(old_cwd)\n\n def _upload_file(self, filepath, parentid, overwrite=False,\n delete_local=False, verbose=True):\n \"\"\"Upload file.\n\n Parameters\n ----------\n filepath : string\n Filepath.\n parentid : string\n Parent ID.\n overwrite : boolean (optional)\n Toggle remote overwrite. Default is False.\n delete_local : boolean (optional).\n Toggle local deletion after upload. Default is False.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n \"\"\"\n self._authenticate()\n title = filepath.split(os.path.sep)[-1]\n # check if remote file already exists\n q = {'q': \"'{}' in parents and trashed=false\".format(parentid)}\n fs = self.api.ListFile(q).GetList()\n f = None\n for fi in fs:\n # dont want to look at folders\n if 'folder' in fi['mimeType']:\n continue\n if fi['title'] == title:\n print(title, 'found in upload file')\n f = fi\n if f is not None:\n remove = False\n statinfo = os.stat(filepath)\n # filesize different\n if not int(statinfo.st_size) == int(f['fileSize']):\n remove = True\n # modified since creation\n remote_stamp = f['modifiedDate'].split('.')[0] # UTC\n remote_stamp = time.mktime(datetime.datetime.strptime(\n remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())\n local_stamp = os.path.getmtime(filepath) # local\n local_stamp += time.timezone # UTC\n if local_stamp > remote_stamp:\n remove = True\n # overwrite toggle\n if overwrite:\n remove = True\n # remove\n if remove:\n f.Trash()\n f = None\n # upload\n if f is None:\n f = self.api.CreateFile({'title': title,\n 'parents': [{\"id\": parentid}]})\n f.SetContentFile(filepath)\n f.Upload()\n f.content.close()\n if verbose:\n print('file uploaded from {}'.format(filepath))\n # delete local\n if delete_local:\n os.remove(filepath)\n # finish\n return f['id']\n\n def create_folder(self, name, parentid):\n \"\"\"Create a new folder in Google Drive.\n\n Attributes\n ----------\n name : string or list of string\n Name of new folder to be created or list of new folders and\n subfolders.\n parentID : string\n Google Drive ID of folder that is to be the parent of new folder.\n\n Returns\n -------\n string\n The unique Google Drive ID of the bottom-most newly created folder.\n \"\"\"\n import time\n t = time.time()\n self._authenticate()\n print(time.time() - t, \"Authenticate\")\n t = time.time()\n # clean inputs\n if isinstance(name, str):\n name = [name]\n # create\n parent = parentid\n for n in name:\n # check if folder with that name already exists\n q = {\n 'q': \"'{}' in parents and trashed=false and mimeType contains \\'folder\\'\".format(\n parent)}\n fs = self.api.ListFile(q).GetList()\n found = False\n for f in fs:\n if f['title'] == n:\n found = True\n parent = f['id']\n continue\n if found:\n continue\n 
# if no folder was found, create one\n f = self.api.CreateFile({'title': n,\n \"parents\": [{\"id\": parent}],\n \"mimeType\": \"application/vnd.google-apps.folder\"})\n f.Upload()\n parent = f['id']\n print(time.time() - t, \"created\", n)\n t = time.time()\n return parent\n\n def download(self, fileid, directory='cwd', overwrite=False, verbose=True):\n \"\"\"Recursively download from Google Drive into a local directory.\n\n By default, will not re-download if file passes following checks:\n\n 1. same size as remote file\n\n 2. local file last modified after remote file\n\n Parameters\n ----------\n fileid : str\n Google drive id for file or folder.\n directory : str (optional)\n Local directory to save content into. By default saves to cwd.\n overwrite : bool (optional)\n Toggle forcing file overwrites. Default is False.\n verbose : bool (optional)s\n Toggle talkback. Default is True.\n\n Returns\n -------\n pydrive.files.GoogleDriveFile\n \"\"\"\n self._authenticate()\n # get directory\n if directory == 'cwd':\n directory = os.getcwd()\n # get file object\n f = self.api.CreateFile({'id': fileid})\n f_path = os.path.join(directory, f['title'])\n if f['mimeType'].split('.')[-1] == 'folder': # folder\n # create folder\n if not os.path.isdir(f_path):\n os.mkdir(f_path)\n # fill contents\n for child_id in self._list_folder(fileid):\n self.download(child_id, directory=f_path)\n else: # single file\n # check if file exists\n if os.path.isfile(f_path):\n remove = False\n statinfo = os.stat(f_path)\n # filesize different\n if not int(statinfo.st_size) == int(f['fileSize']):\n remove = True\n # modified since creation\n remote_stamp = f['modifiedDate'].split('.')[0] # UTC\n remote_stamp = time.mktime(datetime.datetime.strptime(\n remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())\n local_stamp = os.path.getmtime(f_path) # local\n local_stamp += time.timezone # UTC\n if local_stamp < remote_stamp:\n remove = True\n # overwrite toggle\n if overwrite:\n remove = True\n # remove\n if remove:\n os.remove(f_path)\n else:\n return f\n # download\n f.GetContentFile(f_path)\n if verbose:\n print('file downloaded to {}'.format(f_path))\n # finish\n return f\n\n def list_folder(self, folderid):\n \"\"\"List contents of a remote folder.\n\n Parameters\n ----------\n folderid : string\n Folder ID.\n\n Returns\n -------\n list of strings\n List of contained IDs.\n \"\"\"\n # adapted from https://github.com/googledrive/PyDrive/issues/37\n # folder_id: GoogleDriveFile['id']\n self._authenticate()\n q = {'q': \"'{}' in parents and trashed=false\".format(folderid)}\n raw_sub_contents = self.api.ListFile(q).GetList()\n return [i['id'] for i in raw_sub_contents]\n\n def upload(self, path, parentid, overwrite=False, delete_local=False,\n verbose=True):\n \"\"\"Upload local file(s) to Google Drive.\n\n Parameters\n ----------\n path : str\n Path to local file or folder.\n parentid : str\n Google Drive ID of remote folder.\n overwrite : bool (optional)\n Toggle forcing overwrite of remote files. Default is False.\n delete_local : bool (optional)\n Toggle deleting local files and folders once uploaded. Default is\n False.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n driveid : str\n Google Drive ID of folder or file uploaded\n \"\"\"\n self._authenticate()\n if os.path.isfile(path):\n return self._upload_file(path, parentid, overwrite=overwrite,\n delete_local=delete_local,\n verbose=verbose)\n elif os.path.isdir(path):\n top_path_length = len(path.split(os.path.sep))\n for tup in os.walk(path, topdown=False):\n self._authenticate()\n folder_path, _, file_names = tup\n print(folder_path)\n # create folder on google drive\n name = folder_path.split(os.path.sep)[top_path_length - 1:]\n folderid = self.create_folder(name, parentid)\n # upload files\n for file_name in file_names:\n p = os.path.join(folder_path, file_name)\n self._upload_file(p, folderid, overwrite=overwrite,\n delete_local=delete_local,\n verbose=verbose)\n # remove folder\n if delete_local:\n os.rmdir(folder_path)\n # finish\n return folderid\n else:\n raise Exception('path {0} not valid in Drive.upload'.format(path))\n", "path": "WrightTools/google_drive.py"}], "after_files": [{"content": "\"\"\"Interact with google drive using the pydrive package.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport time\nimport datetime\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\ndirectory = os.path.dirname(os.path.abspath(__file__))\n\n\n# --- ensure google drive creds folder populated --------------------------------------------------\n\n\ncreds_dir = os.path.join(directory, 'temp', 'google drive')\nif not os.path.isdir(creds_dir):\n os.mkdir(creds_dir)\n\nmycreds_path = os.path.join(creds_dir, 'mycreds.txt')\nif not os.path.isfile(mycreds_path):\n open(mycreds_path, 'a').close()\n\n\n# --- helper methods ------------------------------------------------------------------------------\n\n\ndef id_to_url(driveid):\n \"\"\"Generate a url from a Google Drive id.\n\n Parameters\n ----------\n id : string\n ID.\n\n Returns\n -------\n string\n url.\n \"\"\"\n return 'https://drive.google.com/open?id=' + driveid\n\n\n# --- drive class ---------------------------------------------------------------------------------\n\n\nclass Drive:\n \"\"\"Google Drive class.\"\"\"\n\n def __init__(self):\n \"\"\"init.\"\"\"\n # authenticate\n self.mycreds_path = mycreds_path\n self._authenticate()\n\n def _authenticate(self):\n \"\"\"Authenticate the user via a web browser.\n\n This function, once run, will open up a login window in a web browser.\n The user must then athenticate via email and password to authorize the\n API for usage with that particular account. Note that 'mycreds.txt' may\n just be an empty text file. 
This function will create the correct\n dictionary structure in the file upon completion.\n \"\"\"\n # This function requires a Client_secrets.json file to be in the\n # working directory.\n old_cwd = os.getcwd()\n os.chdir(directory)\n # import\n from pydrive.auth import GoogleAuth\n from pydrive.drive import GoogleDrive\n # load\n self.gauth = GoogleAuth()\n self.gauth.LoadCredentialsFile(self.mycreds_path)\n if self.gauth.credentials is None:\n # authenticate if credentials are not found\n self.gauth.LocalWebserverAuth()\n elif self.gauth.access_token_expired:\n # refresh credentials if they are expired\n self.gauth.Refresh()\n else:\n # initialize the saved credentials\n self.gauth.Authorize()\n # finish\n self.gauth.SaveCredentialsFile(self.mycreds_path)\n self.api = GoogleDrive(self.gauth)\n os.chdir(old_cwd)\n\n def _upload_file(self, filepath, parentid, overwrite=False,\n delete_local=False, verbose=True):\n \"\"\"Upload file.\n\n Parameters\n ----------\n filepath : string\n Filepath.\n parentid : string\n Parent ID.\n overwrite : boolean (optional)\n Toggle remote overwrite. Default is False.\n delete_local : boolean (optional).\n Toggle local deletion after upload. Default is False.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n \"\"\"\n self._authenticate()\n title = filepath.split(os.path.sep)[-1]\n # check if remote file already exists\n q = {'q': \"'{}' in parents and trashed=false\".format(parentid)}\n fs = self.api.ListFile(q).GetList()\n f = None\n for fi in fs:\n # dont want to look at folders\n if 'folder' in fi['mimeType']:\n continue\n if fi['title'] == title:\n print(title, 'found in upload file')\n f = fi\n if f is not None:\n remove = False\n statinfo = os.stat(filepath)\n # filesize different\n if not int(statinfo.st_size) == int(f['fileSize']):\n remove = True\n # modified since creation\n remote_stamp = f['modifiedDate'].split('.')[0] # UTC\n remote_stamp = time.mktime(datetime.datetime.strptime(\n remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())\n local_stamp = os.path.getmtime(filepath) # local\n local_stamp += time.timezone # UTC\n if local_stamp > remote_stamp:\n remove = True\n # overwrite toggle\n if overwrite:\n remove = True\n # remove\n if remove:\n f.Trash()\n f = None\n # upload\n if f is None:\n f = self.api.CreateFile({'title': title,\n 'parents': [{\"id\": parentid}]})\n f.SetContentFile(filepath)\n f.Upload()\n f.content.close()\n if verbose:\n print('file uploaded from {}'.format(filepath))\n # delete local\n if delete_local:\n os.remove(filepath)\n # finish\n return f['id']\n\n def create_folder(self, name, parentid):\n \"\"\"Create a new folder in Google Drive.\n\n Attributes\n ----------\n name : string or list of string\n Name of new folder to be created or list of new folders and\n subfolders.\n parentID : string\n Google Drive ID of folder that is to be the parent of new folder.\n\n Returns\n -------\n string\n The unique Google Drive ID of the bottom-most newly created folder.\n \"\"\"\n import time\n t = time.time()\n self._authenticate()\n print(time.time() - t, \"Authenticate\")\n t = time.time()\n # clean inputs\n if isinstance(name, str):\n name = [name]\n # create\n parent = parentid\n for n in name:\n # check if folder with that name already exists\n q = {\n 'q': \"'{}' in parents and trashed=false and mimeType contains \\'folder\\'\".format(\n parent)}\n fs = self.api.ListFile(q).GetList()\n found = False\n for f in fs:\n if f['title'] == n:\n found = True\n parent = f['id']\n continue\n if found:\n continue\n 
# if no folder was found, create one\n f = self.api.CreateFile({'title': n,\n \"parents\": [{\"id\": parent}],\n \"mimeType\": \"application/vnd.google-apps.folder\"})\n f.Upload()\n parent = f['id']\n print(time.time() - t, \"created\", n)\n t = time.time()\n return parent\n\n def download(self, fileid, directory='cwd', overwrite=False, verbose=True):\n \"\"\"Recursively download from Google Drive into a local directory.\n\n By default, will not re-download if file passes following checks:\n\n 1. same size as remote file\n\n 2. local file last modified after remote file\n\n Parameters\n ----------\n fileid : str\n Google drive id for file or folder.\n directory : str (optional)\n Local directory to save content into. By default saves to cwd.\n overwrite : bool (optional)\n Toggle forcing file overwrites. Default is False.\n verbose : bool (optional)s\n Toggle talkback. Default is True.\n\n Returns\n -------\n pydrive.files.GoogleDriveFile\n \"\"\"\n self._authenticate()\n # get directory\n if directory == 'cwd':\n directory = os.getcwd()\n # get file object\n f = self.api.CreateFile({'id': fileid})\n f_path = os.path.join(directory, f['title'])\n if f['mimeType'].split('.')[-1] == 'folder': # folder\n # create folder\n if not os.path.isdir(f_path):\n os.mkdir(f_path)\n # fill contents\n for child_id in self.list_folder(fileid):\n self.download(child_id, directory=f_path)\n else: # single file\n # check if file exists\n if os.path.isfile(f_path):\n remove = False\n statinfo = os.stat(f_path)\n # filesize different\n if not int(statinfo.st_size) == int(f['fileSize']):\n remove = True\n # modified since creation\n remote_stamp = f['modifiedDate'].split('.')[0] # UTC\n remote_stamp = time.mktime(datetime.datetime.strptime(\n remote_stamp, '%Y-%m-%dT%H:%M:%S').timetuple())\n local_stamp = os.path.getmtime(f_path) # local\n local_stamp += time.timezone # UTC\n if local_stamp < remote_stamp:\n remove = True\n # overwrite toggle\n if overwrite:\n remove = True\n # remove\n if remove:\n os.remove(f_path)\n else:\n return f\n # download\n f.GetContentFile(f_path)\n if verbose:\n print('file downloaded to {}'.format(f_path))\n # finish\n return f\n\n def list_folder(self, folderid):\n \"\"\"List contents of a remote folder.\n\n Parameters\n ----------\n folderid : string\n Folder ID.\n\n Returns\n -------\n list of strings\n List of contained IDs.\n \"\"\"\n # adapted from https://github.com/googledrive/PyDrive/issues/37\n # folder_id: GoogleDriveFile['id']\n self._authenticate()\n q = {'q': \"'{}' in parents and trashed=false\".format(folderid)}\n raw_sub_contents = self.api.ListFile(q).GetList()\n return [i['id'] for i in raw_sub_contents]\n\n def upload(self, path, parentid, overwrite=False, delete_local=False,\n verbose=True):\n \"\"\"Upload local file(s) to Google Drive.\n\n Parameters\n ----------\n path : str\n Path to local file or folder.\n parentid : str\n Google Drive ID of remote folder.\n overwrite : bool (optional)\n Toggle forcing overwrite of remote files. Default is False.\n delete_local : bool (optional)\n Toggle deleting local files and folders once uploaded. Default is\n False.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n driveid : str\n Google Drive ID of folder or file uploaded\n \"\"\"\n self._authenticate()\n if os.path.isfile(path):\n return self._upload_file(path, parentid, overwrite=overwrite,\n delete_local=delete_local,\n verbose=verbose)\n elif os.path.isdir(path):\n top_path_length = len(path.split(os.path.sep))\n for tup in os.walk(path, topdown=False):\n self._authenticate()\n folder_path, _, file_names = tup\n print(folder_path)\n # create folder on google drive\n name = folder_path.split(os.path.sep)[top_path_length - 1:]\n folderid = self.create_folder(name, parentid)\n # upload files\n for file_name in file_names:\n p = os.path.join(folder_path, file_name)\n self._upload_file(p, folderid, overwrite=overwrite,\n delete_local=delete_local,\n verbose=verbose)\n # remove folder\n if delete_local:\n os.rmdir(folder_path)\n # finish\n return folderid\n else:\n raise Exception('path {0} not valid in Drive.upload'.format(path))\n", "path": "WrightTools/google_drive.py"}]}
| 3,949 | 120 |
gh_patches_debug_58211
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-55
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make work on Chrome
Make work on Chrome
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/buildpkg.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """
4 Builds a Pyodide package.
5 """
6
7 import argparse
8 import hashlib
9 import os
10 import shutil
11 import subprocess
12
13
14 import common
15
16
17 ROOTDIR = os.path.abspath(os.path.dirname(__file__))
18
19
20 def check_checksum(path, pkg):
21 """
22 Checks that a tarball matches the checksum in the package metadata.
23 """
24 if 'md5' not in pkg['source']:
25 return
26 checksum = pkg['source']['md5']
27 CHUNK_SIZE = 1 << 16
28 h = hashlib.md5()
29 with open(path, 'rb') as fd:
30 while True:
31 chunk = fd.read(CHUNK_SIZE)
32 h.update(chunk)
33 if len(chunk) < CHUNK_SIZE:
34 break
35 if h.hexdigest() != checksum:
36 raise ValueError("Invalid checksum")
37
38
39 def download_and_extract(buildpath, packagedir, pkg, args):
40 tarballpath = os.path.join(
41 buildpath, os.path.basename(pkg['source']['url']))
42 if not os.path.isfile(tarballpath):
43 subprocess.run([
44 'wget', '-q', '-O', tarballpath, pkg['source']['url']
45 ], check=True)
46 check_checksum(tarballpath, pkg)
47 srcpath = os.path.join(buildpath, packagedir)
48 if not os.path.isdir(srcpath):
49 shutil.unpack_archive(tarballpath, buildpath)
50 return srcpath
51
52
53 def patch(path, srcpath, pkg, args):
54 if os.path.isfile(os.path.join(srcpath, '.patched')):
55 return
56
57 # Apply all of the patches
58 orig_dir = os.getcwd()
59 pkgdir = os.path.abspath(os.path.dirname(path))
60 os.chdir(srcpath)
61 try:
62 for patch in pkg['source'].get('patches', []):
63 subprocess.run([
64 'patch', '-p1', '--binary', '-i', os.path.join(pkgdir, patch)
65 ], check=True)
66 finally:
67 os.chdir(orig_dir)
68
69 # Add any extra files
70 for src, dst in pkg['source'].get('extras', []):
71 shutil.copyfile(os.path.join(pkgdir, src), os.path.join(srcpath, dst))
72
73 with open(os.path.join(srcpath, '.patched'), 'wb') as fd:
74 fd.write(b'\n')
75
76
77 def get_libdir(srcpath, args):
78 # Get the name of the build/lib.XXX directory that distutils wrote its
79 # output to
80 slug = subprocess.check_output([
81 os.path.join(args.host, 'bin', 'python3'),
82 '-c',
83 'import sysconfig, sys; '
84 'print("{}-{}.{}".format('
85 'sysconfig.get_platform(), '
86 'sys.version_info[0], '
87 'sys.version_info[1]))']).decode('ascii').strip()
88 purelib = os.path.join(srcpath, 'build', 'lib')
89 if os.path.isdir(purelib):
90 libdir = purelib
91 else:
92 libdir = os.path.join(srcpath, 'build', 'lib.' + slug)
93 return libdir
94
95
96 def compile(path, srcpath, pkg, args):
97 if os.path.isfile(os.path.join(srcpath, '.built')):
98 return
99
100 orig_dir = os.getcwd()
101 os.chdir(srcpath)
102 try:
103 subprocess.run([
104 os.path.join(args.host, 'bin', 'python3'),
105 os.path.join(ROOTDIR, 'pywasmcross'),
106 '--cflags',
107 args.cflags + ' ' +
108 pkg.get('build', {}).get('cflags', ''),
109 '--ldflags',
110 args.ldflags + ' ' +
111 pkg.get('build', {}).get('ldflags', ''),
112 '--host', args.host,
113 '--target', args.target], check=True)
114 finally:
115 os.chdir(orig_dir)
116
117 post = pkg.get('build', {}).get('post')
118 if post is not None:
119 libdir = get_libdir(srcpath, args)
120 pkgdir = os.path.abspath(os.path.dirname(path))
121 env = {
122 'BUILD': libdir,
123 'PKGDIR': pkgdir
124 }
125 subprocess.run([
126 'bash', '-c', post], env=env, check=True)
127
128 with open(os.path.join(srcpath, '.built'), 'wb') as fd:
129 fd.write(b'\n')
130
131
132 def package_files(buildpath, srcpath, pkg, args):
133 if os.path.isfile(os.path.join(buildpath, '.packaged')):
134 return
135
136 name = pkg['package']['name']
137 libdir = get_libdir(srcpath, args)
138 subprocess.run([
139 'python2',
140 os.path.join(os.environ['EMSCRIPTEN'], 'tools', 'file_packager.py'),
141 os.path.join(buildpath, name + '.data'),
142 '--preload',
143 '{}@/lib/python3.6/site-packages'.format(libdir),
144 '--js-output={}'.format(os.path.join(buildpath, name + '.js')),
145 '--export-name=pyodide',
146 '--exclude', '*.wasm.pre',
147 '--exclude', '__pycache__'], check=True)
148 subprocess.run([
149 'uglifyjs',
150 os.path.join(buildpath, name + '.js'),
151 '-o',
152 os.path.join(buildpath, name + '.js')], check=True)
153
154 with open(os.path.join(buildpath, '.packaged'), 'wb') as fd:
155 fd.write(b'\n')
156
157
158 def build_package(path, args):
159 pkg = common.parse_package(path)
160 packagedir = pkg['package']['name'] + '-' + pkg['package']['version']
161 dirpath = os.path.dirname(path)
162 orig_path = os.getcwd()
163 os.chdir(dirpath)
164 try:
165 buildpath = os.path.join(dirpath, 'build')
166 if not os.path.exists(buildpath):
167 os.makedirs(buildpath)
168 srcpath = download_and_extract(buildpath, packagedir, pkg, args)
169 patch(path, srcpath, pkg, args)
170 compile(path, srcpath, pkg, args)
171 package_files(buildpath, srcpath, pkg, args)
172 finally:
173 os.chdir(orig_path)
174
175
176 def parse_args():
177 parser = argparse.ArgumentParser('Build a pyodide package.')
178 parser.add_argument(
179 'package', type=str, nargs=1,
180 help="Path to meta.yaml package description")
181 parser.add_argument(
182 '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,
183 help='Extra compiling flags')
184 parser.add_argument(
185 '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,
186 help='Extra linking flags')
187 parser.add_argument(
188 '--host', type=str, nargs='?', default=common.HOSTPYTHON,
189 help='The path to the host Python installation')
190 parser.add_argument(
191 '--target', type=str, nargs='?', default=common.TARGETPYTHON,
192 help='The path to the target Python installation')
193 return parser.parse_args()
194
195
196 def main(args):
197 path = os.path.abspath(args.package[0])
198 build_package(path, args)
199
200
201 if __name__ == '__main__':
202 args = parse_args()
203 main(args)
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/buildpkg.py b/tools/buildpkg.py
--- a/tools/buildpkg.py
+++ b/tools/buildpkg.py
@@ -144,7 +144,8 @@
'--js-output={}'.format(os.path.join(buildpath, name + '.js')),
'--export-name=pyodide',
'--exclude', '*.wasm.pre',
- '--exclude', '__pycache__'], check=True)
+ '--exclude', '__pycache__',
+ '--use-preload-plugins'], check=True)
subprocess.run([
'uglifyjs',
os.path.join(buildpath, name + '.js'),
|
{"golden_diff": "diff --git a/tools/buildpkg.py b/tools/buildpkg.py\n--- a/tools/buildpkg.py\n+++ b/tools/buildpkg.py\n@@ -144,7 +144,8 @@\n '--js-output={}'.format(os.path.join(buildpath, name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n- '--exclude', '__pycache__'], check=True)\n+ '--exclude', '__pycache__',\n+ '--use-preload-plugins'], check=True)\n subprocess.run([\n 'uglifyjs',\n os.path.join(buildpath, name + '.js'),\n", "issue": "Make work on Chrome\n\nMake work on Chrome\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n if 'md5' not in pkg['source']:\n return\n checksum = pkg['source']['md5']\n CHUNK_SIZE = 1 << 16\n h = hashlib.md5()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid checksum\")\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = os.path.join(\n buildpath, os.path.basename(pkg['source']['url']))\n if not os.path.isfile(tarballpath):\n subprocess.run([\n 'wget', '-q', '-O', tarballpath, pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = os.path.join(buildpath, packagedir)\n if not os.path.isdir(srcpath):\n shutil.unpack_archive(tarballpath, buildpath)\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.patched')):\n return\n\n # Apply all of the patches\n orig_dir = os.getcwd()\n pkgdir = os.path.abspath(os.path.dirname(path))\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', os.path.join(pkgdir, patch)\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(os.path.join(pkgdir, src), os.path.join(srcpath, dst))\n\n with open(os.path.join(srcpath, '.patched'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n os.path.join(args.host, 'bin', 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = os.path.join(srcpath, 'build', 'lib')\n if os.path.isdir(purelib):\n libdir = purelib\n else:\n libdir = os.path.join(srcpath, 'build', 'lib.' 
+ slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.built')):\n return\n\n orig_dir = os.getcwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n os.path.join(args.host, 'bin', 'python3'),\n os.path.join(ROOTDIR, 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = os.path.abspath(os.path.dirname(path))\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(os.path.join(srcpath, '.built'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if os.path.isfile(os.path.join(buildpath, '.packaged')):\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python2',\n os.path.join(os.environ['EMSCRIPTEN'], 'tools', 'file_packager.py'),\n os.path.join(buildpath, name + '.data'),\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(os.path.join(buildpath, name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__'], check=True)\n subprocess.run([\n 'uglifyjs',\n os.path.join(buildpath, name + '.js'),\n '-o',\n os.path.join(buildpath, name + '.js')], check=True)\n\n with open(os.path.join(buildpath, '.packaged'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = os.path.dirname(path)\n orig_path = os.getcwd()\n os.chdir(dirpath)\n try:\n buildpath = os.path.join(dirpath, 'build')\n if not os.path.exists(buildpath):\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = os.path.abspath(args.package[0])\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nBuilds a Pyodide package.\n\"\"\"\n\nimport argparse\nimport hashlib\nimport os\nimport shutil\nimport subprocess\n\n\nimport common\n\n\nROOTDIR = os.path.abspath(os.path.dirname(__file__))\n\n\ndef check_checksum(path, pkg):\n \"\"\"\n Checks that a tarball matches the checksum in the package metadata.\n \"\"\"\n if 'md5' 
not in pkg['source']:\n return\n checksum = pkg['source']['md5']\n CHUNK_SIZE = 1 << 16\n h = hashlib.md5()\n with open(path, 'rb') as fd:\n while True:\n chunk = fd.read(CHUNK_SIZE)\n h.update(chunk)\n if len(chunk) < CHUNK_SIZE:\n break\n if h.hexdigest() != checksum:\n raise ValueError(\"Invalid checksum\")\n\n\ndef download_and_extract(buildpath, packagedir, pkg, args):\n tarballpath = os.path.join(\n buildpath, os.path.basename(pkg['source']['url']))\n if not os.path.isfile(tarballpath):\n subprocess.run([\n 'wget', '-q', '-O', tarballpath, pkg['source']['url']\n ], check=True)\n check_checksum(tarballpath, pkg)\n srcpath = os.path.join(buildpath, packagedir)\n if not os.path.isdir(srcpath):\n shutil.unpack_archive(tarballpath, buildpath)\n return srcpath\n\n\ndef patch(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.patched')):\n return\n\n # Apply all of the patches\n orig_dir = os.getcwd()\n pkgdir = os.path.abspath(os.path.dirname(path))\n os.chdir(srcpath)\n try:\n for patch in pkg['source'].get('patches', []):\n subprocess.run([\n 'patch', '-p1', '--binary', '-i', os.path.join(pkgdir, patch)\n ], check=True)\n finally:\n os.chdir(orig_dir)\n\n # Add any extra files\n for src, dst in pkg['source'].get('extras', []):\n shutil.copyfile(os.path.join(pkgdir, src), os.path.join(srcpath, dst))\n\n with open(os.path.join(srcpath, '.patched'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef get_libdir(srcpath, args):\n # Get the name of the build/lib.XXX directory that distutils wrote its\n # output to\n slug = subprocess.check_output([\n os.path.join(args.host, 'bin', 'python3'),\n '-c',\n 'import sysconfig, sys; '\n 'print(\"{}-{}.{}\".format('\n 'sysconfig.get_platform(), '\n 'sys.version_info[0], '\n 'sys.version_info[1]))']).decode('ascii').strip()\n purelib = os.path.join(srcpath, 'build', 'lib')\n if os.path.isdir(purelib):\n libdir = purelib\n else:\n libdir = os.path.join(srcpath, 'build', 'lib.' 
+ slug)\n return libdir\n\n\ndef compile(path, srcpath, pkg, args):\n if os.path.isfile(os.path.join(srcpath, '.built')):\n return\n\n orig_dir = os.getcwd()\n os.chdir(srcpath)\n try:\n subprocess.run([\n os.path.join(args.host, 'bin', 'python3'),\n os.path.join(ROOTDIR, 'pywasmcross'),\n '--cflags',\n args.cflags + ' ' +\n pkg.get('build', {}).get('cflags', ''),\n '--ldflags',\n args.ldflags + ' ' +\n pkg.get('build', {}).get('ldflags', ''),\n '--host', args.host,\n '--target', args.target], check=True)\n finally:\n os.chdir(orig_dir)\n\n post = pkg.get('build', {}).get('post')\n if post is not None:\n libdir = get_libdir(srcpath, args)\n pkgdir = os.path.abspath(os.path.dirname(path))\n env = {\n 'BUILD': libdir,\n 'PKGDIR': pkgdir\n }\n subprocess.run([\n 'bash', '-c', post], env=env, check=True)\n\n with open(os.path.join(srcpath, '.built'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef package_files(buildpath, srcpath, pkg, args):\n if os.path.isfile(os.path.join(buildpath, '.packaged')):\n return\n\n name = pkg['package']['name']\n libdir = get_libdir(srcpath, args)\n subprocess.run([\n 'python2',\n os.path.join(os.environ['EMSCRIPTEN'], 'tools', 'file_packager.py'),\n os.path.join(buildpath, name + '.data'),\n '--preload',\n '{}@/lib/python3.6/site-packages'.format(libdir),\n '--js-output={}'.format(os.path.join(buildpath, name + '.js')),\n '--export-name=pyodide',\n '--exclude', '*.wasm.pre',\n '--exclude', '__pycache__',\n '--use-preload-plugins'], check=True)\n subprocess.run([\n 'uglifyjs',\n os.path.join(buildpath, name + '.js'),\n '-o',\n os.path.join(buildpath, name + '.js')], check=True)\n\n with open(os.path.join(buildpath, '.packaged'), 'wb') as fd:\n fd.write(b'\\n')\n\n\ndef build_package(path, args):\n pkg = common.parse_package(path)\n packagedir = pkg['package']['name'] + '-' + pkg['package']['version']\n dirpath = os.path.dirname(path)\n orig_path = os.getcwd()\n os.chdir(dirpath)\n try:\n buildpath = os.path.join(dirpath, 'build')\n if not os.path.exists(buildpath):\n os.makedirs(buildpath)\n srcpath = download_and_extract(buildpath, packagedir, pkg, args)\n patch(path, srcpath, pkg, args)\n compile(path, srcpath, pkg, args)\n package_files(buildpath, srcpath, pkg, args)\n finally:\n os.chdir(orig_path)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser('Build a pyodide package.')\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"Path to meta.yaml package description\")\n parser.add_argument(\n '--cflags', type=str, nargs='?', default=common.DEFAULTCFLAGS,\n help='Extra compiling flags')\n parser.add_argument(\n '--ldflags', type=str, nargs='?', default=common.DEFAULTLDFLAGS,\n help='Extra linking flags')\n parser.add_argument(\n '--host', type=str, nargs='?', default=common.HOSTPYTHON,\n help='The path to the host Python installation')\n parser.add_argument(\n '--target', type=str, nargs='?', default=common.TARGETPYTHON,\n help='The path to the target Python installation')\n return parser.parse_args()\n\n\ndef main(args):\n path = os.path.abspath(args.package[0])\n build_package(path, args)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "tools/buildpkg.py"}]}
| 2,314 | 133 |
gh_patches_debug_18156
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1486
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
US-IPC is occasionally missing a key
```
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/US_IPC.py", line 129, in fetch_production
processed_data = data_processer(raw_data, logger)
File "/home/electricitymap/parsers/US_IPC.py", line 83, in data_processer
production.pop(key)
KeyError: 'Net Purchases'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/US_IPC.py`
Content:
```
1 #!usr/bin/env python3
2
3 """Parser for the Idaho Power Comapny area of the United States."""
4
5 from dateutil import parser, tz
6 from itertools import groupby
7 from logging import getLogger
8 import requests
9
10
11 # NOTE No pumped storage yet but future ideas can be found at the following url.
12 # https://docs.idahopower.com/pdfs/AboutUs/PlanningForFuture/irp/IRP.pdf
13
14 # Renewable energy (PURPA) is likely bought with credits from outside the Utility
15 # area and not supplied to customers. For that reason those types are commented out.
16
17 PRODUCTION_URL = 'https://api.idahopower.com/Energy/Api/V1/GenerationAndDemand/Subset'
18
19 GENERATION_MAPPING = {'Non-Utility Geothermal': 'geothermal',
20 'Hydro': 'hydro',
21 'Coal': 'coal',
22 'Diesel': 'oil',
23 'PURPA/Non-Utility Wind': 'wind',
24 'Natural Gas': 'gas',
25 'PURPA/Non-Utility Solar': 'solar'
26 #'PURPA Other': 'biomass'
27 }
28
29 TIMEZONE = tz.gettz("America/Boise")
30
31
32 def get_data(session=None):
33 """Returns a list of dictionaries."""
34
35 s = session or requests.Session()
36
37 req = requests.get(PRODUCTION_URL)
38 json_data = req.json()
39 raw_data = json_data['list']
40
41 return raw_data
42
43
44 def timestamp_converter(timestamp):
45 """Converts a str timestamp into an aware datetime object."""
46
47 dt_naive = parser.parse(timestamp)
48 dt_aware = dt_naive.replace(tzinfo=TIMEZONE)
49
50 return dt_aware
51
52
53 def data_processer(raw_data, logger):
54 """
55 Groups dictionaries by datetime key.
56 Removes unneeded keys and logs any new ones.
57 Returns a list of tuples containing (datetime object, dictionary).
58 """
59
60 dt_key = lambda x: x['datetime']
61 grouped = groupby(raw_data, dt_key)
62
63 keys_to_ignore = {'Load', 'Net Purchases', 'Inadvertent', 'PURPA Other'}
64 known_keys = GENERATION_MAPPING.keys() | keys_to_ignore
65
66 unmapped = set()
67 parsed_data = []
68 for group in grouped:
69 dt = timestamp_converter(group[0])
70 generation = group[1]
71
72 production = {}
73 for gen_type in generation:
74 production[gen_type['name']] = float(gen_type['data'])
75
76 current_keys = production.keys() | set()
77 unknown_keys = current_keys - known_keys
78 unmapped = unmapped | unknown_keys
79
80 keys_to_remove = keys_to_ignore | unknown_keys
81
82 for key in keys_to_remove:
83 production.pop(key)
84
85 production = {GENERATION_MAPPING[k]: v for k, v in production.items()}
86
87 parsed_data.append((dt, production))
88
89 for key in unmapped:
90 logger.warning('Key \'{}\' in US-IPC is not mapped to type.'.format(key), extra={'key': 'US-IPC'})
91
92 return parsed_data
93
94
95 def fetch_production(zone_key = 'US-IPC', session=None, target_datetime=None, logger=getLogger(__name__)):
96 """
97 Requests the last known production mix (in MW) of a given zone
98 Arguments:
99 zone_key (optional) -- used in case a parser is able to fetch multiple zones
100 session (optional) -- request session passed in order to re-use an existing session
101 Return:
102 A list of dictionaries in the form:
103 {
104 'zoneKey': 'FR',
105 'datetime': '2017-01-01T00:00:00Z',
106 'production': {
107 'biomass': 0.0,
108 'coal': 0.0,
109 'gas': 0.0,
110 'hydro': 0.0,
111 'nuclear': null,
112 'oil': 0.0,
113 'solar': 0.0,
114 'wind': 0.0,
115 'geothermal': 0.0,
116 'unknown': 0.0
117 },
118 'storage': {
119 'hydro': -10.0,
120 },
121 'source': 'mysource.com'
122 }
123 """
124
125 if target_datetime is not None:
126 raise NotImplementedError('This parser is not yet able to parse past dates')
127
128 raw_data = get_data(session=session)
129 processed_data = data_processer(raw_data, logger)
130
131 production_data = []
132 for item in processed_data:
133 datapoint = {
134 'zoneKey': zone_key,
135 'datetime': item[0],
136 'production': item[1],
137 'storage': {},
138 'source': 'idahopower.com'
139 }
140
141 production_data.append(datapoint)
142
143 return production_data
144
145
146 if __name__ == '__main__':
147 """Main method, never used by the Electricity Map backend, but handy for testing."""
148
149 print('fetch_production() ->')
150 print(fetch_production())
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/US_IPC.py b/parsers/US_IPC.py
--- a/parsers/US_IPC.py
+++ b/parsers/US_IPC.py
@@ -12,7 +12,7 @@
# https://docs.idahopower.com/pdfs/AboutUs/PlanningForFuture/irp/IRP.pdf
# Renewable energy (PURPA) is likely bought with credits from outside the Utility
-# area and not supplied to customers. For that reason those types are commented out.
+# area and not supplied to customers. For that reason those types are commented out.
PRODUCTION_URL = 'https://api.idahopower.com/Energy/Api/V1/GenerationAndDemand/Subset'
@@ -80,7 +80,7 @@
keys_to_remove = keys_to_ignore | unknown_keys
for key in keys_to_remove:
- production.pop(key)
+ production.pop(key, None)
production = {GENERATION_MAPPING[k]: v for k, v in production.items()}
|
{"golden_diff": "diff --git a/parsers/US_IPC.py b/parsers/US_IPC.py\n--- a/parsers/US_IPC.py\n+++ b/parsers/US_IPC.py\n@@ -12,7 +12,7 @@\n # https://docs.idahopower.com/pdfs/AboutUs/PlanningForFuture/irp/IRP.pdf\n \n # Renewable energy (PURPA) is likely bought with credits from outside the Utility\n-# area and not supplied to customers. For that reason those types are commented out. \n+# area and not supplied to customers. For that reason those types are commented out.\n \n PRODUCTION_URL = 'https://api.idahopower.com/Energy/Api/V1/GenerationAndDemand/Subset'\n \n@@ -80,7 +80,7 @@\n keys_to_remove = keys_to_ignore | unknown_keys\n \n for key in keys_to_remove:\n- production.pop(key)\n+ production.pop(key, None)\n \n production = {GENERATION_MAPPING[k]: v for k, v in production.items()}\n", "issue": "US-IPC is occasionally missing a key\n```\r\nTraceback (most recent call last):\r\n File \"/home/feeder/lib/fetch_data.py\", line 130, in launch_parsers\r\n **parser_kwargs)\r\n File \"/home/electricitymap/parsers/US_IPC.py\", line 129, in fetch_production\r\n processed_data = data_processer(raw_data, logger)\r\n File \"/home/electricitymap/parsers/US_IPC.py\", line 83, in data_processer\r\n production.pop(key)\r\nKeyError: 'Net Purchases'\r\n```\n", "before_files": [{"content": "#!usr/bin/env python3\n\n\"\"\"Parser for the Idaho Power Comapny area of the United States.\"\"\"\n\nfrom dateutil import parser, tz\nfrom itertools import groupby\nfrom logging import getLogger\nimport requests\n\n\n# NOTE No pumped storage yet but future ideas can be found at the following url.\n# https://docs.idahopower.com/pdfs/AboutUs/PlanningForFuture/irp/IRP.pdf\n\n# Renewable energy (PURPA) is likely bought with credits from outside the Utility\n# area and not supplied to customers. For that reason those types are commented out. 
\n\nPRODUCTION_URL = 'https://api.idahopower.com/Energy/Api/V1/GenerationAndDemand/Subset'\n\nGENERATION_MAPPING = {'Non-Utility Geothermal': 'geothermal',\n 'Hydro': 'hydro',\n 'Coal': 'coal',\n 'Diesel': 'oil',\n 'PURPA/Non-Utility Wind': 'wind',\n 'Natural Gas': 'gas',\n 'PURPA/Non-Utility Solar': 'solar'\n #'PURPA Other': 'biomass'\n }\n\nTIMEZONE = tz.gettz(\"America/Boise\")\n\n\ndef get_data(session=None):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n s = session or requests.Session()\n\n req = requests.get(PRODUCTION_URL)\n json_data = req.json()\n raw_data = json_data['list']\n\n return raw_data\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Converts a str timestamp into an aware datetime object.\"\"\"\n\n dt_naive = parser.parse(timestamp)\n dt_aware = dt_naive.replace(tzinfo=TIMEZONE)\n\n return dt_aware\n\n\ndef data_processer(raw_data, logger):\n \"\"\"\n Groups dictionaries by datetime key.\n Removes unneeded keys and logs any new ones.\n Returns a list of tuples containing (datetime object, dictionary).\n \"\"\"\n\n dt_key = lambda x: x['datetime']\n grouped = groupby(raw_data, dt_key)\n\n keys_to_ignore = {'Load', 'Net Purchases', 'Inadvertent', 'PURPA Other'}\n known_keys = GENERATION_MAPPING.keys() | keys_to_ignore\n\n unmapped = set()\n parsed_data = []\n for group in grouped:\n dt = timestamp_converter(group[0])\n generation = group[1]\n\n production = {}\n for gen_type in generation:\n production[gen_type['name']] = float(gen_type['data'])\n\n current_keys = production.keys() | set()\n unknown_keys = current_keys - known_keys\n unmapped = unmapped | unknown_keys\n\n keys_to_remove = keys_to_ignore | unknown_keys\n\n for key in keys_to_remove:\n production.pop(key)\n\n production = {GENERATION_MAPPING[k]: v for k, v in production.items()}\n\n parsed_data.append((dt, production))\n\n for key in unmapped:\n logger.warning('Key \\'{}\\' in US-IPC is not mapped to type.'.format(key), extra={'key': 'US-IPC'})\n\n return parsed_data\n\n\ndef fetch_production(zone_key = 'US-IPC', session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime is not None:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(session=session)\n processed_data = data_processer(raw_data, logger)\n\n production_data = []\n for item in processed_data:\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'idahopower.com'\n }\n\n production_data.append(datapoint)\n\n return production_data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_IPC.py"}], "after_files": [{"content": "#!usr/bin/env python3\n\n\"\"\"Parser for the Idaho Power Comapny area of the United States.\"\"\"\n\nfrom 
dateutil import parser, tz\nfrom itertools import groupby\nfrom logging import getLogger\nimport requests\n\n\n# NOTE No pumped storage yet but future ideas can be found at the following url.\n# https://docs.idahopower.com/pdfs/AboutUs/PlanningForFuture/irp/IRP.pdf\n\n# Renewable energy (PURPA) is likely bought with credits from outside the Utility\n# area and not supplied to customers. For that reason those types are commented out.\n\nPRODUCTION_URL = 'https://api.idahopower.com/Energy/Api/V1/GenerationAndDemand/Subset'\n\nGENERATION_MAPPING = {'Non-Utility Geothermal': 'geothermal',\n 'Hydro': 'hydro',\n 'Coal': 'coal',\n 'Diesel': 'oil',\n 'PURPA/Non-Utility Wind': 'wind',\n 'Natural Gas': 'gas',\n 'PURPA/Non-Utility Solar': 'solar'\n #'PURPA Other': 'biomass'\n }\n\nTIMEZONE = tz.gettz(\"America/Boise\")\n\n\ndef get_data(session=None):\n \"\"\"Returns a list of dictionaries.\"\"\"\n\n s = session or requests.Session()\n\n req = requests.get(PRODUCTION_URL)\n json_data = req.json()\n raw_data = json_data['list']\n\n return raw_data\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Converts a str timestamp into an aware datetime object.\"\"\"\n\n dt_naive = parser.parse(timestamp)\n dt_aware = dt_naive.replace(tzinfo=TIMEZONE)\n\n return dt_aware\n\n\ndef data_processer(raw_data, logger):\n \"\"\"\n Groups dictionaries by datetime key.\n Removes unneeded keys and logs any new ones.\n Returns a list of tuples containing (datetime object, dictionary).\n \"\"\"\n\n dt_key = lambda x: x['datetime']\n grouped = groupby(raw_data, dt_key)\n\n keys_to_ignore = {'Load', 'Net Purchases', 'Inadvertent', 'PURPA Other'}\n known_keys = GENERATION_MAPPING.keys() | keys_to_ignore\n\n unmapped = set()\n parsed_data = []\n for group in grouped:\n dt = timestamp_converter(group[0])\n generation = group[1]\n\n production = {}\n for gen_type in generation:\n production[gen_type['name']] = float(gen_type['data'])\n\n current_keys = production.keys() | set()\n unknown_keys = current_keys - known_keys\n unmapped = unmapped | unknown_keys\n\n keys_to_remove = keys_to_ignore | unknown_keys\n\n for key in keys_to_remove:\n production.pop(key, None)\n\n production = {GENERATION_MAPPING[k]: v for k, v in production.items()}\n\n parsed_data.append((dt, production))\n\n for key in unmapped:\n logger.warning('Key \\'{}\\' in US-IPC is not mapped to type.'.format(key), extra={'key': 'US-IPC'})\n\n return parsed_data\n\n\ndef fetch_production(zone_key = 'US-IPC', session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime is not None:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(session=session)\n processed_data = data_processer(raw_data, logger)\n\n production_data = []\n for item in processed_data:\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 
'idahopower.com'\n }\n\n production_data.append(datapoint)\n\n return production_data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_IPC.py"}]}
| 1,821 | 224 |
gh_patches_debug_37351
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1412
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong placeholder in the rename quick panel
**Describe the bug**
For LSP-intelephense, it seems that the placeholder in the quick panel is the one under the caret rather than the actual symbol which triggers that. The actual renaming operation is done right though.
Unfortunately `Rename` is a premium feature in intelephense... and I can't reproduce this with LSP-pyright... weird.
**To Reproduce**
```php
<?php
class Foo
{
public static function getInstance()
{
return new static();
}
public function abcd()
{
echo 'haha';
}
}
Foo::getInstance()->abcd();
```
Steps to reproduce the behavior:
1. Place caret on `haha`
2. Hover on `bar`
3. Click `Rename` on the popup
4. `haha` will be in the shown quick panel
**Expected behavior**
It should be `bar` in the shown panel.
**Screenshots**
Video recording: https://streamable.com/bw762h
**Environment (please complete the following information):**
- OS: Win7 x64
- Sublime Text version: 4090
- LSP version: the latest commit on `st4000-exploration` 653fd3c4e25c2ed5294f76b0b8b5db79f56f18c2
- Language servers used: intelephense
**Additional context**
I see the server communication log but I didn't see any "line 11" in it but they are all correct "line 15".
LSP log:
```
:: --> LSP-intelephense textDocument/hover(87): {'position': {'character': 23, 'line': 15}, 'textDocument': {'uri': 'file:///E:/_Test/php/src/test.php'}}
:: <<< LSP-intelephense 87: {'range': {'start': {'character': 20, 'line': 15}, 'end': {'character': 24, 'line': 15}}, 'contents': {'kind': 'markdown', 'value': '__Foo::abcd__\n\n```php\n<?php\npublic function abcd() { }\n```\n\n_@return_ `void`'}}
:: --> LSP-intelephense textDocument/prepareRename(88): {'position': {'character': 23, 'line': 15}, 'textDocument': {'uri': 'file:///E:/_Test/php/src/test.php'}}
:: <<< LSP-intelephense 88: {'start': {'character': 20, 'line': 15}, 'end': {'character': 24, 'line': 15}}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/rename.py`
Content:
```
1 import sublime
2 import sublime_plugin
3 from .core.edit import apply_workspace_edit
4 from .core.edit import parse_workspace_edit
5 from .core.protocol import Range
6 from .core.protocol import Request
7 from .core.registry import get_position
8 from .core.registry import LspTextCommand
9 from .core.typing import Any, Optional
10 from .core.views import range_to_region
11 from .core.views import text_document_position_params
12
13
14 class RenameSymbolInputHandler(sublime_plugin.TextInputHandler):
15 def __init__(self, view: sublime.View, placeholder: str) -> None:
16 self.view = view
17 self._placeholder = placeholder
18
19 def name(self) -> str:
20 return "new_name"
21
22 def placeholder(self) -> str:
23 if self._placeholder:
24 return self._placeholder
25 return self.get_current_symbol_name()
26
27 def initial_text(self) -> str:
28 return self.placeholder()
29
30 def validate(self, name: str) -> bool:
31 return len(name) > 0
32
33 def get_current_symbol_name(self) -> str:
34 pos = get_position(self.view)
35 current_name = self.view.substr(self.view.word(pos))
36 # Is this check necessary?
37 if not current_name:
38 current_name = ""
39 return current_name
40
41
42 class LspSymbolRenameCommand(LspTextCommand):
43
44 capability = 'renameProvider'
45
46 # mypy: Signature of "is_enabled" incompatible with supertype "LspTextCommand"
47 def is_enabled( # type: ignore
48 self,
49 new_name: str = "",
50 placeholder: str = "",
51 position: Optional[int] = None,
52 event: Optional[dict] = None,
53 point: Optional[int] = None
54 ) -> bool:
55 if self.best_session("renameProvider.prepareProvider"):
56 # The language server will tell us if the selection is on a valid token.
57 return True
58 return super().is_enabled(event, point)
59
60 def input(self, args: dict) -> Optional[sublime_plugin.TextInputHandler]:
61 if "new_name" not in args:
62 placeholder = args.get("placeholder", "")
63 if not placeholder:
64 point = args.get("point")
65 if isinstance(point, int):
66 placeholder = self.view.substr(self.view.word(point))
67 return RenameSymbolInputHandler(self.view, placeholder)
68 else:
69 return None
70
71 def run(
72 self,
73 edit: sublime.Edit,
74 new_name: str = "",
75 placeholder: str = "",
76 position: Optional[int] = None,
77 event: Optional[dict] = None,
78 point: Optional[int] = None
79 ) -> None:
80 if position is None:
81 pos = get_position(self.view, event, point)
82 if new_name:
83 return self._do_rename(pos, new_name)
84 else:
85 session = self.best_session("{}.prepareProvider".format(self.capability))
86 if session:
87 params = text_document_position_params(self.view, pos)
88 request = Request.prepareRename(params, self.view)
89 self.event = event
90 session.send_request(request, self.on_prepare_result, self.on_prepare_error)
91 else:
92 # trigger InputHandler manually
93 raise TypeError("required positional argument")
94 else:
95 if new_name:
96 return self._do_rename(position, new_name)
97 else:
98 # trigger InputHandler manually
99 raise TypeError("required positional argument")
100
101 def _do_rename(self, position: int, new_name: str) -> None:
102 session = self.best_session(self.capability)
103 if session:
104 params = text_document_position_params(self.view, position)
105 params["newName"] = new_name
106 session.send_request(
107 Request.rename(params, self.view),
108 # This has to run on the main thread due to calling apply_workspace_edit
109 lambda r: sublime.set_timeout(lambda: self.on_rename_result(r))
110 )
111
112 def on_rename_result(self, response: Any) -> None:
113 window = self.view.window()
114 if window:
115 if response:
116 apply_workspace_edit(window, parse_workspace_edit(response))
117 else:
118 window.status_message('Nothing to rename')
119
120 def on_prepare_result(self, response: Any) -> None:
121 if response is None:
122 sublime.error_message("The current selection cannot be renamed")
123 return
124 # It must be a dict at this point.
125 if "placeholder" in response:
126 placeholder = response["placeholder"]
127 r = response["range"]
128 else:
129 placeholder = ""
130 r = response
131 region = range_to_region(Range.from_lsp(r), self.view)
132 args = {"placeholder": placeholder, "position": region.a, "event": self.event}
133 self.view.run_command("lsp_symbol_rename", args)
134
135 def on_prepare_error(self, error: Any) -> None:
136 sublime.error_message("Rename error: {}".format(error["message"]))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/rename.py b/plugin/rename.py
--- a/plugin/rename.py
+++ b/plugin/rename.py
@@ -20,9 +20,7 @@
return "new_name"
def placeholder(self) -> str:
- if self._placeholder:
- return self._placeholder
- return self.get_current_symbol_name()
+ return self._placeholder
def initial_text(self) -> str:
return self.placeholder()
@@ -30,14 +28,6 @@
def validate(self, name: str) -> bool:
return len(name) > 0
- def get_current_symbol_name(self) -> str:
- pos = get_position(self.view)
- current_name = self.view.substr(self.view.word(pos))
- # Is this check necessary?
- if not current_name:
- current_name = ""
- return current_name
-
class LspSymbolRenameCommand(LspTextCommand):
@@ -87,7 +77,7 @@
params = text_document_position_params(self.view, pos)
request = Request.prepareRename(params, self.view)
self.event = event
- session.send_request(request, self.on_prepare_result, self.on_prepare_error)
+ session.send_request(request, lambda r: self.on_prepare_result(r, pos), self.on_prepare_error)
else:
# trigger InputHandler manually
raise TypeError("required positional argument")
@@ -117,7 +107,7 @@
else:
window.status_message('Nothing to rename')
- def on_prepare_result(self, response: Any) -> None:
+ def on_prepare_result(self, response: Any, pos: int) -> None:
if response is None:
sublime.error_message("The current selection cannot be renamed")
return
@@ -126,7 +116,7 @@
placeholder = response["placeholder"]
r = response["range"]
else:
- placeholder = ""
+ placeholder = self.view.substr(self.view.word(pos))
r = response
region = range_to_region(Range.from_lsp(r), self.view)
args = {"placeholder": placeholder, "position": region.a, "event": self.event}
|
{"golden_diff": "diff --git a/plugin/rename.py b/plugin/rename.py\n--- a/plugin/rename.py\n+++ b/plugin/rename.py\n@@ -20,9 +20,7 @@\n return \"new_name\"\n \n def placeholder(self) -> str:\n- if self._placeholder:\n- return self._placeholder\n- return self.get_current_symbol_name()\n+ return self._placeholder\n \n def initial_text(self) -> str:\n return self.placeholder()\n@@ -30,14 +28,6 @@\n def validate(self, name: str) -> bool:\n return len(name) > 0\n \n- def get_current_symbol_name(self) -> str:\n- pos = get_position(self.view)\n- current_name = self.view.substr(self.view.word(pos))\n- # Is this check necessary?\n- if not current_name:\n- current_name = \"\"\n- return current_name\n-\n \n class LspSymbolRenameCommand(LspTextCommand):\n \n@@ -87,7 +77,7 @@\n params = text_document_position_params(self.view, pos)\n request = Request.prepareRename(params, self.view)\n self.event = event\n- session.send_request(request, self.on_prepare_result, self.on_prepare_error)\n+ session.send_request(request, lambda r: self.on_prepare_result(r, pos), self.on_prepare_error)\n else:\n # trigger InputHandler manually\n raise TypeError(\"required positional argument\")\n@@ -117,7 +107,7 @@\n else:\n window.status_message('Nothing to rename')\n \n- def on_prepare_result(self, response: Any) -> None:\n+ def on_prepare_result(self, response: Any, pos: int) -> None:\n if response is None:\n sublime.error_message(\"The current selection cannot be renamed\")\n return\n@@ -126,7 +116,7 @@\n placeholder = response[\"placeholder\"]\n r = response[\"range\"]\n else:\n- placeholder = \"\"\n+ placeholder = self.view.substr(self.view.word(pos))\n r = response\n region = range_to_region(Range.from_lsp(r), self.view)\n args = {\"placeholder\": placeholder, \"position\": region.a, \"event\": self.event}\n", "issue": "Wrong placeholder in the rename quick panel\n**Describe the bug**\r\n\r\nFor LSP-intelephense, it seems that the placeholder in the quick panel is the one under the caret rather than the actual symbol which triggers that. The actual renaming operation is done right though.\r\n\r\nUnfortunately `Rename` is a premium feature in intelephense... and I can't reproduce this with LSP-pyright... weird.\r\n\r\n\r\n**To Reproduce**\r\n\r\n```php\r\n<?php\r\n\r\nclass Foo\r\n{\r\n public static function getInstance()\r\n {\r\n return new static();\r\n }\r\n\r\n public function abcd()\r\n {\r\n echo 'haha';\r\n }\r\n}\r\n\r\nFoo::getInstance()->abcd();\r\n```\r\n\r\nSteps to reproduce the behavior:\r\n1. Place caret on `haha`\r\n2. Hover on `bar`\r\n3. Click `Rename` on the popup\r\n4. 
`haha` will be in the shown quick panel\r\n\r\n**Expected behavior**\r\n\r\nIt should be `bar` in the shown panel.\r\n\r\n**Screenshots**\r\n\r\nVideo recording: https://streamable.com/bw762h\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Win7 x64\r\n- Sublime Text version: 4090\r\n- LSP version: the latest commit on `st4000-exploration` 653fd3c4e25c2ed5294f76b0b8b5db79f56f18c2 \r\n- Language servers used: intelephense\r\n\r\n**Additional context**\r\n\r\nI see the server communication log but I didn't see any \"line 11\" in it but they are all correct \"line 15\".\r\n\r\nLSP log:\r\n\r\n```\r\n:: --> LSP-intelephense textDocument/hover(87): {'position': {'character': 23, 'line': 15}, 'textDocument': {'uri': 'file:///E:/_Test/php/src/test.php'}}\r\n:: <<< LSP-intelephense 87: {'range': {'start': {'character': 20, 'line': 15}, 'end': {'character': 24, 'line': 15}}, 'contents': {'kind': 'markdown', 'value': '__Foo::abcd__\\n\\n```php\\n<?php\\npublic function abcd() { }\\n```\\n\\n_@return_ `void`'}}\r\n:: --> LSP-intelephense textDocument/prepareRename(88): {'position': {'character': 23, 'line': 15}, 'textDocument': {'uri': 'file:///E:/_Test/php/src/test.php'}}\r\n:: <<< LSP-intelephense 88: {'start': {'character': 20, 'line': 15}, 'end': {'character': 24, 'line': 15}}\r\n```\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\nfrom .core.edit import apply_workspace_edit\nfrom .core.edit import parse_workspace_edit\nfrom .core.protocol import Range\nfrom .core.protocol import Request\nfrom .core.registry import get_position\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, Optional\nfrom .core.views import range_to_region\nfrom .core.views import text_document_position_params\n\n\nclass RenameSymbolInputHandler(sublime_plugin.TextInputHandler):\n def __init__(self, view: sublime.View, placeholder: str) -> None:\n self.view = view\n self._placeholder = placeholder\n\n def name(self) -> str:\n return \"new_name\"\n\n def placeholder(self) -> str:\n if self._placeholder:\n return self._placeholder\n return self.get_current_symbol_name()\n\n def initial_text(self) -> str:\n return self.placeholder()\n\n def validate(self, name: str) -> bool:\n return len(name) > 0\n\n def get_current_symbol_name(self) -> str:\n pos = get_position(self.view)\n current_name = self.view.substr(self.view.word(pos))\n # Is this check necessary?\n if not current_name:\n current_name = \"\"\n return current_name\n\n\nclass LspSymbolRenameCommand(LspTextCommand):\n\n capability = 'renameProvider'\n\n # mypy: Signature of \"is_enabled\" incompatible with supertype \"LspTextCommand\"\n def is_enabled( # type: ignore\n self,\n new_name: str = \"\",\n placeholder: str = \"\",\n position: Optional[int] = None,\n event: Optional[dict] = None,\n point: Optional[int] = None\n ) -> bool:\n if self.best_session(\"renameProvider.prepareProvider\"):\n # The language server will tell us if the selection is on a valid token.\n return True\n return super().is_enabled(event, point)\n\n def input(self, args: dict) -> Optional[sublime_plugin.TextInputHandler]:\n if \"new_name\" not in args:\n placeholder = args.get(\"placeholder\", \"\")\n if not placeholder:\n point = args.get(\"point\")\n if isinstance(point, int):\n placeholder = self.view.substr(self.view.word(point))\n return RenameSymbolInputHandler(self.view, placeholder)\n else:\n return None\n\n def run(\n self,\n edit: sublime.Edit,\n new_name: str = \"\",\n placeholder: str = \"\",\n position: 
Optional[int] = None,\n event: Optional[dict] = None,\n point: Optional[int] = None\n ) -> None:\n if position is None:\n pos = get_position(self.view, event, point)\n if new_name:\n return self._do_rename(pos, new_name)\n else:\n session = self.best_session(\"{}.prepareProvider\".format(self.capability))\n if session:\n params = text_document_position_params(self.view, pos)\n request = Request.prepareRename(params, self.view)\n self.event = event\n session.send_request(request, self.on_prepare_result, self.on_prepare_error)\n else:\n # trigger InputHandler manually\n raise TypeError(\"required positional argument\")\n else:\n if new_name:\n return self._do_rename(position, new_name)\n else:\n # trigger InputHandler manually\n raise TypeError(\"required positional argument\")\n\n def _do_rename(self, position: int, new_name: str) -> None:\n session = self.best_session(self.capability)\n if session:\n params = text_document_position_params(self.view, position)\n params[\"newName\"] = new_name\n session.send_request(\n Request.rename(params, self.view),\n # This has to run on the main thread due to calling apply_workspace_edit\n lambda r: sublime.set_timeout(lambda: self.on_rename_result(r))\n )\n\n def on_rename_result(self, response: Any) -> None:\n window = self.view.window()\n if window:\n if response:\n apply_workspace_edit(window, parse_workspace_edit(response))\n else:\n window.status_message('Nothing to rename')\n\n def on_prepare_result(self, response: Any) -> None:\n if response is None:\n sublime.error_message(\"The current selection cannot be renamed\")\n return\n # It must be a dict at this point.\n if \"placeholder\" in response:\n placeholder = response[\"placeholder\"]\n r = response[\"range\"]\n else:\n placeholder = \"\"\n r = response\n region = range_to_region(Range.from_lsp(r), self.view)\n args = {\"placeholder\": placeholder, \"position\": region.a, \"event\": self.event}\n self.view.run_command(\"lsp_symbol_rename\", args)\n\n def on_prepare_error(self, error: Any) -> None:\n sublime.error_message(\"Rename error: {}\".format(error[\"message\"]))\n", "path": "plugin/rename.py"}], "after_files": [{"content": "import sublime\nimport sublime_plugin\nfrom .core.edit import apply_workspace_edit\nfrom .core.edit import parse_workspace_edit\nfrom .core.protocol import Range\nfrom .core.protocol import Request\nfrom .core.registry import get_position\nfrom .core.registry import LspTextCommand\nfrom .core.typing import Any, Optional\nfrom .core.views import range_to_region\nfrom .core.views import text_document_position_params\n\n\nclass RenameSymbolInputHandler(sublime_plugin.TextInputHandler):\n def __init__(self, view: sublime.View, placeholder: str) -> None:\n self.view = view\n self._placeholder = placeholder\n\n def name(self) -> str:\n return \"new_name\"\n\n def placeholder(self) -> str:\n return self._placeholder\n\n def initial_text(self) -> str:\n return self.placeholder()\n\n def validate(self, name: str) -> bool:\n return len(name) > 0\n\n\nclass LspSymbolRenameCommand(LspTextCommand):\n\n capability = 'renameProvider'\n\n # mypy: Signature of \"is_enabled\" incompatible with supertype \"LspTextCommand\"\n def is_enabled( # type: ignore\n self,\n new_name: str = \"\",\n placeholder: str = \"\",\n position: Optional[int] = None,\n event: Optional[dict] = None,\n point: Optional[int] = None\n ) -> bool:\n if self.best_session(\"renameProvider.prepareProvider\"):\n # The language server will tell us if the selection is on a valid token.\n return True\n return 
super().is_enabled(event, point)\n\n def input(self, args: dict) -> Optional[sublime_plugin.TextInputHandler]:\n if \"new_name\" not in args:\n placeholder = args.get(\"placeholder\", \"\")\n if not placeholder:\n point = args.get(\"point\")\n if isinstance(point, int):\n placeholder = self.view.substr(self.view.word(point))\n return RenameSymbolInputHandler(self.view, placeholder)\n else:\n return None\n\n def run(\n self,\n edit: sublime.Edit,\n new_name: str = \"\",\n placeholder: str = \"\",\n position: Optional[int] = None,\n event: Optional[dict] = None,\n point: Optional[int] = None\n ) -> None:\n if position is None:\n pos = get_position(self.view, event, point)\n if new_name:\n return self._do_rename(pos, new_name)\n else:\n session = self.best_session(\"{}.prepareProvider\".format(self.capability))\n if session:\n params = text_document_position_params(self.view, pos)\n request = Request.prepareRename(params, self.view)\n self.event = event\n session.send_request(request, lambda r: self.on_prepare_result(r, pos), self.on_prepare_error)\n else:\n # trigger InputHandler manually\n raise TypeError(\"required positional argument\")\n else:\n if new_name:\n return self._do_rename(position, new_name)\n else:\n # trigger InputHandler manually\n raise TypeError(\"required positional argument\")\n\n def _do_rename(self, position: int, new_name: str) -> None:\n session = self.best_session(self.capability)\n if session:\n params = text_document_position_params(self.view, position)\n params[\"newName\"] = new_name\n session.send_request(\n Request.rename(params, self.view),\n # This has to run on the main thread due to calling apply_workspace_edit\n lambda r: sublime.set_timeout(lambda: self.on_rename_result(r))\n )\n\n def on_rename_result(self, response: Any) -> None:\n window = self.view.window()\n if window:\n if response:\n apply_workspace_edit(window, parse_workspace_edit(response))\n else:\n window.status_message('Nothing to rename')\n\n def on_prepare_result(self, response: Any, pos: int) -> None:\n if response is None:\n sublime.error_message(\"The current selection cannot be renamed\")\n return\n # It must be a dict at this point.\n if \"placeholder\" in response:\n placeholder = response[\"placeholder\"]\n r = response[\"range\"]\n else:\n placeholder = self.view.substr(self.view.word(pos))\n r = response\n region = range_to_region(Range.from_lsp(r), self.view)\n args = {\"placeholder\": placeholder, \"position\": region.a, \"event\": self.event}\n self.view.run_command(\"lsp_symbol_rename\", args)\n\n def on_prepare_error(self, error: Any) -> None:\n sublime.error_message(\"Rename error: {}\".format(error[\"message\"]))\n", "path": "plugin/rename.py"}]}
| 2,208 | 478 |
gh_patches_debug_40663
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-1782
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatic computation of zoom levels for rio overwiew
Since GDAL 2.3 in gdaladdo it is [possible](https://www.gdal.org/gdaladdo.html) to omit the list of overview factors. But it looks like `rio overwiew` doesn't support this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/overview.py`
Content:
```
1 # coding: utf-8
2 """Manage overviews of a dataset."""
3
4 from functools import reduce
5 import logging
6 import operator
7
8 import click
9
10 from . import options
11 import rasterio
12 from rasterio.enums import Resampling
13
14
15 def build_handler(ctx, param, value):
16 if value:
17 try:
18 if '^' in value:
19 base, exp_range = value.split('^')
20 exp_min, exp_max = (int(v) for v in exp_range.split('..'))
21 value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
22 else:
23 value = [int(v) for v in value.split(',')]
24 except Exception:
25             raise click.BadParameter(u"must match 'n,n,n,…' or 'n^n..n'.")
26 return value
27
28
29 @click.command('overview', short_help="Construct overviews in an existing dataset.")
30 @options.file_in_arg
31 @click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max",
32 help="A sequence of decimation factors specied as "
33 "comma-separated list of numbers or a base and range of "
34 "exponents.")
35 @click.option('--ls', help="Print the overviews for each band.",
36 is_flag=True, default=False)
37 @click.option('--rebuild', help="Reconstruct existing overviews.",
38 is_flag=True, default=False)
39 @click.option('--resampling', help="Resampling algorithm.",
40 type=click.Choice(
41 [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),
42 default='nearest', show_default=True)
43 @click.pass_context
44 def overview(ctx, input, build, ls, rebuild, resampling):
45 """Construct overviews in an existing dataset.
46
47 A pyramid of overviews computed once and stored in the dataset can
48 improve performance in some applications.
49
50 The decimation levels at which to build overviews can be specified as
51 a comma separated list
52
53 rio overview --build 2,4,8,16
54
55 or a base and range of exponents.
56
57 rio overview --build 2^1..4
58
59 Note that overviews can not currently be removed and are not
60 automatically updated when the dataset's primary bands are
61 modified.
62
63 Information about existing overviews can be printed using the --ls
64 option.
65
66 rio overview --ls
67
68 """
69 with ctx.obj['env']:
70 if ls:
71 with rasterio.open(input, 'r') as dst:
72 resampling_method = dst.tags(
73 ns='rio_overview').get('resampling') or 'unknown'
74
75 click.echo("Overview factors:")
76 for idx in dst.indexes:
77 click.echo(" Band %d: %s (method: '%s')" % (
78 idx, dst.overviews(idx) or 'None', resampling_method))
79 elif rebuild:
80 with rasterio.open(input, 'r+') as dst:
81 # Build the same overviews for all bands.
82 factors = reduce(
83 operator.or_,
84 [set(dst.overviews(i)) for i in dst.indexes])
85
86 # Attempt to recover the resampling method from dataset tags.
87 resampling_method = dst.tags(
88 ns='rio_overview').get('resampling') or resampling
89
90 dst.build_overviews(
91 list(factors), Resampling[resampling_method])
92
93 elif build:
94 with rasterio.open(input, 'r+') as dst:
95 dst.build_overviews(build, Resampling[resampling])
96
97 # Save the resampling method to a tag.
98 dst.update_tags(ns='rio_overview', resampling=resampling)
99
100 else:
101 raise click.UsageError(
102 "Please specify --ls, --rebuild, or --build ...")
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py
--- a/rasterio/rio/overview.py
+++ b/rasterio/rio/overview.py
@@ -19,19 +19,52 @@
base, exp_range = value.split('^')
exp_min, exp_max = (int(v) for v in exp_range.split('..'))
value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
- else:
+ elif ',' in value:
value = [int(v) for v in value.split(',')]
+ elif value == "auto":
+ pass
+ else:
+ raise Exception
except Exception:
-            raise click.BadParameter(u"must match 'n,n,n,…' or 'n^n..n'.")
+            raise click.BadParameter(u"must match 'n,n,n,…', 'n^n..n', or 'auto'.")
return value
+def get_maximum_overview_level(width, height, minsize=256):
+ """
+ Calculate the maximum overview level of a dataset at which
+ the smallest overview is smaller than `minsize`.
+
+ Attributes
+ ----------
+ width : int
+ Width of the dataset.
+ height : int
+ Height of the dataset.
+ minsize : int (default: 256)
+ Minimum overview size.
+
+ Returns
+ -------
+ overview_level: int
+ overview level.
+
+ """
+ overview_level = 0
+ overview_factor = 1
+ while min(width // overview_factor, height // overview_factor) > minsize:
+ overview_factor *= 2
+ overview_level += 1
+
+ return overview_level
+
+
@click.command('overview', short_help="Construct overviews in an existing dataset.")
@options.file_in_arg
-@click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max",
- help="A sequence of decimation factors specied as "
+@click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max|auto",
+ help="A sequence of decimation factors specified as "
"comma-separated list of numbers or a base and range of "
- "exponents.")
+ "exponents, or 'auto' to automatically determine the maximum factor.")
@click.option('--ls', help="Print the overviews for each band.",
is_flag=True, default=False)
@click.option('--rebuild', help="Reconstruct existing overviews.",
@@ -52,10 +85,15 @@
rio overview --build 2,4,8,16
- or a base and range of exponents.
+ or a base and range of exponents
rio overview --build 2^1..4
+ or 'auto' to automatically determine the maximum decimation level at
+ which the smallest overview is smaller than 256 pixels in size.
+
+ rio overview --build auto
+
Note that overviews can not currently be removed and are not
automatically updated when the dataset's primary bands are
modified.
@@ -92,6 +130,9 @@
elif build:
with rasterio.open(input, 'r+') as dst:
+ if build == "auto":
+ overview_level = get_maximum_overview_level(dst.width, dst.height)
+ build = [2 ** j for j in range(1, overview_level + 1)]
dst.build_overviews(build, Resampling[resampling])
# Save the resampling method to a tag.
|
{"golden_diff": "diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py\n--- a/rasterio/rio/overview.py\n+++ b/rasterio/rio/overview.py\n@@ -19,19 +19,52 @@\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n- else:\n+ elif ',' in value:\n value = [int(v) for v in value.split(',')]\n+ elif value == \"auto\":\n+ pass\n+ else:\n+ raise Exception\n except Exception:\n- raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n+ raise click.BadParameter(u\"must match 'n,n,n,\u2026', 'n^n..n', or 'auto'.\")\n return value\n \n \n+def get_maximum_overview_level(width, height, minsize=256):\n+ \"\"\"\n+ Calculate the maximum overview level of a dataset at which\n+ the smallest overview is smaller than `minsize`.\n+\n+ Attributes\n+ ----------\n+ width : int\n+ Width of the dataset.\n+ height : int\n+ Height of the dataset.\n+ minsize : int (default: 256)\n+ Minimum overview size.\n+\n+ Returns\n+ -------\n+ overview_level: int\n+ overview level.\n+\n+ \"\"\"\n+ overview_level = 0\n+ overview_factor = 1\n+ while min(width // overview_factor, height // overview_factor) > minsize:\n+ overview_factor *= 2\n+ overview_level += 1\n+\n+ return overview_level\n+\n+\n @click.command('overview', short_help=\"Construct overviews in an existing dataset.\")\n @options.file_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n- help=\"A sequence of decimation factors specied as \"\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max|auto\",\n+ help=\"A sequence of decimation factors specified as \"\n \"comma-separated list of numbers or a base and range of \"\n- \"exponents.\")\n+ \"exponents, or 'auto' to automatically determine the maximum factor.\")\n @click.option('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\n @click.option('--rebuild', help=\"Reconstruct existing overviews.\",\n@@ -52,10 +85,15 @@\n \n rio overview --build 2,4,8,16\n \n- or a base and range of exponents.\n+ or a base and range of exponents\n \n rio overview --build 2^1..4\n \n+ or 'auto' to automatically determine the maximum decimation level at\n+ which the smallest overview is smaller than 256 pixels in size.\n+\n+ rio overview --build auto\n+\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n@@ -92,6 +130,9 @@\n \n elif build:\n with rasterio.open(input, 'r+') as dst:\n+ if build == \"auto\":\n+ overview_level = get_maximum_overview_level(dst.width, dst.height)\n+ build = [2 ** j for j in range(1, overview_level + 1)]\n dst.build_overviews(build, Resampling[resampling])\n \n # Save the resampling method to a tag.\n", "issue": "Automatic computation of zoom levels for rio overwiew\nSince GDAL 2.3 in gdaladdo it is [possible](https://www.gdal.org/gdaladdo.html) to omit the list of overview factors. But it looks like `rio overwiew` doesn't support this.\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . 
import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n if ls:\n with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n elif rebuild:\n with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n\n else:\n raise click.UsageError(\n \"Please specify --ls, --rebuild, or --build ...\")\n", "path": "rasterio/rio/overview.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . 
import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n elif ',' in value:\n value = [int(v) for v in value.split(',')]\n elif value == \"auto\":\n pass\n else:\n raise Exception\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026', 'n^n..n', or 'auto'.\")\n return value\n\n\ndef get_maximum_overview_level(width, height, minsize=256):\n \"\"\"\n Calculate the maximum overview level of a dataset at which\n the smallest overview is smaller than `minsize`.\n\n Attributes\n ----------\n width : int\n Width of the dataset.\n height : int\n Height of the dataset.\n minsize : int (default: 256)\n Minimum overview size.\n\n Returns\n -------\n overview_level: int\n overview level.\n\n \"\"\"\n overview_level = 0\n overview_factor = 1\n while min(width // overview_factor, height // overview_factor) > minsize:\n overview_factor *= 2\n overview_level += 1\n\n return overview_level\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max|auto\",\n help=\"A sequence of decimation factors specified as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents, or 'auto' to automatically determine the maximum factor.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents\n\n rio overview --build 2^1..4\n\n or 'auto' to automatically determine the maximum decimation level at\n which the smallest overview is smaller than 256 pixels in size.\n\n rio overview --build auto\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n if ls:\n with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n elif rebuild:\n with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n 
list(factors), Resampling[resampling_method])\n\n elif build:\n with rasterio.open(input, 'r+') as dst:\n if build == \"auto\":\n overview_level = get_maximum_overview_level(dst.width, dst.height)\n build = [2 ** j for j in range(1, overview_level + 1)]\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n\n else:\n raise click.UsageError(\n \"Please specify --ls, --rebuild, or --build ...\")\n", "path": "rasterio/rio/overview.py"}]}
| 1,360 | 815 |
gh_patches_debug_18809
|
rasdani/github-patches
|
git_diff
|
LMFDB__lmfdb-4277
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some Dirichlet character pages are failing to load
The page https://www.lmfdb.org/Character/Dirichlet/947/934 is timing out. Some similar pages such as https://www.lmfdb.org/Character/Dirichlet/947/933 and https://www.lmfdb.org/Character/Dirichlet/947/935 work but are slow too load and the knowl for the fixed field does not work. I believe this is due to some of the recent changes that were made #4231 -- @BarinderBanwait can you take a look at this?
Below is the trace back from the page that is failing to load. the failure is inside the call to "zeta_order" on line 156 of https://github.com/LMFDB/lmfdb/blob/master/lmfdb/characters/TinyConrey.py. I don't think that call should be taking any time, but if Sage is doing something silly we should just compute zeta_order directly. I confess it's not clear to me why we are using Sage DirichletGroup and Sage characters at all (it appears they are being used in just 2 places).
```
Traceback (most recent call last):
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/main.py", line 367, in render_Dirichletwebpage
webchar = make_webchar(args)
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/main.py", line 313, in make_webchar
return WebDBDirichletCharacter(**args)
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py", line 925, in __init__
WebDBDirichlet.__init__(self, **kwargs)
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py", line 568, in __init__
self._compute()
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py", line 575, in _compute
self._populate_from_db()
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py", line 589, in _populate_from_db
self._set_generators_and_genvalues(values_data)
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py", line 615, in _set_generators_and_genvalues
self._genvalues_for_code = get_sage_genvalues(self.modulus, self.order, vals, self.chi.sage_zeta_order(self.order))
File "/home/lmfdb/lmfdb-git-web/lmfdb/characters/TinyConrey.py", line 156, in sage_zeta_order
return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()
File "sage/misc/cachefunc.pyx", line 2310, in sage.misc.cachefunc.CachedMethodCallerNoArgs.__call__ (build/cythonized/sage/misc/cachefunc.c:12712)
self.cache = f(self._instance)
File "/home/sage/sage-9.1/local/lib/python3.7/site-packages/sage/modular/dirichlet.py", line 2880, in zeta_order
order = self.zeta().multiplicative_order()
File "sage/rings/number_field/number_field_element.pyx", line 3229, in sage.rings.number_field.number_field_element.NumberFieldElement.multiplicative_order (build/cythonized/sage/rings/number_field/number_field_element.cpp:27976)
elif not (self.is_integral() and self.norm().is_one()):
File "sage/rings/number_field/number_field_element.pyx", line 3576, in sage.rings.number_field.number_field_element.NumberFieldElement.is_integral (build/cythonized/sage/rings/number_field/number_field_element.cpp:30234)
return all(a in ZZ for a in self.absolute_minpoly())
File "sage/rings/number_field/number_field_element.pyx", line 3576, in genexpr (build/cythonized/sage/rings/number_field/number_field_element.cpp:30109)
return all(a in ZZ for a in self.absolute_minpoly())
File "sage/rings/number_field/number_field_element.pyx", line 4488, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.absolute_minpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:37507)
return self.minpoly(var)
File "sage/rings/number_field/number_field_element.pyx", line 4576, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.minpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:38144)
return self.charpoly(var, algorithm).radical() # square free part of charpoly
File "sage/rings/number_field/number_field_element.pyx", line 4543, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.charpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:37945)
return R(self.matrix().charpoly())
File "sage/matrix/matrix_rational_dense.pyx", line 1034, in sage.matrix.matrix_rational_dense.Matrix_rational_dense.charpoly (build/cythonized/sage/matrix/matrix_rational_dense.c:10660)
f = A.charpoly(var, algorithm=algorithm)
File "sage/matrix/matrix_integer_dense.pyx", line 1336, in sage.matrix.matrix_integer_dense.Matrix_integer_dense.charpoly (build/cythonized/sage/matrix/matrix_integer_dense.c:12941)
sig_on()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/characters/TinyConrey.py`
Content:
```
1 from sage.all import (gcd, Mod, Integer, Integers, Rational, pari, Pari,
2 DirichletGroup, CyclotomicField, euler_phi)
3 from sage.misc.cachefunc import cached_method
4 from sage.modular.dirichlet import DirichletCharacter
5
6 def symbol_numerator(cond, parity):
7 # Reference: Sect. 9.3, Montgomery, Hugh L; Vaughan, Robert C. (2007).
8 # Multiplicative number theory. I. Classical theory. Cambridge Studies in
9 # Advanced Mathematics 97
10 #
11 # Let F = Q(\sqrt(d)) with d a non zero squarefree integer then a real
12 # Dirichlet character \chi(n) can be represented as a Kronecker symbol
13 # (m / n) where { m = d if # d = 1 mod 4 else m = 4d if d = 2,3 (mod) 4 }
14 # and m is the discriminant of F. The conductor of \chi is |m|.
15 #
16 # symbol_numerator returns the appropriate Kronecker symbol depending on
17 # the conductor of \chi.
18 m = cond
19 if cond % 2 == 1:
20 if cond % 4 == 3:
21 m = -cond
22 elif cond % 8 == 4:
23 # Fixed cond % 16 == 4 and cond % 16 == 12 were switched in the
24 # previous version of the code.
25 #
26 # Let d be a non zero squarefree integer. If d = 2,3 (mod) 4 and if
27 # cond = 4d = 4 ( 4n + 2) or 4 (4n + 3) = 16 n + 8 or 16n + 12 then we
28 # set m = cond. On the other hand if d = 1 (mod) 4 and cond = 4d = 4
29 # (4n +1) = 16n + 4 then we set m = -cond.
30 if cond % 16 == 4:
31 m = -cond
32 elif cond % 16 == 8:
33 if parity == 1:
34 m = -cond
35 else:
36 return None
37 return m
38
39
40 def kronecker_symbol(m):
41 if m:
42 return r'\(\displaystyle\left(\frac{%s}{\bullet}\right)\)' % (m)
43 else:
44 return None
45
46 ###############################################################################
47 ## Conrey character with no call to Jonathan's code
48 ## in order to handle big moduli
49 ##
50
51 def get_sage_genvalues(modulus, order, genvalues, zeta_order):
52 """
53 Helper method for computing correct genvalues when constructing
54 the sage character
55 """
56 phi_mod = euler_phi(modulus)
57 exponent_factor = phi_mod / order
58 genvalues_exponent = [x * exponent_factor for x in genvalues]
59 return [x * zeta_order / phi_mod for x in genvalues_exponent]
60
61
62 class PariConreyGroup(object):
63
64 def __init__(self, modulus):
65 self.modulus = int(modulus)
66 self.G = Pari("znstar({},1)".format(modulus))
67
68 def gens(self):
69 return Integers(self.modulus).unit_gens()
70
71 def invariants(self):
72 return pari("znstar({},1).cyc".format(self.modulus))
73
74
75 class ConreyCharacter(object):
76 """
77 tiny implementation on Conrey index only
78 """
79
80 def __init__(self, modulus, number):
81 assert gcd(modulus, number)==1
82 self.modulus = Integer(modulus)
83 self.number = Integer(number)
84 self.G = Pari("znstar({},1)".format(modulus))
85 self.chi_pari = pari("znconreylog(%s,%d)"%(self.G,self.number))
86 self.chi_0 = None
87 self.indlabel = None
88
89 @property
90 def texname(self):
91 from lmfdb.characters.web_character import WebDirichlet
92 return WebDirichlet.char2tex(self.modulus, self.number)
93
94 @cached_method
95 def modfactor(self):
96 return self.modulus.factor()
97
98 @cached_method
99 def conductor(self):
100 B = pari("znconreyconductor(%s,%s,&chi0)"%(self.G, self.chi_pari))
101 if B.type() == 't_INT':
102 # means chi is primitive
103 self.chi_0 = self.chi_pari
104 self.indlabel = self.number
105 return int(B)
106 else:
107 self.chi_0 = pari("chi0")
108 G_0 = Pari("znstar({},1)".format(B))
109 self.indlabel = int(pari("znconreyexp(%s,%s)"%(G_0,self.chi_0)))
110 return int(B[0])
111
112 def is_primitive(self):
113 return self.conductor() == self.modulus
114
115 @cached_method
116 def parity(self):
117 number = self.number
118 par = 0
119 for p,e in self.modfactor():
120 if p == 2:
121 if number % 4 == 3:
122 par = 1 - par
123 else:
124 phi2 = (p-1)/Integer(2) * p **(e-1)
125 if Mod(number, p ** e)**phi2 != 1:
126 par = 1 - par
127 return par
128
129 def is_odd(self):
130 return self.parity() == 1
131
132 def is_even(self):
133 return self.parity() == 0
134
135 @cached_method
136 def multiplicative_order(self):
137 return Mod(self.number, self.modulus).multiplicative_order()
138
139 @property
140 def order(self):
141 return self.multiplicative_order()
142
143 @cached_method
144 def kronecker_symbol(self):
145 c = self.conductor()
146 p = self.parity()
147 return kronecker_symbol(symbol_numerator(c, p))
148
149 def conreyangle(self,x):
150 return Rational(pari("chareval(%s,znconreylog(%s,%d),%d)"%(self.G,self.G,self.number,x)))
151
152 def gauss_sum_numerical(self, a):
153 return pari("znchargauss(%s,%s,a=%d)"%(self.G,self.chi_pari,a))
154
155 def sage_zeta_order(self, order):
156 return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()
157
158 def sage_character(self, order, genvalues):
159 H = DirichletGroup(self.modulus, base_ring=CyclotomicField(order))
160 M = H._module
161 order_corrected_genvalues = get_sage_genvalues(self.modulus, order, genvalues, H.zeta_order())
162 return DirichletCharacter(H,M(order_corrected_genvalues))
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lmfdb/characters/TinyConrey.py b/lmfdb/characters/TinyConrey.py
--- a/lmfdb/characters/TinyConrey.py
+++ b/lmfdb/characters/TinyConrey.py
@@ -1,5 +1,5 @@
from sage.all import (gcd, Mod, Integer, Integers, Rational, pari, Pari,
- DirichletGroup, CyclotomicField, euler_phi)
+ DirichletGroup, CyclotomicField, euler_phi, lcm)
from sage.misc.cachefunc import cached_method
from sage.modular.dirichlet import DirichletCharacter
@@ -153,7 +153,7 @@
return pari("znchargauss(%s,%s,a=%d)"%(self.G,self.chi_pari,a))
def sage_zeta_order(self, order):
- return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()
+ return 1 if self.modulus <= 2 else lcm(2,order)
def sage_character(self, order, genvalues):
H = DirichletGroup(self.modulus, base_ring=CyclotomicField(order))
|
{"golden_diff": "diff --git a/lmfdb/characters/TinyConrey.py b/lmfdb/characters/TinyConrey.py\n--- a/lmfdb/characters/TinyConrey.py\n+++ b/lmfdb/characters/TinyConrey.py\n@@ -1,5 +1,5 @@\n from sage.all import (gcd, Mod, Integer, Integers, Rational, pari, Pari,\n- DirichletGroup, CyclotomicField, euler_phi)\n+ DirichletGroup, CyclotomicField, euler_phi, lcm)\n from sage.misc.cachefunc import cached_method\n from sage.modular.dirichlet import DirichletCharacter\n \n@@ -153,7 +153,7 @@\n return pari(\"znchargauss(%s,%s,a=%d)\"%(self.G,self.chi_pari,a))\n \n def sage_zeta_order(self, order):\n- return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()\n+ return 1 if self.modulus <= 2 else lcm(2,order)\n \n def sage_character(self, order, genvalues):\n H = DirichletGroup(self.modulus, base_ring=CyclotomicField(order))\n", "issue": "Some Dirichlet character pages are failing to load\nThe page https://www.lmfdb.org/Character/Dirichlet/947/934 is timing out. Some similar pages such as https://www.lmfdb.org/Character/Dirichlet/947/933 and https://www.lmfdb.org/Character/Dirichlet/947/935 work but are slow too load and the knowl for the fixed field does not work. I believe this is due to some of the recent changes that were made #4231 -- @BarinderBanwait can you take a look at this?\r\n\r\nBelow is the trace back from the page that is failing to load. the failure is inside the call to \"zeta_order\" on line 156 of https://github.com/LMFDB/lmfdb/blob/master/lmfdb/characters/TinyConrey.py. I don't think that call should be taking any time, but if Sage is doing something silly we should just compute zeta_order directly. I confess it's not clear to me why we are using Sage DirichletGroup and Sage characters at all (it appears they are being used in just 2 places).\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/main.py\", line 367, in render_Dirichletwebpage\r\n webchar = make_webchar(args)\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/main.py\", line 313, in make_webchar\r\n return WebDBDirichletCharacter(**args)\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py\", line 925, in __init__\r\n WebDBDirichlet.__init__(self, **kwargs)\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py\", line 568, in __init__\r\n self._compute()\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py\", line 575, in _compute\r\n self._populate_from_db()\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py\", line 589, in _populate_from_db\r\n 
self._set_generators_and_genvalues(values_data)\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/web_character.py\", line 615, in _set_generators_and_genvalues\r\n self._genvalues_for_code = get_sage_genvalues(self.modulus, self.order, vals, self.chi.sage_zeta_order(self.order))\r\n File \"/home/lmfdb/lmfdb-git-web/lmfdb/characters/TinyConrey.py\", line 156, in sage_zeta_order\r\n return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()\r\n File \"sage/misc/cachefunc.pyx\", line 2310, in sage.misc.cachefunc.CachedMethodCallerNoArgs.__call__ (build/cythonized/sage/misc/cachefunc.c:12712)\r\n self.cache = f(self._instance)\r\n File \"/home/sage/sage-9.1/local/lib/python3.7/site-packages/sage/modular/dirichlet.py\", line 2880, in zeta_order\r\n order = self.zeta().multiplicative_order()\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 3229, in sage.rings.number_field.number_field_element.NumberFieldElement.multiplicative_order (build/cythonized/sage/rings/number_field/number_field_element.cpp:27976)\r\n elif not (self.is_integral() and self.norm().is_one()):\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 3576, in sage.rings.number_field.number_field_element.NumberFieldElement.is_integral (build/cythonized/sage/rings/number_field/number_field_element.cpp:30234)\r\n return all(a in ZZ for a in self.absolute_minpoly())\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 3576, in genexpr (build/cythonized/sage/rings/number_field/number_field_element.cpp:30109)\r\n return all(a in ZZ for a in self.absolute_minpoly())\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 4488, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.absolute_minpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:37507)\r\n return self.minpoly(var)\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 4576, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.minpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:38144)\r\n return self.charpoly(var, algorithm).radical() # square free part of charpoly\r\n File \"sage/rings/number_field/number_field_element.pyx\", line 4543, in sage.rings.number_field.number_field_element.NumberFieldElement_absolute.charpoly (build/cythonized/sage/rings/number_field/number_field_element.cpp:37945)\r\n return R(self.matrix().charpoly())\r\n File \"sage/matrix/matrix_rational_dense.pyx\", line 1034, in sage.matrix.matrix_rational_dense.Matrix_rational_dense.charpoly (build/cythonized/sage/matrix/matrix_rational_dense.c:10660)\r\n f = A.charpoly(var, algorithm=algorithm)\r\n File \"sage/matrix/matrix_integer_dense.pyx\", line 1336, in sage.matrix.matrix_integer_dense.Matrix_integer_dense.charpoly (build/cythonized/sage/matrix/matrix_integer_dense.c:12941)\r\n sig_on()\r\n```\n", "before_files": [{"content": "from sage.all import (gcd, Mod, Integer, Integers, Rational, pari, Pari,\n DirichletGroup, CyclotomicField, euler_phi)\nfrom sage.misc.cachefunc import cached_method\nfrom sage.modular.dirichlet import DirichletCharacter\n\ndef symbol_numerator(cond, parity):\n # Reference: Sect. 9.3, Montgomery, Hugh L; Vaughan, Robert C. (2007).\n # Multiplicative number theory. I. Classical theory. 
Cambridge Studies in\n # Advanced Mathematics 97\n #\n # Let F = Q(\\sqrt(d)) with d a non zero squarefree integer then a real\n # Dirichlet character \\chi(n) can be represented as a Kronecker symbol\n # (m / n) where { m = d if # d = 1 mod 4 else m = 4d if d = 2,3 (mod) 4 }\n # and m is the discriminant of F. The conductor of \\chi is |m|.\n #\n # symbol_numerator returns the appropriate Kronecker symbol depending on\n # the conductor of \\chi.\n m = cond\n if cond % 2 == 1:\n if cond % 4 == 3:\n m = -cond\n elif cond % 8 == 4:\n # Fixed cond % 16 == 4 and cond % 16 == 12 were switched in the\n # previous version of the code.\n #\n # Let d be a non zero squarefree integer. If d = 2,3 (mod) 4 and if\n # cond = 4d = 4 ( 4n + 2) or 4 (4n + 3) = 16 n + 8 or 16n + 12 then we\n # set m = cond. On the other hand if d = 1 (mod) 4 and cond = 4d = 4\n # (4n +1) = 16n + 4 then we set m = -cond.\n if cond % 16 == 4:\n m = -cond\n elif cond % 16 == 8:\n if parity == 1:\n m = -cond\n else:\n return None\n return m\n\n\ndef kronecker_symbol(m):\n if m:\n return r'\\(\\displaystyle\\left(\\frac{%s}{\\bullet}\\right)\\)' % (m)\n else:\n return None\n\n###############################################################################\n## Conrey character with no call to Jonathan's code\n## in order to handle big moduli\n##\n\ndef get_sage_genvalues(modulus, order, genvalues, zeta_order):\n \"\"\"\n Helper method for computing correct genvalues when constructing\n the sage character\n \"\"\"\n phi_mod = euler_phi(modulus)\n exponent_factor = phi_mod / order\n genvalues_exponent = [x * exponent_factor for x in genvalues]\n return [x * zeta_order / phi_mod for x in genvalues_exponent]\n\n\nclass PariConreyGroup(object):\n\n def __init__(self, modulus):\n self.modulus = int(modulus)\n self.G = Pari(\"znstar({},1)\".format(modulus))\n\n def gens(self):\n return Integers(self.modulus).unit_gens()\n\n def invariants(self):\n return pari(\"znstar({},1).cyc\".format(self.modulus))\n\n\nclass ConreyCharacter(object):\n \"\"\"\n tiny implementation on Conrey index only\n \"\"\"\n\n def __init__(self, modulus, number):\n assert gcd(modulus, number)==1\n self.modulus = Integer(modulus)\n self.number = Integer(number)\n self.G = Pari(\"znstar({},1)\".format(modulus))\n self.chi_pari = pari(\"znconreylog(%s,%d)\"%(self.G,self.number))\n self.chi_0 = None\n self.indlabel = None\n\n @property\n def texname(self):\n from lmfdb.characters.web_character import WebDirichlet\n return WebDirichlet.char2tex(self.modulus, self.number)\n\n @cached_method\n def modfactor(self):\n return self.modulus.factor()\n\n @cached_method\n def conductor(self):\n B = pari(\"znconreyconductor(%s,%s,&chi0)\"%(self.G, self.chi_pari))\n if B.type() == 't_INT':\n # means chi is primitive\n self.chi_0 = self.chi_pari\n self.indlabel = self.number\n return int(B)\n else:\n self.chi_0 = pari(\"chi0\")\n G_0 = Pari(\"znstar({},1)\".format(B))\n self.indlabel = int(pari(\"znconreyexp(%s,%s)\"%(G_0,self.chi_0)))\n return int(B[0])\n\n def is_primitive(self):\n return self.conductor() == self.modulus\n\n @cached_method\n def parity(self):\n number = self.number\n par = 0\n for p,e in self.modfactor():\n if p == 2:\n if number % 4 == 3:\n par = 1 - par\n else:\n phi2 = (p-1)/Integer(2) * p **(e-1)\n if Mod(number, p ** e)**phi2 != 1:\n par = 1 - par\n return par\n\n def is_odd(self):\n return self.parity() == 1\n\n def is_even(self):\n return self.parity() == 0\n\n @cached_method\n def multiplicative_order(self):\n return Mod(self.number, 
self.modulus).multiplicative_order()\n\n @property\n def order(self):\n return self.multiplicative_order()\n\n @cached_method\n def kronecker_symbol(self):\n c = self.conductor()\n p = self.parity()\n return kronecker_symbol(symbol_numerator(c, p))\n\n def conreyangle(self,x):\n return Rational(pari(\"chareval(%s,znconreylog(%s,%d),%d)\"%(self.G,self.G,self.number,x)))\n\n def gauss_sum_numerical(self, a):\n return pari(\"znchargauss(%s,%s,a=%d)\"%(self.G,self.chi_pari,a))\n\n def sage_zeta_order(self, order):\n return DirichletGroup(self.modulus, base_ring=CyclotomicField(order)).zeta_order()\n\n def sage_character(self, order, genvalues):\n H = DirichletGroup(self.modulus, base_ring=CyclotomicField(order))\n M = H._module\n order_corrected_genvalues = get_sage_genvalues(self.modulus, order, genvalues, H.zeta_order())\n return DirichletCharacter(H,M(order_corrected_genvalues))\n", "path": "lmfdb/characters/TinyConrey.py"}], "after_files": [{"content": "from sage.all import (gcd, Mod, Integer, Integers, Rational, pari, Pari,\n DirichletGroup, CyclotomicField, euler_phi, lcm)\nfrom sage.misc.cachefunc import cached_method\nfrom sage.modular.dirichlet import DirichletCharacter\n\ndef symbol_numerator(cond, parity):\n # Reference: Sect. 9.3, Montgomery, Hugh L; Vaughan, Robert C. (2007).\n # Multiplicative number theory. I. Classical theory. Cambridge Studies in\n # Advanced Mathematics 97\n #\n # Let F = Q(\\sqrt(d)) with d a non zero squarefree integer then a real\n # Dirichlet character \\chi(n) can be represented as a Kronecker symbol\n # (m / n) where { m = d if # d = 1 mod 4 else m = 4d if d = 2,3 (mod) 4 }\n # and m is the discriminant of F. The conductor of \\chi is |m|.\n #\n # symbol_numerator returns the appropriate Kronecker symbol depending on\n # the conductor of \\chi.\n m = cond\n if cond % 2 == 1:\n if cond % 4 == 3:\n m = -cond\n elif cond % 8 == 4:\n # Fixed cond % 16 == 4 and cond % 16 == 12 were switched in the\n # previous version of the code.\n #\n # Let d be a non zero squarefree integer. If d = 2,3 (mod) 4 and if\n # cond = 4d = 4 ( 4n + 2) or 4 (4n + 3) = 16 n + 8 or 16n + 12 then we\n # set m = cond. 
On the other hand if d = 1 (mod) 4 and cond = 4d = 4\n # (4n +1) = 16n + 4 then we set m = -cond.\n if cond % 16 == 4:\n m = -cond\n elif cond % 16 == 8:\n if parity == 1:\n m = -cond\n else:\n return None\n return m\n\n\ndef kronecker_symbol(m):\n if m:\n return r'\\(\\displaystyle\\left(\\frac{%s}{\\bullet}\\right)\\)' % (m)\n else:\n return None\n\n###############################################################################\n## Conrey character with no call to Jonathan's code\n## in order to handle big moduli\n##\n\ndef get_sage_genvalues(modulus, order, genvalues, zeta_order):\n \"\"\"\n Helper method for computing correct genvalues when constructing\n the sage character\n \"\"\"\n phi_mod = euler_phi(modulus)\n exponent_factor = phi_mod / order\n genvalues_exponent = [x * exponent_factor for x in genvalues]\n return [x * zeta_order / phi_mod for x in genvalues_exponent]\n\n\nclass PariConreyGroup(object):\n\n def __init__(self, modulus):\n self.modulus = int(modulus)\n self.G = Pari(\"znstar({},1)\".format(modulus))\n\n def gens(self):\n return Integers(self.modulus).unit_gens()\n\n def invariants(self):\n return pari(\"znstar({},1).cyc\".format(self.modulus))\n\n\nclass ConreyCharacter(object):\n \"\"\"\n tiny implementation on Conrey index only\n \"\"\"\n\n def __init__(self, modulus, number):\n assert gcd(modulus, number)==1\n self.modulus = Integer(modulus)\n self.number = Integer(number)\n self.G = Pari(\"znstar({},1)\".format(modulus))\n self.chi_pari = pari(\"znconreylog(%s,%d)\"%(self.G,self.number))\n self.chi_0 = None\n self.indlabel = None\n\n @property\n def texname(self):\n from lmfdb.characters.web_character import WebDirichlet\n return WebDirichlet.char2tex(self.modulus, self.number)\n\n @cached_method\n def modfactor(self):\n return self.modulus.factor()\n\n @cached_method\n def conductor(self):\n B = pari(\"znconreyconductor(%s,%s,&chi0)\"%(self.G, self.chi_pari))\n if B.type() == 't_INT':\n # means chi is primitive\n self.chi_0 = self.chi_pari\n self.indlabel = self.number\n return int(B)\n else:\n self.chi_0 = pari(\"chi0\")\n G_0 = Pari(\"znstar({},1)\".format(B))\n self.indlabel = int(pari(\"znconreyexp(%s,%s)\"%(G_0,self.chi_0)))\n return int(B[0])\n\n def is_primitive(self):\n return self.conductor() == self.modulus\n\n @cached_method\n def parity(self):\n number = self.number\n par = 0\n for p,e in self.modfactor():\n if p == 2:\n if number % 4 == 3:\n par = 1 - par\n else:\n phi2 = (p-1)/Integer(2) * p **(e-1)\n if Mod(number, p ** e)**phi2 != 1:\n par = 1 - par\n return par\n\n def is_odd(self):\n return self.parity() == 1\n\n def is_even(self):\n return self.parity() == 0\n\n @cached_method\n def multiplicative_order(self):\n return Mod(self.number, self.modulus).multiplicative_order()\n\n @property\n def order(self):\n return self.multiplicative_order()\n\n @cached_method\n def kronecker_symbol(self):\n c = self.conductor()\n p = self.parity()\n return kronecker_symbol(symbol_numerator(c, p))\n\n def conreyangle(self,x):\n return Rational(pari(\"chareval(%s,znconreylog(%s,%d),%d)\"%(self.G,self.G,self.number,x)))\n\n def gauss_sum_numerical(self, a):\n return pari(\"znchargauss(%s,%s,a=%d)\"%(self.G,self.chi_pari,a))\n\n def sage_zeta_order(self, order):\n return 1 if self.modulus <= 2 else lcm(2,order)\n\n def sage_character(self, order, genvalues):\n H = DirichletGroup(self.modulus, base_ring=CyclotomicField(order))\n M = H._module\n order_corrected_genvalues = get_sage_genvalues(self.modulus, order, genvalues, H.zeta_order())\n return 
DirichletCharacter(H,M(order_corrected_genvalues))\n", "path": "lmfdb/characters/TinyConrey.py"}]}
| 3,779 | 264 |
gh_patches_debug_21163
|
rasdani/github-patches
|
git_diff
|
voxel51__fiftyone-963
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FR] Limit integer label filter sliders to integers
Here's a dataset with some scalar values on its samples that have type `IntField`:
```
Name: quickstart
Media type image
Num samples: 200
Persistent: False
Info: {}
Tags: ['validation']
Sample fields:
media_type: fiftyone.core.fields.StringField
filepath: fiftyone.core.fields.StringField
tags: fiftyone.core.fields.ListField(fiftyone.core.fields.StringField)
metadata: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.metadata.Metadata)
ground_truth: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)
uniqueness: fiftyone.core.fields.FloatField
predictions: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)
tp_iou_0_75: fiftyone.core.fields.IntField
fp_iou_0_75: fiftyone.core.fields.IntField
fn_iou_0_75: fiftyone.core.fields.IntField
```
It would be desirable if the label filter in the App respected the int data type and locked the slider into int values, rather than allowing/showing `[0.00 [34.92, 81.48] 97.00]` in the App
<img width="267" alt="Screen Shot 2020-10-07 at 2 01 12 PM" src="https://user-images.githubusercontent.com/25985824/95369815-f0f2c480-08a5-11eb-9b16-29c59c7abd7b.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/core/config.py`
Content:
```
1 """
2 FiftyOne config.
3
4 | Copyright 2017-2021, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 import logging
9 import os
10
11 try:
12 from importlib import metadata as importlib_metadata # Python 3.8
13 except ImportError:
14 import importlib_metadata # Python < 3.8
15
16 import eta
17 import eta.core.config as etac
18
19 import fiftyone.constants as foc
20
21
22 logger = logging.getLogger(__name__)
23
24
25 class Config(etac.Config):
26 """Base class for JSON serializable config classes."""
27
28 def __repr__(self):
29 return self.__str__()
30
31
32 class Configurable(etac.Configurable):
33 """Base class for classes that can be initialized with a :class:`Config`
34 instance that configures their behavior.
35
36 :class:`Configurable` subclasses must obey the following rules:
37
38 (a) Configurable class ``Foo`` has an associated Config class
39 ``FooConfig`` that is importable from the same namespace as ``Foo``
40
41 (b) Configurable class ``Foo`` must be initializable via the syntax
42 ``Foo(config)``, where config is a ``FooConfig`` instance
43
44 Args:
45 config: a :class:`Config`
46 """
47
48 def __init__(self, config):
49 self.validate(config)
50 self.config = config
51
52
53 class EnvConfig(etac.EnvConfig):
54 def __repr__(self):
55 return self.__str__()
56
57
58 class FiftyOneConfig(EnvConfig):
59 """FiftyOne configuration settings."""
60
61 def __init__(self, d=None):
62 if d is None:
63 d = {}
64
65 self.database_dir = self.parse_string(
66 d,
67 "database_dir",
68 env_var="FIFTYONE_DATABASE_DIR",
69 default=foc.DEFAULT_DB_DIR,
70 )
71 self.dataset_zoo_dir = self.parse_string(
72 d,
73 "dataset_zoo_dir",
74 env_var="FIFTYONE_DATASET_ZOO_DIR",
75 default=None,
76 )
77 self.model_zoo_dir = self.parse_string(
78 d, "model_zoo_dir", env_var="FIFTYONE_MODEL_ZOO_DIR", default=None,
79 )
80 self.dataset_zoo_manifest_paths = self.parse_string_array(
81 d,
82 "dataset_zoo_manifest_paths",
83 env_var="FIFTYONE_DATASET_ZOO_MANIFEST_PATHS",
84 default=None,
85 )
86 self.model_zoo_manifest_paths = self.parse_string_array(
87 d,
88 "model_zoo_manifest_paths",
89 env_var="FIFTYONE_MODEL_ZOO_MANIFEST_PATHS",
90 default=None,
91 )
92 self.default_dataset_dir = self.parse_string(
93 d,
94 "default_dataset_dir",
95 env_var="FIFTYONE_DEFAULT_DATASET_DIR",
96 default=None,
97 )
98 self.default_ml_backend = self.parse_string(
99 d,
100 "default_ml_backend",
101 env_var="FIFTYONE_DEFAULT_ML_BACKEND",
102 default=None,
103 )
104 self.default_batch_size = self.parse_int(
105 d,
106 "default_batch_size",
107 env_var="FIFTYONE_DEFAULT_BATCH_SIZE",
108 default=None,
109 )
110 self.default_sequence_idx = self.parse_string(
111 d,
112 "default_sequence_idx",
113 env_var="FIFTYONE_DEFAULT_SEQUENCE_IDX",
114 default="%06d",
115 )
116 self.default_image_ext = self.parse_string(
117 d,
118 "default_image_ext",
119 env_var="FIFTYONE_DEFAULT_IMAGE_EXT",
120 default=".jpg",
121 )
122 self.default_video_ext = self.parse_string(
123 d,
124 "default_video_ext",
125 env_var="FIFTYONE_DEFAULT_VIDEO_EXT",
126 default=".mp4",
127 )
128 self.default_app_port = self.parse_int(
129 d,
130 "default_app_port",
131 env_var="FIFTYONE_DEFAULT_APP_PORT",
132 default=5151,
133 )
134 self.desktop_app = self.parse_bool(
135 d, "desktop_app", env_var="FIFTYONE_DESKTOP_APP", default=False,
136 )
137 self._show_progress_bars = None # declare
138 self.show_progress_bars = self.parse_bool(
139 d,
140 "show_progress_bars",
141 env_var="FIFTYONE_SHOW_PROGRESS_BARS",
142 default=True,
143 )
144 self.do_not_track = self.parse_bool(
145 d, "do_not_track", env_var="FIFTYONE_DO_NOT_TRACK", default=False,
146 )
147 self.requirement_error_level = self.parse_int(
148 d,
149 "requirement_error_level",
150 env_var="FIFTYONE_REQUIREMENT_ERROR_LEVEL",
151 default=0,
152 )
153
154 self._set_defaults()
155 self._validate()
156
157 @property
158 def show_progress_bars(self):
159 return self._show_progress_bars
160
161 @show_progress_bars.setter
162 def show_progress_bars(self, value):
163 self._show_progress_bars = value
164 try:
165 # Keep ETA config in-sync
166 eta.config.show_progress_bars = value
167 except:
168 pass
169
170 def attributes(self):
171 # Includes `show_progress_bars`
172 return super().custom_attributes(dynamic=True)
173
174 def _set_defaults(self):
175 if self.default_dataset_dir is None:
176 self.default_dataset_dir = os.path.join(
177 os.path.expanduser("~"), "fiftyone"
178 )
179
180 if self.dataset_zoo_dir is None:
181 self.dataset_zoo_dir = self.default_dataset_dir
182
183 if self.model_zoo_dir is None:
184 self.model_zoo_dir = os.path.join(
185 self.default_dataset_dir, "__models__"
186 )
187
188 if self.default_ml_backend is None:
189 installed_packages = _get_installed_packages()
190
191 if "torch" in installed_packages:
192 self.default_ml_backend = "torch"
193 elif "tensorflow" in installed_packages:
194 self.default_ml_backend = "tensorflow"
195
196 def _validate(self):
197 if self.default_ml_backend is not None:
198 self.default_ml_backend = self.default_ml_backend.lower()
199
200
201 class AppConfig(EnvConfig):
202 """FiftyOne App configuration settings."""
203
204 def __init__(self, d=None):
205 if d is None:
206 d = {}
207
208 self.color_pool = self.parse_string_array(
209 d,
210 "color_pool",
211 env_var="FIFTYONE_APP_COLOR_POOL",
212 default=foc.DEFAULT_APP_COLOR_POOL,
213 )
214 self.notebook_height = self.parse_int(
215 d,
216 "notebook_height",
217 env_var="FIFTYONE_APP_NOTEBOOK_HEIGHT",
218 default=800,
219 )
220 self.show_attributes = self.parse_bool(
221 d,
222 "show_attributes",
223 env_var="FIFTYONE_APP_SHOW_ATTRIBUTES",
224 default=False,
225 )
226 self.show_confidence = self.parse_bool(
227 d,
228 "show_confidence",
229 env_var="FIFTYONE_APP_SHOW_CONFIDENCE",
230 default=True,
231 )
232 self.show_tooltip = self.parse_bool(
233 d,
234 "show_tooltip",
235 env_var="FIFTYONE_APP_SHOW_TOOLTIP",
236 default=True,
237 )
238
239
240 def locate_config():
241 """Returns the path to the :class:`FiftyOneConfig` on disk.
242
243 The default location is ``~/.fiftyone/config.json``, but you can override
244 this path by setting the ``FIFTYONE_CONFIG_PATH`` environment variable.
245
246 Note that a config file may not actually exist on disk in the default
247 location, in which case the default config settings will be used.
248
249 Returns:
250 the path to the :class:`FiftyOneConfig` on disk
251
252 Raises:
253 OSError: if the config path has been customized but the file does not
254 exist on disk
255 """
256 if "FIFTYONE_CONFIG_PATH" not in os.environ:
257 return foc.FIFTYONE_CONFIG_PATH
258
259 config_path = os.environ["FIFTYONE_CONFIG_PATH"]
260 if not os.path.isfile(config_path):
261 raise OSError("Config file '%s' not found" % config_path)
262
263 return config_path
264
265
266 def locate_app_config():
267 """Returns the path to the :class:`AppConfig` on disk.
268
269 The default location is ``~/.fiftyone/app_config.json``, but you can
270 override this path by setting the ``FIFTYONE_APP_CONFIG_PATH`` environment
271 variable.
272
273 Note that a config file may not actually exist on disk in the default
274 location, in which case the default config settings will be used.
275
276 Returns:
277 the path to the :class:`AppConfig` on disk
278
279 Raises:
280 OSError: if the App config path has been customized but the file does
281 not exist on disk
282 """
283 if "FIFTYONE_APP_CONFIG_PATH" not in os.environ:
284 return foc.FIFTYONE_APP_CONFIG_PATH
285
286 config_path = os.environ["FIFTYONE_APP_CONFIG_PATH"]
287 if not os.path.isfile(config_path):
288 raise OSError("App config file '%s' not found" % config_path)
289
290 return config_path
291
292
293 def load_config():
294 """Loads the FiftyOne config.
295
296 Returns:
297 a :class:`FiftyOneConfig` instance
298 """
299 config_path = locate_config()
300 if os.path.isfile(config_path):
301 return FiftyOneConfig.from_json(config_path)
302
303 return FiftyOneConfig()
304
305
306 def load_app_config():
307 """Loads the FiftyOne App config.
308
309 Returns:
310 an :class:`AppConfig` instance
311 """
312 app_config_path = locate_app_config()
313 if os.path.isfile(app_config_path):
314 return AppConfig.from_json(app_config_path)
315
316 return AppConfig()
317
318
319 def set_config_settings(**kwargs):
320 """Sets the given FiftyOne config setting(s).
321
322 Args:
323 **kwargs: keyword arguments defining valid :class:`FiftyOneConfig`
324 attributes and values
325
326 Raises:
327 EnvConfigError: if the settings were invalid
328 """
329 import fiftyone as fo
330
331 # Validiate settings
332 FiftyOneConfig.from_dict(kwargs)
333
334 _set_settings(fo.config, kwargs)
335
336
337 def set_app_config_settings(**kwargs):
338 """Sets the given FiftyOne App config setting(s).
339
340 Args:
341 **kwargs: keyword arguments defining valid :class:`AppConfig`
342 attributes and values
343
344 Raises:
345 EnvConfigError: if the settings were invalid
346 """
347 import fiftyone as fo
348
349 # Validiate settings
350 AppConfig.from_dict(kwargs)
351
352 _set_settings(fo.app_config, kwargs)
353
354
355 def _set_settings(config, kwargs):
356 # Apply settings
357 for field, val in kwargs.items():
358 if not hasattr(config, field):
359 logger.warning("Skipping unknown config setting '%s'", field)
360 continue
361
362 setattr(config, field, val)
363
364
365 def _get_installed_packages():
366 try:
367 return set(
368 d.metadata["Name"] for d in importlib_metadata.distributions()
369 )
370 except:
371 logger.debug("Failed to get installed packages")
372 return set()
373
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/fiftyone/core/config.py b/fiftyone/core/config.py
--- a/fiftyone/core/config.py
+++ b/fiftyone/core/config.py
@@ -211,6 +211,12 @@
env_var="FIFTYONE_APP_COLOR_POOL",
default=foc.DEFAULT_APP_COLOR_POOL,
)
+ self.default_grid_zoom = self.parse_int(
+ d,
+ "default_grid_zoom",
+ env_var="FIFTYONE_APP_GRID_ZOOM",
+ default=5,
+ )
self.notebook_height = self.parse_int(
d,
"notebook_height",
@@ -236,6 +242,23 @@
default=True,
)
+ self._validate()
+
+ def _validate(self):
+ if self.default_grid_zoom < 0 or self.default_grid_zoom > 10:
+ raise AppConfigError(
+ "`default_grid_zoom` must be in [0, 10]; found %d"
+ % self.default_grid_zoom
+ )
+
+
+class AppConfigError(etac.EnvConfigError):
+ """Exception raised when an invalid :class:`AppConfig` instance is
+ encountered.
+ """
+
+ pass
+
def locate_config():
"""Returns the path to the :class:`FiftyOneConfig` on disk.
|
{"golden_diff": "diff --git a/fiftyone/core/config.py b/fiftyone/core/config.py\n--- a/fiftyone/core/config.py\n+++ b/fiftyone/core/config.py\n@@ -211,6 +211,12 @@\n env_var=\"FIFTYONE_APP_COLOR_POOL\",\n default=foc.DEFAULT_APP_COLOR_POOL,\n )\n+ self.default_grid_zoom = self.parse_int(\n+ d,\n+ \"default_grid_zoom\",\n+ env_var=\"FIFTYONE_APP_GRID_ZOOM\",\n+ default=5,\n+ )\n self.notebook_height = self.parse_int(\n d,\n \"notebook_height\",\n@@ -236,6 +242,23 @@\n default=True,\n )\n \n+ self._validate()\n+\n+ def _validate(self):\n+ if self.default_grid_zoom < 0 or self.default_grid_zoom > 10:\n+ raise AppConfigError(\n+ \"`default_grid_zoom` must be in [0, 10]; found %d\"\n+ % self.default_grid_zoom\n+ )\n+\n+\n+class AppConfigError(etac.EnvConfigError):\n+ \"\"\"Exception raised when an invalid :class:`AppConfig` instance is\n+ encountered.\n+ \"\"\"\n+\n+ pass\n+\n \n def locate_config():\n \"\"\"Returns the path to the :class:`FiftyOneConfig` on disk.\n", "issue": "[FR] Limit integer label filter sliders to integers\nHere's a dataset with some scalar values on its samples that have type `IntField`:\r\n\r\n```\r\nName: quickstart\r\nMedia type image\r\nNum samples: 200\r\nPersistent: False\r\nInfo: {}\r\nTags: ['validation']\r\nSample fields:\r\n media_type: fiftyone.core.fields.StringField\r\n filepath: fiftyone.core.fields.StringField\r\n tags: fiftyone.core.fields.ListField(fiftyone.core.fields.StringField)\r\n metadata: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.metadata.Metadata)\r\n ground_truth: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\r\n uniqueness: fiftyone.core.fields.FloatField\r\n predictions: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Detections)\r\n tp_iou_0_75: fiftyone.core.fields.IntField\r\n fp_iou_0_75: fiftyone.core.fields.IntField\r\n fn_iou_0_75: fiftyone.core.fields.IntField\r\n```\r\n\r\nIt would be desirable if the label filter in the App respected the int data type and locked the slider into int values, rather than allowing/showing `[0.00 [34.92, 81.48] 97.00]` in the App\r\n\r\n<img width=\"267\" alt=\"Screen Shot 2020-10-07 at 2 01 12 PM\" src=\"https://user-images.githubusercontent.com/25985824/95369815-f0f2c480-08a5-11eb-9b16-29c59c7abd7b.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne config.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport logging\nimport os\n\ntry:\n from importlib import metadata as importlib_metadata # Python 3.8\nexcept ImportError:\n import importlib_metadata # Python < 3.8\n\nimport eta\nimport eta.core.config as etac\n\nimport fiftyone.constants as foc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(etac.Config):\n \"\"\"Base class for JSON serializable config classes.\"\"\"\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Configurable(etac.Configurable):\n \"\"\"Base class for classes that can be initialized with a :class:`Config`\n instance that configures their behavior.\n\n :class:`Configurable` subclasses must obey the following rules:\n\n (a) Configurable class ``Foo`` has an associated Config class\n ``FooConfig`` that is importable from the same namespace as ``Foo``\n\n (b) Configurable class ``Foo`` must be initializable via the syntax\n ``Foo(config)``, where config is a ``FooConfig`` instance\n\n Args:\n config: a :class:`Config`\n \"\"\"\n\n def __init__(self, config):\n self.validate(config)\n self.config = config\n\n\nclass EnvConfig(etac.EnvConfig):\n def 
__repr__(self):\n return self.__str__()\n\n\nclass FiftyOneConfig(EnvConfig):\n \"\"\"FiftyOne configuration settings.\"\"\"\n\n def __init__(self, d=None):\n if d is None:\n d = {}\n\n self.database_dir = self.parse_string(\n d,\n \"database_dir\",\n env_var=\"FIFTYONE_DATABASE_DIR\",\n default=foc.DEFAULT_DB_DIR,\n )\n self.dataset_zoo_dir = self.parse_string(\n d,\n \"dataset_zoo_dir\",\n env_var=\"FIFTYONE_DATASET_ZOO_DIR\",\n default=None,\n )\n self.model_zoo_dir = self.parse_string(\n d, \"model_zoo_dir\", env_var=\"FIFTYONE_MODEL_ZOO_DIR\", default=None,\n )\n self.dataset_zoo_manifest_paths = self.parse_string_array(\n d,\n \"dataset_zoo_manifest_paths\",\n env_var=\"FIFTYONE_DATASET_ZOO_MANIFEST_PATHS\",\n default=None,\n )\n self.model_zoo_manifest_paths = self.parse_string_array(\n d,\n \"model_zoo_manifest_paths\",\n env_var=\"FIFTYONE_MODEL_ZOO_MANIFEST_PATHS\",\n default=None,\n )\n self.default_dataset_dir = self.parse_string(\n d,\n \"default_dataset_dir\",\n env_var=\"FIFTYONE_DEFAULT_DATASET_DIR\",\n default=None,\n )\n self.default_ml_backend = self.parse_string(\n d,\n \"default_ml_backend\",\n env_var=\"FIFTYONE_DEFAULT_ML_BACKEND\",\n default=None,\n )\n self.default_batch_size = self.parse_int(\n d,\n \"default_batch_size\",\n env_var=\"FIFTYONE_DEFAULT_BATCH_SIZE\",\n default=None,\n )\n self.default_sequence_idx = self.parse_string(\n d,\n \"default_sequence_idx\",\n env_var=\"FIFTYONE_DEFAULT_SEQUENCE_IDX\",\n default=\"%06d\",\n )\n self.default_image_ext = self.parse_string(\n d,\n \"default_image_ext\",\n env_var=\"FIFTYONE_DEFAULT_IMAGE_EXT\",\n default=\".jpg\",\n )\n self.default_video_ext = self.parse_string(\n d,\n \"default_video_ext\",\n env_var=\"FIFTYONE_DEFAULT_VIDEO_EXT\",\n default=\".mp4\",\n )\n self.default_app_port = self.parse_int(\n d,\n \"default_app_port\",\n env_var=\"FIFTYONE_DEFAULT_APP_PORT\",\n default=5151,\n )\n self.desktop_app = self.parse_bool(\n d, \"desktop_app\", env_var=\"FIFTYONE_DESKTOP_APP\", default=False,\n )\n self._show_progress_bars = None # declare\n self.show_progress_bars = self.parse_bool(\n d,\n \"show_progress_bars\",\n env_var=\"FIFTYONE_SHOW_PROGRESS_BARS\",\n default=True,\n )\n self.do_not_track = self.parse_bool(\n d, \"do_not_track\", env_var=\"FIFTYONE_DO_NOT_TRACK\", default=False,\n )\n self.requirement_error_level = self.parse_int(\n d,\n \"requirement_error_level\",\n env_var=\"FIFTYONE_REQUIREMENT_ERROR_LEVEL\",\n default=0,\n )\n\n self._set_defaults()\n self._validate()\n\n @property\n def show_progress_bars(self):\n return self._show_progress_bars\n\n @show_progress_bars.setter\n def show_progress_bars(self, value):\n self._show_progress_bars = value\n try:\n # Keep ETA config in-sync\n eta.config.show_progress_bars = value\n except:\n pass\n\n def attributes(self):\n # Includes `show_progress_bars`\n return super().custom_attributes(dynamic=True)\n\n def _set_defaults(self):\n if self.default_dataset_dir is None:\n self.default_dataset_dir = os.path.join(\n os.path.expanduser(\"~\"), \"fiftyone\"\n )\n\n if self.dataset_zoo_dir is None:\n self.dataset_zoo_dir = self.default_dataset_dir\n\n if self.model_zoo_dir is None:\n self.model_zoo_dir = os.path.join(\n self.default_dataset_dir, \"__models__\"\n )\n\n if self.default_ml_backend is None:\n installed_packages = _get_installed_packages()\n\n if \"torch\" in installed_packages:\n self.default_ml_backend = \"torch\"\n elif \"tensorflow\" in installed_packages:\n self.default_ml_backend = \"tensorflow\"\n\n def _validate(self):\n if 
self.default_ml_backend is not None:\n self.default_ml_backend = self.default_ml_backend.lower()\n\n\nclass AppConfig(EnvConfig):\n \"\"\"FiftyOne App configuration settings.\"\"\"\n\n def __init__(self, d=None):\n if d is None:\n d = {}\n\n self.color_pool = self.parse_string_array(\n d,\n \"color_pool\",\n env_var=\"FIFTYONE_APP_COLOR_POOL\",\n default=foc.DEFAULT_APP_COLOR_POOL,\n )\n self.notebook_height = self.parse_int(\n d,\n \"notebook_height\",\n env_var=\"FIFTYONE_APP_NOTEBOOK_HEIGHT\",\n default=800,\n )\n self.show_attributes = self.parse_bool(\n d,\n \"show_attributes\",\n env_var=\"FIFTYONE_APP_SHOW_ATTRIBUTES\",\n default=False,\n )\n self.show_confidence = self.parse_bool(\n d,\n \"show_confidence\",\n env_var=\"FIFTYONE_APP_SHOW_CONFIDENCE\",\n default=True,\n )\n self.show_tooltip = self.parse_bool(\n d,\n \"show_tooltip\",\n env_var=\"FIFTYONE_APP_SHOW_TOOLTIP\",\n default=True,\n )\n\n\ndef locate_config():\n \"\"\"Returns the path to the :class:`FiftyOneConfig` on disk.\n\n The default location is ``~/.fiftyone/config.json``, but you can override\n this path by setting the ``FIFTYONE_CONFIG_PATH`` environment variable.\n\n Note that a config file may not actually exist on disk in the default\n location, in which case the default config settings will be used.\n\n Returns:\n the path to the :class:`FiftyOneConfig` on disk\n\n Raises:\n OSError: if the config path has been customized but the file does not\n exist on disk\n \"\"\"\n if \"FIFTYONE_CONFIG_PATH\" not in os.environ:\n return foc.FIFTYONE_CONFIG_PATH\n\n config_path = os.environ[\"FIFTYONE_CONFIG_PATH\"]\n if not os.path.isfile(config_path):\n raise OSError(\"Config file '%s' not found\" % config_path)\n\n return config_path\n\n\ndef locate_app_config():\n \"\"\"Returns the path to the :class:`AppConfig` on disk.\n\n The default location is ``~/.fiftyone/app_config.json``, but you can\n override this path by setting the ``FIFTYONE_APP_CONFIG_PATH`` environment\n variable.\n\n Note that a config file may not actually exist on disk in the default\n location, in which case the default config settings will be used.\n\n Returns:\n the path to the :class:`AppConfig` on disk\n\n Raises:\n OSError: if the App config path has been customized but the file does\n not exist on disk\n \"\"\"\n if \"FIFTYONE_APP_CONFIG_PATH\" not in os.environ:\n return foc.FIFTYONE_APP_CONFIG_PATH\n\n config_path = os.environ[\"FIFTYONE_APP_CONFIG_PATH\"]\n if not os.path.isfile(config_path):\n raise OSError(\"App config file '%s' not found\" % config_path)\n\n return config_path\n\n\ndef load_config():\n \"\"\"Loads the FiftyOne config.\n\n Returns:\n a :class:`FiftyOneConfig` instance\n \"\"\"\n config_path = locate_config()\n if os.path.isfile(config_path):\n return FiftyOneConfig.from_json(config_path)\n\n return FiftyOneConfig()\n\n\ndef load_app_config():\n \"\"\"Loads the FiftyOne App config.\n\n Returns:\n an :class:`AppConfig` instance\n \"\"\"\n app_config_path = locate_app_config()\n if os.path.isfile(app_config_path):\n return AppConfig.from_json(app_config_path)\n\n return AppConfig()\n\n\ndef set_config_settings(**kwargs):\n \"\"\"Sets the given FiftyOne config setting(s).\n\n Args:\n **kwargs: keyword arguments defining valid :class:`FiftyOneConfig`\n attributes and values\n\n Raises:\n EnvConfigError: if the settings were invalid\n \"\"\"\n import fiftyone as fo\n\n # Validiate settings\n FiftyOneConfig.from_dict(kwargs)\n\n _set_settings(fo.config, kwargs)\n\n\ndef set_app_config_settings(**kwargs):\n \"\"\"Sets the given 
FiftyOne App config setting(s).\n\n Args:\n **kwargs: keyword arguments defining valid :class:`AppConfig`\n attributes and values\n\n Raises:\n EnvConfigError: if the settings were invalid\n \"\"\"\n import fiftyone as fo\n\n # Validiate settings\n AppConfig.from_dict(kwargs)\n\n _set_settings(fo.app_config, kwargs)\n\n\ndef _set_settings(config, kwargs):\n # Apply settings\n for field, val in kwargs.items():\n if not hasattr(config, field):\n logger.warning(\"Skipping unknown config setting '%s'\", field)\n continue\n\n setattr(config, field, val)\n\n\ndef _get_installed_packages():\n try:\n return set(\n d.metadata[\"Name\"] for d in importlib_metadata.distributions()\n )\n except:\n logger.debug(\"Failed to get installed packages\")\n return set()\n", "path": "fiftyone/core/config.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne config.\n\n| Copyright 2017-2021, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nimport logging\nimport os\n\ntry:\n from importlib import metadata as importlib_metadata # Python 3.8\nexcept ImportError:\n import importlib_metadata # Python < 3.8\n\nimport eta\nimport eta.core.config as etac\n\nimport fiftyone.constants as foc\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(etac.Config):\n \"\"\"Base class for JSON serializable config classes.\"\"\"\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Configurable(etac.Configurable):\n \"\"\"Base class for classes that can be initialized with a :class:`Config`\n instance that configures their behavior.\n\n :class:`Configurable` subclasses must obey the following rules:\n\n (a) Configurable class ``Foo`` has an associated Config class\n ``FooConfig`` that is importable from the same namespace as ``Foo``\n\n (b) Configurable class ``Foo`` must be initializable via the syntax\n ``Foo(config)``, where config is a ``FooConfig`` instance\n\n Args:\n config: a :class:`Config`\n \"\"\"\n\n def __init__(self, config):\n self.validate(config)\n self.config = config\n\n\nclass EnvConfig(etac.EnvConfig):\n def __repr__(self):\n return self.__str__()\n\n\nclass FiftyOneConfig(EnvConfig):\n \"\"\"FiftyOne configuration settings.\"\"\"\n\n def __init__(self, d=None):\n if d is None:\n d = {}\n\n self.database_dir = self.parse_string(\n d,\n \"database_dir\",\n env_var=\"FIFTYONE_DATABASE_DIR\",\n default=foc.DEFAULT_DB_DIR,\n )\n self.dataset_zoo_dir = self.parse_string(\n d,\n \"dataset_zoo_dir\",\n env_var=\"FIFTYONE_DATASET_ZOO_DIR\",\n default=None,\n )\n self.model_zoo_dir = self.parse_string(\n d, \"model_zoo_dir\", env_var=\"FIFTYONE_MODEL_ZOO_DIR\", default=None,\n )\n self.dataset_zoo_manifest_paths = self.parse_string_array(\n d,\n \"dataset_zoo_manifest_paths\",\n env_var=\"FIFTYONE_DATASET_ZOO_MANIFEST_PATHS\",\n default=None,\n )\n self.model_zoo_manifest_paths = self.parse_string_array(\n d,\n \"model_zoo_manifest_paths\",\n env_var=\"FIFTYONE_MODEL_ZOO_MANIFEST_PATHS\",\n default=None,\n )\n self.default_dataset_dir = self.parse_string(\n d,\n \"default_dataset_dir\",\n env_var=\"FIFTYONE_DEFAULT_DATASET_DIR\",\n default=None,\n )\n self.default_ml_backend = self.parse_string(\n d,\n \"default_ml_backend\",\n env_var=\"FIFTYONE_DEFAULT_ML_BACKEND\",\n default=None,\n )\n self.default_batch_size = self.parse_int(\n d,\n \"default_batch_size\",\n env_var=\"FIFTYONE_DEFAULT_BATCH_SIZE\",\n default=None,\n )\n self.default_sequence_idx = self.parse_string(\n d,\n \"default_sequence_idx\",\n env_var=\"FIFTYONE_DEFAULT_SEQUENCE_IDX\",\n default=\"%06d\",\n )\n 
self.default_image_ext = self.parse_string(\n d,\n \"default_image_ext\",\n env_var=\"FIFTYONE_DEFAULT_IMAGE_EXT\",\n default=\".jpg\",\n )\n self.default_video_ext = self.parse_string(\n d,\n \"default_video_ext\",\n env_var=\"FIFTYONE_DEFAULT_VIDEO_EXT\",\n default=\".mp4\",\n )\n self.default_app_port = self.parse_int(\n d,\n \"default_app_port\",\n env_var=\"FIFTYONE_DEFAULT_APP_PORT\",\n default=5151,\n )\n self.desktop_app = self.parse_bool(\n d, \"desktop_app\", env_var=\"FIFTYONE_DESKTOP_APP\", default=False,\n )\n self._show_progress_bars = None # declare\n self.show_progress_bars = self.parse_bool(\n d,\n \"show_progress_bars\",\n env_var=\"FIFTYONE_SHOW_PROGRESS_BARS\",\n default=True,\n )\n self.do_not_track = self.parse_bool(\n d, \"do_not_track\", env_var=\"FIFTYONE_DO_NOT_TRACK\", default=False,\n )\n self.requirement_error_level = self.parse_int(\n d,\n \"requirement_error_level\",\n env_var=\"FIFTYONE_REQUIREMENT_ERROR_LEVEL\",\n default=0,\n )\n\n self._set_defaults()\n self._validate()\n\n @property\n def show_progress_bars(self):\n return self._show_progress_bars\n\n @show_progress_bars.setter\n def show_progress_bars(self, value):\n self._show_progress_bars = value\n try:\n # Keep ETA config in-sync\n eta.config.show_progress_bars = value\n except:\n pass\n\n def attributes(self):\n # Includes `show_progress_bars`\n return super().custom_attributes(dynamic=True)\n\n def _set_defaults(self):\n if self.default_dataset_dir is None:\n self.default_dataset_dir = os.path.join(\n os.path.expanduser(\"~\"), \"fiftyone\"\n )\n\n if self.dataset_zoo_dir is None:\n self.dataset_zoo_dir = self.default_dataset_dir\n\n if self.model_zoo_dir is None:\n self.model_zoo_dir = os.path.join(\n self.default_dataset_dir, \"__models__\"\n )\n\n if self.default_ml_backend is None:\n installed_packages = _get_installed_packages()\n\n if \"torch\" in installed_packages:\n self.default_ml_backend = \"torch\"\n elif \"tensorflow\" in installed_packages:\n self.default_ml_backend = \"tensorflow\"\n\n def _validate(self):\n if self.default_ml_backend is not None:\n self.default_ml_backend = self.default_ml_backend.lower()\n\n\nclass AppConfig(EnvConfig):\n \"\"\"FiftyOne App configuration settings.\"\"\"\n\n def __init__(self, d=None):\n if d is None:\n d = {}\n\n self.color_pool = self.parse_string_array(\n d,\n \"color_pool\",\n env_var=\"FIFTYONE_APP_COLOR_POOL\",\n default=foc.DEFAULT_APP_COLOR_POOL,\n )\n self.default_grid_zoom = self.parse_int(\n d,\n \"default_grid_zoom\",\n env_var=\"FIFTYONE_APP_GRID_ZOOM\",\n default=5,\n )\n self.notebook_height = self.parse_int(\n d,\n \"notebook_height\",\n env_var=\"FIFTYONE_APP_NOTEBOOK_HEIGHT\",\n default=800,\n )\n self.show_attributes = self.parse_bool(\n d,\n \"show_attributes\",\n env_var=\"FIFTYONE_APP_SHOW_ATTRIBUTES\",\n default=False,\n )\n self.show_confidence = self.parse_bool(\n d,\n \"show_confidence\",\n env_var=\"FIFTYONE_APP_SHOW_CONFIDENCE\",\n default=True,\n )\n self.show_tooltip = self.parse_bool(\n d,\n \"show_tooltip\",\n env_var=\"FIFTYONE_APP_SHOW_TOOLTIP\",\n default=True,\n )\n\n self._validate()\n\n def _validate(self):\n if self.default_grid_zoom < 0 or self.default_grid_zoom > 10:\n raise AppConfigError(\n \"`default_grid_zoom` must be in [0, 10]; found %d\"\n % self.default_grid_zoom\n )\n\n\nclass AppConfigError(etac.EnvConfigError):\n \"\"\"Exception raised when an invalid :class:`AppConfig` instance is\n encountered.\n \"\"\"\n\n pass\n\n\ndef locate_config():\n \"\"\"Returns the path to the :class:`FiftyOneConfig` on 
disk.\n\n The default location is ``~/.fiftyone/config.json``, but you can override\n this path by setting the ``FIFTYONE_CONFIG_PATH`` environment variable.\n\n Note that a config file may not actually exist on disk in the default\n location, in which case the default config settings will be used.\n\n Returns:\n the path to the :class:`FiftyOneConfig` on disk\n\n Raises:\n OSError: if the config path has been customized but the file does not\n exist on disk\n \"\"\"\n if \"FIFTYONE_CONFIG_PATH\" not in os.environ:\n return foc.FIFTYONE_CONFIG_PATH\n\n config_path = os.environ[\"FIFTYONE_CONFIG_PATH\"]\n if not os.path.isfile(config_path):\n raise OSError(\"Config file '%s' not found\" % config_path)\n\n return config_path\n\n\ndef locate_app_config():\n \"\"\"Returns the path to the :class:`AppConfig` on disk.\n\n The default location is ``~/.fiftyone/app_config.json``, but you can\n override this path by setting the ``FIFTYONE_APP_CONFIG_PATH`` environment\n variable.\n\n Note that a config file may not actually exist on disk in the default\n location, in which case the default config settings will be used.\n\n Returns:\n the path to the :class:`AppConfig` on disk\n\n Raises:\n OSError: if the App config path has been customized but the file does\n not exist on disk\n \"\"\"\n if \"FIFTYONE_APP_CONFIG_PATH\" not in os.environ:\n return foc.FIFTYONE_APP_CONFIG_PATH\n\n config_path = os.environ[\"FIFTYONE_APP_CONFIG_PATH\"]\n if not os.path.isfile(config_path):\n raise OSError(\"App config file '%s' not found\" % config_path)\n\n return config_path\n\n\ndef load_config():\n \"\"\"Loads the FiftyOne config.\n\n Returns:\n a :class:`FiftyOneConfig` instance\n \"\"\"\n config_path = locate_config()\n if os.path.isfile(config_path):\n return FiftyOneConfig.from_json(config_path)\n\n return FiftyOneConfig()\n\n\ndef load_app_config():\n \"\"\"Loads the FiftyOne App config.\n\n Returns:\n an :class:`AppConfig` instance\n \"\"\"\n app_config_path = locate_app_config()\n if os.path.isfile(app_config_path):\n return AppConfig.from_json(app_config_path)\n\n return AppConfig()\n\n\ndef set_config_settings(**kwargs):\n \"\"\"Sets the given FiftyOne config setting(s).\n\n Args:\n **kwargs: keyword arguments defining valid :class:`FiftyOneConfig`\n attributes and values\n\n Raises:\n EnvConfigError: if the settings were invalid\n \"\"\"\n import fiftyone as fo\n\n # Validiate settings\n FiftyOneConfig.from_dict(kwargs)\n\n _set_settings(fo.config, kwargs)\n\n\ndef set_app_config_settings(**kwargs):\n \"\"\"Sets the given FiftyOne App config setting(s).\n\n Args:\n **kwargs: keyword arguments defining valid :class:`AppConfig`\n attributes and values\n\n Raises:\n EnvConfigError: if the settings were invalid\n \"\"\"\n import fiftyone as fo\n\n # Validiate settings\n AppConfig.from_dict(kwargs)\n\n _set_settings(fo.app_config, kwargs)\n\n\ndef _set_settings(config, kwargs):\n # Apply settings\n for field, val in kwargs.items():\n if not hasattr(config, field):\n logger.warning(\"Skipping unknown config setting '%s'\", field)\n continue\n\n setattr(config, field, val)\n\n\ndef _get_installed_packages():\n try:\n return set(\n d.metadata[\"Name\"] for d in importlib_metadata.distributions()\n )\n except:\n logger.debug(\"Failed to get installed packages\")\n return set()\n", "path": "fiftyone/core/config.py"}]}
| 4,051 | 297 |
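Editor's note on the FiftyOne record above: the recorded patch adds a `default_grid_zoom` App config setting (read from `FIFTYONE_APP_GRID_ZOOM`, default 5) and a `_validate()` step that raises the new `AppConfigError` when the value falls outside [0, 10]. The snippet below is a hedged illustration of that behavior, not part of the dataset record; the import path and class names are taken from the diff, and passing the value via the `d` dict is assumed to work the same way as for the existing config keys.

```python
# Illustration only -- assumes the names introduced by the recorded patch.
from fiftyone.core.config import AppConfig, AppConfigError

config = AppConfig({"default_grid_zoom": 7})   # within [0, 10], accepted
print(config.default_grid_zoom)                # 7

try:
    AppConfig({"default_grid_zoom": 42})       # out of range, _validate() raises
except AppConfigError as err:
    print(err)   # `default_grid_zoom` must be in [0, 10]; found 42
```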
gh_patches_debug_26710
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-731
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip-compile generates setup.txt when setup.py is explicitly given as a source file
The `pip-compile setup.py` command appears to behave incorrectly: it generates a `setup.txt` file instead of `requirements.txt`.
##### Environment Versions
1. OS Type: Any (tested on MacOSX)
1. Python version: 3.7.x
1. pip version: 19.1
1. pip-tools version: 3.3.2
##### Steps to replicate
1. Prepare a `setup.py` file:
```
cat << EOF > setup.py
from setuptools import setup
setup(install_requires=[])
EOF
```
2. Run: `pip-compile setup.py`
##### Expected result
```
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file requirements.txt setup.py
#
```
##### Actual result
```
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --output-file setup.txt setup.py
#
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/scripts/compile.py`
Content:
```
1 # coding: utf-8
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4
5 import os
6 import sys
7 import tempfile
8
9 from .._compat import (
10 install_req_from_line,
11 parse_requirements,
12 )
13
14 from .. import click
15 from ..exceptions import PipToolsError
16 from ..logging import log
17 from ..pip import get_pip_command, pip_defaults
18 from ..repositories import LocalRequirementsRepository, PyPIRepository
19 from ..resolver import Resolver
20 from ..utils import (dedup, is_pinned_requirement, key_from_req, UNSAFE_PACKAGES)
21 from ..writer import OutputWriter
22
23 DEFAULT_REQUIREMENTS_FILE = 'requirements.in'
24
25
26 @click.command()
27 @click.version_option()
28 @click.option('-v', '--verbose', count=True, help="Show more output")
29 @click.option('-q', '--quiet', count=True, help="Give less output")
30 @click.option('-n', '--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
31 @click.option('-p', '--pre', is_flag=True, default=None, help="Allow resolving to prereleases (default is not)")
32 @click.option('-r', '--rebuild', is_flag=True, help="Clear any caches upfront, rebuild from scratch")
33 @click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page", envvar='PIP_FIND_LINKS') # noqa
34 @click.option('-i', '--index-url', help="Change index URL (defaults to {})".format(pip_defaults.index_url), envvar='PIP_INDEX_URL') # noqa
35 @click.option('--extra-index-url', multiple=True, help="Add additional index URL to search", envvar='PIP_EXTRA_INDEX_URL') # noqa
36 @click.option('--cert', help="Path to alternate CA bundle.")
37 @click.option('--client-cert', help="Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.") # noqa
38 @click.option('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',
39 help="Mark this host as trusted, even though it does not have "
40 "valid or any HTTPS.")
41 @click.option('--header/--no-header', is_flag=True, default=True,
42 help="Add header to generated file")
43 @click.option('--index/--no-index', is_flag=True, default=True,
44 help="Add index URL to generated file")
45 @click.option('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,
46 default=True, help="Add trusted host option to generated file")
47 @click.option('--annotate/--no-annotate', is_flag=True, default=True,
48 help="Annotate results, indicating where dependencies come from")
49 @click.option('-U', '--upgrade', is_flag=True, default=False,
50 help='Try to upgrade all dependencies to their latest versions')
51 @click.option('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,
52 help="Specify particular packages to upgrade.")
53 @click.option('-o', '--output-file', nargs=1, type=str, default=None,
54 help=('Output file name. Required if more than one input file is given. '
55 'Will be derived from input file otherwise.'))
56 @click.option('--allow-unsafe', is_flag=True, default=False,
57 help="Pin packages considered unsafe: {}".format(', '.join(sorted(UNSAFE_PACKAGES))))
58 @click.option('--generate-hashes', is_flag=True, default=False,
59 help="Generate pip 8 style hashes in the resulting requirements file.")
60 @click.option('--max-rounds', default=10,
61 help="Maximum number of rounds before resolving the requirements aborts.")
62 @click.argument('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))
63 def cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_index_url,
64 cert, client_cert, trusted_host, header, index, emit_trusted_host, annotate,
65 upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,
66 src_files, max_rounds):
67 """Compiles requirements.txt from requirements.in specs."""
68 log.verbosity = verbose - quiet
69
70 if len(src_files) == 0:
71 if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
72 src_files = (DEFAULT_REQUIREMENTS_FILE,)
73 elif os.path.exists('setup.py'):
74 src_files = ('setup.py',)
75 if not output_file:
76 output_file = 'requirements.txt'
77 else:
78 raise click.BadParameter(("If you do not specify an input file, "
79 "the default is {} or setup.py").format(DEFAULT_REQUIREMENTS_FILE))
80
81 if len(src_files) == 1 and src_files[0] == '-':
82 if not output_file:
83 raise click.BadParameter('--output-file is required if input is from stdin')
84
85 if len(src_files) > 1 and not output_file:
86 raise click.BadParameter('--output-file is required if two or more input files are given.')
87
88 if output_file:
89 dst_file = output_file
90 else:
91 base_name = src_files[0].rsplit('.', 1)[0]
92 dst_file = base_name + '.txt'
93
94 if upgrade and upgrade_packages:
95 raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')
96
97 ###
98 # Setup
99 ###
100
101 pip_command = get_pip_command()
102
103 pip_args = []
104 if find_links:
105 for link in find_links:
106 pip_args.extend(['-f', link])
107 if index_url:
108 pip_args.extend(['-i', index_url])
109 if extra_index_url:
110 for extra_index in extra_index_url:
111 pip_args.extend(['--extra-index-url', extra_index])
112 if cert:
113 pip_args.extend(['--cert', cert])
114 if client_cert:
115 pip_args.extend(['--client-cert', client_cert])
116 if pre:
117 pip_args.extend(['--pre'])
118 if trusted_host:
119 for host in trusted_host:
120 pip_args.extend(['--trusted-host', host])
121
122 pip_options, _ = pip_command.parse_args(pip_args)
123
124 session = pip_command._build_session(pip_options)
125 repository = PyPIRepository(pip_options, session)
126
127 upgrade_install_reqs = {}
128 # Proxy with a LocalRequirementsRepository if --upgrade is not specified
129 # (= default invocation)
130 if not upgrade and os.path.exists(dst_file):
131 ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)
132 # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.
133 upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)
134 upgrade_install_reqs = {key_from_req(install_req.req): install_req for install_req in upgrade_reqs_gen}
135
136 existing_pins = {key_from_req(ireq.req): ireq
137 for ireq in ireqs
138 if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_install_reqs}
139 repository = LocalRequirementsRepository(existing_pins, repository)
140
141 log.debug('Using indexes:')
142 # remove duplicate index urls before processing
143 repository.finder.index_urls = list(dedup(repository.finder.index_urls))
144 for index_url in repository.finder.index_urls:
145 log.debug(' {}'.format(index_url))
146
147 if repository.finder.find_links:
148 log.debug('')
149 log.debug('Configuration:')
150 for find_link in repository.finder.find_links:
151 log.debug(' -f {}'.format(find_link))
152
153 ###
154 # Parsing/collecting initial requirements
155 ###
156
157 constraints = []
158 for src_file in src_files:
159 is_setup_file = os.path.basename(src_file) == 'setup.py'
160 if is_setup_file or src_file == '-':
161 # pip requires filenames and not files. Since we want to support
162 # piping from stdin, we need to briefly save the input from stdin
163 # to a temporary file and have pip read that. also used for
164 # reading requirements from install_requires in setup.py.
165 tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)
166 if is_setup_file:
167 from distutils.core import run_setup
168 dist = run_setup(src_file)
169 tmpfile.write('\n'.join(dist.install_requires))
170 else:
171 tmpfile.write(sys.stdin.read())
172 tmpfile.flush()
173 constraints.extend(parse_requirements(
174 tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))
175 else:
176 constraints.extend(parse_requirements(
177 src_file, finder=repository.finder, session=repository.session, options=pip_options))
178
179 constraints.extend(upgrade_install_reqs.values())
180
181 # Filter out pip environment markers which do not match (PEP496)
182 constraints = [req for req in constraints
183 if req.markers is None or req.markers.evaluate()]
184
185 # Check the given base set of constraints first
186 Resolver.check_constraints(constraints)
187
188 try:
189 resolver = Resolver(constraints, repository, prereleases=pre,
190 clear_caches=rebuild, allow_unsafe=allow_unsafe)
191 results = resolver.resolve(max_rounds=max_rounds)
192 if generate_hashes:
193 hashes = resolver.resolve_hashes(results)
194 else:
195 hashes = None
196 except PipToolsError as e:
197 log.error(str(e))
198 sys.exit(2)
199
200 log.debug('')
201
202 ##
203 # Output
204 ##
205
206 # Compute reverse dependency annotations statically, from the
207 # dependency cache that the resolver has populated by now.
208 #
209 # TODO (1a): reverse deps for any editable package are lost
210 # what SHOULD happen is that they are cached in memory, just
211 # not persisted to disk!
212 #
213 # TODO (1b): perhaps it's easiest if the dependency cache has an API
214 # that could take InstallRequirements directly, like:
215 #
216 # cache.set(ireq, ...)
217 #
218 # then, when ireq is editable, it would store in
219 #
220 # editables[egg_name][link_without_fragment] = deps
221 # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}
222 #
223 # otherwise:
224 #
225 # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}
226 #
227 reverse_dependencies = None
228 if annotate:
229 reverse_dependencies = resolver.reverse_dependencies(results)
230
231 writer = OutputWriter(src_files, dst_file, dry_run=dry_run,
232 emit_header=header, emit_index=index,
233 emit_trusted_host=emit_trusted_host,
234 annotate=annotate,
235 generate_hashes=generate_hashes,
236 default_index_url=repository.DEFAULT_INDEX_URL,
237 index_urls=repository.finder.index_urls,
238 trusted_hosts=pip_options.trusted_hosts,
239 format_control=repository.finder.format_control,
240 allow_unsafe=allow_unsafe)
241 writer.write(results=results,
242 unsafe_requirements=resolver.unsafe_constraints,
243 reverse_dependencies=reverse_dependencies,
244 primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},
245 markers={key_from_req(ireq.req): ireq.markers
246 for ireq in constraints if ireq.markers},
247 hashes=hashes)
248
249 if dry_run:
250 log.warning('Dry-run, so nothing updated.')
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py
--- a/piptools/scripts/compile.py
+++ b/piptools/scripts/compile.py
@@ -21,6 +21,7 @@
from ..writer import OutputWriter
DEFAULT_REQUIREMENTS_FILE = 'requirements.in'
+DEFAULT_REQUIREMENTS_OUTPUT_FILE = 'requirements.txt'
@click.command()
@@ -72,15 +73,14 @@
src_files = (DEFAULT_REQUIREMENTS_FILE,)
elif os.path.exists('setup.py'):
src_files = ('setup.py',)
- if not output_file:
- output_file = 'requirements.txt'
else:
raise click.BadParameter(("If you do not specify an input file, "
"the default is {} or setup.py").format(DEFAULT_REQUIREMENTS_FILE))
- if len(src_files) == 1 and src_files[0] == '-':
- if not output_file:
- raise click.BadParameter('--output-file is required if input is from stdin')
+ if src_files == ('-',) and not output_file:
+ raise click.BadParameter('--output-file is required if input is from stdin')
+ elif src_files == ('setup.py',):
+ output_file = DEFAULT_REQUIREMENTS_OUTPUT_FILE
if len(src_files) > 1 and not output_file:
raise click.BadParameter('--output-file is required if two or more input files are given.')
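Editor's note on the diff above: the fix stops deriving the output name from the input file when the only input is `setup.py`, and instead hard-codes `requirements.txt` via the new `DEFAULT_REQUIREMENTS_OUTPUT_FILE` constant. Below is a minimal Python sketch of the naming decision after the patch, paraphrased from `cli()` in `piptools/scripts/compile.py`; it is an illustration only, not part of the record.

```python
DEFAULT_REQUIREMENTS_OUTPUT_FILE = 'requirements.txt'

def derive_output_file(src_files, output_file=None):
    # Paraphrased from the patched cli(): setup.py as the sole input is
    # special-cased first, then an explicit --output-file wins, otherwise
    # the old "<input basename>.txt" rule applies.
    if src_files == ('setup.py',):
        return DEFAULT_REQUIREMENTS_OUTPUT_FILE
    if output_file:
        return output_file
    return src_files[0].rsplit('.', 1)[0] + '.txt'

assert derive_output_file(('setup.py',)) == 'requirements.txt'        # was 'setup.txt' before the patch
assert derive_output_file(('requirements.in',)) == 'requirements.txt'
```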
|
{"golden_diff": "diff --git a/piptools/scripts/compile.py b/piptools/scripts/compile.py\n--- a/piptools/scripts/compile.py\n+++ b/piptools/scripts/compile.py\n@@ -21,6 +21,7 @@\n from ..writer import OutputWriter\n \n DEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n+DEFAULT_REQUIREMENTS_OUTPUT_FILE = 'requirements.txt'\n \n \n @click.command()\n@@ -72,15 +73,14 @@\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n- if not output_file:\n- output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n \n- if len(src_files) == 1 and src_files[0] == '-':\n- if not output_file:\n- raise click.BadParameter('--output-file is required if input is from stdin')\n+ if src_files == ('-',) and not output_file:\n+ raise click.BadParameter('--output-file is required if input is from stdin')\n+ elif src_files == ('setup.py',):\n+ output_file = DEFAULT_REQUIREMENTS_OUTPUT_FILE\n \n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n", "issue": "pip-compile generates setup.txt by given explicitly setup.py in a source file\nThere is a possible wrong behavior on a command `pip-compile setup.py`, because it generates `setup.txt` file instead of `requirements.txt`.\r\n\r\n##### Environment Versions\r\n\r\n1. OS Type: Any (tested on MacOSX)\r\n1. Python version: 3.7.x\r\n1. pip version: 19.1\r\n1. pip-tools version: 3.3.2\r\n\r\n##### Steps to replicate\r\n\r\n1. Prepare a `setup.py` file:\r\n```\r\ncat << EOF > setup.py\r\nfrom setuptools import setup\r\nsetup(install_requires=[])\r\nEOF\r\n```\r\n2. Run: `pip-compile setup.py`\r\n\r\n##### Expected result\r\n\r\n```\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file requirements.txt setup.py\r\n#\r\n```\r\n\r\n##### Actual result\r\n\r\n```\r\n#\r\n# This file is autogenerated by pip-compile\r\n# To update, run:\r\n#\r\n# pip-compile --output-file setup.txt setup.py\r\n#\r\n```\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\nimport tempfile\n\nfrom .._compat import (\n install_req_from_line,\n parse_requirements,\n)\n\nfrom .. 
import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..pip import get_pip_command, pip_defaults\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (dedup, is_pinned_requirement, key_from_req, UNSAFE_PACKAGES)\nfrom ..writer import OutputWriter\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', count=True, help=\"Show more output\")\[email protected]('-q', '--quiet', count=True, help=\"Give less output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to {})\".format(pip_defaults.index_url), envvar='PIP_INDEX_URL') # noqa\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--cert', help=\"Path to alternate CA bundle.\")\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: {}\".format(', '.join(sorted(UNSAFE_PACKAGES))))\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n cert, client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbosity = verbose - quiet\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n if not output_file:\n output_file = 'requirements.txt'\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if len(src_files) == 1 and src_files[0] == '-':\n if not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name = src_files[0].rsplit('.', 1)[0]\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if cert:\n pip_args.extend(['--cert', cert])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n upgrade_install_reqs = {}\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not upgrade and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.\n upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)\n upgrade_install_reqs = {key_from_req(install_req.req): install_req for install_req in upgrade_reqs_gen}\n\n existing_pins = {key_from_req(ireq.req): ireq\n for ireq in ireqs\n if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_install_reqs}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = 
list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n constraints.extend(upgrade_install_reqs.values())\n\n # Filter out pip environment markers which do not match (PEP496)\n constraints = [req for req in constraints\n if req.markers is None or req.markers.evaluate()]\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control,\n allow_unsafe=allow_unsafe)\n writer.write(results=results,\n unsafe_requirements=resolver.unsafe_constraints,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n 
hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n", "path": "piptools/scripts/compile.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport os\nimport sys\nimport tempfile\n\nfrom .._compat import (\n install_req_from_line,\n parse_requirements,\n)\n\nfrom .. import click\nfrom ..exceptions import PipToolsError\nfrom ..logging import log\nfrom ..pip import get_pip_command, pip_defaults\nfrom ..repositories import LocalRequirementsRepository, PyPIRepository\nfrom ..resolver import Resolver\nfrom ..utils import (dedup, is_pinned_requirement, key_from_req, UNSAFE_PACKAGES)\nfrom ..writer import OutputWriter\n\nDEFAULT_REQUIREMENTS_FILE = 'requirements.in'\nDEFAULT_REQUIREMENTS_OUTPUT_FILE = 'requirements.txt'\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', count=True, help=\"Show more output\")\[email protected]('-q', '--quiet', count=True, help=\"Give less output\")\[email protected]('-n', '--dry-run', is_flag=True, help=\"Only show what would happen, don't change anything\")\[email protected]('-p', '--pre', is_flag=True, default=None, help=\"Allow resolving to prereleases (default is not)\")\[email protected]('-r', '--rebuild', is_flag=True, help=\"Clear any caches upfront, rebuild from scratch\")\[email protected]('-f', '--find-links', multiple=True, help=\"Look for archives in this directory or on this HTML page\", envvar='PIP_FIND_LINKS') # noqa\[email protected]('-i', '--index-url', help=\"Change index URL (defaults to {})\".format(pip_defaults.index_url), envvar='PIP_INDEX_URL') # noqa\[email protected]('--extra-index-url', multiple=True, help=\"Add additional index URL to search\", envvar='PIP_EXTRA_INDEX_URL') # noqa\[email protected]('--cert', help=\"Path to alternate CA bundle.\")\[email protected]('--client-cert', help=\"Path to SSL client certificate, a single file containing the private key and the certificate in PEM format.\") # noqa\[email protected]('--trusted-host', multiple=True, envvar='PIP_TRUSTED_HOST',\n help=\"Mark this host as trusted, even though it does not have \"\n \"valid or any HTTPS.\")\[email protected]('--header/--no-header', is_flag=True, default=True,\n help=\"Add header to generated file\")\[email protected]('--index/--no-index', is_flag=True, default=True,\n help=\"Add index URL to generated file\")\[email protected]('--emit-trusted-host/--no-emit-trusted-host', is_flag=True,\n default=True, help=\"Add trusted host option to generated file\")\[email protected]('--annotate/--no-annotate', is_flag=True, default=True,\n help=\"Annotate results, indicating where dependencies come from\")\[email protected]('-U', '--upgrade', is_flag=True, default=False,\n help='Try to upgrade all dependencies to their latest versions')\[email protected]('-P', '--upgrade-package', 'upgrade_packages', nargs=1, multiple=True,\n help=\"Specify particular packages to upgrade.\")\[email protected]('-o', '--output-file', nargs=1, type=str, default=None,\n help=('Output file name. Required if more than one input file is given. 
'\n 'Will be derived from input file otherwise.'))\[email protected]('--allow-unsafe', is_flag=True, default=False,\n help=\"Pin packages considered unsafe: {}\".format(', '.join(sorted(UNSAFE_PACKAGES))))\[email protected]('--generate-hashes', is_flag=True, default=False,\n help=\"Generate pip 8 style hashes in the resulting requirements file.\")\[email protected]('--max-rounds', default=10,\n help=\"Maximum number of rounds before resolving the requirements aborts.\")\[email protected]('src_files', nargs=-1, type=click.Path(exists=True, allow_dash=True))\ndef cli(verbose, quiet, dry_run, pre, rebuild, find_links, index_url, extra_index_url,\n cert, client_cert, trusted_host, header, index, emit_trusted_host, annotate,\n upgrade, upgrade_packages, output_file, allow_unsafe, generate_hashes,\n src_files, max_rounds):\n \"\"\"Compiles requirements.txt from requirements.in specs.\"\"\"\n log.verbosity = verbose - quiet\n\n if len(src_files) == 0:\n if os.path.exists(DEFAULT_REQUIREMENTS_FILE):\n src_files = (DEFAULT_REQUIREMENTS_FILE,)\n elif os.path.exists('setup.py'):\n src_files = ('setup.py',)\n else:\n raise click.BadParameter((\"If you do not specify an input file, \"\n \"the default is {} or setup.py\").format(DEFAULT_REQUIREMENTS_FILE))\n\n if src_files == ('-',) and not output_file:\n raise click.BadParameter('--output-file is required if input is from stdin')\n elif src_files == ('setup.py',):\n output_file = DEFAULT_REQUIREMENTS_OUTPUT_FILE\n\n if len(src_files) > 1 and not output_file:\n raise click.BadParameter('--output-file is required if two or more input files are given.')\n\n if output_file:\n dst_file = output_file\n else:\n base_name = src_files[0].rsplit('.', 1)[0]\n dst_file = base_name + '.txt'\n\n if upgrade and upgrade_packages:\n raise click.BadParameter('Only one of --upgrade or --upgrade-package can be provided as an argument.')\n\n ###\n # Setup\n ###\n\n pip_command = get_pip_command()\n\n pip_args = []\n if find_links:\n for link in find_links:\n pip_args.extend(['-f', link])\n if index_url:\n pip_args.extend(['-i', index_url])\n if extra_index_url:\n for extra_index in extra_index_url:\n pip_args.extend(['--extra-index-url', extra_index])\n if cert:\n pip_args.extend(['--cert', cert])\n if client_cert:\n pip_args.extend(['--client-cert', client_cert])\n if pre:\n pip_args.extend(['--pre'])\n if trusted_host:\n for host in trusted_host:\n pip_args.extend(['--trusted-host', host])\n\n pip_options, _ = pip_command.parse_args(pip_args)\n\n session = pip_command._build_session(pip_options)\n repository = PyPIRepository(pip_options, session)\n\n upgrade_install_reqs = {}\n # Proxy with a LocalRequirementsRepository if --upgrade is not specified\n # (= default invocation)\n if not upgrade and os.path.exists(dst_file):\n ireqs = parse_requirements(dst_file, finder=repository.finder, session=repository.session, options=pip_options)\n # Exclude packages from --upgrade-package/-P from the existing pins: We want to upgrade.\n upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)\n upgrade_install_reqs = {key_from_req(install_req.req): install_req for install_req in upgrade_reqs_gen}\n\n existing_pins = {key_from_req(ireq.req): ireq\n for ireq in ireqs\n if is_pinned_requirement(ireq) and key_from_req(ireq.req) not in upgrade_install_reqs}\n repository = LocalRequirementsRepository(existing_pins, repository)\n\n log.debug('Using indexes:')\n # remove duplicate index urls before processing\n repository.finder.index_urls = 
list(dedup(repository.finder.index_urls))\n for index_url in repository.finder.index_urls:\n log.debug(' {}'.format(index_url))\n\n if repository.finder.find_links:\n log.debug('')\n log.debug('Configuration:')\n for find_link in repository.finder.find_links:\n log.debug(' -f {}'.format(find_link))\n\n ###\n # Parsing/collecting initial requirements\n ###\n\n constraints = []\n for src_file in src_files:\n is_setup_file = os.path.basename(src_file) == 'setup.py'\n if is_setup_file or src_file == '-':\n # pip requires filenames and not files. Since we want to support\n # piping from stdin, we need to briefly save the input from stdin\n # to a temporary file and have pip read that. also used for\n # reading requirements from install_requires in setup.py.\n tmpfile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n if is_setup_file:\n from distutils.core import run_setup\n dist = run_setup(src_file)\n tmpfile.write('\\n'.join(dist.install_requires))\n else:\n tmpfile.write(sys.stdin.read())\n tmpfile.flush()\n constraints.extend(parse_requirements(\n tmpfile.name, finder=repository.finder, session=repository.session, options=pip_options))\n else:\n constraints.extend(parse_requirements(\n src_file, finder=repository.finder, session=repository.session, options=pip_options))\n\n constraints.extend(upgrade_install_reqs.values())\n\n # Filter out pip environment markers which do not match (PEP496)\n constraints = [req for req in constraints\n if req.markers is None or req.markers.evaluate()]\n\n # Check the given base set of constraints first\n Resolver.check_constraints(constraints)\n\n try:\n resolver = Resolver(constraints, repository, prereleases=pre,\n clear_caches=rebuild, allow_unsafe=allow_unsafe)\n results = resolver.resolve(max_rounds=max_rounds)\n if generate_hashes:\n hashes = resolver.resolve_hashes(results)\n else:\n hashes = None\n except PipToolsError as e:\n log.error(str(e))\n sys.exit(2)\n\n log.debug('')\n\n ##\n # Output\n ##\n\n # Compute reverse dependency annotations statically, from the\n # dependency cache that the resolver has populated by now.\n #\n # TODO (1a): reverse deps for any editable package are lost\n # what SHOULD happen is that they are cached in memory, just\n # not persisted to disk!\n #\n # TODO (1b): perhaps it's easiest if the dependency cache has an API\n # that could take InstallRequirements directly, like:\n #\n # cache.set(ireq, ...)\n #\n # then, when ireq is editable, it would store in\n #\n # editables[egg_name][link_without_fragment] = deps\n # editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}\n #\n # otherwise:\n #\n # self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}\n #\n reverse_dependencies = None\n if annotate:\n reverse_dependencies = resolver.reverse_dependencies(results)\n\n writer = OutputWriter(src_files, dst_file, dry_run=dry_run,\n emit_header=header, emit_index=index,\n emit_trusted_host=emit_trusted_host,\n annotate=annotate,\n generate_hashes=generate_hashes,\n default_index_url=repository.DEFAULT_INDEX_URL,\n index_urls=repository.finder.index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n format_control=repository.finder.format_control,\n allow_unsafe=allow_unsafe)\n writer.write(results=results,\n unsafe_requirements=resolver.unsafe_constraints,\n reverse_dependencies=reverse_dependencies,\n primary_packages={key_from_req(ireq.req) for ireq in constraints if not ireq.constraint},\n markers={key_from_req(ireq.req): ireq.markers\n for ireq in constraints if ireq.markers},\n 
hashes=hashes)\n\n if dry_run:\n log.warning('Dry-run, so nothing updated.')\n", "path": "piptools/scripts/compile.py"}]}
| 3,596 | 306 |
gh_patches_debug_26092
|
rasdani/github-patches
|
git_diff
|
twisted__twisted-11749
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove check for PR links to issue IDs
> If this is a pain there are 2 options:
It does just seem to be the most common type of misconfiguration, which I bump into on almost every code review or submission.
I'm not sure it's providing enough value to be worth the hassle, so perhaps disabling would be best for now. The "fixes:" line does most of the work.
_Originally posted by @glyph in https://github.com/twisted/twisted/issues/11747#issuecomment-1304901054_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.github/scripts/check-pr-text.py`
Content:
```
1 #
2 # This script is designed to be called by the GHA workflow.
3 #
4 # It is designed to check that the PR text complies to our dev standards.
5 #
6 # The input is received via the environmet variables:
7 # * PR_TITLE - title of the PR
8 # * PR_BODY - the description of the PR
9 #
10 # To test it run
11 #
12 # $ export PR_TITLE='#1234 Test Title'
13 # $ export PR_BODY='some lines
14 # > Fixes #12345
15 # > more lines'
16 # $ python3 .github/scripts/check-pr-text.py
17 #
18 import os
19 import re
20 import sys
21
22 pr_title = os.environ.get("PR_TITLE", "")
23 pr_body = os.environ.get("PR_BODY", "")
24
25 print("--- DEBUG ---")
26 print(f"Title: {pr_title}")
27 print(f"Body:\n {pr_body}")
28 print("-------------")
29
30
31 def fail(message):
32 print(message)
33 print("Fix the title and then trigger a new push.")
34 print("A re-run for this job will not work.")
35 sys.exit(1)
36
37
38 if not pr_title:
39 fail("Title for the PR not found. " "Maybe missing PR_TITLE env var.")
40
41 if not pr_body:
42 fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
43
44 title_search = re.search(r"^(#\d+):? .+", pr_title)
45 if not title_search:
46 fail(
47 "Title of PR has no issue ID reference. It must look like β#1234 Foo bar bazβ."
48 )
49 else:
50 print(f"PR title is complaint for {title_search[1]}. Good job.")
51
52
53 body_search = re.search(r".*Fixes (#\d+).+", pr_body)
54 if not body_search:
55 fail('Body of PR has no "Fixes #12345" issue ID reference.')
56 else:
57 print(f"PR description is complaint for {body_search[1]}. Good job.")
58
59
60 if title_search[1] != body_search[1]:
61 fail("PR title and description have different IDs.")
62
63 # All good.
64 sys.exit(0)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py
deleted file mode 100644
--- a/.github/scripts/check-pr-text.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#
-# This script is designed to be called by the GHA workflow.
-#
-# It is designed to check that the PR text complies to our dev standards.
-#
-# The input is received via the environmet variables:
-# * PR_TITLE - title of the PR
-# * PR_BODY - the description of the PR
-#
-# To test it run
-#
-# $ export PR_TITLE='#1234 Test Title'
-# $ export PR_BODY='some lines
-# > Fixes #12345
-# > more lines'
-# $ python3 .github/scripts/check-pr-text.py
-#
-import os
-import re
-import sys
-
-pr_title = os.environ.get("PR_TITLE", "")
-pr_body = os.environ.get("PR_BODY", "")
-
-print("--- DEBUG ---")
-print(f"Title: {pr_title}")
-print(f"Body:\n {pr_body}")
-print("-------------")
-
-
-def fail(message):
- print(message)
- print("Fix the title and then trigger a new push.")
- print("A re-run for this job will not work.")
- sys.exit(1)
-
-
-if not pr_title:
- fail("Title for the PR not found. " "Maybe missing PR_TITLE env var.")
-
-if not pr_body:
- fail("Body for the PR not found. " "Maybe missing PR_BODY env var.")
-
-title_search = re.search(r"^(#\d+):? .+", pr_title)
-if not title_search:
- fail(
-        "Title of PR has no issue ID reference. It must look like “#1234 Foo bar baz”."
- )
-else:
- print(f"PR title is complaint for {title_search[1]}. Good job.")
-
-
-body_search = re.search(r".*Fixes (#\d+).+", pr_body)
-if not body_search:
- fail('Body of PR has no "Fixes #12345" issue ID reference.')
-else:
- print(f"PR description is complaint for {body_search[1]}. Good job.")
-
-
-if title_search[1] != body_search[1]:
- fail("PR title and description have different IDs.")
-
-# All good.
-sys.exit(0)
|
{"golden_diff": "diff --git a/.github/scripts/check-pr-text.py b/.github/scripts/check-pr-text.py\ndeleted file mode 100644\n--- a/.github/scripts/check-pr-text.py\n+++ /dev/null\n@@ -1,64 +0,0 @@\n-#\n-# This script is designed to be called by the GHA workflow.\n-#\n-# It is designed to check that the PR text complies to our dev standards.\n-#\n-# The input is received via the environmet variables:\n-# * PR_TITLE - title of the PR\n-# * PR_BODY - the description of the PR\n-#\n-# To test it run\n-#\n-# $ export PR_TITLE='#1234 Test Title'\n-# $ export PR_BODY='some lines\n-# > Fixes #12345\n-# > more lines'\n-# $ python3 .github/scripts/check-pr-text.py\n-#\n-import os\n-import re\n-import sys\n-\n-pr_title = os.environ.get(\"PR_TITLE\", \"\")\n-pr_body = os.environ.get(\"PR_BODY\", \"\")\n-\n-print(\"--- DEBUG ---\")\n-print(f\"Title: {pr_title}\")\n-print(f\"Body:\\n {pr_body}\")\n-print(\"-------------\")\n-\n-\n-def fail(message):\n- print(message)\n- print(\"Fix the title and then trigger a new push.\")\n- print(\"A re-run for this job will not work.\")\n- sys.exit(1)\n-\n-\n-if not pr_title:\n- fail(\"Title for the PR not found. \" \"Maybe missing PR_TITLE env var.\")\n-\n-if not pr_body:\n- fail(\"Body for the PR not found. \" \"Maybe missing PR_BODY env var.\")\n-\n-title_search = re.search(r\"^(#\\d+):? .+\", pr_title)\n-if not title_search:\n- fail(\n- \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n- )\n-else:\n- print(f\"PR title is complaint for {title_search[1]}. Good job.\")\n-\n-\n-body_search = re.search(r\".*Fixes (#\\d+).+\", pr_body)\n-if not body_search:\n- fail('Body of PR has no \"Fixes #12345\" issue ID reference.')\n-else:\n- print(f\"PR description is complaint for {body_search[1]}. Good job.\")\n-\n-\n-if title_search[1] != body_search[1]:\n- fail(\"PR title and description have different IDs.\")\n-\n-# All good.\n-sys.exit(0)\n", "issue": "Remove check for PR links to issue IDs\n > If this is a pain there are 2 options:\r\n\r\nIt does just seem to be the most common type of misconfiguration, which I bump into on almost every code review or submission.\r\n\r\nI'm not sure it's providing enough value to be worth the hassle, so perhaps disabling would be best for now. The \"fixes:\" line does most of the work.\r\n\r\n_Originally posted by @glyph in https://github.com/twisted/twisted/issues/11747#issuecomment-1304901054_\r\n \n", "before_files": [{"content": "#\n# This script is designed to be called by the GHA workflow.\n#\n# It is designed to check that the PR text complies to our dev standards.\n#\n# The input is received via the environmet variables:\n# * PR_TITLE - title of the PR\n# * PR_BODY - the description of the PR\n#\n# To test it run\n#\n# $ export PR_TITLE='#1234 Test Title'\n# $ export PR_BODY='some lines\n# > Fixes #12345\n# > more lines'\n# $ python3 .github/scripts/check-pr-text.py\n#\nimport os\nimport re\nimport sys\n\npr_title = os.environ.get(\"PR_TITLE\", \"\")\npr_body = os.environ.get(\"PR_BODY\", \"\")\n\nprint(\"--- DEBUG ---\")\nprint(f\"Title: {pr_title}\")\nprint(f\"Body:\\n {pr_body}\")\nprint(\"-------------\")\n\n\ndef fail(message):\n print(message)\n print(\"Fix the title and then trigger a new push.\")\n print(\"A re-run for this job will not work.\")\n sys.exit(1)\n\n\nif not pr_title:\n fail(\"Title for the PR not found. \" \"Maybe missing PR_TITLE env var.\")\n\nif not pr_body:\n fail(\"Body for the PR not found. 
\" \"Maybe missing PR_BODY env var.\")\n\ntitle_search = re.search(r\"^(#\\d+):? .+\", pr_title)\nif not title_search:\n fail(\n \"Title of PR has no issue ID reference. It must look like \u201c#1234 Foo bar baz\u201d.\"\n )\nelse:\n print(f\"PR title is complaint for {title_search[1]}. Good job.\")\n\n\nbody_search = re.search(r\".*Fixes (#\\d+).+\", pr_body)\nif not body_search:\n fail('Body of PR has no \"Fixes #12345\" issue ID reference.')\nelse:\n print(f\"PR description is complaint for {body_search[1]}. Good job.\")\n\n\nif title_search[1] != body_search[1]:\n fail(\"PR title and description have different IDs.\")\n\n# All good.\nsys.exit(0)\n", "path": ".github/scripts/check-pr-text.py"}], "after_files": [{"content": null, "path": ".github/scripts/check-pr-text.py"}]}
| 964 | 549 |
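For reference, the check deleted in this record reduces to two regular expressions over the PR title and body plus an ID comparison. A minimal sketch of what the old script accepted, reusing its regexes verbatim but with invented example strings (the title, body, and issue number below are illustrative only):

```python
import re

# Regexes copied from the deleted check-pr-text.py; inputs are made up.
title = "#1234 Fix the frobnicator"
body = "Some description of the change.\nFixes #1234 by rewriting the frobnicator.\n"

title_search = re.search(r"^(#\d+):? .+", title)
body_search = re.search(r".*Fixes (#\d+).+", body)

# The old check required both matches and identical issue IDs.
assert title_search and body_search
assert title_search[1] == body_search[1]  # both "#1234"
```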
gh_patches_debug_8068
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2497
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Untranslated email subject
### Describe the bug
We just got an email from Cato:
<img width="655" alt="image" src="https://user-images.githubusercontent.com/1576660/191570837-0b3bf3e8-4c38-4113-8351-b505d83e97ea.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/emails.py`
Content:
```
1 """The emails defined by the members package."""
2 from datetime import timedelta
3 import logging
4
5 from django.conf import settings
6 from django.core.mail import EmailMultiAlternatives
7 from django.template.loader import get_template
8 from django.core import mail
9 from django.template import loader
10 from django.template.defaultfilters import floatformat
11 from django.urls import reverse
12 from django.utils import timezone
13 from django.utils.translation import gettext as _
14
15 from members.models import Member, Membership
16
17 logger = logging.getLogger(__name__)
18
19
20 def send_membership_announcement(dry_run=False):
21 """Send an email to all members with a never ending membership excluding honorary members.
22
23 :param dry_run: does not really send emails if True
24 """
25 members = (
26 Member.current_members.filter(membership__since__lt=timezone.now())
27 .filter(membership__until__isnull=True)
28 .exclude(membership__type=Membership.HONORARY)
29 .exclude(email="")
30 .distinct()
31 )
32
33 with mail.get_connection() as connection:
34 for member in members:
35 logger.info("Sent email to %s (%s)", member.get_full_name(), member.email)
36 if not dry_run:
37 email_body = loader.render_to_string(
38 "members/email/membership_announcement.txt",
39 {"name": member.get_full_name()},
40 )
41 mail.EmailMessage(
42 f"[THALIA] {_('Membership announcement')}",
43 email_body,
44 settings.DEFAULT_FROM_EMAIL,
45 [member.email],
46 bcc=[settings.BOARD_NOTIFICATION_ADDRESS],
47 connection=connection,
48 ).send()
49
50 if not dry_run:
51 mail.mail_managers(
52 _("Membership announcement sent"),
53 loader.render_to_string(
54 "members/email/membership_announcement_notification.txt",
55 {"members": members},
56 ),
57 connection=connection,
58 )
59
60
61 def send_information_request(dry_run=False):
62 """Send an email to all members to have them check their personal information.
63
64 :param dry_run: does not really send emails if True
65 """
66 members = Member.current_members.all().exclude(email="")
67
68 with mail.get_connection() as connection:
69 for member in members:
70 logger.info("Sent email to %s (%s)", member.get_full_name(), member.email)
71 if not dry_run:
72
73 email_context = {
74 k: x if x else ""
75 for k, x in {
76 "name": member.first_name,
77 "username": member.username,
78 "full_name": member.get_full_name(),
79 "address_street": member.profile.address_street,
80 "address_street2": member.profile.address_street2,
81 "address_postal_code": member.profile.address_postal_code,
82 "address_city": member.profile.address_city,
83 "address_country": member.profile.get_address_country_display(),
84 "phone_number": member.profile.phone_number,
85 "birthday": member.profile.birthday,
86 "email": member.email,
87 "student_number": member.profile.student_number,
88 "starting_year": member.profile.starting_year,
89 "programme": member.profile.get_programme_display(),
90 }.items()
91 }
92 html_template = get_template("members/email/information_check.html")
93 text_template = get_template("members/email/information_check.txt")
94 subject = "[THALIA] " + _("Membership information check")
95 html_message = html_template.render(email_context)
96 text_message = text_template.render(email_context)
97
98 msg = EmailMultiAlternatives(
99 subject,
100 text_message,
101 settings.DEFAULT_FROM_EMAIL,
102 [member.email],
103 )
104 msg.attach_alternative(html_message, "text/html")
105 msg.send()
106
107 if not dry_run:
108 mail.mail_managers(
109 _("Membership information check sent"),
110 loader.render_to_string(
111 "members/email/information_check_notification.txt",
112 {"members": members},
113 ),
114 connection=connection,
115 )
116
117
118 def send_expiration_announcement(dry_run=False):
119 """Send an email to all members whose membership will end in the next 31 days to warn them about this.
120
121 :param dry_run: does not really send emails if True
122 """
123 expiry_date = timezone.now() + timedelta(days=31)
124 members = (
125 Member.current_members.filter(membership__until__lte=expiry_date)
126 .exclude(membership__until__isnull=True)
127 .exclude(email="")
128 .distinct()
129 )
130
131 with mail.get_connection() as connection:
132 for member in members:
133 logger.info("Sent email to %s (%s)", member.get_full_name(), member.email)
134 if not dry_run:
135
136 renewal_url = settings.BASE_URL + reverse("registrations:renew")
137 email_body = loader.render_to_string(
138 "members/email/expiration_announcement.txt",
139 {
140 "name": member.get_full_name(),
141 "membership_price": floatformat(
142 settings.MEMBERSHIP_PRICES["year"], 2
143 ),
144 "renewal_url": renewal_url,
145 },
146 )
147 mail.EmailMessage(
148 f"[THALIA] {_('Membership expiration announcement')}",
149 email_body,
150 settings.DEFAULT_FROM_EMAIL,
151 [member.email],
152 bcc=[settings.BOARD_NOTIFICATION_ADDRESS],
153 connection=connection,
154 ).send()
155
156 if not dry_run:
157 mail.mail_managers(
158 _("Membership expiration announcement sent"),
159 loader.render_to_string(
160 "members/email/expiration_announcement_notification.txt",
161 {"members": members},
162 ),
163 connection=connection,
164 )
165
166
167 def send_welcome_message(user, password):
168 """Send an email to a new mail welcoming them.
169
170 :param user: the new user
171 :param password: randomly generated password
172 """
173 email_body = loader.render_to_string(
174 "members/email/welcome.txt",
175 {
176 "full_name": user.get_full_name(),
177 "username": user.username,
178 "password": password,
179 "url": settings.BASE_URL,
180 },
181 )
182 user.email_user(_("Welcome to Study Association Thalia"), email_body)
183
184
185 def send_email_change_confirmation_messages(change_request):
186 """Send emails to the old and new email address of a member to confirm the email change.
187
188 :param change_request the email change request entered by the user
189 """
190 member = change_request.member
191
192 confirm_link = settings.BASE_URL + reverse(
193 "members:email-change-confirm",
194 args=[change_request.confirm_key],
195 )
196 mail.EmailMessage(
197 f"[THALIA] {_('Please confirm your email change')}",
198 loader.render_to_string(
199 "members/email/email_change_confirm.txt",
200 {
201 "confirm_link": confirm_link,
202 "name": member.first_name,
203 },
204 ),
205 settings.DEFAULT_FROM_EMAIL,
206 [member.email],
207 ).send()
208
209 confirm_link = settings.BASE_URL + reverse(
210 "members:email-change-verify",
211 args=[change_request.verify_key],
212 )
213 mail.EmailMessage(
214 f"[THALIA] {_('Please verify your email address')}",
215 loader.render_to_string(
216 "members/email/email_change_verify.txt",
217 {
218 "confirm_link": confirm_link,
219 "name": member.first_name,
220 },
221 ),
222 settings.DEFAULT_FROM_EMAIL,
223 [change_request.email],
224 ).send()
225
226
227 def send_email_change_completion_message(change_request):
228 """Send email to the member to confirm the email change.
229
230 :param change_request the email change request entered by the user
231 """
232 change_request.member.email_user(
233 "[THALIA] {_('Your email address has been changed')}",
234 loader.render_to_string(
235 "members/email/email_change_completed.txt",
236 {"name": change_request.member.first_name},
237 ),
238 )
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/members/emails.py b/website/members/emails.py
--- a/website/members/emails.py
+++ b/website/members/emails.py
@@ -230,7 +230,7 @@
:param change_request the email change request entered by the user
"""
change_request.member.email_user(
- "[THALIA] {_('Your email address has been changed')}",
+ f"[THALIA] {_('Your email address has been changed')}",
loader.render_to_string(
"members/email/email_change_completed.txt",
{"name": change_request.member.first_name},
|
{"golden_diff": "diff --git a/website/members/emails.py b/website/members/emails.py\n--- a/website/members/emails.py\n+++ b/website/members/emails.py\n@@ -230,7 +230,7 @@\n :param change_request the email change request entered by the user\n \"\"\"\n change_request.member.email_user(\n- \"[THALIA] {_('Your email address has been changed')}\",\n+ f\"[THALIA] {_('Your email address has been changed')}\",\n loader.render_to_string(\n \"members/email/email_change_completed.txt\",\n {\"name\": change_request.member.first_name},\n", "issue": "Untranslated email subject\n### Describe the bug\r\n\r\nWe just got an email from Cato:\r\n\r\n<img width=\"655\" alt=\"image\" src=\"https://user-images.githubusercontent.com/1576660/191570837-0b3bf3e8-4c38-4113-8351-b505d83e97ea.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"The emails defined by the members package.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.core import mail\nfrom django.template import loader\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\n\nfrom members.models import Member, Membership\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_membership_announcement(dry_run=False):\n \"\"\"Send an email to all members with a never ending membership excluding honorary members.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n members = (\n Member.current_members.filter(membership__since__lt=timezone.now())\n .filter(membership__until__isnull=True)\n .exclude(membership__type=Membership.HONORARY)\n .exclude(email=\"\")\n .distinct()\n )\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n email_body = loader.render_to_string(\n \"members/email/membership_announcement.txt\",\n {\"name\": member.get_full_name()},\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Membership announcement')}\",\n email_body,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n bcc=[settings.BOARD_NOTIFICATION_ADDRESS],\n connection=connection,\n ).send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership announcement sent\"),\n loader.render_to_string(\n \"members/email/membership_announcement_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_information_request(dry_run=False):\n \"\"\"Send an email to all members to have them check their personal information.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n members = Member.current_members.all().exclude(email=\"\")\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n\n email_context = {\n k: x if x else \"\"\n for k, x in {\n \"name\": member.first_name,\n \"username\": member.username,\n \"full_name\": member.get_full_name(),\n \"address_street\": member.profile.address_street,\n \"address_street2\": member.profile.address_street2,\n \"address_postal_code\": member.profile.address_postal_code,\n \"address_city\": member.profile.address_city,\n \"address_country\": member.profile.get_address_country_display(),\n \"phone_number\": member.profile.phone_number,\n \"birthday\": member.profile.birthday,\n \"email\": 
member.email,\n \"student_number\": member.profile.student_number,\n \"starting_year\": member.profile.starting_year,\n \"programme\": member.profile.get_programme_display(),\n }.items()\n }\n html_template = get_template(\"members/email/information_check.html\")\n text_template = get_template(\"members/email/information_check.txt\")\n subject = \"[THALIA] \" + _(\"Membership information check\")\n html_message = html_template.render(email_context)\n text_message = text_template.render(email_context)\n\n msg = EmailMultiAlternatives(\n subject,\n text_message,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n )\n msg.attach_alternative(html_message, \"text/html\")\n msg.send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership information check sent\"),\n loader.render_to_string(\n \"members/email/information_check_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_expiration_announcement(dry_run=False):\n \"\"\"Send an email to all members whose membership will end in the next 31 days to warn them about this.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n expiry_date = timezone.now() + timedelta(days=31)\n members = (\n Member.current_members.filter(membership__until__lte=expiry_date)\n .exclude(membership__until__isnull=True)\n .exclude(email=\"\")\n .distinct()\n )\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n\n renewal_url = settings.BASE_URL + reverse(\"registrations:renew\")\n email_body = loader.render_to_string(\n \"members/email/expiration_announcement.txt\",\n {\n \"name\": member.get_full_name(),\n \"membership_price\": floatformat(\n settings.MEMBERSHIP_PRICES[\"year\"], 2\n ),\n \"renewal_url\": renewal_url,\n },\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Membership expiration announcement')}\",\n email_body,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n bcc=[settings.BOARD_NOTIFICATION_ADDRESS],\n connection=connection,\n ).send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership expiration announcement sent\"),\n loader.render_to_string(\n \"members/email/expiration_announcement_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_welcome_message(user, password):\n \"\"\"Send an email to a new mail welcoming them.\n\n :param user: the new user\n :param password: randomly generated password\n \"\"\"\n email_body = loader.render_to_string(\n \"members/email/welcome.txt\",\n {\n \"full_name\": user.get_full_name(),\n \"username\": user.username,\n \"password\": password,\n \"url\": settings.BASE_URL,\n },\n )\n user.email_user(_(\"Welcome to Study Association Thalia\"), email_body)\n\n\ndef send_email_change_confirmation_messages(change_request):\n \"\"\"Send emails to the old and new email address of a member to confirm the email change.\n\n :param change_request the email change request entered by the user\n \"\"\"\n member = change_request.member\n\n confirm_link = settings.BASE_URL + reverse(\n \"members:email-change-confirm\",\n args=[change_request.confirm_key],\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Please confirm your email change')}\",\n loader.render_to_string(\n \"members/email/email_change_confirm.txt\",\n {\n \"confirm_link\": confirm_link,\n \"name\": member.first_name,\n },\n ),\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n ).send()\n\n confirm_link = settings.BASE_URL + reverse(\n 
\"members:email-change-verify\",\n args=[change_request.verify_key],\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Please verify your email address')}\",\n loader.render_to_string(\n \"members/email/email_change_verify.txt\",\n {\n \"confirm_link\": confirm_link,\n \"name\": member.first_name,\n },\n ),\n settings.DEFAULT_FROM_EMAIL,\n [change_request.email],\n ).send()\n\n\ndef send_email_change_completion_message(change_request):\n \"\"\"Send email to the member to confirm the email change.\n\n :param change_request the email change request entered by the user\n \"\"\"\n change_request.member.email_user(\n \"[THALIA] {_('Your email address has been changed')}\",\n loader.render_to_string(\n \"members/email/email_change_completed.txt\",\n {\"name\": change_request.member.first_name},\n ),\n )\n", "path": "website/members/emails.py"}], "after_files": [{"content": "\"\"\"The emails defined by the members package.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.core import mail\nfrom django.template import loader\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\n\nfrom members.models import Member, Membership\n\nlogger = logging.getLogger(__name__)\n\n\ndef send_membership_announcement(dry_run=False):\n \"\"\"Send an email to all members with a never ending membership excluding honorary members.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n members = (\n Member.current_members.filter(membership__since__lt=timezone.now())\n .filter(membership__until__isnull=True)\n .exclude(membership__type=Membership.HONORARY)\n .exclude(email=\"\")\n .distinct()\n )\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n email_body = loader.render_to_string(\n \"members/email/membership_announcement.txt\",\n {\"name\": member.get_full_name()},\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Membership announcement')}\",\n email_body,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n bcc=[settings.BOARD_NOTIFICATION_ADDRESS],\n connection=connection,\n ).send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership announcement sent\"),\n loader.render_to_string(\n \"members/email/membership_announcement_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_information_request(dry_run=False):\n \"\"\"Send an email to all members to have them check their personal information.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n members = Member.current_members.all().exclude(email=\"\")\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n\n email_context = {\n k: x if x else \"\"\n for k, x in {\n \"name\": member.first_name,\n \"username\": member.username,\n \"full_name\": member.get_full_name(),\n \"address_street\": member.profile.address_street,\n \"address_street2\": member.profile.address_street2,\n \"address_postal_code\": member.profile.address_postal_code,\n \"address_city\": member.profile.address_city,\n \"address_country\": member.profile.get_address_country_display(),\n \"phone_number\": member.profile.phone_number,\n 
\"birthday\": member.profile.birthday,\n \"email\": member.email,\n \"student_number\": member.profile.student_number,\n \"starting_year\": member.profile.starting_year,\n \"programme\": member.profile.get_programme_display(),\n }.items()\n }\n html_template = get_template(\"members/email/information_check.html\")\n text_template = get_template(\"members/email/information_check.txt\")\n subject = \"[THALIA] \" + _(\"Membership information check\")\n html_message = html_template.render(email_context)\n text_message = text_template.render(email_context)\n\n msg = EmailMultiAlternatives(\n subject,\n text_message,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n )\n msg.attach_alternative(html_message, \"text/html\")\n msg.send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership information check sent\"),\n loader.render_to_string(\n \"members/email/information_check_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_expiration_announcement(dry_run=False):\n \"\"\"Send an email to all members whose membership will end in the next 31 days to warn them about this.\n\n :param dry_run: does not really send emails if True\n \"\"\"\n expiry_date = timezone.now() + timedelta(days=31)\n members = (\n Member.current_members.filter(membership__until__lte=expiry_date)\n .exclude(membership__until__isnull=True)\n .exclude(email=\"\")\n .distinct()\n )\n\n with mail.get_connection() as connection:\n for member in members:\n logger.info(\"Sent email to %s (%s)\", member.get_full_name(), member.email)\n if not dry_run:\n\n renewal_url = settings.BASE_URL + reverse(\"registrations:renew\")\n email_body = loader.render_to_string(\n \"members/email/expiration_announcement.txt\",\n {\n \"name\": member.get_full_name(),\n \"membership_price\": floatformat(\n settings.MEMBERSHIP_PRICES[\"year\"], 2\n ),\n \"renewal_url\": renewal_url,\n },\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Membership expiration announcement')}\",\n email_body,\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n bcc=[settings.BOARD_NOTIFICATION_ADDRESS],\n connection=connection,\n ).send()\n\n if not dry_run:\n mail.mail_managers(\n _(\"Membership expiration announcement sent\"),\n loader.render_to_string(\n \"members/email/expiration_announcement_notification.txt\",\n {\"members\": members},\n ),\n connection=connection,\n )\n\n\ndef send_welcome_message(user, password):\n \"\"\"Send an email to a new mail welcoming them.\n\n :param user: the new user\n :param password: randomly generated password\n \"\"\"\n email_body = loader.render_to_string(\n \"members/email/welcome.txt\",\n {\n \"full_name\": user.get_full_name(),\n \"username\": user.username,\n \"password\": password,\n \"url\": settings.BASE_URL,\n },\n )\n user.email_user(_(\"Welcome to Study Association Thalia\"), email_body)\n\n\ndef send_email_change_confirmation_messages(change_request):\n \"\"\"Send emails to the old and new email address of a member to confirm the email change.\n\n :param change_request the email change request entered by the user\n \"\"\"\n member = change_request.member\n\n confirm_link = settings.BASE_URL + reverse(\n \"members:email-change-confirm\",\n args=[change_request.confirm_key],\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Please confirm your email change')}\",\n loader.render_to_string(\n \"members/email/email_change_confirm.txt\",\n {\n \"confirm_link\": confirm_link,\n \"name\": member.first_name,\n },\n ),\n settings.DEFAULT_FROM_EMAIL,\n [member.email],\n ).send()\n\n confirm_link = 
settings.BASE_URL + reverse(\n \"members:email-change-verify\",\n args=[change_request.verify_key],\n )\n mail.EmailMessage(\n f\"[THALIA] {_('Please verify your email address')}\",\n loader.render_to_string(\n \"members/email/email_change_verify.txt\",\n {\n \"confirm_link\": confirm_link,\n \"name\": member.first_name,\n },\n ),\n settings.DEFAULT_FROM_EMAIL,\n [change_request.email],\n ).send()\n\n\ndef send_email_change_completion_message(change_request):\n \"\"\"Send email to the member to confirm the email change.\n\n :param change_request the email change request entered by the user\n \"\"\"\n change_request.member.email_user(\n f\"[THALIA] {_('Your email address has been changed')}\",\n loader.render_to_string(\n \"members/email/email_change_completed.txt\",\n {\"name\": change_request.member.first_name},\n ),\n )\n", "path": "website/members/emails.py"}]}
| 2,581 | 137 |
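For reference, the one-character fix in this record turns a plain string literal into an f-string: without the `f` prefix, Python keeps the braces and the `{_('...')}` expression verbatim, which is exactly what showed up in the untranslated email subject. A minimal sketch of the difference, using the stdlib `gettext` as a stand-in for Django's translation helper (an assumption made so the snippet runs without Django installed):

```python
from gettext import gettext as _  # stand-in for django.utils.translation.gettext

# Plain literal: the braces are never evaluated, so the recipient sees them verbatim.
broken = "[THALIA] {_('Your email address has been changed')}"
print(broken)  # [THALIA] {_('Your email address has been changed')}

# f-string: the embedded translation call is evaluated when the subject is built.
fixed = f"[THALIA] {_('Your email address has been changed')}"
print(fixed)   # [THALIA] Your email address has been changed
```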
gh_patches_debug_21000
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-7730
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
curious behavior on Ijulia on IPython3.0b1 on windows
Hi,
Trying `Pkg.`+`<Tab>` on a cell of a IJulia notebook, I can select `build` in the dropdown window, but when I validate it, only `build`remains in my cell (instead of `Pkg.build`)
(on Firefox as on Chrome)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/kernel/adapter.py`
Content:
```
1 """Adapters for IPython msg spec versions."""
2
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 import json
7
8 from IPython.core.release import kernel_protocol_version_info
9 from IPython.utils.tokenutil import token_at_cursor
10
11
12 def code_to_line(code, cursor_pos):
13 """Turn a multiline code block and cursor position into a single line
14 and new cursor position.
15
16 For adapting ``complete_`` and ``object_info_request``.
17 """
18 if not code:
19 return "", 0
20 for line in code.splitlines(True):
21 n = len(line)
22 if cursor_pos > n:
23 cursor_pos -= n
24 else:
25 break
26 return line, cursor_pos
27
28
29 class Adapter(object):
30 """Base class for adapting messages
31
32 Override message_type(msg) methods to create adapters.
33 """
34
35 msg_type_map = {}
36
37 def update_header(self, msg):
38 return msg
39
40 def update_metadata(self, msg):
41 return msg
42
43 def update_msg_type(self, msg):
44 header = msg['header']
45 msg_type = header['msg_type']
46 if msg_type in self.msg_type_map:
47 msg['msg_type'] = header['msg_type'] = self.msg_type_map[msg_type]
48 return msg
49
50 def handle_reply_status_error(self, msg):
51 """This will be called *instead of* the regular handler
52
53 on any reply with status != ok
54 """
55 return msg
56
57 def __call__(self, msg):
58 msg = self.update_header(msg)
59 msg = self.update_metadata(msg)
60 msg = self.update_msg_type(msg)
61 header = msg['header']
62
63 handler = getattr(self, header['msg_type'], None)
64 if handler is None:
65 return msg
66
67 # handle status=error replies separately (no change, at present)
68 if msg['content'].get('status', None) in {'error', 'aborted'}:
69 return self.handle_reply_status_error(msg)
70 return handler(msg)
71
72 def _version_str_to_list(version):
73 """convert a version string to a list of ints
74
75 non-int segments are excluded
76 """
77 v = []
78 for part in version.split('.'):
79 try:
80 v.append(int(part))
81 except ValueError:
82 pass
83 return v
84
85 class V5toV4(Adapter):
86 """Adapt msg protocol v5 to v4"""
87
88 version = '4.1'
89
90 msg_type_map = {
91 'execute_result' : 'pyout',
92 'execute_input' : 'pyin',
93 'error' : 'pyerr',
94 'inspect_request' : 'object_info_request',
95 'inspect_reply' : 'object_info_reply',
96 }
97
98 def update_header(self, msg):
99 msg['header'].pop('version', None)
100 return msg
101
102 # shell channel
103
104 def kernel_info_reply(self, msg):
105 v4c = {}
106 content = msg['content']
107 for key in ('language_version', 'protocol_version'):
108 if key in content:
109 v4c[key] = _version_str_to_list(content[key])
110 if content.get('implementation', '') == 'ipython' \
111 and 'implementation_version' in content:
112 v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])
113 language_info = content.get('language_info', {})
114 language = language_info.get('name', '')
115 v4c.setdefault('language', language)
116 if 'version' in language_info:
117 v4c.setdefault('language_version', _version_str_to_list(language_info['version']))
118 msg['content'] = v4c
119 return msg
120
121 def execute_request(self, msg):
122 content = msg['content']
123 content.setdefault('user_variables', [])
124 return msg
125
126 def execute_reply(self, msg):
127 content = msg['content']
128 content.setdefault('user_variables', {})
129 # TODO: handle payloads
130 return msg
131
132 def complete_request(self, msg):
133 content = msg['content']
134 code = content['code']
135 cursor_pos = content['cursor_pos']
136 line, cursor_pos = code_to_line(code, cursor_pos)
137
138 new_content = msg['content'] = {}
139 new_content['text'] = ''
140 new_content['line'] = line
141 new_content['block'] = None
142 new_content['cursor_pos'] = cursor_pos
143 return msg
144
145 def complete_reply(self, msg):
146 content = msg['content']
147 cursor_start = content.pop('cursor_start')
148 cursor_end = content.pop('cursor_end')
149 match_len = cursor_end - cursor_start
150 content['matched_text'] = content['matches'][0][:match_len]
151 content.pop('metadata', None)
152 return msg
153
154 def object_info_request(self, msg):
155 content = msg['content']
156 code = content['code']
157 cursor_pos = content['cursor_pos']
158 line, _ = code_to_line(code, cursor_pos)
159
160 new_content = msg['content'] = {}
161 new_content['oname'] = token_at_cursor(code, cursor_pos)
162 new_content['detail_level'] = content['detail_level']
163 return msg
164
165 def object_info_reply(self, msg):
166 """inspect_reply can't be easily backward compatible"""
167 msg['content'] = {'found' : False, 'oname' : 'unknown'}
168 return msg
169
170 # iopub channel
171
172 def stream(self, msg):
173 content = msg['content']
174 content['data'] = content.pop('text')
175 return msg
176
177 def display_data(self, msg):
178 content = msg['content']
179 content.setdefault("source", "display")
180 data = content['data']
181 if 'application/json' in data:
182 try:
183 data['application/json'] = json.dumps(data['application/json'])
184 except Exception:
185 # warn?
186 pass
187 return msg
188
189 # stdin channel
190
191 def input_request(self, msg):
192 msg['content'].pop('password', None)
193 return msg
194
195
196 class V4toV5(Adapter):
197 """Convert msg spec V4 to V5"""
198 version = '5.0'
199
200 # invert message renames above
201 msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}
202
203 def update_header(self, msg):
204 msg['header']['version'] = self.version
205 return msg
206
207 # shell channel
208
209 def kernel_info_reply(self, msg):
210 content = msg['content']
211 for key in ('protocol_version', 'ipython_version'):
212 if key in content:
213 content[key] = '.'.join(map(str, content[key]))
214
215 content.setdefault('protocol_version', '4.1')
216
217 if content['language'].startswith('python') and 'ipython_version' in content:
218 content['implementation'] = 'ipython'
219 content['implementation_version'] = content.pop('ipython_version')
220
221 language = content.pop('language')
222 language_info = content.setdefault('language_info', {})
223 language_info.setdefault('name', language)
224 if 'language_version' in content:
225 language_version = '.'.join(map(str, content.pop('language_version')))
226 language_info.setdefault('version', language_version)
227
228 content['banner'] = ''
229 return msg
230
231 def execute_request(self, msg):
232 content = msg['content']
233 user_variables = content.pop('user_variables', [])
234 user_expressions = content.setdefault('user_expressions', {})
235 for v in user_variables:
236 user_expressions[v] = v
237 return msg
238
239 def execute_reply(self, msg):
240 content = msg['content']
241 user_expressions = content.setdefault('user_expressions', {})
242 user_variables = content.pop('user_variables', {})
243 if user_variables:
244 user_expressions.update(user_variables)
245
246 # Pager payloads became a mime bundle
247 for payload in content.get('payload', []):
248 if payload.get('source', None) == 'page' and ('text' in payload):
249 if 'data' not in payload:
250 payload['data'] = {}
251 payload['data']['text/plain'] = payload.pop('text')
252
253 return msg
254
255 def complete_request(self, msg):
256 old_content = msg['content']
257
258 new_content = msg['content'] = {}
259 new_content['code'] = old_content['line']
260 new_content['cursor_pos'] = old_content['cursor_pos']
261 return msg
262
263 def complete_reply(self, msg):
264 # complete_reply needs more context than we have to get cursor_start and end.
265 # use special value of `-1` to indicate to frontend that it should be at
266 # the current cursor position.
267 content = msg['content']
268 new_content = msg['content'] = {'status' : 'ok'}
269 new_content['matches'] = content['matches']
270 new_content['cursor_start'] = -len(content['matched_text'])
271 new_content['cursor_end'] = None
272 new_content['metadata'] = {}
273 return msg
274
275 def inspect_request(self, msg):
276 content = msg['content']
277 name = content['oname']
278
279 new_content = msg['content'] = {}
280 new_content['code'] = name
281 new_content['cursor_pos'] = len(name)
282 new_content['detail_level'] = content['detail_level']
283 return msg
284
285 def inspect_reply(self, msg):
286 """inspect_reply can't be easily backward compatible"""
287 content = msg['content']
288 new_content = msg['content'] = {'status' : 'ok'}
289 found = new_content['found'] = content['found']
290 new_content['name'] = content['oname']
291 new_content['data'] = data = {}
292 new_content['metadata'] = {}
293 if found:
294 lines = []
295 for key in ('call_def', 'init_definition', 'definition'):
296 if content.get(key, False):
297 lines.append(content[key])
298 break
299 for key in ('call_docstring', 'init_docstring', 'docstring'):
300 if content.get(key, False):
301 lines.append(content[key])
302 break
303 if not lines:
304 lines.append("<empty docstring>")
305 data['text/plain'] = '\n'.join(lines)
306 return msg
307
308 # iopub channel
309
310 def stream(self, msg):
311 content = msg['content']
312 content['text'] = content.pop('data')
313 return msg
314
315 def display_data(self, msg):
316 content = msg['content']
317 content.pop("source", None)
318 data = content['data']
319 if 'application/json' in data:
320 try:
321 data['application/json'] = json.loads(data['application/json'])
322 except Exception:
323 # warn?
324 pass
325 return msg
326
327 # stdin channel
328
329 def input_request(self, msg):
330 msg['content'].setdefault('password', False)
331 return msg
332
333
334
335 def adapt(msg, to_version=kernel_protocol_version_info[0]):
336 """Adapt a single message to a target version
337
338 Parameters
339 ----------
340
341 msg : dict
342 An IPython message.
343 to_version : int, optional
344 The target major version.
345 If unspecified, adapt to the current version for IPython.
346
347 Returns
348 -------
349
350 msg : dict
351 An IPython message appropriate in the new version.
352 """
353 header = msg['header']
354 if 'version' in header:
355 from_version = int(header['version'].split('.')[0])
356 else:
357 # assume last version before adding the key to the header
358 from_version = 4
359 adapter = adapters.get((from_version, to_version), None)
360 if adapter is None:
361 return msg
362 return adapter(msg)
363
364
365 # one adapter per major version from,to
366 adapters = {
367 (5,4) : V5toV4(),
368 (4,5) : V4toV5(),
369 }
370
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/kernel/adapter.py b/IPython/kernel/adapter.py
--- a/IPython/kernel/adapter.py
+++ b/IPython/kernel/adapter.py
@@ -262,12 +262,17 @@
def complete_reply(self, msg):
# complete_reply needs more context than we have to get cursor_start and end.
- # use special value of `-1` to indicate to frontend that it should be at
- # the current cursor position.
+ # use special end=null to indicate current cursor position and negative offset
+ # for start relative to the cursor.
+ # start=None indicates that start == end (accounts for no -0).
content = msg['content']
new_content = msg['content'] = {'status' : 'ok'}
new_content['matches'] = content['matches']
- new_content['cursor_start'] = -len(content['matched_text'])
+ if content['matched_text']:
+ new_content['cursor_start'] = -len(content['matched_text'])
+ else:
+ # no -0, use None to indicate that start == end
+ new_content['cursor_start'] = None
new_content['cursor_end'] = None
new_content['metadata'] = {}
return msg
|
{"golden_diff": "diff --git a/IPython/kernel/adapter.py b/IPython/kernel/adapter.py\n--- a/IPython/kernel/adapter.py\n+++ b/IPython/kernel/adapter.py\n@@ -262,12 +262,17 @@\n \n def complete_reply(self, msg):\n # complete_reply needs more context than we have to get cursor_start and end.\n- # use special value of `-1` to indicate to frontend that it should be at\n- # the current cursor position.\n+ # use special end=null to indicate current cursor position and negative offset\n+ # for start relative to the cursor.\n+ # start=None indicates that start == end (accounts for no -0).\n content = msg['content']\n new_content = msg['content'] = {'status' : 'ok'}\n new_content['matches'] = content['matches']\n- new_content['cursor_start'] = -len(content['matched_text'])\n+ if content['matched_text']:\n+ new_content['cursor_start'] = -len(content['matched_text'])\n+ else:\n+ # no -0, use None to indicate that start == end\n+ new_content['cursor_start'] = None\n new_content['cursor_end'] = None\n new_content['metadata'] = {}\n return msg\n", "issue": "curious behavior on Ijulia on IPython3.0b1 on windows\nHi,\n\nTrying `Pkg.`+`<Tab>` on a cell of a IJulia notebook, I can select `build` in the dropdown window, but when I validate it, only `build`remains in my cell (instead of `Pkg.build`)\n\n(on Firefox as on Chrome)\n\n", "before_files": [{"content": "\"\"\"Adapters for IPython msg spec versions.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom IPython.core.release import kernel_protocol_version_info\nfrom IPython.utils.tokenutil import token_at_cursor\n\n\ndef code_to_line(code, cursor_pos):\n \"\"\"Turn a multiline code block and cursor position into a single line\n and new cursor position.\n \n For adapting ``complete_`` and ``object_info_request``.\n \"\"\"\n if not code:\n return \"\", 0\n for line in code.splitlines(True):\n n = len(line)\n if cursor_pos > n:\n cursor_pos -= n\n else:\n break\n return line, cursor_pos\n\n\nclass Adapter(object):\n \"\"\"Base class for adapting messages\n \n Override message_type(msg) methods to create adapters.\n \"\"\"\n \n msg_type_map = {}\n \n def update_header(self, msg):\n return msg\n \n def update_metadata(self, msg):\n return msg\n \n def update_msg_type(self, msg):\n header = msg['header']\n msg_type = header['msg_type']\n if msg_type in self.msg_type_map:\n msg['msg_type'] = header['msg_type'] = self.msg_type_map[msg_type]\n return msg\n \n def handle_reply_status_error(self, msg):\n \"\"\"This will be called *instead of* the regular handler\n \n on any reply with status != ok\n \"\"\"\n return msg\n \n def __call__(self, msg):\n msg = self.update_header(msg)\n msg = self.update_metadata(msg)\n msg = self.update_msg_type(msg)\n header = msg['header']\n \n handler = getattr(self, header['msg_type'], None)\n if handler is None:\n return msg\n \n # handle status=error replies separately (no change, at present)\n if msg['content'].get('status', None) in {'error', 'aborted'}:\n return self.handle_reply_status_error(msg)\n return handler(msg)\n\ndef _version_str_to_list(version):\n \"\"\"convert a version string to a list of ints\n \n non-int segments are excluded\n \"\"\"\n v = []\n for part in version.split('.'):\n try:\n v.append(int(part))\n except ValueError:\n pass\n return v\n\nclass V5toV4(Adapter):\n \"\"\"Adapt msg protocol v5 to v4\"\"\"\n \n version = '4.1'\n \n msg_type_map = {\n 'execute_result' : 'pyout',\n 'execute_input' : 'pyin',\n 'error' : 'pyerr',\n 
'inspect_request' : 'object_info_request',\n 'inspect_reply' : 'object_info_reply',\n }\n \n def update_header(self, msg):\n msg['header'].pop('version', None)\n return msg\n \n # shell channel\n \n def kernel_info_reply(self, msg):\n v4c = {}\n content = msg['content']\n for key in ('language_version', 'protocol_version'):\n if key in content:\n v4c[key] = _version_str_to_list(content[key])\n if content.get('implementation', '') == 'ipython' \\\n and 'implementation_version' in content:\n v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])\n language_info = content.get('language_info', {})\n language = language_info.get('name', '')\n v4c.setdefault('language', language)\n if 'version' in language_info:\n v4c.setdefault('language_version', _version_str_to_list(language_info['version']))\n msg['content'] = v4c\n return msg\n \n def execute_request(self, msg):\n content = msg['content']\n content.setdefault('user_variables', [])\n return msg\n \n def execute_reply(self, msg):\n content = msg['content']\n content.setdefault('user_variables', {})\n # TODO: handle payloads\n return msg\n \n def complete_request(self, msg):\n content = msg['content']\n code = content['code']\n cursor_pos = content['cursor_pos']\n line, cursor_pos = code_to_line(code, cursor_pos)\n \n new_content = msg['content'] = {}\n new_content['text'] = ''\n new_content['line'] = line\n new_content['block'] = None\n new_content['cursor_pos'] = cursor_pos\n return msg\n \n def complete_reply(self, msg):\n content = msg['content']\n cursor_start = content.pop('cursor_start')\n cursor_end = content.pop('cursor_end')\n match_len = cursor_end - cursor_start\n content['matched_text'] = content['matches'][0][:match_len]\n content.pop('metadata', None)\n return msg\n \n def object_info_request(self, msg):\n content = msg['content']\n code = content['code']\n cursor_pos = content['cursor_pos']\n line, _ = code_to_line(code, cursor_pos)\n \n new_content = msg['content'] = {}\n new_content['oname'] = token_at_cursor(code, cursor_pos)\n new_content['detail_level'] = content['detail_level']\n return msg\n \n def object_info_reply(self, msg):\n \"\"\"inspect_reply can't be easily backward compatible\"\"\"\n msg['content'] = {'found' : False, 'oname' : 'unknown'}\n return msg\n \n # iopub channel\n \n def stream(self, msg):\n content = msg['content']\n content['data'] = content.pop('text')\n return msg\n\n def display_data(self, msg):\n content = msg['content']\n content.setdefault(\"source\", \"display\")\n data = content['data']\n if 'application/json' in data:\n try:\n data['application/json'] = json.dumps(data['application/json'])\n except Exception:\n # warn?\n pass\n return msg\n \n # stdin channel\n \n def input_request(self, msg):\n msg['content'].pop('password', None)\n return msg\n\n\nclass V4toV5(Adapter):\n \"\"\"Convert msg spec V4 to V5\"\"\"\n version = '5.0'\n \n # invert message renames above\n msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}\n \n def update_header(self, msg):\n msg['header']['version'] = self.version\n return msg\n \n # shell channel\n \n def kernel_info_reply(self, msg):\n content = msg['content']\n for key in ('protocol_version', 'ipython_version'):\n if key in content:\n content[key] = '.'.join(map(str, content[key]))\n \n content.setdefault('protocol_version', '4.1')\n \n if content['language'].startswith('python') and 'ipython_version' in content:\n content['implementation'] = 'ipython'\n content['implementation_version'] = content.pop('ipython_version')\n \n 
language = content.pop('language')\n language_info = content.setdefault('language_info', {})\n language_info.setdefault('name', language)\n if 'language_version' in content:\n language_version = '.'.join(map(str, content.pop('language_version')))\n language_info.setdefault('version', language_version)\n \n content['banner'] = ''\n return msg\n \n def execute_request(self, msg):\n content = msg['content']\n user_variables = content.pop('user_variables', [])\n user_expressions = content.setdefault('user_expressions', {})\n for v in user_variables:\n user_expressions[v] = v\n return msg\n \n def execute_reply(self, msg):\n content = msg['content']\n user_expressions = content.setdefault('user_expressions', {})\n user_variables = content.pop('user_variables', {})\n if user_variables:\n user_expressions.update(user_variables)\n\n # Pager payloads became a mime bundle\n for payload in content.get('payload', []):\n if payload.get('source', None) == 'page' and ('text' in payload):\n if 'data' not in payload:\n payload['data'] = {}\n payload['data']['text/plain'] = payload.pop('text')\n\n return msg\n \n def complete_request(self, msg):\n old_content = msg['content']\n \n new_content = msg['content'] = {}\n new_content['code'] = old_content['line']\n new_content['cursor_pos'] = old_content['cursor_pos']\n return msg\n \n def complete_reply(self, msg):\n # complete_reply needs more context than we have to get cursor_start and end.\n # use special value of `-1` to indicate to frontend that it should be at\n # the current cursor position.\n content = msg['content']\n new_content = msg['content'] = {'status' : 'ok'}\n new_content['matches'] = content['matches']\n new_content['cursor_start'] = -len(content['matched_text'])\n new_content['cursor_end'] = None\n new_content['metadata'] = {}\n return msg\n \n def inspect_request(self, msg):\n content = msg['content']\n name = content['oname']\n \n new_content = msg['content'] = {}\n new_content['code'] = name\n new_content['cursor_pos'] = len(name)\n new_content['detail_level'] = content['detail_level']\n return msg\n \n def inspect_reply(self, msg):\n \"\"\"inspect_reply can't be easily backward compatible\"\"\"\n content = msg['content']\n new_content = msg['content'] = {'status' : 'ok'}\n found = new_content['found'] = content['found']\n new_content['name'] = content['oname']\n new_content['data'] = data = {}\n new_content['metadata'] = {}\n if found:\n lines = []\n for key in ('call_def', 'init_definition', 'definition'):\n if content.get(key, False):\n lines.append(content[key])\n break\n for key in ('call_docstring', 'init_docstring', 'docstring'):\n if content.get(key, False):\n lines.append(content[key])\n break\n if not lines:\n lines.append(\"<empty docstring>\")\n data['text/plain'] = '\\n'.join(lines)\n return msg\n \n # iopub channel\n \n def stream(self, msg):\n content = msg['content']\n content['text'] = content.pop('data')\n return msg\n\n def display_data(self, msg):\n content = msg['content']\n content.pop(\"source\", None)\n data = content['data']\n if 'application/json' in data:\n try:\n data['application/json'] = json.loads(data['application/json'])\n except Exception:\n # warn?\n pass\n return msg\n \n # stdin channel\n \n def input_request(self, msg):\n msg['content'].setdefault('password', False)\n return msg\n \n\n\ndef adapt(msg, to_version=kernel_protocol_version_info[0]):\n \"\"\"Adapt a single message to a target version\n \n Parameters\n ----------\n \n msg : dict\n An IPython message.\n to_version : int, optional\n The target 
major version.\n If unspecified, adapt to the current version for IPython.\n \n Returns\n -------\n \n msg : dict\n An IPython message appropriate in the new version.\n \"\"\"\n header = msg['header']\n if 'version' in header:\n from_version = int(header['version'].split('.')[0])\n else:\n # assume last version before adding the key to the header\n from_version = 4\n adapter = adapters.get((from_version, to_version), None)\n if adapter is None:\n return msg\n return adapter(msg)\n\n\n# one adapter per major version from,to\nadapters = {\n (5,4) : V5toV4(),\n (4,5) : V4toV5(),\n}\n", "path": "IPython/kernel/adapter.py"}], "after_files": [{"content": "\"\"\"Adapters for IPython msg spec versions.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom IPython.core.release import kernel_protocol_version_info\nfrom IPython.utils.tokenutil import token_at_cursor\n\n\ndef code_to_line(code, cursor_pos):\n \"\"\"Turn a multiline code block and cursor position into a single line\n and new cursor position.\n \n For adapting ``complete_`` and ``object_info_request``.\n \"\"\"\n if not code:\n return \"\", 0\n for line in code.splitlines(True):\n n = len(line)\n if cursor_pos > n:\n cursor_pos -= n\n else:\n break\n return line, cursor_pos\n\n\nclass Adapter(object):\n \"\"\"Base class for adapting messages\n \n Override message_type(msg) methods to create adapters.\n \"\"\"\n \n msg_type_map = {}\n \n def update_header(self, msg):\n return msg\n \n def update_metadata(self, msg):\n return msg\n \n def update_msg_type(self, msg):\n header = msg['header']\n msg_type = header['msg_type']\n if msg_type in self.msg_type_map:\n msg['msg_type'] = header['msg_type'] = self.msg_type_map[msg_type]\n return msg\n \n def handle_reply_status_error(self, msg):\n \"\"\"This will be called *instead of* the regular handler\n \n on any reply with status != ok\n \"\"\"\n return msg\n \n def __call__(self, msg):\n msg = self.update_header(msg)\n msg = self.update_metadata(msg)\n msg = self.update_msg_type(msg)\n header = msg['header']\n \n handler = getattr(self, header['msg_type'], None)\n if handler is None:\n return msg\n \n # handle status=error replies separately (no change, at present)\n if msg['content'].get('status', None) in {'error', 'aborted'}:\n return self.handle_reply_status_error(msg)\n return handler(msg)\n\ndef _version_str_to_list(version):\n \"\"\"convert a version string to a list of ints\n \n non-int segments are excluded\n \"\"\"\n v = []\n for part in version.split('.'):\n try:\n v.append(int(part))\n except ValueError:\n pass\n return v\n\nclass V5toV4(Adapter):\n \"\"\"Adapt msg protocol v5 to v4\"\"\"\n \n version = '4.1'\n \n msg_type_map = {\n 'execute_result' : 'pyout',\n 'execute_input' : 'pyin',\n 'error' : 'pyerr',\n 'inspect_request' : 'object_info_request',\n 'inspect_reply' : 'object_info_reply',\n }\n \n def update_header(self, msg):\n msg['header'].pop('version', None)\n return msg\n \n # shell channel\n \n def kernel_info_reply(self, msg):\n v4c = {}\n content = msg['content']\n for key in ('language_version', 'protocol_version'):\n if key in content:\n v4c[key] = _version_str_to_list(content[key])\n if content.get('implementation', '') == 'ipython' \\\n and 'implementation_version' in content:\n v4c['ipython_version'] = _version_str_to_list(content['implementation_version'])\n language_info = content.get('language_info', {})\n language = language_info.get('name', '')\n v4c.setdefault('language', 
language)\n if 'version' in language_info:\n v4c.setdefault('language_version', _version_str_to_list(language_info['version']))\n msg['content'] = v4c\n return msg\n \n def execute_request(self, msg):\n content = msg['content']\n content.setdefault('user_variables', [])\n return msg\n \n def execute_reply(self, msg):\n content = msg['content']\n content.setdefault('user_variables', {})\n # TODO: handle payloads\n return msg\n \n def complete_request(self, msg):\n content = msg['content']\n code = content['code']\n cursor_pos = content['cursor_pos']\n line, cursor_pos = code_to_line(code, cursor_pos)\n \n new_content = msg['content'] = {}\n new_content['text'] = ''\n new_content['line'] = line\n new_content['block'] = None\n new_content['cursor_pos'] = cursor_pos\n return msg\n \n def complete_reply(self, msg):\n content = msg['content']\n cursor_start = content.pop('cursor_start')\n cursor_end = content.pop('cursor_end')\n match_len = cursor_end - cursor_start\n content['matched_text'] = content['matches'][0][:match_len]\n content.pop('metadata', None)\n return msg\n \n def object_info_request(self, msg):\n content = msg['content']\n code = content['code']\n cursor_pos = content['cursor_pos']\n line, _ = code_to_line(code, cursor_pos)\n \n new_content = msg['content'] = {}\n new_content['oname'] = token_at_cursor(code, cursor_pos)\n new_content['detail_level'] = content['detail_level']\n return msg\n \n def object_info_reply(self, msg):\n \"\"\"inspect_reply can't be easily backward compatible\"\"\"\n msg['content'] = {'found' : False, 'oname' : 'unknown'}\n return msg\n \n # iopub channel\n \n def stream(self, msg):\n content = msg['content']\n content['data'] = content.pop('text')\n return msg\n\n def display_data(self, msg):\n content = msg['content']\n content.setdefault(\"source\", \"display\")\n data = content['data']\n if 'application/json' in data:\n try:\n data['application/json'] = json.dumps(data['application/json'])\n except Exception:\n # warn?\n pass\n return msg\n \n # stdin channel\n \n def input_request(self, msg):\n msg['content'].pop('password', None)\n return msg\n\n\nclass V4toV5(Adapter):\n \"\"\"Convert msg spec V4 to V5\"\"\"\n version = '5.0'\n \n # invert message renames above\n msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()}\n \n def update_header(self, msg):\n msg['header']['version'] = self.version\n return msg\n \n # shell channel\n \n def kernel_info_reply(self, msg):\n content = msg['content']\n for key in ('protocol_version', 'ipython_version'):\n if key in content:\n content[key] = '.'.join(map(str, content[key]))\n \n content.setdefault('protocol_version', '4.1')\n \n if content['language'].startswith('python') and 'ipython_version' in content:\n content['implementation'] = 'ipython'\n content['implementation_version'] = content.pop('ipython_version')\n \n language = content.pop('language')\n language_info = content.setdefault('language_info', {})\n language_info.setdefault('name', language)\n if 'language_version' in content:\n language_version = '.'.join(map(str, content.pop('language_version')))\n language_info.setdefault('version', language_version)\n \n content['banner'] = ''\n return msg\n \n def execute_request(self, msg):\n content = msg['content']\n user_variables = content.pop('user_variables', [])\n user_expressions = content.setdefault('user_expressions', {})\n for v in user_variables:\n user_expressions[v] = v\n return msg\n \n def execute_reply(self, msg):\n content = msg['content']\n user_expressions = 
content.setdefault('user_expressions', {})\n user_variables = content.pop('user_variables', {})\n if user_variables:\n user_expressions.update(user_variables)\n\n # Pager payloads became a mime bundle\n for payload in content.get('payload', []):\n if payload.get('source', None) == 'page' and ('text' in payload):\n if 'data' not in payload:\n payload['data'] = {}\n payload['data']['text/plain'] = payload.pop('text')\n\n return msg\n \n def complete_request(self, msg):\n old_content = msg['content']\n \n new_content = msg['content'] = {}\n new_content['code'] = old_content['line']\n new_content['cursor_pos'] = old_content['cursor_pos']\n return msg\n \n def complete_reply(self, msg):\n # complete_reply needs more context than we have to get cursor_start and end.\n # use special end=null to indicate current cursor position and negative offset\n # for start relative to the cursor.\n # start=None indicates that start == end (accounts for no -0).\n content = msg['content']\n new_content = msg['content'] = {'status' : 'ok'}\n new_content['matches'] = content['matches']\n if content['matched_text']:\n new_content['cursor_start'] = -len(content['matched_text'])\n else:\n # no -0, use None to indicate that start == end\n new_content['cursor_start'] = None\n new_content['cursor_end'] = None\n new_content['metadata'] = {}\n return msg\n \n def inspect_request(self, msg):\n content = msg['content']\n name = content['oname']\n \n new_content = msg['content'] = {}\n new_content['code'] = name\n new_content['cursor_pos'] = len(name)\n new_content['detail_level'] = content['detail_level']\n return msg\n \n def inspect_reply(self, msg):\n \"\"\"inspect_reply can't be easily backward compatible\"\"\"\n content = msg['content']\n new_content = msg['content'] = {'status' : 'ok'}\n found = new_content['found'] = content['found']\n new_content['name'] = content['oname']\n new_content['data'] = data = {}\n new_content['metadata'] = {}\n if found:\n lines = []\n for key in ('call_def', 'init_definition', 'definition'):\n if content.get(key, False):\n lines.append(content[key])\n break\n for key in ('call_docstring', 'init_docstring', 'docstring'):\n if content.get(key, False):\n lines.append(content[key])\n break\n if not lines:\n lines.append(\"<empty docstring>\")\n data['text/plain'] = '\\n'.join(lines)\n return msg\n \n # iopub channel\n \n def stream(self, msg):\n content = msg['content']\n content['text'] = content.pop('data')\n return msg\n\n def display_data(self, msg):\n content = msg['content']\n content.pop(\"source\", None)\n data = content['data']\n if 'application/json' in data:\n try:\n data['application/json'] = json.loads(data['application/json'])\n except Exception:\n # warn?\n pass\n return msg\n \n # stdin channel\n \n def input_request(self, msg):\n msg['content'].setdefault('password', False)\n return msg\n \n\n\ndef adapt(msg, to_version=kernel_protocol_version_info[0]):\n \"\"\"Adapt a single message to a target version\n \n Parameters\n ----------\n \n msg : dict\n An IPython message.\n to_version : int, optional\n The target major version.\n If unspecified, adapt to the current version for IPython.\n \n Returns\n -------\n \n msg : dict\n An IPython message appropriate in the new version.\n \"\"\"\n header = msg['header']\n if 'version' in header:\n from_version = int(header['version'].split('.')[0])\n else:\n # assume last version before adding the key to the header\n from_version = 4\n adapter = adapters.get((from_version, to_version), None)\n if adapter is None:\n return msg\n return 
adapter(msg)\n\n\n# one adapter per major version from,to\nadapters = {\n (5,4) : V5toV4(),\n (4,5) : V4toV5(),\n}\n", "path": "IPython/kernel/adapter.py"}]}
| 3,915 | 276 |
gh_patches_debug_2889 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-488 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Login with username
It is currently not possible to login with username, only with email. This *was* possible with the old meinberlin that was based on a3. So the login flow of established users breaks even though we migrated all accounts.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/settings/base.py`
Content:
```
1 """
2 Django settings for meinberlin project.
3
4 Generated by 'django-admin startproject' using Django 1.8.17.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.8/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.8/ref/settings/
11 """
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14 import os
15
16 PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 BASE_DIR = os.path.dirname(PROJECT_DIR)
18
19 # Application definition
20
21 INSTALLED_APPS = (
22 'django.contrib.sites',
23 'django.contrib.admin',
24 'django.contrib.auth',
25 'django.contrib.contenttypes',
26 'django.contrib.sessions',
27 'django.contrib.messages',
28 'django.contrib.staticfiles',
29 'django.contrib.humanize',
30
31 'wagtail.wagtailforms',
32 'wagtail.wagtailredirects',
33 'wagtail.wagtailembeds',
34 'wagtail.wagtailsites',
35 'wagtail.wagtailusers',
36 'wagtail.wagtailsnippets',
37 'wagtail.wagtaildocs',
38 'wagtail.wagtailimages',
39 'wagtail.wagtailsearch',
40 'wagtail.wagtailadmin',
41 'wagtail.wagtailcore',
42 'wagtail.contrib.wagtailstyleguide',
43
44 'taggit', # wagtail dependency
45 'widget_tweaks',
46 'rest_framework',
47 'allauth',
48 'allauth.account',
49 'allauth.socialaccount',
50 'rules.apps.AutodiscoverRulesConfig',
51 'easy_thumbnails',
52 'ckeditor',
53 'ckeditor_uploader',
54 'capture_tag',
55
56 'adhocracy4.organisations.apps.OrganisationsConfig',
57 'adhocracy4.projects.apps.ProjectsConfig',
58 'adhocracy4.images.apps.ImagesConfig',
59 'adhocracy4.phases.apps.PhasesConfig',
60 'adhocracy4.modules.apps.ModulesConfig',
61 'adhocracy4.ratings.apps.RatingsConfig',
62 'adhocracy4.reports.apps.ReportsConfig',
63 'adhocracy4.comments.apps.CommentsConfig',
64 'adhocracy4.categories.apps.CategoriesConfig',
65 'adhocracy4.maps.apps.MapsConfig',
66 'adhocracy4.actions.apps.ActionsConfig',
67 'adhocracy4.follows.apps.FollowsConfig',
68
69 'apps.contrib.apps.Config',
70 'apps.cms.apps.Config',
71 'apps.users.apps.Config',
72 'apps.projects.apps.Config',
73 'apps.organisations.apps.Config',
74 'apps.embed.apps.Config',
75 'apps.moderatorfeedback.apps.Config',
76 'apps.maps.apps.Config',
77 'apps.notifications.apps.Config',
78
79 'apps.account.apps.Config',
80 'apps.dashboard.apps.Config',
81
82 'apps.bplan.apps.Config',
83 'apps.budgeting.apps.Config',
84 'apps.documents.apps.Config',
85 'apps.extprojects.apps.Config',
86 'apps.ideas.apps.Config',
87 'apps.kiezkasse.apps.Config',
88 'apps.mapideas.apps.Config',
89 'apps.polls.apps.Config',
90 'apps.topicprio.apps.Config',
91 )
92
93 MIDDLEWARE_CLASSES = (
94 'django.contrib.sessions.middleware.SessionMiddleware',
95 'django.middleware.common.CommonMiddleware',
96 'django.middleware.csrf.CsrfViewMiddleware',
97 'django.contrib.auth.middleware.AuthenticationMiddleware',
98 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
99 'django.contrib.messages.middleware.MessageMiddleware',
100 'django.middleware.clickjacking.XFrameOptionsMiddleware',
101 'django.middleware.security.SecurityMiddleware',
102 'django.middleware.locale.LocaleMiddleware',
103
104 'wagtail.wagtailcore.middleware.SiteMiddleware',
105 'wagtail.wagtailredirects.middleware.RedirectMiddleware',
106
107 'apps.embed.middleware.AjaxPathMiddleware',
108 )
109
110 SITE_ID = 1
111
112 ROOT_URLCONF = 'meinberlin.urls'
113
114 LOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]
115
116 TEMPLATES = [
117 {
118 'BACKEND': 'django.template.backends.django.DjangoTemplates',
119 'DIRS': [
120 os.path.join(PROJECT_DIR, 'templates'),
121 ],
122 'APP_DIRS': True,
123 'OPTIONS': {
124 'context_processors': [
125 'django.template.context_processors.debug',
126 'django.template.context_processors.request',
127 'django.contrib.auth.context_processors.auth',
128 'django.contrib.messages.context_processors.messages',
129 ],
130 },
131 },
132 ]
133
134 WSGI_APPLICATION = 'meinberlin.wsgi.application'
135
136
137 # Database
138 # https://docs.djangoproject.com/en/1.8/ref/settings/#databases
139
140 DATABASES = {
141 'default': {
142 'ENGINE': 'django.db.backends.sqlite3',
143 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
144 'TEST': {
145 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),
146 }
147 }
148 }
149
150
151 # Internationalization
152 # https://docs.djangoproject.com/en/1.8/topics/i18n/
153
154 LANGUAGE_CODE = 'en-us'
155
156 TIME_ZONE = 'Europe/Berlin'
157
158 USE_I18N = True
159
160 USE_L10N = True
161
162 USE_TZ = True
163
164
165 # Static files (CSS, JavaScript, Images)
166 # https://docs.djangoproject.com/en/1.8/howto/static-files/
167
168 STATICFILES_DIRS = [
169 os.path.join(PROJECT_DIR, 'static'),
170 ]
171
172 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
173 STATIC_URL = '/static/'
174
175 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
176 MEDIA_URL = '/media/'
177
178 IMAGE_ALIASES = {
179 '*': {
180 'max_size': 5*10**6,
181 'fileformats': ('image/png', 'image/jpeg', 'image/gif')
182 },
183 'heroimage': {'min_resolution': (1300, 600)},
184 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},
185 'avatar': {'min_resolution': (200, 200)},
186 'idea_image': {'min_resolution': (800, 200)},
187 }
188
189 THUMBNAIL_ALIASES = {
190 '': {
191 'heroimage': {'size': (1500, 500), 'crop': 'smart'},
192 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},
193 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},
194 'idea_image': {'size': (800, 0), 'crop': 'scale'},
195 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},
196 }
197 }
198
199 ALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')
200
201
202 # Wagtail settings
203
204 WAGTAIL_SITE_NAME = 'meinberlin'
205
206 # Base URL to use when referring to full URLs within the Wagtail admin backend -
207 # e.g. in notification emails. Don't include '/admin' or a trailing slash
208 BASE_URL = 'http://localhost:8000'
209
210 # Authentication
211
212 AUTH_USER_MODEL = 'meinberlin_users.User'
213
214 AUTHENTICATION_BACKENDS = (
215 'rules.permissions.ObjectPermissionBackend',
216 'django.contrib.auth.backends.ModelBackend',
217 'allauth.account.auth_backends.AuthenticationBackend',
218 )
219
220 ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'
221 ACCOUNT_AUTHENTICATION_METHOD = 'email'
222 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
223 ACCOUNT_EMAIL_REQUIRED = True
224 ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
225 ACCOUNT_USERNAME_REQUIRED = True
226 ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10
227 ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds
228 ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
229 ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
230 ACCOUNT_SIGNUP_FORM_CLASS = 'apps.users.forms.TermsSignupForm'
231
232 LOGIN_URL = 'account_login'
233 LOGIN_REDIRECT_URL = '/'
234
235 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
236
237 PASSWORD_HASHERS = [
238 'django.contrib.auth.hashers.PBKDF2PasswordHasher',
239 'django.contrib.auth.hashers.BCryptPasswordHasher', # a3
240 'apps.users.hashers.A2PasswordHasher',
241 ]
242
243
244 # ckeditor
245
246 CKEDITOR_UPLOAD_PATH = "uploads/"
247 CKEDITOR_RESTRICT_BY_USER = True
248 CKEDITOR_ALLOW_NONIMAGE_FILES = False
249
250 CKEDITOR_CONFIGS = {
251 'default': {
252 'width': '100%',
253 'toolbar': 'Custom',
254 'toolbar_Custom': [
255 ['Bold', 'Italic', 'Underline'],
256 ['NumberedList', 'BulletedList'],
257 ['Link', 'Unlink']
258 ]
259 },
260 'image-editor': {
261 'width': '100%',
262 'toolbar': 'Custom',
263 'toolbar_Custom': [
264 ['Bold', 'Italic', 'Underline'],
265 ['Image'],
266 ['NumberedList', 'BulletedList'],
267 ['Link', 'Unlink']
268 ]
269 }
270 }
271
272 BLEACH_LIST = {
273 'default' : {
274 'tags': ['p','strong','em','u','ol','li','ul','a'],
275 'attributes': {
276 'a': ['href', 'rel'],
277 },
278 },
279 'image-editor': {
280 'tags': ['p','strong','em','u','ol','li','ul','a','img'],
281 'attributes': {
282 'a': ['href', 'rel'],
283 'img': ['src', 'alt', 'style']
284 },
285 'styles': [
286 'float',
287 'margin',
288 'padding',
289 'width',
290 'height',
291 'margin-bottom',
292 'margin-top',
293 'margin-left',
294 'margin-right',
295 ],
296 }
297 }
298
299
300 # adhocracy4
301
302 A4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'
303
304 A4_RATEABLES = (
305 ('a4comments', 'comment'),
306 ('meinberlin_ideas', 'idea'),
307 ('meinberlin_mapideas', 'mapidea'),
308 ('meinberlin_budgeting', 'proposal'),
309 ('meinberlin_topicprio', 'topic'),
310 ('meinberlin_kiezkasse', 'proposal'),
311 )
312
313 A4_COMMENTABLES = (
314 ('a4comments', 'comment'),
315 ('meinberlin_ideas', 'idea'),
316 ('meinberlin_documents', 'document'),
317 ('meinberlin_documents', 'paragraph'),
318 ('meinberlin_mapideas', 'mapidea'),
319 ('meinberlin_budgeting', 'proposal'),
320 ('meinberlin_topicprio', 'topic'),
321 ('meinberlin_polls', 'poll'),
322 ('meinberlin_kiezkasse', 'proposal'),
323 )
324
325 A4_REPORTABLES = (
326 ('a4comments', 'comment'),
327 ('meinberlin_ideas', 'idea'),
328 ('meinberlin_mapideas', 'mapidea'),
329 ('meinberlin_budgeting', 'proposal'),
330 ('meinberlin_topicprio', 'topic'),
331 ('meinberlin_kiezkasse', 'proposal'),
332 )
333 A4_AUTO_FOLLOWABLES = (('a4comments', 'comment'),)
334
335 A4_ACTIONABLES = (
336 ('a4comments', 'comment'),
337 ('meinberlin_ideas', 'idea'),
338 ('meinberlin_mapideas', 'mapidea'),
339 ('meinberlin_budgeting', 'proposal'),
340 ('meinberlin_kiezkasse', 'proposal'),
341 )
342
343 A4_AUTO_FOLLOWABLES = (
344 ('a4comments', 'comment'),
345 ('meinberlin_ideas', 'idea'),
346 ('meinberlin_mapideas', 'mapidea'),
347 ('meinberlin_budgeting', 'proposal'),
348 ('meinberlin_kiezkasse', 'proposal'),
349 ('meinberlin_polls', 'vote'), # TODO: really?
350 )
351
352 A4_MAP_BASEURL = 'https://maps.berlinonline.de/tile/bright/'
353 A4_MAP_ATTRIBUTION = '© <a href="http://openstreetmap.org/copyright">OpenStreetMap</a> contributors'
354 A4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])
355
356 CONTACT_EMAIL = '[email protected]'
357 EMAIL_DEFAULT_LANGUAGE = 'de'
358
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py
--- a/meinberlin/settings/base.py
+++ b/meinberlin/settings/base.py
@@ -218,7 +218,7 @@
)
ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'
-ACCOUNT_AUTHENTICATION_METHOD = 'email'
+ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
|
{"golden_diff": "diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py\n--- a/meinberlin/settings/base.py\n+++ b/meinberlin/settings/base.py\n@@ -218,7 +218,7 @@\n )\n \n ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\n-ACCOUNT_AUTHENTICATION_METHOD = 'email'\n+ACCOUNT_AUTHENTICATION_METHOD = 'username_email'\n ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\n ACCOUNT_EMAIL_REQUIRED = True\n ACCOUNT_EMAIL_VERIFICATION = 'mandatory'\n", "issue": "Login with username\nIt is currently not possible to login with username, only with email. This *was* possible with the old meinberlin that was based on a3. So the login flow of established users breaks even though we migrated all accounts.\n", "before_files": [{"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.embed.apps.Config',\n 'apps.moderatorfeedback.apps.Config',\n 'apps.maps.apps.Config',\n 'apps.notifications.apps.Config',\n\n 'apps.account.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.bplan.apps.Config',\n 'apps.budgeting.apps.Config',\n 'apps.documents.apps.Config',\n 'apps.extprojects.apps.Config',\n 'apps.ideas.apps.Config',\n 'apps.kiezkasse.apps.Config',\n 'apps.mapideas.apps.Config',\n 'apps.polls.apps.Config',\n 'apps.topicprio.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n\n 'apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. 
Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'apps.users.forms.TermsSignupForm'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher', # a3\n 'apps.users.hashers.A2PasswordHasher',\n]\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'document'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_polls', 'poll'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\nA4_AUTO_FOLLOWABLES = (('a4comments', 'comment'),)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n ('meinberlin_polls', 'vote'), # TODO: 
really?\n)\n\nA4_MAP_BASEURL = 'https://maps.berlinonline.de/tile/bright/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n", "path": "meinberlin/settings/base.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'widget_tweaks',\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n 'capture_tag',\n\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.images.apps.ImagesConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.reports.apps.ReportsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n 'adhocracy4.categories.apps.CategoriesConfig',\n 'adhocracy4.maps.apps.MapsConfig',\n 'adhocracy4.actions.apps.ActionsConfig',\n 'adhocracy4.follows.apps.FollowsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.embed.apps.Config',\n 'apps.moderatorfeedback.apps.Config',\n 'apps.maps.apps.Config',\n 'apps.notifications.apps.Config',\n\n 'apps.account.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.bplan.apps.Config',\n 'apps.budgeting.apps.Config',\n 'apps.documents.apps.Config',\n 'apps.extprojects.apps.Config',\n 'apps.ideas.apps.Config',\n 'apps.kiezkasse.apps.Config',\n 'apps.mapideas.apps.Config',\n 'apps.polls.apps.Config',\n 'apps.topicprio.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 
'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n\n 'apps.embed.middleware.AjaxPathMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'TEST': {\n 'NAME': os.path.join(BASE_DIR, 'test_db.sqlite3'),\n }\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Berlin'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nTHUMBNAIL_ALIASES = {\n '': {\n 'heroimage': {'size': (1500, 500), 'crop': 'smart'},\n 'heroimage_preview': {'size': (880, 220), 'crop': 'smart'},\n 'project_thumbnail': {'size': (520, 330), 'crop': 'smart'},\n 'idea_image': {'size': (800, 0), 'crop': 'scale'},\n 'idea_thumbnail': {'size': (240, 240), 'crop': 'smart'},\n }\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. 
Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\nACCOUNT_SIGNUP_FORM_CLASS = 'apps.users.forms.TermsSignupForm'\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nPASSWORD_HASHERS = [\n 'django.contrib.auth.hashers.PBKDF2PasswordHasher',\n 'django.contrib.auth.hashers.BCryptPasswordHasher', # a3\n 'apps.users.hashers.A2PasswordHasher',\n]\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_COMMENTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_documents', 'document'),\n ('meinberlin_documents', 'paragraph'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_polls', 'poll'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_REPORTABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_topicprio', 'topic'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\nA4_AUTO_FOLLOWABLES = (('a4comments', 'comment'),)\n\nA4_ACTIONABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n)\n\nA4_AUTO_FOLLOWABLES = (\n ('a4comments', 'comment'),\n ('meinberlin_ideas', 'idea'),\n ('meinberlin_mapideas', 'mapidea'),\n ('meinberlin_budgeting', 'proposal'),\n ('meinberlin_kiezkasse', 'proposal'),\n ('meinberlin_polls', 'vote'), # TODO: 
really?\n)\n\nA4_MAP_BASEURL = 'https://maps.berlinonline.de/tile/bright/'\nA4_MAP_ATTRIBUTION = '© <a href=\"http://openstreetmap.org/copyright\">OpenStreetMap</a> contributors'\nA4_MAP_BOUNDING_BOX = ([[52.3517, 13.8229], [52.6839, 12.9543]])\n\nCONTACT_EMAIL = '[email protected]'\nEMAIL_DEFAULT_LANGUAGE = 'de'\n", "path": "meinberlin/settings/base.py"}]}
| 4,003 | 110 |
gh_patches_debug_7789 | rasdani/github-patches | git_diff | deepchecks__deepchecks-645 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TYPO] Label ambiguity explanation
Current explanation: "Each row in the table shows an example of a data sample and the it's observed labels as a found in the dataset."
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/checks/integrity/label_ambiguity.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """module contains Data Duplicates check."""
12 from typing import Union, List
13
14 import pandas as pd
15
16 from deepchecks import Dataset, ConditionResult
17 from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck
18 from deepchecks.errors import DeepchecksValueError
19 from deepchecks.utils.metrics import task_type_validation, ModelType
20 from deepchecks.utils.strings import format_percent
21 from deepchecks.utils.typing import Hashable
22
23
24 __all__ = ['LabelAmbiguity']
25
26
27 class LabelAmbiguity(SingleDatasetBaseCheck):
28 """Find samples with multiple labels.
29
30 Args:
31 columns (Hashable, List[Hashable]):
32 List of columns to check, if none given checks
33 all columns Except ignored ones.
34 ignore_columns (Hashable, List[Hashable]):
35 List of columns to ignore, if none given checks
36 based on columns variable.
37 n_to_show (int):
38 number of most common ambiguous samples to show.
39 """
40
41 def __init__(
42 self,
43 columns: Union[Hashable, List[Hashable], None] = None,
44 ignore_columns: Union[Hashable, List[Hashable], None] = None,
45 n_to_show: int = 5
46 ):
47 super().__init__()
48 self.columns = columns
49 self.ignore_columns = ignore_columns
50 self.n_to_show = n_to_show
51
52 def run(self, dataset: Dataset, model=None) -> CheckResult:
53 """Run check.
54
55 Args:
56 dataset(Dataset): any dataset.
57 model (any): used to check task type (default: None)
58
59 Returns:
60 (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.
61 """
62 dataset: Dataset = Dataset.validate_dataset(dataset)
63 dataset = dataset.select(self.columns, self.ignore_columns)
64
65 if model:
66 task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])
67 elif dataset.label_type == 'regression_label':
68 raise DeepchecksValueError('Task type cannot be regression')
69
70 label_col = dataset.label_name
71
72 # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually
73 df = dataset.data
74 category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()
75 if category_columns:
76 df = df.astype({c: 'object' for c in category_columns})
77
78 group_unique_data = df.groupby(dataset.features, dropna=False)
79 group_unique_labels = group_unique_data.nunique()[label_col]
80
81 num_ambiguous = 0
82 ambiguous_label_name = 'Observed Labels'
83 display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])
84
85 for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),
86 key=lambda x: x[0], reverse=True):
87 if num_labels == 1:
88 break
89
90 group_df = group_data[1]
91 sample_values = dict(group_df[dataset.features].iloc[0])
92 labels = tuple(group_df[label_col].unique())
93 n_data_sample = group_df.shape[0]
94 num_ambiguous += n_data_sample
95
96 display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)
97
98 display = display.set_index(ambiguous_label_name)
99
100 explanation = ('Each row in the table shows an example of a data sample '
101 'and the it\'s observed labels as a found in the dataset.')
102
103 display = None if display.empty else [explanation, display.head(self.n_to_show)]
104
105 percent_ambiguous = num_ambiguous/dataset.n_samples
106
107 return CheckResult(value=percent_ambiguous, display=display)
108
109 def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):
110 """Add condition - require samples with multiple labels to not be more than max_ratio.
111
112 Args:
113 max_ratio (float): Maximum ratio of samples with multiple labels.
114 """
115 def max_ratio_condition(result: float) -> ConditionResult:
116 if result > max_ratio:
117 return ConditionResult(False, f'Found ratio of samples with multiple labels above threshold: '
118 f'{format_percent(result)}')
119 else:
120 return ConditionResult(True)
121
122 return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',
123 max_ratio_condition)
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/checks/integrity/label_ambiguity.py b/deepchecks/checks/integrity/label_ambiguity.py
--- a/deepchecks/checks/integrity/label_ambiguity.py
+++ b/deepchecks/checks/integrity/label_ambiguity.py
@@ -98,7 +98,7 @@
display = display.set_index(ambiguous_label_name)
explanation = ('Each row in the table shows an example of a data sample '
- 'and the it\'s observed labels as a found in the dataset.')
+ 'and the its observed labels as found in the dataset.')
display = None if display.empty else [explanation, display.head(self.n_to_show)]
|
{"golden_diff": "diff --git a/deepchecks/checks/integrity/label_ambiguity.py b/deepchecks/checks/integrity/label_ambiguity.py\n--- a/deepchecks/checks/integrity/label_ambiguity.py\n+++ b/deepchecks/checks/integrity/label_ambiguity.py\n@@ -98,7 +98,7 @@\n display = display.set_index(ambiguous_label_name)\n \n explanation = ('Each row in the table shows an example of a data sample '\n- 'and the it\\'s observed labels as a found in the dataset.')\n+ 'and the its observed labels as found in the dataset.')\n \n display = None if display.empty else [explanation, display.head(self.n_to_show)]\n", "issue": "[TYPO] Label ambiguity explanation\nCurrent explanation: \"Each row in the table shows an example of a data sample and the it's observed labels as a found in the dataset.\"\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport pandas as pd\n\nfrom deepchecks import Dataset, ConditionResult\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.errors import DeepchecksValueError\nfrom deepchecks.utils.metrics import task_type_validation, ModelType\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['LabelAmbiguity']\n\n\nclass LabelAmbiguity(SingleDatasetBaseCheck):\n \"\"\"Find samples with multiple labels.\n\n Args:\n columns (Hashable, List[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, List[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common ambiguous samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset(Dataset): any dataset.\n model (any): used to check task type (default: None)\n\n Returns:\n (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.\n \"\"\"\n dataset: Dataset = Dataset.validate_dataset(dataset)\n dataset = dataset.select(self.columns, self.ignore_columns)\n\n if model:\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n elif dataset.label_type == 'regression_label':\n raise DeepchecksValueError('Task type cannot be regression')\n\n label_col = dataset.label_name\n\n # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n df = dataset.data\n category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n if category_columns:\n df = df.astype({c: 'object' for c in category_columns})\n\n group_unique_data = df.groupby(dataset.features, dropna=False)\n 
group_unique_labels = group_unique_data.nunique()[label_col]\n\n num_ambiguous = 0\n ambiguous_label_name = 'Observed Labels'\n display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])\n\n for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),\n key=lambda x: x[0], reverse=True):\n if num_labels == 1:\n break\n\n group_df = group_data[1]\n sample_values = dict(group_df[dataset.features].iloc[0])\n labels = tuple(group_df[label_col].unique())\n n_data_sample = group_df.shape[0]\n num_ambiguous += n_data_sample\n\n display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)\n\n display = display.set_index(ambiguous_label_name)\n\n explanation = ('Each row in the table shows an example of a data sample '\n 'and the it\\'s observed labels as a found in the dataset.')\n\n display = None if display.empty else [explanation, display.head(self.n_to_show)]\n\n percent_ambiguous = num_ambiguous/dataset.n_samples\n\n return CheckResult(value=percent_ambiguous, display=display)\n\n def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):\n \"\"\"Add condition - require samples with multiple labels to not be more than max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of samples with multiple labels.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found ratio of samples with multiple labels above threshold: '\n f'{format_percent(result)}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/label_ambiguity.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Data Duplicates check.\"\"\"\nfrom typing import Union, List\n\nimport pandas as pd\n\nfrom deepchecks import Dataset, ConditionResult\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.errors import DeepchecksValueError\nfrom deepchecks.utils.metrics import task_type_validation, ModelType\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['LabelAmbiguity']\n\n\nclass LabelAmbiguity(SingleDatasetBaseCheck):\n \"\"\"Find samples with multiple labels.\n\n Args:\n columns (Hashable, List[Hashable]):\n List of columns to check, if none given checks\n all columns Except ignored ones.\n ignore_columns (Hashable, List[Hashable]):\n List of columns to ignore, if none given checks\n based on columns variable.\n n_to_show (int):\n number of most common ambiguous samples to show.\n \"\"\"\n\n def __init__(\n self,\n columns: Union[Hashable, List[Hashable], None] = None,\n ignore_columns: Union[Hashable, List[Hashable], None] = None,\n n_to_show: int = 5\n ):\n super().__init__()\n self.columns = columns\n self.ignore_columns = ignore_columns\n self.n_to_show = n_to_show\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset(Dataset): any dataset.\n model (any): used to check task type (default: None)\n\n Returns:\n (CheckResult): percentage of ambiguous samples and display of the top n_to_show most ambiguous.\n \"\"\"\n dataset: Dataset = Dataset.validate_dataset(dataset)\n dataset = dataset.select(self.columns, self.ignore_columns)\n\n if model:\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n elif dataset.label_type == 'regression_label':\n raise DeepchecksValueError('Task type cannot be regression')\n\n label_col = dataset.label_name\n\n # HACK: pandas have bug with groupby on category dtypes, so until it fixed, change dtypes manually\n df = dataset.data\n category_columns = df.dtypes[df.dtypes == 'category'].index.tolist()\n if category_columns:\n df = df.astype({c: 'object' for c in category_columns})\n\n group_unique_data = df.groupby(dataset.features, dropna=False)\n group_unique_labels = group_unique_data.nunique()[label_col]\n\n num_ambiguous = 0\n ambiguous_label_name = 'Observed Labels'\n display = pd.DataFrame(columns=[ambiguous_label_name, *dataset.features])\n\n for num_labels, group_data in sorted(zip(group_unique_labels, group_unique_data),\n key=lambda x: x[0], reverse=True):\n if num_labels == 1:\n break\n\n group_df = group_data[1]\n sample_values = dict(group_df[dataset.features].iloc[0])\n labels = tuple(group_df[label_col].unique())\n n_data_sample = group_df.shape[0]\n num_ambiguous += n_data_sample\n\n display = display.append({ambiguous_label_name: labels, **sample_values}, ignore_index=True)\n\n display = display.set_index(ambiguous_label_name)\n\n explanation = ('Each row in the table shows an example of a data sample '\n 'and the its observed labels as found in the dataset.')\n\n display = None if display.empty else [explanation, display.head(self.n_to_show)]\n\n percent_ambiguous = num_ambiguous/dataset.n_samples\n\n return CheckResult(value=percent_ambiguous, display=display)\n\n def add_condition_ambiguous_sample_ratio_not_greater_than(self, max_ratio=0):\n \"\"\"Add condition - require samples with multiple labels to not be more than 
max_ratio.\n\n Args:\n max_ratio (float): Maximum ratio of samples with multiple labels.\n \"\"\"\n def max_ratio_condition(result: float) -> ConditionResult:\n if result > max_ratio:\n return ConditionResult(False, f'Found ratio of samples with multiple labels above threshold: '\n f'{format_percent(result)}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(f'Ambiguous sample ratio is not greater than {format_percent(max_ratio)}',\n max_ratio_condition)\n", "path": "deepchecks/checks/integrity/label_ambiguity.py"}]}
| 1,596 | 160 |
gh_patches_debug_12071 | rasdani/github-patches | git_diff | getnikola__nikola-2287 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import_wordpress problem with php code in blogpost
```
$ nikola import_wordpress /Users/davidak/Downloads/davidakweblog.wordpress.2016-02-23.xml
[2016-02-23T17:06:37Z] NOTICE: Nikola: The folder new_site already exists - assuming that this is a already existing Nikola site.
Traceback (most recent call last):
File "/usr/local/lib/python3.5/site-packages/doit/doit_cmd.py", line 168, in run
return command.parse_execute(args)
File "/usr/local/lib/python3.5/site-packages/doit/cmd_base.py", line 122, in parse_execute
return self.execute(params, args)
File "/usr/local/lib/python3.5/site-packages/nikola/plugin_categories.py", line 124, in execute
return self._execute(options, args)
File "/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py", line 332, in _execute
self.import_posts(channel)
File "/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py", line 1015, in import_posts
self.process_item_if_post_or_page(item)
File "/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py", line 996, in process_item_if_post_or_page
out_folder_slug = self.import_postpage_item(item, wordpress_namespace, 'posts', attachments)
File "/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py", line 938, in import_postpage_item
rewrite_html)
File "/usr/local/lib/python3.5/site-packages/nikola/plugins/basic_import.py", line 130, in write_content
doc = html.document_fromstring(content)
File "/usr/local/lib/python3.5/site-packages/lxml/html/__init__.py", line 755, in document_fromstring
"Document is empty")
lxml.etree.ParserError: Document is empty
```
wordpress export of the post:
```
<?xml version="1.0" encoding="UTF-8" ?>
<!-- This is a WordPress eXtended RSS file generated by WordPress as an export of your site. -->
<!-- It contains information about your site's posts, pages, comments, categories, and other content. -->
<!-- You may use this file to transfer that content from one site to another. -->
<!-- This file is not intended to serve as a complete backup of your site. -->
<!-- To import this information into a WordPress site follow these steps: -->
<!-- 1. Log in to that site as an administrator. -->
<!-- 2. Go to Tools: Import in the WordPress admin panel. -->
<!-- 3. Install the "WordPress" importer from the list. -->
<!-- 4. Activate & Run Importer. -->
<!-- 5. Upload this file using the form provided on that page. -->
<!-- 6. You will first be asked to map the authors in this export file to users -->
<!-- on the site. For each author, you may choose to map to an -->
<!-- existing user on the site or to create a new user. -->
<!-- 7. WordPress will then import each of the posts, pages, comments, categories, etc. -->
<!-- contained in this file into your site. -->
<!-- generator="WordPress/4.4.2" created="2016-02-23 17:01" -->
<rss version="2.0"
xmlns:excerpt="http://wordpress.org/export/1.2/excerpt/"
xmlns:content="http://purl.org/rss/1.0/modules/content/"
xmlns:wfw="http://wellformedweb.org/CommentAPI/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:wp="http://wordpress.org/export/1.2/"
>
<channel>
<title>davidak Weblog</title>
<link>http://davidak.de/blog</link>
<description></description>
<pubDate>Tue, 23 Feb 2016 17:01:21 +0000</pubDate>
<language>de-DE</language>
<wp:wxr_version>1.2</wp:wxr_version>
<wp:base_site_url>http://davidak.de/blog</wp:base_site_url>
<wp:base_blog_url>http://davidak.de/blog</wp:base_blog_url>
<wp:author><wp:author_id>3</wp:author_id><wp:author_login><![CDATA[davidak]]></wp:author_login><wp:author_email><![CDATA[[email protected]]]></wp:author_email><wp:author_display_name><![CDATA[davidak]]></wp:author_display_name><wp:author_first_name><![CDATA[David]]></wp:author_first_name><wp:author_last_name><![CDATA[Kleuker]]></wp:author_last_name></wp:author>
<wp:category><wp:term_id>12</wp:term_id><wp:category_nicename><![CDATA[podcasts]]></wp:category_nicename><wp:category_parent><![CDATA[]]></wp:category_parent><wp:cat_name><![CDATA[Podcasts]]></wp:cat_name></wp:category>
<generator>https://wordpress.org/?v=4.4.2</generator>
<item>
<title>Umfrage: Podcasts?</title>
<link>http://davidak.de/blog/170-umfrage-podcasts/</link>
<pubDate>Wed, 27 Jun 2007 13:59:10 +0000</pubDate>
<dc:creator><![CDATA[davidak]]></dc:creator>
<guid isPermaLink="false">http://davidak.de/blog/?p=170</guid>
<description></description>
<content:encoded><![CDATA[<?php get_poll(6);?>
]]></content:encoded>
<excerpt:encoded><![CDATA[]]></excerpt:encoded>
<wp:post_id>170</wp:post_id>
<wp:post_date><![CDATA[2007-06-27 15:59:10]]></wp:post_date>
<wp:post_date_gmt><![CDATA[2007-06-27 13:59:10]]></wp:post_date_gmt>
<wp:comment_status><![CDATA[open]]></wp:comment_status>
<wp:ping_status><![CDATA[open]]></wp:ping_status>
<wp:post_name><![CDATA[umfrage-podcasts]]></wp:post_name>
<wp:status><![CDATA[publish]]></wp:status>
<wp:post_parent>0</wp:post_parent>
<wp:menu_order>0</wp:menu_order>
<wp:post_type><![CDATA[post]]></wp:post_type>
<wp:post_password><![CDATA[]]></wp:post_password>
<wp:is_sticky>0</wp:is_sticky>
<category domain="category" nicename="internet"><![CDATA[Internet]]></category>
<category domain="post_tag" nicename="podcasting"><![CDATA[podcasting]]></category>
<category domain="category" nicename="podcasts"><![CDATA[Podcasts]]></category>
<category domain="post_tag" nicename="podcasts"><![CDATA[Podcasts]]></category>
<category domain="post_tag" nicename="umfrage"><![CDATA[umfrage]]></category>
<wp:postmeta>
<wp:meta_key><![CDATA[runphp]]></wp:meta_key>
<wp:meta_value><![CDATA[1]]></wp:meta_value>
</wp:postmeta>
</item>
</channel>
</rss>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/basic_import.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright Β© 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Mixin for importer plugins."""
28
29 from __future__ import unicode_literals, print_function
30 import io
31 import csv
32 import datetime
33 import os
34 import sys
35 from pkg_resources import resource_filename
36
37 try:
38 from urlparse import urlparse
39 except ImportError:
40 from urllib.parse import urlparse # NOQA
41
42 from lxml import etree, html
43 from mako.template import Template
44
45 from nikola import utils
46
47 links = {}
48
49
50 class ImportMixin(object):
51 """Mixin with common used methods."""
52
53 name = "import_mixin"
54 needs_config = False
55 doc_usage = "[options] export_file"
56 doc_purpose = "import a dump from a different engine."
57 cmd_options = [
58 {
59 'name': 'output_folder',
60 'long': 'output-folder',
61 'short': 'o',
62 'default': 'new_site',
63 'help': 'Location to write imported content.'
64 },
65 ]
66
67 def _execute(self, options={}, args=[]):
68 """Import a blog from an export into a Nikola site."""
69 raise NotImplementedError("Must be implemented by a subclass.")
70
71 @classmethod
72 def get_channel_from_file(cls, filename):
73 """Get channel from XML file."""
74 tree = etree.fromstring(cls.read_xml_file(filename))
75 channel = tree.find('channel')
76 return channel
77
78 @staticmethod
79 def configure_redirections(url_map, base_dir=''):
80 """Configure redirections from an url_map."""
81 index = base_dir + 'index.html'
82 if index.startswith('/'):
83 index = index[1:]
84 redirections = []
85 for k, v in url_map.items():
86 if not k[-1] == '/':
87 k = k + '/'
88
89 # remove the initial "/" because src is a relative file path
90 src = (urlparse(k).path + 'index.html')[1:]
91 dst = (urlparse(v).path)
92 if src == index:
93 utils.LOGGER.warn("Can't do a redirect for: {0!r}".format(k))
94 else:
95 redirections.append((src, dst))
96 return redirections
97
98 def generate_base_site(self):
99 """Generate a base Nikola site."""
100 if not os.path.exists(self.output_folder):
101 os.system('nikola init -q ' + self.output_folder)
102 else:
103 self.import_into_existing_site = True
104 utils.LOGGER.notice('The folder {0} already exists - assuming that this is a '
105 'already existing Nikola site.'.format(self.output_folder))
106
107 filename = resource_filename('nikola', 'conf.py.in')
108 # The 'strict_undefined=True' will give the missing symbol name if any,
109 # (ex: NameError: 'THEME' is not defined )
110 # for other errors from mako/runtime.py, you can add format_extensions=True ,
111 # then more info will be writen to *somefile* (most probably conf.py)
112 conf_template = Template(filename=filename, strict_undefined=True)
113
114 return conf_template
115
116 @staticmethod
117 def populate_context(channel):
118 """Populate context with settings."""
119 raise NotImplementedError("Must be implemented by a subclass.")
120
121 @classmethod
122 def transform_content(cls, content):
123 """Transform content to a Nikola-friendly format."""
124 return content
125
126 @classmethod
127 def write_content(cls, filename, content, rewrite_html=True):
128 """Write content to file."""
129 if rewrite_html:
130 doc = html.document_fromstring(content)
131 doc.rewrite_links(replacer)
132 content = html.tostring(doc, encoding='utf8')
133 else:
134 content = content.encode('utf-8')
135
136 utils.makedirs(os.path.dirname(filename))
137 with open(filename, "wb+") as fd:
138 fd.write(content)
139
140 @staticmethod
141 def write_metadata(filename, title, slug, post_date, description, tags, **kwargs):
142 """Write metadata to meta file."""
143 if not description:
144 description = ""
145
146 utils.makedirs(os.path.dirname(filename))
147 with io.open(filename, "w+", encoding="utf8") as fd:
148 data = {'title': title, 'slug': slug, 'date': post_date, 'tags': ','.join(tags), 'description': description}
149 data.update(kwargs)
150 fd.write(utils.write_metadata(data))
151
152 @staticmethod
153 def write_urlmap_csv(output_file, url_map):
154 """Write urlmap to csv file."""
155 utils.makedirs(os.path.dirname(output_file))
156 fmode = 'wb+' if sys.version_info[0] == 2 else 'w+'
157 with io.open(output_file, fmode) as fd:
158 csv_writer = csv.writer(fd)
159 for item in url_map.items():
160 csv_writer.writerow(item)
161
162 def get_configuration_output_path(self):
163 """Get path for the output configuration file."""
164 if not self.import_into_existing_site:
165 filename = 'conf.py'
166 else:
167 filename = 'conf.py.{name}-{time}'.format(
168 time=datetime.datetime.now().strftime('%Y%m%d_%H%M%S'),
169 name=self.name)
170 config_output_path = os.path.join(self.output_folder, filename)
171 utils.LOGGER.info('Configuration will be written to: {0}'.format(config_output_path))
172
173 return config_output_path
174
175 @staticmethod
176 def write_configuration(filename, rendered_template):
177 """Write the configuration file."""
178 utils.makedirs(os.path.dirname(filename))
179 with io.open(filename, 'w+', encoding='utf8') as fd:
180 fd.write(rendered_template)
181
182
183 def replacer(dst):
184 """Replace links."""
185 return links.get(dst, dst)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/basic_import.py b/nikola/plugins/basic_import.py
--- a/nikola/plugins/basic_import.py
+++ b/nikola/plugins/basic_import.py
@@ -127,9 +127,12 @@
def write_content(cls, filename, content, rewrite_html=True):
"""Write content to file."""
if rewrite_html:
- doc = html.document_fromstring(content)
- doc.rewrite_links(replacer)
- content = html.tostring(doc, encoding='utf8')
+ try:
+ doc = html.document_fromstring(content)
+ doc.rewrite_links(replacer)
+ content = html.tostring(doc, encoding='utf8')
+ except etree.ParserError:
+ content = content.encode('utf-8')
else:
content = content.encode('utf-8')
|
{"golden_diff": "diff --git a/nikola/plugins/basic_import.py b/nikola/plugins/basic_import.py\n--- a/nikola/plugins/basic_import.py\n+++ b/nikola/plugins/basic_import.py\n@@ -127,9 +127,12 @@\n def write_content(cls, filename, content, rewrite_html=True):\n \"\"\"Write content to file.\"\"\"\n if rewrite_html:\n- doc = html.document_fromstring(content)\n- doc.rewrite_links(replacer)\n- content = html.tostring(doc, encoding='utf8')\n+ try:\n+ doc = html.document_fromstring(content)\n+ doc.rewrite_links(replacer)\n+ content = html.tostring(doc, encoding='utf8')\n+ except etree.ParserError:\n+ content = content.encode('utf-8')\n else:\n content = content.encode('utf-8')\n", "issue": "import_wordpress problem with php code in blogpost\n```\n$ nikola import_wordpress /Users/davidak/Downloads/davidakweblog.wordpress.2016-02-23.xml\n[2016-02-23T17:06:37Z] NOTICE: Nikola: The folder new_site already exists - assuming that this is a already existing Nikola site.\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.5/site-packages/doit/doit_cmd.py\", line 168, in run\n return command.parse_execute(args)\n File \"/usr/local/lib/python3.5/site-packages/doit/cmd_base.py\", line 122, in parse_execute\n return self.execute(params, args)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugin_categories.py\", line 124, in execute\n return self._execute(options, args)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py\", line 332, in _execute\n self.import_posts(channel)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py\", line 1015, in import_posts\n self.process_item_if_post_or_page(item)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py\", line 996, in process_item_if_post_or_page\n out_folder_slug = self.import_postpage_item(item, wordpress_namespace, 'posts', attachments)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugins/command/import_wordpress.py\", line 938, in import_postpage_item\n rewrite_html)\n File \"/usr/local/lib/python3.5/site-packages/nikola/plugins/basic_import.py\", line 130, in write_content\n doc = html.document_fromstring(content)\n File \"/usr/local/lib/python3.5/site-packages/lxml/html/__init__.py\", line 755, in document_fromstring\n \"Document is empty\")\nlxml.etree.ParserError: Document is empty\n```\n\nwordpress export of the post:\n\n```\n<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!-- This is a WordPress eXtended RSS file generated by WordPress as an export of your site. -->\n<!-- It contains information about your site's posts, pages, comments, categories, and other content. -->\n<!-- You may use this file to transfer that content from one site to another. -->\n<!-- This file is not intended to serve as a complete backup of your site. -->\n\n<!-- To import this information into a WordPress site follow these steps: -->\n<!-- 1. Log in to that site as an administrator. -->\n<!-- 2. Go to Tools: Import in the WordPress admin panel. -->\n<!-- 3. Install the \"WordPress\" importer from the list. -->\n<!-- 4. Activate & Run Importer. -->\n<!-- 5. Upload this file using the form provided on that page. -->\n<!-- 6. You will first be asked to map the authors in this export file to users -->\n<!-- on the site. For each author, you may choose to map to an -->\n<!-- existing user on the site or to create a new user. -->\n<!-- 7. WordPress will then import each of the posts, pages, comments, categories, etc. 
-->\n<!-- contained in this file into your site. -->\n\n<!-- generator=\"WordPress/4.4.2\" created=\"2016-02-23 17:01\" -->\n<rss version=\"2.0\"\n xmlns:excerpt=\"http://wordpress.org/export/1.2/excerpt/\"\n xmlns:content=\"http://purl.org/rss/1.0/modules/content/\"\n xmlns:wfw=\"http://wellformedweb.org/CommentAPI/\"\n xmlns:dc=\"http://purl.org/dc/elements/1.1/\"\n xmlns:wp=\"http://wordpress.org/export/1.2/\"\n>\n\n<channel>\n <title>davidak Weblog</title>\n <link>http://davidak.de/blog</link>\n <description></description>\n <pubDate>Tue, 23 Feb 2016 17:01:21 +0000</pubDate>\n <language>de-DE</language>\n <wp:wxr_version>1.2</wp:wxr_version>\n <wp:base_site_url>http://davidak.de/blog</wp:base_site_url>\n <wp:base_blog_url>http://davidak.de/blog</wp:base_blog_url>\n\n <wp:author><wp:author_id>3</wp:author_id><wp:author_login><![CDATA[davidak]]></wp:author_login><wp:author_email><![CDATA[[email protected]]]></wp:author_email><wp:author_display_name><![CDATA[davidak]]></wp:author_display_name><wp:author_first_name><![CDATA[David]]></wp:author_first_name><wp:author_last_name><![CDATA[Kleuker]]></wp:author_last_name></wp:author>\n\n <wp:category><wp:term_id>12</wp:term_id><wp:category_nicename><![CDATA[podcasts]]></wp:category_nicename><wp:category_parent><![CDATA[]]></wp:category_parent><wp:cat_name><![CDATA[Podcasts]]></wp:cat_name></wp:category>\n\n <generator>https://wordpress.org/?v=4.4.2</generator>\n\n <item>\n <title>Umfrage: Podcasts?</title>\n <link>http://davidak.de/blog/170-umfrage-podcasts/</link>\n <pubDate>Wed, 27 Jun 2007 13:59:10 +0000</pubDate>\n <dc:creator><![CDATA[davidak]]></dc:creator>\n <guid isPermaLink=\"false\">http://davidak.de/blog/?p=170</guid>\n <description></description>\n <content:encoded><![CDATA[<?php get_poll(6);?>\n]]></content:encoded>\n <excerpt:encoded><![CDATA[]]></excerpt:encoded>\n <wp:post_id>170</wp:post_id>\n <wp:post_date><![CDATA[2007-06-27 15:59:10]]></wp:post_date>\n <wp:post_date_gmt><![CDATA[2007-06-27 13:59:10]]></wp:post_date_gmt>\n <wp:comment_status><![CDATA[open]]></wp:comment_status>\n <wp:ping_status><![CDATA[open]]></wp:ping_status>\n <wp:post_name><![CDATA[umfrage-podcasts]]></wp:post_name>\n <wp:status><![CDATA[publish]]></wp:status>\n <wp:post_parent>0</wp:post_parent>\n <wp:menu_order>0</wp:menu_order>\n <wp:post_type><![CDATA[post]]></wp:post_type>\n <wp:post_password><![CDATA[]]></wp:post_password>\n <wp:is_sticky>0</wp:is_sticky>\n <category domain=\"category\" nicename=\"internet\"><![CDATA[Internet]]></category>\n <category domain=\"post_tag\" nicename=\"podcasting\"><![CDATA[podcasting]]></category>\n <category domain=\"category\" nicename=\"podcasts\"><![CDATA[Podcasts]]></category>\n <category domain=\"post_tag\" nicename=\"podcasts\"><![CDATA[Podcasts]]></category>\n <category domain=\"post_tag\" nicename=\"umfrage\"><![CDATA[umfrage]]></category>\n <wp:postmeta>\n <wp:meta_key><![CDATA[runphp]]></wp:meta_key>\n <wp:meta_value><![CDATA[1]]></wp:meta_value>\n </wp:postmeta>\n </item>\n\n</channel>\n</rss>\n\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software 
is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Mixin for importer plugins.\"\"\"\n\nfrom __future__ import unicode_literals, print_function\nimport io\nimport csv\nimport datetime\nimport os\nimport sys\nfrom pkg_resources import resource_filename\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse # NOQA\n\nfrom lxml import etree, html\nfrom mako.template import Template\n\nfrom nikola import utils\n\nlinks = {}\n\n\nclass ImportMixin(object):\n \"\"\"Mixin with common used methods.\"\"\"\n\n name = \"import_mixin\"\n needs_config = False\n doc_usage = \"[options] export_file\"\n doc_purpose = \"import a dump from a different engine.\"\n cmd_options = [\n {\n 'name': 'output_folder',\n 'long': 'output-folder',\n 'short': 'o',\n 'default': 'new_site',\n 'help': 'Location to write imported content.'\n },\n ]\n\n def _execute(self, options={}, args=[]):\n \"\"\"Import a blog from an export into a Nikola site.\"\"\"\n raise NotImplementedError(\"Must be implemented by a subclass.\")\n\n @classmethod\n def get_channel_from_file(cls, filename):\n \"\"\"Get channel from XML file.\"\"\"\n tree = etree.fromstring(cls.read_xml_file(filename))\n channel = tree.find('channel')\n return channel\n\n @staticmethod\n def configure_redirections(url_map, base_dir=''):\n \"\"\"Configure redirections from an url_map.\"\"\"\n index = base_dir + 'index.html'\n if index.startswith('/'):\n index = index[1:]\n redirections = []\n for k, v in url_map.items():\n if not k[-1] == '/':\n k = k + '/'\n\n # remove the initial \"/\" because src is a relative file path\n src = (urlparse(k).path + 'index.html')[1:]\n dst = (urlparse(v).path)\n if src == index:\n utils.LOGGER.warn(\"Can't do a redirect for: {0!r}\".format(k))\n else:\n redirections.append((src, dst))\n return redirections\n\n def generate_base_site(self):\n \"\"\"Generate a base Nikola site.\"\"\"\n if not os.path.exists(self.output_folder):\n os.system('nikola init -q ' + self.output_folder)\n else:\n self.import_into_existing_site = True\n utils.LOGGER.notice('The folder {0} already exists - assuming that this is a '\n 'already existing Nikola site.'.format(self.output_folder))\n\n filename = resource_filename('nikola', 'conf.py.in')\n # The 'strict_undefined=True' will give the missing symbol name if any,\n # (ex: NameError: 'THEME' is not defined )\n # for other errors from mako/runtime.py, you can add format_extensions=True ,\n # then more info will be writen to *somefile* (most probably conf.py)\n conf_template = Template(filename=filename, strict_undefined=True)\n\n return conf_template\n\n @staticmethod\n def populate_context(channel):\n \"\"\"Populate context with settings.\"\"\"\n raise NotImplementedError(\"Must be implemented by a subclass.\")\n\n @classmethod\n def transform_content(cls, content):\n \"\"\"Transform content to a Nikola-friendly format.\"\"\"\n 
return content\n\n @classmethod\n def write_content(cls, filename, content, rewrite_html=True):\n \"\"\"Write content to file.\"\"\"\n if rewrite_html:\n doc = html.document_fromstring(content)\n doc.rewrite_links(replacer)\n content = html.tostring(doc, encoding='utf8')\n else:\n content = content.encode('utf-8')\n\n utils.makedirs(os.path.dirname(filename))\n with open(filename, \"wb+\") as fd:\n fd.write(content)\n\n @staticmethod\n def write_metadata(filename, title, slug, post_date, description, tags, **kwargs):\n \"\"\"Write metadata to meta file.\"\"\"\n if not description:\n description = \"\"\n\n utils.makedirs(os.path.dirname(filename))\n with io.open(filename, \"w+\", encoding=\"utf8\") as fd:\n data = {'title': title, 'slug': slug, 'date': post_date, 'tags': ','.join(tags), 'description': description}\n data.update(kwargs)\n fd.write(utils.write_metadata(data))\n\n @staticmethod\n def write_urlmap_csv(output_file, url_map):\n \"\"\"Write urlmap to csv file.\"\"\"\n utils.makedirs(os.path.dirname(output_file))\n fmode = 'wb+' if sys.version_info[0] == 2 else 'w+'\n with io.open(output_file, fmode) as fd:\n csv_writer = csv.writer(fd)\n for item in url_map.items():\n csv_writer.writerow(item)\n\n def get_configuration_output_path(self):\n \"\"\"Get path for the output configuration file.\"\"\"\n if not self.import_into_existing_site:\n filename = 'conf.py'\n else:\n filename = 'conf.py.{name}-{time}'.format(\n time=datetime.datetime.now().strftime('%Y%m%d_%H%M%S'),\n name=self.name)\n config_output_path = os.path.join(self.output_folder, filename)\n utils.LOGGER.info('Configuration will be written to: {0}'.format(config_output_path))\n\n return config_output_path\n\n @staticmethod\n def write_configuration(filename, rendered_template):\n \"\"\"Write the configuration file.\"\"\"\n utils.makedirs(os.path.dirname(filename))\n with io.open(filename, 'w+', encoding='utf8') as fd:\n fd.write(rendered_template)\n\n\ndef replacer(dst):\n \"\"\"Replace links.\"\"\"\n return links.get(dst, dst)\n", "path": "nikola/plugins/basic_import.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Mixin for importer plugins.\"\"\"\n\nfrom __future__ import unicode_literals, print_function\nimport io\nimport csv\nimport datetime\nimport os\nimport sys\nfrom pkg_resources import resource_filename\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse # NOQA\n\nfrom lxml import etree, html\nfrom mako.template import Template\n\nfrom nikola import utils\n\nlinks = {}\n\n\nclass ImportMixin(object):\n \"\"\"Mixin with common used methods.\"\"\"\n\n name = \"import_mixin\"\n needs_config = False\n doc_usage = \"[options] export_file\"\n doc_purpose = \"import a dump from a different engine.\"\n cmd_options = [\n {\n 'name': 'output_folder',\n 'long': 'output-folder',\n 'short': 'o',\n 'default': 'new_site',\n 'help': 'Location to write imported content.'\n },\n ]\n\n def _execute(self, options={}, args=[]):\n \"\"\"Import a blog from an export into a Nikola site.\"\"\"\n raise NotImplementedError(\"Must be implemented by a subclass.\")\n\n @classmethod\n def get_channel_from_file(cls, filename):\n \"\"\"Get channel from XML file.\"\"\"\n tree = etree.fromstring(cls.read_xml_file(filename))\n channel = tree.find('channel')\n return channel\n\n @staticmethod\n def configure_redirections(url_map, base_dir=''):\n \"\"\"Configure redirections from an url_map.\"\"\"\n index = base_dir + 'index.html'\n if index.startswith('/'):\n index = index[1:]\n redirections = []\n for k, v in url_map.items():\n if not k[-1] == '/':\n k = k + '/'\n\n # remove the initial \"/\" because src is a relative file path\n src = (urlparse(k).path + 'index.html')[1:]\n dst = (urlparse(v).path)\n if src == index:\n utils.LOGGER.warn(\"Can't do a redirect for: {0!r}\".format(k))\n else:\n redirections.append((src, dst))\n return redirections\n\n def generate_base_site(self):\n \"\"\"Generate a base Nikola site.\"\"\"\n if not os.path.exists(self.output_folder):\n os.system('nikola init -q ' + self.output_folder)\n else:\n self.import_into_existing_site = True\n utils.LOGGER.notice('The folder {0} already exists - assuming that this is a '\n 'already existing Nikola site.'.format(self.output_folder))\n\n filename = resource_filename('nikola', 'conf.py.in')\n # The 'strict_undefined=True' will give the missing symbol name if any,\n # (ex: NameError: 'THEME' is not defined )\n # for other errors from mako/runtime.py, you can add format_extensions=True ,\n # then more info will be writen to *somefile* (most probably conf.py)\n conf_template = Template(filename=filename, strict_undefined=True)\n\n return conf_template\n\n @staticmethod\n def populate_context(channel):\n \"\"\"Populate context with settings.\"\"\"\n raise NotImplementedError(\"Must be implemented by a subclass.\")\n\n @classmethod\n def transform_content(cls, content):\n \"\"\"Transform content to a Nikola-friendly format.\"\"\"\n return content\n\n @classmethod\n def write_content(cls, filename, content, rewrite_html=True):\n \"\"\"Write content to file.\"\"\"\n if rewrite_html:\n try:\n doc = html.document_fromstring(content)\n doc.rewrite_links(replacer)\n content = html.tostring(doc, encoding='utf8')\n except etree.ParserError:\n content = content.encode('utf-8')\n else:\n content = content.encode('utf-8')\n\n 
utils.makedirs(os.path.dirname(filename))\n with open(filename, \"wb+\") as fd:\n fd.write(content)\n\n @staticmethod\n def write_metadata(filename, title, slug, post_date, description, tags, **kwargs):\n \"\"\"Write metadata to meta file.\"\"\"\n if not description:\n description = \"\"\n\n utils.makedirs(os.path.dirname(filename))\n with io.open(filename, \"w+\", encoding=\"utf8\") as fd:\n data = {'title': title, 'slug': slug, 'date': post_date, 'tags': ','.join(tags), 'description': description}\n data.update(kwargs)\n fd.write(utils.write_metadata(data))\n\n @staticmethod\n def write_urlmap_csv(output_file, url_map):\n \"\"\"Write urlmap to csv file.\"\"\"\n utils.makedirs(os.path.dirname(output_file))\n fmode = 'wb+' if sys.version_info[0] == 2 else 'w+'\n with io.open(output_file, fmode) as fd:\n csv_writer = csv.writer(fd)\n for item in url_map.items():\n csv_writer.writerow(item)\n\n def get_configuration_output_path(self):\n \"\"\"Get path for the output configuration file.\"\"\"\n if not self.import_into_existing_site:\n filename = 'conf.py'\n else:\n filename = 'conf.py.{name}-{time}'.format(\n time=datetime.datetime.now().strftime('%Y%m%d_%H%M%S'),\n name=self.name)\n config_output_path = os.path.join(self.output_folder, filename)\n utils.LOGGER.info('Configuration will be written to: {0}'.format(config_output_path))\n\n return config_output_path\n\n @staticmethod\n def write_configuration(filename, rendered_template):\n \"\"\"Write the configuration file.\"\"\"\n utils.makedirs(os.path.dirname(filename))\n with io.open(filename, 'w+', encoding='utf8') as fd:\n fd.write(rendered_template)\n\n\ndef replacer(dst):\n \"\"\"Replace links.\"\"\"\n return links.get(dst, dst)\n", "path": "nikola/plugins/basic_import.py"}]}
| 3,901 | 182 |
gh_patches_debug_58533
|
rasdani/github-patches
|
git_diff
|
shuup__shuup-1665
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add option to remove coupon code from basket
There isn't option to remove coupon code from basket. Should likely be done with basket command at basket view [this](https://github.com/shuup/shuup/blob/master/shuup/core/basket/commands.py#L177).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shuup/front/basket/commands.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8 from shuup.core.basket.commands import ( # noqa
9 handle_add, handle_add_campaign_code, handle_add_var, handle_clear,
10 handle_del, handle_update
11 )
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shuup/front/basket/commands.py b/shuup/front/basket/commands.py
--- a/shuup/front/basket/commands.py
+++ b/shuup/front/basket/commands.py
@@ -7,5 +7,5 @@
# LICENSE file in the root directory of this source tree.
from shuup.core.basket.commands import ( # noqa
handle_add, handle_add_campaign_code, handle_add_var, handle_clear,
- handle_del, handle_update
+ handle_del, handle_remove_campaign_code, handle_update
)
|
{"golden_diff": "diff --git a/shuup/front/basket/commands.py b/shuup/front/basket/commands.py\n--- a/shuup/front/basket/commands.py\n+++ b/shuup/front/basket/commands.py\n@@ -7,5 +7,5 @@\n # LICENSE file in the root directory of this source tree.\n from shuup.core.basket.commands import ( # noqa\n handle_add, handle_add_campaign_code, handle_add_var, handle_clear,\n- handle_del, handle_update\n+ handle_del, handle_remove_campaign_code, handle_update\n )\n", "issue": "Add option to remove coupon code from basket\nThere isn't option to remove coupon code from basket. Should likely be done with basket command at basket view [this](https://github.com/shuup/shuup/blob/master/shuup/core/basket/commands.py#L177).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.core.basket.commands import ( # noqa\n handle_add, handle_add_campaign_code, handle_add_var, handle_clear,\n handle_del, handle_update\n)\n", "path": "shuup/front/basket/commands.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom shuup.core.basket.commands import ( # noqa\n handle_add, handle_add_campaign_code, handle_add_var, handle_clear,\n handle_del, handle_remove_campaign_code, handle_update\n)\n", "path": "shuup/front/basket/commands.py"}]}
| 452 | 122 |
gh_patches_debug_26191
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1681
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
manage.py compress errors with empty db
In 1.1rc1 manage.py command 'compress' doesn't work anymore if the database isn't migrated yet. I need to be able to do this since i'm running django compress during a docker build.
Complete stacktrace is:
```
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/management/__init__.py", line 338, in execute_from_command_line
utility.execute()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/management/__init__.py", line 312, in execute
django.setup()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/__init__.py", line 18, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/apps/registry.py", line 115, in populate
app_config.ready()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/apps.py", line 15, in ready
dt_settings.patch_all()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/settings.py", line 232, in patch_all
patch_root_urlconf()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/settings.py", line 220, in patch_root_urlconf
reverse('djdt:render_panel')
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py", line 550, in reverse
app_list = resolver.app_dict[ns]
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py", line 352, in app_dict
self._populate()
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py", line 285, in _populate
for pattern in reversed(self.url_patterns):
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py", line 402, in url_patterns
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py", line 396, in urlconf_module
self._urlconf_module = import_module(self.urlconf_name)
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/Users/mvantellingen/projects/myorg/myproj/src/myproj/urls.py", line 7, in <module>
from wagtail.wagtailadmin import urls as wagtailadmin_urls
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/urls/__init__.py", line 7, in <module>
from wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/views/account.py", line 12, in <module>
from wagtail.wagtailusers.forms import NotificationPreferencesForm
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailusers/forms.py", line 236, in <module>
class GroupPagePermissionForm(forms.ModelForm):
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailusers/forms.py", line 238, in GroupPagePermissionForm
widget=AdminPageChooser(show_edit_link=False))
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/widgets.py", line 123, in __init__
self.target_content_types = content_type or ContentType.objects.get_for_model(Page)
File "/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/contrib/contenttypes/models.py", line 78, in get_for_model
"Error creating new content types. Please make sure contenttypes "
RuntimeError: Error creating new content types. Please make sure contenttypes is migrated before trying to migrate apps individually.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/widgets.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import json
4
5 from django.core.urlresolvers import reverse
6 from django.forms import widgets
7 from django.contrib.contenttypes.models import ContentType
8 from django.utils.translation import ugettext_lazy as _
9 from django.template.loader import render_to_string
10
11 from wagtail.utils.widgets import WidgetWithScript
12 from wagtail.wagtailcore.models import Page
13
14 from taggit.forms import TagWidget
15
16
17 class AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):
18 def __init__(self, attrs=None):
19 # Use more appropriate rows default, given autoheight will alter this anyway
20 default_attrs = {'rows': '1'}
21 if attrs:
22 default_attrs.update(attrs)
23
24 super(AdminAutoHeightTextInput, self).__init__(default_attrs)
25
26 def render_js_init(self, id_, name, value):
27 return 'autosize($("#{0}"));'.format(id_)
28
29 class AdminDateInput(WidgetWithScript, widgets.DateInput):
30 # Set a default date format to match the one that our JS date picker expects -
31 # it can still be overridden explicitly, but this way it won't be affected by
32 # the DATE_INPUT_FORMATS setting
33 def __init__(self, attrs=None, format='%Y-%m-%d'):
34 super(AdminDateInput, self).__init__(attrs=attrs, format=format)
35
36 def render_js_init(self, id_, name, value):
37 return 'initDateChooser({0});'.format(json.dumps(id_))
38
39
40 class AdminTimeInput(WidgetWithScript, widgets.TimeInput):
41 def __init__(self, attrs=None, format='%H:%M'):
42 super(AdminTimeInput, self).__init__(attrs=attrs, format=format)
43
44 def render_js_init(self, id_, name, value):
45 return 'initTimeChooser({0});'.format(json.dumps(id_))
46
47
48 class AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):
49 def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):
50 super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)
51
52 def render_js_init(self, id_, name, value):
53 return 'initDateTimeChooser({0});'.format(json.dumps(id_))
54
55
56 class AdminTagWidget(WidgetWithScript, TagWidget):
57 def render_js_init(self, id_, name, value):
58 return "initTagField({0}, {1});".format(
59 json.dumps(id_),
60 json.dumps(reverse('wagtailadmin_tag_autocomplete')))
61
62
63 class AdminChooser(WidgetWithScript, widgets.Input):
64 input_type = 'hidden'
65 choose_one_text = _("Choose an item")
66 choose_another_text = _("Choose another item")
67 clear_choice_text = _("Clear choice")
68 link_to_chosen_text = _("Edit this item")
69 show_edit_link = True
70
71 def get_instance(self, model_class, value):
72 # helper method for cleanly turning 'value' into an instance object
73 if value is None:
74 return None
75
76 try:
77 return model_class.objects.get(pk=value)
78 except model_class.DoesNotExist:
79 return None
80
81 def get_instance_and_id(self, model_class, value):
82 if value is None:
83 return (None, None)
84 elif isinstance(value, model_class):
85 return (value, value.pk)
86 else:
87 try:
88 return (model_class.objects.get(pk=value), value)
89 except model_class.DoesNotExist:
90 return (None, None)
91
92 def value_from_datadict(self, data, files, name):
93 # treat the empty string as None
94 result = super(AdminChooser, self).value_from_datadict(data, files, name)
95 if result == '':
96 return None
97 else:
98 return result
99
100 def __init__(self, **kwargs):
101 # allow choose_one_text / choose_another_text to be overridden per-instance
102 if 'choose_one_text' in kwargs:
103 self.choose_one_text = kwargs.pop('choose_one_text')
104 if 'choose_another_text' in kwargs:
105 self.choose_another_text = kwargs.pop('choose_another_text')
106 if 'clear_choice_text' in kwargs:
107 self.clear_choice_text = kwargs.pop('clear_choice_text')
108 if 'link_to_chosen_text' in kwargs:
109 self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')
110 if 'show_edit_link' in kwargs:
111 self.show_edit_link = kwargs.pop('show_edit_link')
112 super(AdminChooser, self).__init__(**kwargs)
113
114
115 class AdminPageChooser(AdminChooser):
116 choose_one_text = _('Choose a page')
117 choose_another_text = _('Choose another page')
118 link_to_chosen_text = _('Edit this page')
119
120 def __init__(self, content_type=None, **kwargs):
121 super(AdminPageChooser, self).__init__(**kwargs)
122
123 self.target_content_types = content_type or ContentType.objects.get_for_model(Page)
124 # Make sure target_content_types is a list or tuple
125 if not isinstance(self.target_content_types, (list, tuple)):
126 self.target_content_types = [self.target_content_types]
127
128 def render_html(self, name, value, attrs):
129 if len(self.target_content_types) == 1:
130 model_class = self.target_content_types[0].model_class()
131 else:
132 model_class = Page
133
134 instance, value = self.get_instance_and_id(model_class, value)
135
136 original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)
137
138 return render_to_string("wagtailadmin/widgets/page_chooser.html", {
139 'widget': self,
140 'original_field_html': original_field_html,
141 'attrs': attrs,
142 'value': value,
143 'page': instance,
144 })
145
146 def render_js_init(self, id_, name, value):
147 if isinstance(value, Page):
148 page = value
149 else:
150 # Value is an ID look up object
151 if len(self.target_content_types) == 1:
152 model_class = self.target_content_types[0].model_class()
153 else:
154 model_class = Page
155
156 page = self.get_instance(model_class, value)
157
158 parent = page.get_parent() if page else None
159
160 return "createPageChooser({id}, {content_type}, {parent});".format(
161 id=json.dumps(id_),
162 content_type=json.dumps([
163 '{app}.{model}'.format(
164 app=content_type.app_label,
165 model=content_type.model)
166 for content_type in self.target_content_types
167 ]),
168 parent=json.dumps(parent.id if parent else None))
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py
--- a/wagtail/wagtailadmin/widgets.py
+++ b/wagtail/wagtailadmin/widgets.py
@@ -5,6 +5,7 @@
from django.core.urlresolvers import reverse
from django.forms import widgets
from django.contrib.contenttypes.models import ContentType
+from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.template.loader import render_to_string
@@ -119,11 +120,15 @@
def __init__(self, content_type=None, **kwargs):
super(AdminPageChooser, self).__init__(**kwargs)
+ self._content_type = content_type
- self.target_content_types = content_type or ContentType.objects.get_for_model(Page)
+ @cached_property
+ def target_content_types(self):
+ target_content_types = self._content_type or ContentType.objects.get_for_model(Page)
# Make sure target_content_types is a list or tuple
- if not isinstance(self.target_content_types, (list, tuple)):
- self.target_content_types = [self.target_content_types]
+ if not isinstance(target_content_types, (list, tuple)):
+ target_content_types = [target_content_types]
+ return target_content_types
def render_html(self, name, value, attrs):
if len(self.target_content_types) == 1:
|
{"golden_diff": "diff --git a/wagtail/wagtailadmin/widgets.py b/wagtail/wagtailadmin/widgets.py\n--- a/wagtail/wagtailadmin/widgets.py\n+++ b/wagtail/wagtailadmin/widgets.py\n@@ -5,6 +5,7 @@\n from django.core.urlresolvers import reverse\n from django.forms import widgets\n from django.contrib.contenttypes.models import ContentType\n+from django.utils.functional import cached_property\n from django.utils.translation import ugettext_lazy as _\n from django.template.loader import render_to_string\n \n@@ -119,11 +120,15 @@\n \n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n+ self._content_type = content_type\n \n- self.target_content_types = content_type or ContentType.objects.get_for_model(Page)\n+ @cached_property\n+ def target_content_types(self):\n+ target_content_types = self._content_type or ContentType.objects.get_for_model(Page)\n # Make sure target_content_types is a list or tuple\n- if not isinstance(self.target_content_types, (list, tuple)):\n- self.target_content_types = [self.target_content_types]\n+ if not isinstance(target_content_types, (list, tuple)):\n+ target_content_types = [target_content_types]\n+ return target_content_types\n \n def render_html(self, name, value, attrs):\n if len(self.target_content_types) == 1:\n", "issue": "manage.py compress errors with empty db\nIn 1.1rc1 manage.py command 'compress' doesn't work anymore if the database isn't migrated yet. I need to be able to do this since i'm running django compress during a docker build.\n\nComplete stacktrace is:\n\n```\nTraceback (most recent call last):\n File \"./manage.py\", line 10, in <module>\n execute_from_command_line(sys.argv)\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/management/__init__.py\", line 338, in execute_from_command_line\n utility.execute()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/management/__init__.py\", line 312, in execute\n django.setup()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/__init__.py\", line 18, in setup\n apps.populate(settings.INSTALLED_APPS)\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/apps/registry.py\", line 115, in populate\n app_config.ready()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/apps.py\", line 15, in ready\n dt_settings.patch_all()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/settings.py\", line 232, in patch_all\n patch_root_urlconf()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/debug_toolbar/settings.py\", line 220, in patch_root_urlconf\n reverse('djdt:render_panel')\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 550, in reverse\n app_list = resolver.app_dict[ns]\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 352, in app_dict\n self._populate()\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 285, in _populate\n for pattern in reversed(self.url_patterns):\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 402, in url_patterns\n patterns = getattr(self.urlconf_module, \"urlpatterns\", self.urlconf_module)\n File 
\"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 396, in urlconf_module\n self._urlconf_module = import_module(self.urlconf_name)\n File \"/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/importlib/__init__.py\", line 37, in import_module\n __import__(name)\n File \"/Users/mvantellingen/projects/myorg/myproj/src/myproj/urls.py\", line 7, in <module>\n from wagtail.wagtailadmin import urls as wagtailadmin_urls\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/urls/__init__.py\", line 7, in <module>\n from wagtail.wagtailadmin.views import account, chooser, home, pages, tags, userbar\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/views/account.py\", line 12, in <module>\n from wagtail.wagtailusers.forms import NotificationPreferencesForm\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailusers/forms.py\", line 236, in <module>\n class GroupPagePermissionForm(forms.ModelForm):\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailusers/forms.py\", line 238, in GroupPagePermissionForm\n widget=AdminPageChooser(show_edit_link=False))\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/wagtail/wagtailadmin/widgets.py\", line 123, in __init__\n self.target_content_types = content_type or ContentType.objects.get_for_model(Page)\n File \"/usr/local/opt/pyenv/versions/myproj/lib/python2.7/site-packages/django/contrib/contenttypes/models.py\", line 78, in get_for_model\n \"Error creating new content types. Please make sure contenttypes \"\nRuntimeError: Error creating new content types. 
Please make sure contenttypes is migrated before trying to migrate apps individually.\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import widgets\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.loader import render_to_string\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.models import Page\n\nfrom taggit.forms import TagWidget\n\n\nclass AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):\n def __init__(self, attrs=None):\n # Use more appropriate rows default, given autoheight will alter this anyway\n default_attrs = {'rows': '1'}\n if attrs:\n default_attrs.update(attrs)\n\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n\n def render_js_init(self, id_, name, value):\n return 'autosize($(\"#{0}\"));'.format(id_)\n\nclass AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n # it can still be overridden explicitly, but this way it won't be affected by\n # the DATE_INPUT_FORMATS setting\n def __init__(self, attrs=None, format='%Y-%m-%d'):\n super(AdminDateInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTimeInput(WidgetWithScript, widgets.TimeInput):\n def __init__(self, attrs=None, format='%H:%M'):\n super(AdminTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):\n def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):\n super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTagWidget(WidgetWithScript, TagWidget):\n def render_js_init(self, id_, name, value):\n return \"initTagField({0}, {1});\".format(\n json.dumps(id_),\n json.dumps(reverse('wagtailadmin_tag_autocomplete')))\n\n\nclass AdminChooser(WidgetWithScript, widgets.Input):\n input_type = 'hidden'\n choose_one_text = _(\"Choose an item\")\n choose_another_text = _(\"Choose another item\")\n clear_choice_text = _(\"Clear choice\")\n link_to_chosen_text = _(\"Edit this item\")\n show_edit_link = True\n\n def get_instance(self, model_class, value):\n # helper method for cleanly turning 'value' into an instance object\n if value is None:\n return None\n\n try:\n return model_class.objects.get(pk=value)\n except model_class.DoesNotExist:\n return None\n\n def get_instance_and_id(self, model_class, value):\n if value is None:\n return (None, None)\n elif isinstance(value, model_class):\n return (value, value.pk)\n else:\n try:\n return (model_class.objects.get(pk=value), value)\n except model_class.DoesNotExist:\n return (None, None)\n\n def value_from_datadict(self, data, files, name):\n # treat the empty string as None\n result = super(AdminChooser, self).value_from_datadict(data, files, name)\n if result == '':\n return None\n else:\n return result\n\n def __init__(self, **kwargs):\n # allow choose_one_text / choose_another_text to be overridden per-instance\n if 'choose_one_text' in kwargs:\n self.choose_one_text = 
kwargs.pop('choose_one_text')\n if 'choose_another_text' in kwargs:\n self.choose_another_text = kwargs.pop('choose_another_text')\n if 'clear_choice_text' in kwargs:\n self.clear_choice_text = kwargs.pop('clear_choice_text')\n if 'link_to_chosen_text' in kwargs:\n self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')\n if 'show_edit_link' in kwargs:\n self.show_edit_link = kwargs.pop('show_edit_link')\n super(AdminChooser, self).__init__(**kwargs)\n\n\nclass AdminPageChooser(AdminChooser):\n choose_one_text = _('Choose a page')\n choose_another_text = _('Choose another page')\n link_to_chosen_text = _('Edit this page')\n\n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n\n self.target_content_types = content_type or ContentType.objects.get_for_model(Page)\n # Make sure target_content_types is a list or tuple\n if not isinstance(self.target_content_types, (list, tuple)):\n self.target_content_types = [self.target_content_types]\n\n def render_html(self, name, value, attrs):\n if len(self.target_content_types) == 1:\n model_class = self.target_content_types[0].model_class()\n else:\n model_class = Page\n\n instance, value = self.get_instance_and_id(model_class, value)\n\n original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)\n\n return render_to_string(\"wagtailadmin/widgets/page_chooser.html\", {\n 'widget': self,\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n 'page': instance,\n })\n\n def render_js_init(self, id_, name, value):\n if isinstance(value, Page):\n page = value\n else:\n # Value is an ID look up object\n if len(self.target_content_types) == 1:\n model_class = self.target_content_types[0].model_class()\n else:\n model_class = Page\n\n page = self.get_instance(model_class, value)\n\n parent = page.get_parent() if page else None\n\n return \"createPageChooser({id}, {content_type}, {parent});\".format(\n id=json.dumps(id_),\n content_type=json.dumps([\n '{app}.{model}'.format(\n app=content_type.app_label,\n model=content_type.model)\n for content_type in self.target_content_types\n ]),\n parent=json.dumps(parent.id if parent else None))\n", "path": "wagtail/wagtailadmin/widgets.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport json\n\nfrom django.core.urlresolvers import reverse\nfrom django.forms import widgets\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.loader import render_to_string\n\nfrom wagtail.utils.widgets import WidgetWithScript\nfrom wagtail.wagtailcore.models import Page\n\nfrom taggit.forms import TagWidget\n\n\nclass AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):\n def __init__(self, attrs=None):\n # Use more appropriate rows default, given autoheight will alter this anyway\n default_attrs = {'rows': '1'}\n if attrs:\n default_attrs.update(attrs)\n\n super(AdminAutoHeightTextInput, self).__init__(default_attrs)\n\n def render_js_init(self, id_, name, value):\n return 'autosize($(\"#{0}\"));'.format(id_)\n\nclass AdminDateInput(WidgetWithScript, widgets.DateInput):\n # Set a default date format to match the one that our JS date picker expects -\n # it can still be overridden explicitly, but this way it won't be affected by\n # the DATE_INPUT_FORMATS setting\n def __init__(self, attrs=None, format='%Y-%m-%d'):\n 
super(AdminDateInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTimeInput(WidgetWithScript, widgets.TimeInput):\n def __init__(self, attrs=None, format='%H:%M'):\n super(AdminTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):\n def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):\n super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)\n\n def render_js_init(self, id_, name, value):\n return 'initDateTimeChooser({0});'.format(json.dumps(id_))\n\n\nclass AdminTagWidget(WidgetWithScript, TagWidget):\n def render_js_init(self, id_, name, value):\n return \"initTagField({0}, {1});\".format(\n json.dumps(id_),\n json.dumps(reverse('wagtailadmin_tag_autocomplete')))\n\n\nclass AdminChooser(WidgetWithScript, widgets.Input):\n input_type = 'hidden'\n choose_one_text = _(\"Choose an item\")\n choose_another_text = _(\"Choose another item\")\n clear_choice_text = _(\"Clear choice\")\n link_to_chosen_text = _(\"Edit this item\")\n show_edit_link = True\n\n def get_instance(self, model_class, value):\n # helper method for cleanly turning 'value' into an instance object\n if value is None:\n return None\n\n try:\n return model_class.objects.get(pk=value)\n except model_class.DoesNotExist:\n return None\n\n def get_instance_and_id(self, model_class, value):\n if value is None:\n return (None, None)\n elif isinstance(value, model_class):\n return (value, value.pk)\n else:\n try:\n return (model_class.objects.get(pk=value), value)\n except model_class.DoesNotExist:\n return (None, None)\n\n def value_from_datadict(self, data, files, name):\n # treat the empty string as None\n result = super(AdminChooser, self).value_from_datadict(data, files, name)\n if result == '':\n return None\n else:\n return result\n\n def __init__(self, **kwargs):\n # allow choose_one_text / choose_another_text to be overridden per-instance\n if 'choose_one_text' in kwargs:\n self.choose_one_text = kwargs.pop('choose_one_text')\n if 'choose_another_text' in kwargs:\n self.choose_another_text = kwargs.pop('choose_another_text')\n if 'clear_choice_text' in kwargs:\n self.clear_choice_text = kwargs.pop('clear_choice_text')\n if 'link_to_chosen_text' in kwargs:\n self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')\n if 'show_edit_link' in kwargs:\n self.show_edit_link = kwargs.pop('show_edit_link')\n super(AdminChooser, self).__init__(**kwargs)\n\n\nclass AdminPageChooser(AdminChooser):\n choose_one_text = _('Choose a page')\n choose_another_text = _('Choose another page')\n link_to_chosen_text = _('Edit this page')\n\n def __init__(self, content_type=None, **kwargs):\n super(AdminPageChooser, self).__init__(**kwargs)\n self._content_type = content_type\n\n @cached_property\n def target_content_types(self):\n target_content_types = self._content_type or ContentType.objects.get_for_model(Page)\n # Make sure target_content_types is a list or tuple\n if not isinstance(target_content_types, (list, tuple)):\n target_content_types = [target_content_types]\n return target_content_types\n\n def render_html(self, name, value, attrs):\n if len(self.target_content_types) == 1:\n model_class = self.target_content_types[0].model_class()\n else:\n model_class = Page\n\n instance, value = self.get_instance_and_id(model_class, 
value)\n\n original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)\n\n return render_to_string(\"wagtailadmin/widgets/page_chooser.html\", {\n 'widget': self,\n 'original_field_html': original_field_html,\n 'attrs': attrs,\n 'value': value,\n 'page': instance,\n })\n\n def render_js_init(self, id_, name, value):\n if isinstance(value, Page):\n page = value\n else:\n # Value is an ID look up object\n if len(self.target_content_types) == 1:\n model_class = self.target_content_types[0].model_class()\n else:\n model_class = Page\n\n page = self.get_instance(model_class, value)\n\n parent = page.get_parent() if page else None\n\n return \"createPageChooser({id}, {content_type}, {parent});\".format(\n id=json.dumps(id_),\n content_type=json.dumps([\n '{app}.{model}'.format(\n app=content_type.app_label,\n model=content_type.model)\n for content_type in self.target_content_types\n ]),\n parent=json.dumps(parent.id if parent else None))\n", "path": "wagtail/wagtailadmin/widgets.py"}]}
| 3,157 | 311 |
gh_patches_debug_15667
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-1375
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uplea plugin out of date (#1348 closed but still not working)
Hi,
now pyload reports failure, logs show:
26 25.04.2015 12:19:27 WARNING Download failed: ****\* | Missing HOSTER_DOMAIN
25 25.04.2015 12:19:27 INFO Download starts: ****\*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/UpleaCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import re
4
5 from urlparse import urljoin
6
7 from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
8
9
10 class UpleaCom(XFSHoster):
11 __name__ = "UpleaCom"
12 __type__ = "hoster"
13 __version__ = "0.07"
14
15 __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
16
17 __description__ = """Uplea.com hoster plugin"""
18 __license__ = "GPLv3"
19 __authors__ = [("Redleon", None),
20 ("GammaC0de", None)]
21
22
23 NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
24 SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
25 SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]
26
27 OFFLINE_PATTERN = r'>You followed an invalid or expired link'
28 PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
29
30 LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
31
32 WAIT_PATTERN = r'timeText: ?([\d.]+),'
33 STEP_PATTERN = r'<a href="(/step/.+)">'
34
35
36 def setup(self):
37 self.multiDL = False
38 self.chunkLimit = 1
39 self.resumeDownload = True
40
41
42 def handleFree(self, pyfile):
43 m = re.search(self.STEP_PATTERN, self.html)
44 if m is None:
45 self.error(_("STEP_PATTERN not found"))
46
47 self.html = self.load(urljoin("http://uplea.com/", m.group(1)))
48
49 m = re.search(self.WAIT_PATTERN, self.html)
50 if m:
51 self.logDebug(_("Waiting %s seconds") % m.group(1))
52 self.wait(m.group(1), True)
53 self.retry()
54
55 m = re.search(self.PREMIUM_PATTERN, self.html)
56 if m:
57 self.error(_("This URL requires a premium account"))
58
59 m = re.search(self.LINK_PATTERN, self.html)
60 if m is None:
61 self.error(_("LINK_PATTERN not found"))
62
63 self.link = m.group(1)
64 self.wait(15)
65
66
67 getInfo = create_getInfo(UpleaCom)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py
--- a/module/plugins/hoster/UpleaCom.py
+++ b/module/plugins/hoster/UpleaCom.py
@@ -10,7 +10,7 @@
class UpleaCom(XFSHoster):
__name__ = "UpleaCom"
__type__ = "hoster"
- __version__ = "0.07"
+ __version__ = "0.08"
__pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
@@ -28,6 +28,7 @@
PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
+ HOSTER_DOMAIN = "uplea.com"
WAIT_PATTERN = r'timeText: ?([\d.]+),'
STEP_PATTERN = r'<a href="(/step/.+)">'
|
{"golden_diff": "diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py\n--- a/module/plugins/hoster/UpleaCom.py\n+++ b/module/plugins/hoster/UpleaCom.py\n@@ -10,7 +10,7 @@\n class UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.07\"\n+ __version__ = \"0.08\"\n \n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n \n@@ -28,6 +28,7 @@\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n \n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n+ HOSTER_DOMAIN = \"uplea.com\"\n \n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n", "issue": "Uplea plugin out of date (#1348 closed but still not working)\nHi,\nnow pyload reports failure, logs show:\n26 25.04.2015 12:19:27 WARNING Download failed: ****\\* | Missing HOSTER_DOMAIN\n25 25.04.2015 12:19:27 INFO Download starts: ****\\* \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.07\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None),\n (\"GammaC0de\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n\n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n\n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.PREMIUM_PATTERN, self.html)\n if m:\n self.error(_(\"This URL requires a premium account\"))\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.08\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None),\n (\"GammaC0de\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n PREMIUM_PATTERN = r'You need to have a 
Premium subscription to download this file'\n\n LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n HOSTER_DOMAIN = \"uplea.com\"\n\n WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.PREMIUM_PATTERN, self.html)\n if m:\n self.error(_(\"This URL requires a premium account\"))\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}]}
| 1,038 | 244 |
gh_patches_debug_18200
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1603
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Chile (CL-SING): total generation stuck at fixed value
The total generation of the CL-SING zone is stuck at 1 939 MW. The renewable part varies, but the "unknown" category always fills the gap. The issue is on the data provider's side.


Chile (CL-SING): total generation stuck at fixed value
The total generation of the CL-SING zone is stuck at 1 939 MW. The renewable part varies, but the "unknown" category always fills the gap. The issue is on the data provider's side.


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/CL_SING.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Parser for the Chile-SING region.
4 # There are plans to unify the Chilean grid.
5 # https://www.engerati.com/article/chile%E2%80%99s-grid-be-unified-new-transmission-highway
6
7 from __future__ import print_function
8 import arrow
9 from bs4 import BeautifulSoup
10 from collections import defaultdict
11 import datetime
12 import json
13 from pytz import timezone
14 import pandas as pd
15 import re
16 import requests
17
18 url = 'https://sger.coordinadorelectrico.cl/Charts/AmChartCurvaCompuesta?showinfo=True'
19
20 plant_map = {
21 u"Generacion_Neta": "unknown",
22 u"Real Andes Solar": "solar",
23 u"Real Bolero": "solar",
24 u"Real Cerro Dominador PV": "solar",
25 u"Real Finis Terrae": "solar",
26 u"Real La Huayca 2": "solar",
27 u"Real Mar\xeda Elena FV": "solar",
28 u"Real PAS2": "solar",
29 u"Real PAS3": "solar",
30 u"Real Pampa Camarones": "solar",
31 u"Real Parque Eolico Sierra Gorda Este": "wind",
32 u"Real Puerto Seco Solar": "solar",
33 u"Real Solar El Aguila 1": "solar",
34 u"Real Solar Jama": "solar",
35 u"Real Solar Jama 2": "solar",
36 u"Real Uribe": "solar",
37 u"Real Valle de los Vientos": "wind",
38 u"date": "datetime"
39 }
40
41 generation_mapping = {'Gas Natural': 'gas',
42                       'Eólico': 'wind',
43                       'Otro (Compensación Activa)': 'unknown',
44 'Solar': 'solar',
45                       'Cogeneración': 'biomass',
46                       'Geotérmica': 'geothermal',
47 'Hidro': 'hydro'}
48
49
50 def get_old_data(lookup_time, session=None):
51 """Fetches data for past days, returns a list of tuples."""
52
53 s=session or requests.Session()
54 data_url = "http://cdec2.cdec-sing.cl/pls/portal/cdec.pck_oper_real_2_pub.qry_gen_real_2?p_fecha_ini={0}&p_fecha_fin={0}&p_informe=E".format(lookup_time)
55
56 data_req = s.get(data_url)
57 df = pd.read_html(data_req.text, decimal=',', thousands = '', header=0, index_col=0)
58 df = df[-2]
59
60 if df.shape != (14, 26):
61 raise AttributeError('Format for Cl-SING historical data has changed, check for new tables or generation types.')
62
63 data = []
64 for col in df:
65 if col not in ['25', 'Total']:
66 hour = int(col)
67 production = df[col].to_dict()
68             production['coal'] = production['Carbón + Petcoke'] + production['Carbón'] + production['Petcoke']
69 production['oil'] = production['Diesel + Fuel Oil'] + production['Diesel'] + production['Fuel Oil Nro. 6']
70
71             keys_to_remove = ['Carbón + Petcoke', 'Carbón', 'Petcoke', 'Diesel + Fuel Oil', 'Diesel', 'Fuel Oil Nro. 6', 'Total']
72
73 for k in keys_to_remove:
74 production.pop(k)
75
76 production = {generation_mapping.get(k,k):v for k,v in production.items()}
77
78 try:
79 dt = arrow.get(lookup_time).replace(hour=hour, tzinfo='Chile/Continental').datetime
80 except ValueError:
81 # Midnight is defined as 24 not 0 at end of day.
82 dt = arrow.get(lookup_time).shift(days=+1)
83 dt = dt.replace(tzinfo='Chile/Continental').datetime
84
85 data.append((dt, production))
86
87 return data
88
89
90 def get_data(session=None):
91 """
92 Makes a GET request to the data url. Parses the returned page to find the
93 data which is contained in a javascript variable.
94 Returns a list of dictionaries.
95 """
96
97 s = session or requests.Session()
98 datareq = s.get(url)
99 soup = BeautifulSoup(datareq.text, 'html.parser')
100 chartdata = soup.find('script', type="text/javascript").text
101
102 # regex that finds js var called chartData, list can be variable length.
103 pattern = r'chartData = \[(.*)\]'
104 match = re.search(pattern, chartdata)
105 rawdata = match.group(0)
106
107 # Cut down to just the list.
108 start = rawdata.index('[')
109 sliced = rawdata[start:]
110 loaded_data = json.loads(sliced)
111
112 return loaded_data
113
114
115 def convert_time_str(ts):
116 """Takes a unicode time string and converts into an aware datetime object."""
117
118 dt_naive = datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M')
119 localtz = timezone('Chile/Continental')
120 dt_aware = localtz.localize(dt_naive)
121
122 return dt_aware
123
124
125 def data_processer(data, logger):
126 """
127 Takes raw production data and converts it into a usable form.
128 Removes unneeded keys and sums generation types.
129 Returns a list.
130 """
131
132 clean_data = []
133 for datapoint in data:
134 bad_keys = (u'Total_ERNC', u'Generacion Total')
135 for bad in bad_keys:
136 datapoint.pop(bad, None)
137
138 for key in datapoint.keys():
139 if key not in plant_map.keys():
140 logger.warning('{} is missing from the CL_SING plant mapping.'.format(key))
141
142 mapped_plants = [(plant_map.get(plant, 'unknown'), val) for plant, val
143 in datapoint.items()]
144 datapoint = defaultdict(lambda: 0.0)
145
146 # Sum values for duplicate keys.
147 for key, val in mapped_plants:
148 try:
149 datapoint[key] += val
150 except TypeError:
151 # datetime key is a string!
152 datapoint[key] = val
153
154 dt = convert_time_str(datapoint['datetime'])
155 datapoint.pop('datetime')
156 clean_data.append((dt, dict(datapoint)))
157
158 return clean_data
159
160
161 def fetch_production(zone_key='CL-SING', session=None, target_datetime=None, logger=None):
162 """
163 Requests the last known production mix (in MW) of a given country
164 Arguments:
165 zone_key (optional) -- used in case a parser is able to fetch multiple countries
166 Return:
167 A dictionary in the form:
168 {
169 'zoneKey': 'FR',
170 'datetime': '2017-01-01T00:00:00Z',
171 'production': {
172 'biomass': 0.0,
173 'coal': 0.0,
174 'gas': 0.0,
175 'hydro': 0.0,
176 'nuclear': null,
177 'oil': 0.0,
178 'solar': 0.0,
179 'wind': 0.0,
180 'geothermal': 0.0,
181 'unknown': 0.0
182 },
183 'storage': {
184 'hydro': -10.0,
185 },
186 'source': 'mysource.com'
187 }
188 """
189 if target_datetime:
190 lookup_time = arrow.get(target_datetime).floor('day').format('DD-MM-YYYY')
191 dp = get_old_data(lookup_time, session)
192 else:
193 gd = get_data(session)
194 dp = data_processer(gd, logger)
195
196 production_mix_by_hour = []
197 for point in dp:
198 production_mix = {
199 'zoneKey': zone_key,
200 'datetime': point[0],
201 'production': point[1],
202 'source': 'sger.coordinadorelectrico.cl'
203 }
204 production_mix_by_hour.append(production_mix)
205
206 return production_mix_by_hour
207
208
209 if __name__ == '__main__':
210 """Main method, never used by the Electricity Map backend, but handy for testing."""
211
212 print('fetch_production() ->')
213 print(fetch_production())
214 print('fetch_production(target_datetime=01/01/2018)')
215 print(fetch_production(target_datetime='01/01/2018'))
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/CL_SING.py b/parsers/CL_SING.py
--- a/parsers/CL_SING.py
+++ b/parsers/CL_SING.py
@@ -7,7 +7,7 @@
from __future__ import print_function
import arrow
from bs4 import BeautifulSoup
-from collections import defaultdict
+from collections import defaultdict, Counter
import datetime
import json
from pytz import timezone
@@ -203,6 +203,16 @@
}
production_mix_by_hour.append(production_mix)
+ # if production is the same for at least 3 in the last 48 hours, it's
+ # considered constant. Using round because of floating-point precision.
+ c = Counter([round(sum(v for _, v in e['production'].items()), 3)
+ for e in production_mix_by_hour])
+ most_common = c.most_common(1)[0]
+ if most_common[1] > 3:
+ raise ValueError(
+ 'Detected constant prod in CL-SING. Value {} occured {} times in '
+ 'the last 48 hours.'.format(most_common[0], most_common[1]))
+
return production_mix_by_hour
|
{"golden_diff": "diff --git a/parsers/CL_SING.py b/parsers/CL_SING.py\n--- a/parsers/CL_SING.py\n+++ b/parsers/CL_SING.py\n@@ -7,7 +7,7 @@\n from __future__ import print_function\n import arrow\n from bs4 import BeautifulSoup\n-from collections import defaultdict\n+from collections import defaultdict, Counter\n import datetime\n import json\n from pytz import timezone\n@@ -203,6 +203,16 @@\n }\n production_mix_by_hour.append(production_mix)\n \n+ # if production is the same for at least 3 in the last 48 hours, it's\n+ # considered constant. Using round because of floating-point precision.\n+ c = Counter([round(sum(v for _, v in e['production'].items()), 3)\n+ for e in production_mix_by_hour])\n+ most_common = c.most_common(1)[0]\n+ if most_common[1] > 3:\n+ raise ValueError(\n+ 'Detected constant prod in CL-SING. Value {} occured {} times in '\n+ 'the last 48 hours.'.format(most_common[0], most_common[1]))\n+\n return production_mix_by_hour\n", "issue": "Chile (CL-SING): total generation stuck at fixed value\nThe total generation of the CL-SING zone is stuck at 1 939 MW. The renewable part varies, but the \"unknown\" category always fills the gap. The issue is on the data provider's side.\r\n\r\n\r\n\r\n\nChile (CL-SING): total generation stuck at fixed value\nThe total generation of the CL-SING zone is stuck at 1 939 MW. The renewable part varies, but the \"unknown\" category always fills the gap. The issue is on the data provider's side.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Parser for the Chile-SING region.\n# There are plans to unify the Chilean grid.\n# https://www.engerati.com/article/chile%E2%80%99s-grid-be-unified-new-transmission-highway\n\nfrom __future__ import print_function\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nimport datetime\nimport json\nfrom pytz import timezone\nimport pandas as pd\nimport re\nimport requests\n\nurl = 'https://sger.coordinadorelectrico.cl/Charts/AmChartCurvaCompuesta?showinfo=True'\n\nplant_map = {\n u\"Generacion_Neta\": \"unknown\",\n u\"Real Andes Solar\": \"solar\",\n u\"Real Bolero\": \"solar\",\n u\"Real Cerro Dominador PV\": \"solar\",\n u\"Real Finis Terrae\": \"solar\",\n u\"Real La Huayca 2\": \"solar\",\n u\"Real Mar\\xeda Elena FV\": \"solar\",\n u\"Real PAS2\": \"solar\",\n u\"Real PAS3\": \"solar\",\n u\"Real Pampa Camarones\": \"solar\",\n u\"Real Parque Eolico Sierra Gorda Este\": \"wind\",\n u\"Real Puerto Seco Solar\": \"solar\",\n u\"Real Solar El Aguila 1\": \"solar\",\n u\"Real Solar Jama\": \"solar\",\n u\"Real Solar Jama 2\": \"solar\",\n u\"Real Uribe\": \"solar\",\n u\"Real Valle de los Vientos\": \"wind\",\n u\"date\": \"datetime\"\n }\n\ngeneration_mapping = {'Gas Natural': 'gas',\n 'E\u00f3lico': 'wind',\n 'Otro (Compensaci\u00f3n Activa)': 'unknown',\n 'Solar': 'solar',\n 'Cogeneraci\u00f3n': 'biomass',\n 'Geot\u00e9rmica': 'geothermal',\n 'Hidro': 'hydro'}\n\n\ndef get_old_data(lookup_time, session=None):\n \"\"\"Fetches data for past days, returns a list of tuples.\"\"\"\n\n s=session or requests.Session()\n data_url = \"http://cdec2.cdec-sing.cl/pls/portal/cdec.pck_oper_real_2_pub.qry_gen_real_2?p_fecha_ini={0}&p_fecha_fin={0}&p_informe=E\".format(lookup_time)\n\n data_req = s.get(data_url)\n df = pd.read_html(data_req.text, decimal=',', thousands = '', header=0, index_col=0)\n df = df[-2]\n\n if df.shape != (14, 26):\n raise AttributeError('Format for Cl-SING historical data has changed, check for new tables or generation types.')\n\n data 
= []\n for col in df:\n if col not in ['25', 'Total']:\n hour = int(col)\n production = df[col].to_dict()\n production['coal'] = production['Carb\u00f3n + Petcoke'] + production['Carb\u00f3n'] + production['Petcoke']\n production['oil'] = production['Diesel + Fuel Oil'] + production['Diesel'] + production['Fuel Oil Nro. 6']\n\n keys_to_remove = ['Carb\u00f3n + Petcoke', 'Carb\u00f3n', 'Petcoke', 'Diesel + Fuel Oil', 'Diesel', 'Fuel Oil Nro. 6', 'Total']\n\n for k in keys_to_remove:\n production.pop(k)\n\n production = {generation_mapping.get(k,k):v for k,v in production.items()}\n\n try:\n dt = arrow.get(lookup_time).replace(hour=hour, tzinfo='Chile/Continental').datetime\n except ValueError:\n # Midnight is defined as 24 not 0 at end of day.\n dt = arrow.get(lookup_time).shift(days=+1)\n dt = dt.replace(tzinfo='Chile/Continental').datetime\n\n data.append((dt, production))\n\n return data\n\n\ndef get_data(session=None):\n \"\"\"\n Makes a GET request to the data url. Parses the returned page to find the\n data which is contained in a javascript variable.\n Returns a list of dictionaries.\n \"\"\"\n\n s = session or requests.Session()\n datareq = s.get(url)\n soup = BeautifulSoup(datareq.text, 'html.parser')\n chartdata = soup.find('script', type=\"text/javascript\").text\n\n # regex that finds js var called chartData, list can be variable length.\n pattern = r'chartData = \\[(.*)\\]'\n match = re.search(pattern, chartdata)\n rawdata = match.group(0)\n\n # Cut down to just the list.\n start = rawdata.index('[')\n sliced = rawdata[start:]\n loaded_data = json.loads(sliced)\n\n return loaded_data\n\n\ndef convert_time_str(ts):\n \"\"\"Takes a unicode time string and converts into an aware datetime object.\"\"\"\n\n dt_naive = datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M')\n localtz = timezone('Chile/Continental')\n dt_aware = localtz.localize(dt_naive)\n\n return dt_aware\n\n\ndef data_processer(data, logger):\n \"\"\"\n Takes raw production data and converts it into a usable form.\n Removes unneeded keys and sums generation types.\n Returns a list.\n \"\"\"\n\n clean_data = []\n for datapoint in data:\n bad_keys = (u'Total_ERNC', u'Generacion Total')\n for bad in bad_keys:\n datapoint.pop(bad, None)\n\n for key in datapoint.keys():\n if key not in plant_map.keys():\n logger.warning('{} is missing from the CL_SING plant mapping.'.format(key))\n\n mapped_plants = [(plant_map.get(plant, 'unknown'), val) for plant, val\n in datapoint.items()]\n datapoint = defaultdict(lambda: 0.0)\n\n # Sum values for duplicate keys.\n for key, val in mapped_plants:\n try:\n datapoint[key] += val\n except TypeError:\n # datetime key is a string!\n datapoint[key] = val\n\n dt = convert_time_str(datapoint['datetime'])\n datapoint.pop('datetime')\n clean_data.append((dt, dict(datapoint)))\n\n return clean_data\n\n\ndef fetch_production(zone_key='CL-SING', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n lookup_time = 
arrow.get(target_datetime).floor('day').format('DD-MM-YYYY')\n dp = get_old_data(lookup_time, session)\n else:\n gd = get_data(session)\n dp = data_processer(gd, logger)\n\n production_mix_by_hour = []\n for point in dp:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': point[0],\n 'production': point[1],\n 'source': 'sger.coordinadorelectrico.cl'\n }\n production_mix_by_hour.append(production_mix)\n\n return production_mix_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_production(target_datetime=01/01/2018)')\n print(fetch_production(target_datetime='01/01/2018'))\n", "path": "parsers/CL_SING.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Parser for the Chile-SING region.\n# There are plans to unify the Chilean grid.\n# https://www.engerati.com/article/chile%E2%80%99s-grid-be-unified-new-transmission-highway\n\nfrom __future__ import print_function\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict, Counter\nimport datetime\nimport json\nfrom pytz import timezone\nimport pandas as pd\nimport re\nimport requests\n\nurl = 'https://sger.coordinadorelectrico.cl/Charts/AmChartCurvaCompuesta?showinfo=True'\n\nplant_map = {\n u\"Generacion_Neta\": \"unknown\",\n u\"Real Andes Solar\": \"solar\",\n u\"Real Bolero\": \"solar\",\n u\"Real Cerro Dominador PV\": \"solar\",\n u\"Real Finis Terrae\": \"solar\",\n u\"Real La Huayca 2\": \"solar\",\n u\"Real Mar\\xeda Elena FV\": \"solar\",\n u\"Real PAS2\": \"solar\",\n u\"Real PAS3\": \"solar\",\n u\"Real Pampa Camarones\": \"solar\",\n u\"Real Parque Eolico Sierra Gorda Este\": \"wind\",\n u\"Real Puerto Seco Solar\": \"solar\",\n u\"Real Solar El Aguila 1\": \"solar\",\n u\"Real Solar Jama\": \"solar\",\n u\"Real Solar Jama 2\": \"solar\",\n u\"Real Uribe\": \"solar\",\n u\"Real Valle de los Vientos\": \"wind\",\n u\"date\": \"datetime\"\n }\n\ngeneration_mapping = {'Gas Natural': 'gas',\n 'E\u00f3lico': 'wind',\n 'Otro (Compensaci\u00f3n Activa)': 'unknown',\n 'Solar': 'solar',\n 'Cogeneraci\u00f3n': 'biomass',\n 'Geot\u00e9rmica': 'geothermal',\n 'Hidro': 'hydro'}\n\n\ndef get_old_data(lookup_time, session=None):\n \"\"\"Fetches data for past days, returns a list of tuples.\"\"\"\n\n s=session or requests.Session()\n data_url = \"http://cdec2.cdec-sing.cl/pls/portal/cdec.pck_oper_real_2_pub.qry_gen_real_2?p_fecha_ini={0}&p_fecha_fin={0}&p_informe=E\".format(lookup_time)\n\n data_req = s.get(data_url)\n df = pd.read_html(data_req.text, decimal=',', thousands = '', header=0, index_col=0)\n df = df[-2]\n\n if df.shape != (14, 26):\n raise AttributeError('Format for Cl-SING historical data has changed, check for new tables or generation types.')\n\n data = []\n for col in df:\n if col not in ['25', 'Total']:\n hour = int(col)\n production = df[col].to_dict()\n production['coal'] = production['Carb\u00f3n + Petcoke'] + production['Carb\u00f3n'] + production['Petcoke']\n production['oil'] = production['Diesel + Fuel Oil'] + production['Diesel'] + production['Fuel Oil Nro. 6']\n\n keys_to_remove = ['Carb\u00f3n + Petcoke', 'Carb\u00f3n', 'Petcoke', 'Diesel + Fuel Oil', 'Diesel', 'Fuel Oil Nro. 
6', 'Total']\n\n for k in keys_to_remove:\n production.pop(k)\n\n production = {generation_mapping.get(k,k):v for k,v in production.items()}\n\n try:\n dt = arrow.get(lookup_time).replace(hour=hour, tzinfo='Chile/Continental').datetime\n except ValueError:\n # Midnight is defined as 24 not 0 at end of day.\n dt = arrow.get(lookup_time).shift(days=+1)\n dt = dt.replace(tzinfo='Chile/Continental').datetime\n\n data.append((dt, production))\n\n return data\n\n\ndef get_data(session=None):\n \"\"\"\n Makes a GET request to the data url. Parses the returned page to find the\n data which is contained in a javascript variable.\n Returns a list of dictionaries.\n \"\"\"\n\n s = session or requests.Session()\n datareq = s.get(url)\n soup = BeautifulSoup(datareq.text, 'html.parser')\n chartdata = soup.find('script', type=\"text/javascript\").text\n\n # regex that finds js var called chartData, list can be variable length.\n pattern = r'chartData = \\[(.*)\\]'\n match = re.search(pattern, chartdata)\n rawdata = match.group(0)\n\n # Cut down to just the list.\n start = rawdata.index('[')\n sliced = rawdata[start:]\n loaded_data = json.loads(sliced)\n\n return loaded_data\n\n\ndef convert_time_str(ts):\n \"\"\"Takes a unicode time string and converts into an aware datetime object.\"\"\"\n\n dt_naive = datetime.datetime.strptime(ts, '%Y-%m-%d %H:%M')\n localtz = timezone('Chile/Continental')\n dt_aware = localtz.localize(dt_naive)\n\n return dt_aware\n\n\ndef data_processer(data, logger):\n \"\"\"\n Takes raw production data and converts it into a usable form.\n Removes unneeded keys and sums generation types.\n Returns a list.\n \"\"\"\n\n clean_data = []\n for datapoint in data:\n bad_keys = (u'Total_ERNC', u'Generacion Total')\n for bad in bad_keys:\n datapoint.pop(bad, None)\n\n for key in datapoint.keys():\n if key not in plant_map.keys():\n logger.warning('{} is missing from the CL_SING plant mapping.'.format(key))\n\n mapped_plants = [(plant_map.get(plant, 'unknown'), val) for plant, val\n in datapoint.items()]\n datapoint = defaultdict(lambda: 0.0)\n\n # Sum values for duplicate keys.\n for key, val in mapped_plants:\n try:\n datapoint[key] += val\n except TypeError:\n # datetime key is a string!\n datapoint[key] = val\n\n dt = convert_time_str(datapoint['datetime'])\n datapoint.pop('datetime')\n clean_data.append((dt, dict(datapoint)))\n\n return clean_data\n\n\ndef fetch_production(zone_key='CL-SING', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n lookup_time = arrow.get(target_datetime).floor('day').format('DD-MM-YYYY')\n dp = get_old_data(lookup_time, session)\n else:\n gd = get_data(session)\n dp = data_processer(gd, logger)\n\n production_mix_by_hour = []\n for point in dp:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': point[0],\n 'production': point[1],\n 'source': 'sger.coordinadorelectrico.cl'\n }\n production_mix_by_hour.append(production_mix)\n\n # if production is the same for at least 3 in the last 48 hours, it's\n # 
considered constant. Using round because of floating-point precision.\n c = Counter([round(sum(v for _, v in e['production'].items()), 3)\n for e in production_mix_by_hour])\n most_common = c.most_common(1)[0]\n if most_common[1] > 3:\n raise ValueError(\n 'Detected constant prod in CL-SING. Value {} occured {} times in '\n 'the last 48 hours.'.format(most_common[0], most_common[1]))\n\n return production_mix_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_production(target_datetime=01/01/2018)')\n print(fetch_production(target_datetime='01/01/2018'))\n", "path": "parsers/CL_SING.py"}]}
| 3,029 | 270 |
gh_patches_debug_16040
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-1735
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flask support doesn't call store_request_headers
Flask integration doesn't store request/response headers on the span. Other integrations such as Falcon do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/flask/middleware.py`
Content:
```
1 from ... import compat
2 from ...ext import SpanTypes, http, errors
3 from ...internal.logger import get_logger
4 from ...propagation.http import HTTPPropagator
5 from ...utils.deprecation import deprecated
6
7 import flask.templating
8 from flask import g, request, signals
9
10
11 log = get_logger(__name__)
12
13
14 SPAN_NAME = 'flask.request'
15
16
17 class TraceMiddleware(object):
18
19 @deprecated(message='Use patching instead (see the docs).', version='1.0.0')
20 def __init__(self, app, tracer, service='flask', use_signals=True, distributed_tracing=False):
21 self.app = app
22 log.debug('flask: initializing trace middleware')
23
24 # Attach settings to the inner application middleware. This is required if double
25 # instrumentation happens (i.e. `ddtrace-run` with `TraceMiddleware`). In that
26 # case, `ddtrace-run` instruments the application, but then users code is unable
27 # to update settings such as `distributed_tracing` flag. This step can be removed
28 # when the `Config` object is used
29 self.app._tracer = tracer
30 self.app._service = service
31 self.app._use_distributed_tracing = distributed_tracing
32 self.use_signals = use_signals
33
34 # safe-guard to avoid double instrumentation
35 if getattr(app, '__dd_instrumentation', False):
36 return
37 setattr(app, '__dd_instrumentation', True)
38
39 # Install hooks which time requests.
40 self.app.before_request(self._before_request)
41 self.app.after_request(self._after_request)
42 self.app.teardown_request(self._teardown_request)
43
44 # Add exception handling signals. This will annotate exceptions that
45 # are caught and handled in custom user code.
46 # See https://github.com/DataDog/dd-trace-py/issues/390
47 if use_signals and not signals.signals_available:
48 log.debug(_blinker_not_installed_msg)
49 self.use_signals = use_signals and signals.signals_available
50 timing_signals = {
51 'got_request_exception': self._request_exception,
52 }
53 self._receivers = []
54 if self.use_signals and _signals_exist(timing_signals):
55 self._connect(timing_signals)
56
57 _patch_render(tracer)
58
59 def _connect(self, signal_to_handler):
60 connected = True
61 for name, handler in signal_to_handler.items():
62 s = getattr(signals, name, None)
63 if not s:
64 connected = False
65 log.warning('trying to instrument missing signal %s', name)
66 continue
67 # we should connect to the signal without using weak references
68 # otherwise they will be garbage collected and our handlers
69 # will be disconnected after the first call; for more details check:
70 # https://github.com/jek/blinker/blob/207446f2d97/blinker/base.py#L106-L108
71 s.connect(handler, sender=self.app, weak=False)
72 self._receivers.append(handler)
73 return connected
74
75 def _before_request(self):
76 """ Starts tracing the current request and stores it in the global
77 request object.
78 """
79 self._start_span()
80
81 def _after_request(self, response):
82 """ Runs after the server can process a response. """
83 try:
84 self._process_response(response)
85 except Exception:
86 log.debug('flask: error tracing response', exc_info=True)
87 return response
88
89 def _teardown_request(self, exception):
90 """ Runs at the end of a request. If there's an unhandled exception, it
91 will be passed in.
92 """
93 # when we teardown the span, ensure we have a clean slate.
94 span = getattr(g, 'flask_datadog_span', None)
95 setattr(g, 'flask_datadog_span', None)
96 if not span:
97 return
98
99 try:
100 self._finish_span(span, exception=exception)
101 except Exception:
102 log.debug('flask: error finishing span', exc_info=True)
103
104 def _start_span(self):
105 if self.app._use_distributed_tracing:
106 propagator = HTTPPropagator()
107 context = propagator.extract(request.headers)
108 # Only need to active the new context if something was propagated
109 if context.trace_id:
110 self.app._tracer.context_provider.activate(context)
111 try:
112 g.flask_datadog_span = self.app._tracer.trace(
113 SPAN_NAME,
114 service=self.app._service,
115 span_type=SpanTypes.WEB,
116 )
117 except Exception:
118 log.debug('flask: error tracing request', exc_info=True)
119
120 def _process_response(self, response):
121 span = getattr(g, 'flask_datadog_span', None)
122 if not (span and span.sampled):
123 return
124
125 code = response.status_code if response else ''
126 span.set_tag(http.STATUS_CODE, code)
127
128 def _request_exception(self, *args, **kwargs):
129 exception = kwargs.get('exception', None)
130 span = getattr(g, 'flask_datadog_span', None)
131 if span and exception:
132 _set_error_on_span(span, exception)
133
134 def _finish_span(self, span, exception=None):
135 if not span or not span.sampled:
136 return
137
138 code = span.get_tag(http.STATUS_CODE) or 0
139 try:
140 code = int(code)
141 except Exception:
142 code = 0
143
144 if exception:
145 # if the request has already had a code set, don't override it.
146 code = code or 500
147 _set_error_on_span(span, exception)
148
149 # the endpoint that matched the request is None if an exception
150 # happened so we fallback to a common resource
151 span.error = 0 if code < 500 else 1
152
153 # the request isn't guaranteed to exist here, so only use it carefully.
154 method = ''
155 endpoint = ''
156 url = ''
157 if request:
158 method = request.method
159 endpoint = request.endpoint or code
160 url = request.base_url or ''
161
162 # Let users specify their own resource in middleware if they so desire.
163 # See case https://github.com/DataDog/dd-trace-py/issues/353
164 if span.resource == SPAN_NAME:
165 resource = endpoint or code
166 span.resource = compat.to_unicode(resource).lower()
167
168 span.set_tag(http.URL, compat.to_unicode(url))
169 span.set_tag(http.STATUS_CODE, code)
170 span.set_tag(http.METHOD, method)
171 span.finish()
172
173
174 def _set_error_on_span(span, exception):
175 # The 3 next lines might not be strictly required, since `set_traceback`
176 # also get the exception from the sys.exc_info (and fill the error meta).
177 # Since we aren't sure it always work/for insuring no BC break, keep
178 # these lines which get overridden anyway.
179 span.set_tag(errors.ERROR_TYPE, type(exception))
180 span.set_tag(errors.ERROR_MSG, exception)
181 # The provided `exception` object doesn't have a stack trace attached,
182 # so attach the stack trace with `set_traceback`.
183 span.set_traceback()
184
185
186 def _patch_render(tracer):
187 """ patch flask's render template methods with the given tracer. """
188 # fall back to patching global method
189 _render = flask.templating._render
190
191 # If the method has already been patched and we're patching again then
192 # we have to patch again with the new tracer reference.
193 if hasattr(_render, "__dd_orig"):
194 _render = getattr(_render, "__dd_orig")
195
196 def _traced_render(template, context, app):
197 with tracer.trace('flask.template', span_type=SpanTypes.TEMPLATE) as span:
198 span.set_tag('flask.template', template.name or 'string')
199 return _render(template, context, app)
200
201 setattr(_traced_render, "__dd_orig", _render)
202 flask.templating._render = _traced_render
203
204
205 def _signals_exist(names):
206 """ Return true if all of the given signals exist in this version of flask.
207 """
208 return all(getattr(signals, n, False) for n in names)
209
210
211 _blinker_not_installed_msg = (
212 'please install blinker to use flask signals. '
213 'http://flask.pocoo.org/docs/0.11/signals/'
214 )
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/flask/middleware.py b/ddtrace/contrib/flask/middleware.py
--- a/ddtrace/contrib/flask/middleware.py
+++ b/ddtrace/contrib/flask/middleware.py
@@ -3,6 +3,8 @@
from ...internal.logger import get_logger
from ...propagation.http import HTTPPropagator
from ...utils.deprecation import deprecated
+from ddtrace.http import store_request_headers
+from ddtrace import config
import flask.templating
from flask import g, request, signals
@@ -155,6 +157,7 @@
endpoint = ''
url = ''
if request:
+ store_request_headers(request.headers, span, config.flask)
method = request.method
endpoint = request.endpoint or code
url = request.base_url or ''
|
{"golden_diff": "diff --git a/ddtrace/contrib/flask/middleware.py b/ddtrace/contrib/flask/middleware.py\n--- a/ddtrace/contrib/flask/middleware.py\n+++ b/ddtrace/contrib/flask/middleware.py\n@@ -3,6 +3,8 @@\n from ...internal.logger import get_logger\n from ...propagation.http import HTTPPropagator\n from ...utils.deprecation import deprecated\n+from ddtrace.http import store_request_headers\n+from ddtrace import config\n \n import flask.templating\n from flask import g, request, signals\n@@ -155,6 +157,7 @@\n endpoint = ''\n url = ''\n if request:\n+ store_request_headers(request.headers, span, config.flask)\n method = request.method\n endpoint = request.endpoint or code\n url = request.base_url or ''\n", "issue": "Flask support doesn't call store_request_headers\nFlask integration doesn't store request/response headers on the span. Other integrations such as Falcon do.\n", "before_files": [{"content": "from ... import compat\nfrom ...ext import SpanTypes, http, errors\nfrom ...internal.logger import get_logger\nfrom ...propagation.http import HTTPPropagator\nfrom ...utils.deprecation import deprecated\n\nimport flask.templating\nfrom flask import g, request, signals\n\n\nlog = get_logger(__name__)\n\n\nSPAN_NAME = 'flask.request'\n\n\nclass TraceMiddleware(object):\n\n @deprecated(message='Use patching instead (see the docs).', version='1.0.0')\n def __init__(self, app, tracer, service='flask', use_signals=True, distributed_tracing=False):\n self.app = app\n log.debug('flask: initializing trace middleware')\n\n # Attach settings to the inner application middleware. This is required if double\n # instrumentation happens (i.e. `ddtrace-run` with `TraceMiddleware`). In that\n # case, `ddtrace-run` instruments the application, but then users code is unable\n # to update settings such as `distributed_tracing` flag. This step can be removed\n # when the `Config` object is used\n self.app._tracer = tracer\n self.app._service = service\n self.app._use_distributed_tracing = distributed_tracing\n self.use_signals = use_signals\n\n # safe-guard to avoid double instrumentation\n if getattr(app, '__dd_instrumentation', False):\n return\n setattr(app, '__dd_instrumentation', True)\n\n # Install hooks which time requests.\n self.app.before_request(self._before_request)\n self.app.after_request(self._after_request)\n self.app.teardown_request(self._teardown_request)\n\n # Add exception handling signals. 
This will annotate exceptions that\n # are caught and handled in custom user code.\n # See https://github.com/DataDog/dd-trace-py/issues/390\n if use_signals and not signals.signals_available:\n log.debug(_blinker_not_installed_msg)\n self.use_signals = use_signals and signals.signals_available\n timing_signals = {\n 'got_request_exception': self._request_exception,\n }\n self._receivers = []\n if self.use_signals and _signals_exist(timing_signals):\n self._connect(timing_signals)\n\n _patch_render(tracer)\n\n def _connect(self, signal_to_handler):\n connected = True\n for name, handler in signal_to_handler.items():\n s = getattr(signals, name, None)\n if not s:\n connected = False\n log.warning('trying to instrument missing signal %s', name)\n continue\n # we should connect to the signal without using weak references\n # otherwise they will be garbage collected and our handlers\n # will be disconnected after the first call; for more details check:\n # https://github.com/jek/blinker/blob/207446f2d97/blinker/base.py#L106-L108\n s.connect(handler, sender=self.app, weak=False)\n self._receivers.append(handler)\n return connected\n\n def _before_request(self):\n \"\"\" Starts tracing the current request and stores it in the global\n request object.\n \"\"\"\n self._start_span()\n\n def _after_request(self, response):\n \"\"\" Runs after the server can process a response. \"\"\"\n try:\n self._process_response(response)\n except Exception:\n log.debug('flask: error tracing response', exc_info=True)\n return response\n\n def _teardown_request(self, exception):\n \"\"\" Runs at the end of a request. If there's an unhandled exception, it\n will be passed in.\n \"\"\"\n # when we teardown the span, ensure we have a clean slate.\n span = getattr(g, 'flask_datadog_span', None)\n setattr(g, 'flask_datadog_span', None)\n if not span:\n return\n\n try:\n self._finish_span(span, exception=exception)\n except Exception:\n log.debug('flask: error finishing span', exc_info=True)\n\n def _start_span(self):\n if self.app._use_distributed_tracing:\n propagator = HTTPPropagator()\n context = propagator.extract(request.headers)\n # Only need to active the new context if something was propagated\n if context.trace_id:\n self.app._tracer.context_provider.activate(context)\n try:\n g.flask_datadog_span = self.app._tracer.trace(\n SPAN_NAME,\n service=self.app._service,\n span_type=SpanTypes.WEB,\n )\n except Exception:\n log.debug('flask: error tracing request', exc_info=True)\n\n def _process_response(self, response):\n span = getattr(g, 'flask_datadog_span', None)\n if not (span and span.sampled):\n return\n\n code = response.status_code if response else ''\n span.set_tag(http.STATUS_CODE, code)\n\n def _request_exception(self, *args, **kwargs):\n exception = kwargs.get('exception', None)\n span = getattr(g, 'flask_datadog_span', None)\n if span and exception:\n _set_error_on_span(span, exception)\n\n def _finish_span(self, span, exception=None):\n if not span or not span.sampled:\n return\n\n code = span.get_tag(http.STATUS_CODE) or 0\n try:\n code = int(code)\n except Exception:\n code = 0\n\n if exception:\n # if the request has already had a code set, don't override it.\n code = code or 500\n _set_error_on_span(span, exception)\n\n # the endpoint that matched the request is None if an exception\n # happened so we fallback to a common resource\n span.error = 0 if code < 500 else 1\n\n # the request isn't guaranteed to exist here, so only use it carefully.\n method = ''\n endpoint = ''\n url = ''\n if 
request:\n method = request.method\n endpoint = request.endpoint or code\n url = request.base_url or ''\n\n # Let users specify their own resource in middleware if they so desire.\n # See case https://github.com/DataDog/dd-trace-py/issues/353\n if span.resource == SPAN_NAME:\n resource = endpoint or code\n span.resource = compat.to_unicode(resource).lower()\n\n span.set_tag(http.URL, compat.to_unicode(url))\n span.set_tag(http.STATUS_CODE, code)\n span.set_tag(http.METHOD, method)\n span.finish()\n\n\ndef _set_error_on_span(span, exception):\n # The 3 next lines might not be strictly required, since `set_traceback`\n # also get the exception from the sys.exc_info (and fill the error meta).\n # Since we aren't sure it always work/for insuring no BC break, keep\n # these lines which get overridden anyway.\n span.set_tag(errors.ERROR_TYPE, type(exception))\n span.set_tag(errors.ERROR_MSG, exception)\n # The provided `exception` object doesn't have a stack trace attached,\n # so attach the stack trace with `set_traceback`.\n span.set_traceback()\n\n\ndef _patch_render(tracer):\n \"\"\" patch flask's render template methods with the given tracer. \"\"\"\n # fall back to patching global method\n _render = flask.templating._render\n\n # If the method has already been patched and we're patching again then\n # we have to patch again with the new tracer reference.\n if hasattr(_render, \"__dd_orig\"):\n _render = getattr(_render, \"__dd_orig\")\n\n def _traced_render(template, context, app):\n with tracer.trace('flask.template', span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag('flask.template', template.name or 'string')\n return _render(template, context, app)\n\n setattr(_traced_render, \"__dd_orig\", _render)\n flask.templating._render = _traced_render\n\n\ndef _signals_exist(names):\n \"\"\" Return true if all of the given signals exist in this version of flask.\n \"\"\"\n return all(getattr(signals, n, False) for n in names)\n\n\n_blinker_not_installed_msg = (\n 'please install blinker to use flask signals. '\n 'http://flask.pocoo.org/docs/0.11/signals/'\n)\n", "path": "ddtrace/contrib/flask/middleware.py"}], "after_files": [{"content": "from ... import compat\nfrom ...ext import SpanTypes, http, errors\nfrom ...internal.logger import get_logger\nfrom ...propagation.http import HTTPPropagator\nfrom ...utils.deprecation import deprecated\nfrom ddtrace.http import store_request_headers\nfrom ddtrace import config\n\nimport flask.templating\nfrom flask import g, request, signals\n\n\nlog = get_logger(__name__)\n\n\nSPAN_NAME = 'flask.request'\n\n\nclass TraceMiddleware(object):\n\n @deprecated(message='Use patching instead (see the docs).', version='1.0.0')\n def __init__(self, app, tracer, service='flask', use_signals=True, distributed_tracing=False):\n self.app = app\n log.debug('flask: initializing trace middleware')\n\n # Attach settings to the inner application middleware. This is required if double\n # instrumentation happens (i.e. `ddtrace-run` with `TraceMiddleware`). In that\n # case, `ddtrace-run` instruments the application, but then users code is unable\n # to update settings such as `distributed_tracing` flag. 
This step can be removed\n # when the `Config` object is used\n self.app._tracer = tracer\n self.app._service = service\n self.app._use_distributed_tracing = distributed_tracing\n self.use_signals = use_signals\n\n # safe-guard to avoid double instrumentation\n if getattr(app, '__dd_instrumentation', False):\n return\n setattr(app, '__dd_instrumentation', True)\n\n # Install hooks which time requests.\n self.app.before_request(self._before_request)\n self.app.after_request(self._after_request)\n self.app.teardown_request(self._teardown_request)\n\n # Add exception handling signals. This will annotate exceptions that\n # are caught and handled in custom user code.\n # See https://github.com/DataDog/dd-trace-py/issues/390\n if use_signals and not signals.signals_available:\n log.debug(_blinker_not_installed_msg)\n self.use_signals = use_signals and signals.signals_available\n timing_signals = {\n 'got_request_exception': self._request_exception,\n }\n self._receivers = []\n if self.use_signals and _signals_exist(timing_signals):\n self._connect(timing_signals)\n\n _patch_render(tracer)\n\n def _connect(self, signal_to_handler):\n connected = True\n for name, handler in signal_to_handler.items():\n s = getattr(signals, name, None)\n if not s:\n connected = False\n log.warning('trying to instrument missing signal %s', name)\n continue\n # we should connect to the signal without using weak references\n # otherwise they will be garbage collected and our handlers\n # will be disconnected after the first call; for more details check:\n # https://github.com/jek/blinker/blob/207446f2d97/blinker/base.py#L106-L108\n s.connect(handler, sender=self.app, weak=False)\n self._receivers.append(handler)\n return connected\n\n def _before_request(self):\n \"\"\" Starts tracing the current request and stores it in the global\n request object.\n \"\"\"\n self._start_span()\n\n def _after_request(self, response):\n \"\"\" Runs after the server can process a response. \"\"\"\n try:\n self._process_response(response)\n except Exception:\n log.debug('flask: error tracing response', exc_info=True)\n return response\n\n def _teardown_request(self, exception):\n \"\"\" Runs at the end of a request. 
If there's an unhandled exception, it\n will be passed in.\n \"\"\"\n # when we teardown the span, ensure we have a clean slate.\n span = getattr(g, 'flask_datadog_span', None)\n setattr(g, 'flask_datadog_span', None)\n if not span:\n return\n\n try:\n self._finish_span(span, exception=exception)\n except Exception:\n log.debug('flask: error finishing span', exc_info=True)\n\n def _start_span(self):\n if self.app._use_distributed_tracing:\n propagator = HTTPPropagator()\n context = propagator.extract(request.headers)\n # Only need to active the new context if something was propagated\n if context.trace_id:\n self.app._tracer.context_provider.activate(context)\n try:\n g.flask_datadog_span = self.app._tracer.trace(\n SPAN_NAME,\n service=self.app._service,\n span_type=SpanTypes.WEB,\n )\n except Exception:\n log.debug('flask: error tracing request', exc_info=True)\n\n def _process_response(self, response):\n span = getattr(g, 'flask_datadog_span', None)\n if not (span and span.sampled):\n return\n\n code = response.status_code if response else ''\n span.set_tag(http.STATUS_CODE, code)\n\n def _request_exception(self, *args, **kwargs):\n exception = kwargs.get('exception', None)\n span = getattr(g, 'flask_datadog_span', None)\n if span and exception:\n _set_error_on_span(span, exception)\n\n def _finish_span(self, span, exception=None):\n if not span or not span.sampled:\n return\n\n code = span.get_tag(http.STATUS_CODE) or 0\n try:\n code = int(code)\n except Exception:\n code = 0\n\n if exception:\n # if the request has already had a code set, don't override it.\n code = code or 500\n _set_error_on_span(span, exception)\n\n # the endpoint that matched the request is None if an exception\n # happened so we fallback to a common resource\n span.error = 0 if code < 500 else 1\n\n # the request isn't guaranteed to exist here, so only use it carefully.\n method = ''\n endpoint = ''\n url = ''\n if request:\n store_request_headers(request.headers, span, config.flask)\n method = request.method\n endpoint = request.endpoint or code\n url = request.base_url or ''\n\n # Let users specify their own resource in middleware if they so desire.\n # See case https://github.com/DataDog/dd-trace-py/issues/353\n if span.resource == SPAN_NAME:\n resource = endpoint or code\n span.resource = compat.to_unicode(resource).lower()\n\n span.set_tag(http.URL, compat.to_unicode(url))\n span.set_tag(http.STATUS_CODE, code)\n span.set_tag(http.METHOD, method)\n span.finish()\n\n\ndef _set_error_on_span(span, exception):\n # The 3 next lines might not be strictly required, since `set_traceback`\n # also get the exception from the sys.exc_info (and fill the error meta).\n # Since we aren't sure it always work/for insuring no BC break, keep\n # these lines which get overridden anyway.\n span.set_tag(errors.ERROR_TYPE, type(exception))\n span.set_tag(errors.ERROR_MSG, exception)\n # The provided `exception` object doesn't have a stack trace attached,\n # so attach the stack trace with `set_traceback`.\n span.set_traceback()\n\n\ndef _patch_render(tracer):\n \"\"\" patch flask's render template methods with the given tracer. 
\"\"\"\n # fall back to patching global method\n _render = flask.templating._render\n\n # If the method has already been patched and we're patching again then\n # we have to patch again with the new tracer reference.\n if hasattr(_render, \"__dd_orig\"):\n _render = getattr(_render, \"__dd_orig\")\n\n def _traced_render(template, context, app):\n with tracer.trace('flask.template', span_type=SpanTypes.TEMPLATE) as span:\n span.set_tag('flask.template', template.name or 'string')\n return _render(template, context, app)\n\n setattr(_traced_render, \"__dd_orig\", _render)\n flask.templating._render = _traced_render\n\n\ndef _signals_exist(names):\n \"\"\" Return true if all of the given signals exist in this version of flask.\n \"\"\"\n return all(getattr(signals, n, False) for n in names)\n\n\n_blinker_not_installed_msg = (\n 'please install blinker to use flask signals. '\n 'http://flask.pocoo.org/docs/0.11/signals/'\n)\n", "path": "ddtrace/contrib/flask/middleware.py"}]}
| 2,650 | 179 |
gh_patches_debug_11554
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1740
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[GRPC] Handle the null span case
**Describe the bug**: ...
**To Reproduce**
1. Setup ElasticAPM in Django
2. Integrate with Google PubSub client
3. Try to publish message from your script
4. Stacktrace:
```
Exception in thread Thread-CommitBatchPublisher:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/local/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/site-packages/google/cloud/pubsub_v1/publisher/_batch/thread.py", line 274, in _commit
response = self._client._gapic_publish(
File "/usr/local/lib/python3.8/site-packages/google/cloud/pubsub_v1/publisher/client.py", line 272, in _gapic_publish
return super().publish(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/google/pubsub_v1/services/publisher/client.py", line 781, in publish
response = rpc(
File "/usr/local/lib/python3.8/site-packages/google/api_core/gapic_v1/method.py", line 113, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/google/api_core/retry.py", line 349, in retry_wrapped_func
return retry_target(
File "/usr/local/lib/python3.8/site-packages/google/api_core/retry.py", line 191, in retry_target
return target()
File "/usr/local/lib/python3.8/site-packages/google/api_core/timeout.py", line 120, in func_with_timeout
return func(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 72, in error_remapped_callable
return callable_(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/grpc/_interceptor.py", line 247, in __call__
response, ignored_call = self._with_call(request,
File "/usr/local/lib/python3.8/site-packages/grpc/_interceptor.py", line 287, in _with_call
call = self._interceptor.intercept_unary_unary(continuation,
File "/usr/local/lib/python3.8/site-packages/elasticapm/contrib/grpc/client_interceptor.py", line 97, in intercept_unary_unary
client_call_details = self.attach_traceparent(client_call_details, span)
File "/usr/local/lib/python3.8/site-packages/elasticapm/contrib/grpc/client_interceptor.py", line 195, in attach_traceparent
if not span.transaction:
AttributeError: 'NoneType' object has no attribute 'transaction'
```
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.8
- Framework and version [e.g. Django 2.1]: Django 3.8
- APM Server version: 7.15
- Agent version: 6.14.0
**Additional context**
Add any other context about the problem here.
- `requirements.txt`:
<details>
<summary>Click to expand</summary>
```
google-cloud-pubsub==2.13.11
```
</details>
**Initial digging**
Looks like the span is not being returned from the context manager, but the grpc code is not handling the `None` case.
* [context manager](https://github.com/elastic/apm-agent-python/blob/main/elasticapm/traces.py#L1078)
* [grpc code](https://github.com/elastic/apm-agent-python/blob/main/elasticapm/contrib/grpc/client_interceptor.py#L195)
I can make a patch to handle the `None` case (and just return the client call details), but not sure if that is the right call here. Any guidance would be appreciated!
--- END ISSUE ---
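For orientation, the guard the reporter proposes can be sketched as a small standalone snippet. This is only an illustration of the idea, not the agent's actual code: the function name `traced_unary_unary` and the `attach_traceparent` parameter are placeholders, and error handling is omitted. The point is simply that when `elasticapm.capture_span` yields no span, the interceptor forwards the call unchanged instead of dereferencing `span.transaction`.

```
import elasticapm

def traced_unary_unary(continuation, client_call_details, request, attach_traceparent):
    # Sketch of the interceptor body with the proposed null-span guard.
    with elasticapm.capture_span(
        client_call_details.method, span_type="external", span_subtype="grpc", leaf=True
    ) as span:
        if not span:
            # capture_span can yield None (e.g. no active transaction),
            # so skip traceparent injection and just run the RPC.
            return continuation(client_call_details, request)
        client_call_details = attach_traceparent(client_call_details, span)
        return continuation(client_call_details, request)
```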
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/grpc/client_interceptor.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2022, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from typing import Optional
32
33 import grpc
34 from grpc._interceptor import _ClientCallDetails
35
36 import elasticapm
37 from elasticapm.conf import constants
38 from elasticapm.traces import Span
39 from elasticapm.utils import default_ports
40
41
42 class _ClientInterceptor(
43 grpc.UnaryUnaryClientInterceptor,
44 # grpc.UnaryStreamClientInterceptor,
45 # grpc.StreamUnaryClientInterceptor,
46 # grpc.StreamStreamClientInterceptor,
47 ):
48 def __init__(self, host: Optional[str], port: Optional[str], secure: bool):
49 self.host: str = host
50 self.port: str = port
51 self.secure: bool = secure
52 schema = "https" if secure else "http"
53 resource = f"{schema}://{host}"
54 if port and int(port) != default_ports[schema]:
55 resource += f":{port}"
56
57 self._context = {
58 "http": {
59 "url": resource,
60 },
61 "destination": {
62 "address": host,
63 "port": port,
64 },
65 }
66
67 def intercept_unary_unary(self, continuation, client_call_details, request):
68 """Intercepts a unary-unary invocation asynchronously.
69
70 Args:
71 continuation: A function that proceeds with the invocation by
72 executing the next interceptor in chain or invoking the
73 actual RPC on the underlying Channel. It is the interceptor's
74 responsibility to call it if it decides to move the RPC forward.
75 The interceptor can use
76 `response_future = continuation(client_call_details, request)`
77 to continue with the RPC. `continuation` returns an object that is
78 both a Call for the RPC and a Future. In the event of RPC
79 completion, the return Call-Future's result value will be
80 the response message of the RPC. Should the event terminate
81 with non-OK status, the returned Call-Future's exception value
82 will be an RpcError.
83 client_call_details: A ClientCallDetails object describing the
84 outgoing RPC.
85 request: The request value for the RPC.
86
87 Returns:
88 An object that is both a Call for the RPC and a Future.
89 In the event of RPC completion, the return Call-Future's
90 result value will be the response message of the RPC.
91 Should the event terminate with non-OK status, the returned
92 Call-Future's exception value will be an RpcError.
93 """
94 with elasticapm.capture_span(
95 client_call_details.method, span_type="external", span_subtype="grpc", extra=self._context.copy(), leaf=True
96 ) as span:
97 client_call_details = self.attach_traceparent(client_call_details, span)
98 try:
99 response = continuation(client_call_details, request)
100 except grpc.RpcError:
101 span.set_failure()
102 raise
103
104 return response
105
106 # TODO: instrument other types of requests once the spec is ready
107
108 # def intercept_unary_stream(self, continuation, client_call_details,
109 # request):
110 # """Intercepts a unary-stream invocation.
111 #
112 # Args:
113 # continuation: A function that proceeds with the invocation by
114 # executing the next interceptor in chain or invoking the
115 # actual RPC on the underlying Channel. It is the interceptor's
116 # responsibility to call it if it decides to move the RPC forward.
117 # The interceptor can use
118 # `response_iterator = continuation(client_call_details, request)`
119 # to continue with the RPC. `continuation` returns an object that is
120 # both a Call for the RPC and an iterator for response values.
121 # Drawing response values from the returned Call-iterator may
122 # raise RpcError indicating termination of the RPC with non-OK
123 # status.
124 # client_call_details: A ClientCallDetails object describing the
125 # outgoing RPC.
126 # request: The request value for the RPC.
127 #
128 # Returns:
129 # An object that is both a Call for the RPC and an iterator of
130 # response values. Drawing response values from the returned
131 # Call-iterator may raise RpcError indicating termination of
132 # the RPC with non-OK status. This object *should* also fulfill the
133 # Future interface, though it may not.
134 # """
135 # response_iterator = continuation(client_call_details, request)
136 # return response_iterator
137 #
138 # def intercept_stream_unary(self, continuation, client_call_details,
139 # request_iterator):
140 # """Intercepts a stream-unary invocation asynchronously.
141 #
142 # Args:
143 # continuation: A function that proceeds with the invocation by
144 # executing the next interceptor in chain or invoking the
145 # actual RPC on the underlying Channel. It is the interceptor's
146 # responsibility to call it if it decides to move the RPC forward.
147 # The interceptor can use
148 # `response_future = continuation(client_call_details, request_iterator)`
149 # to continue with the RPC. `continuation` returns an object that is
150 # both a Call for the RPC and a Future. In the event of RPC completion,
151 # the return Call-Future's result value will be the response message
152 # of the RPC. Should the event terminate with non-OK status, the
153 # returned Call-Future's exception value will be an RpcError.
154 # client_call_details: A ClientCallDetails object describing the
155 # outgoing RPC.
156 # request_iterator: An iterator that yields request values for the RPC.
157 #
158 # Returns:
159 # An object that is both a Call for the RPC and a Future.
160 # In the event of RPC completion, the return Call-Future's
161 # result value will be the response message of the RPC.
162 # Should the event terminate with non-OK status, the returned
163 # Call-Future's exception value will be an RpcError.
164 # """
165 #
166 # def intercept_stream_stream(self, continuation, client_call_details,
167 # request_iterator):
168 # """Intercepts a stream-stream invocation.
169 #
170 # Args:
171 # continuation: A function that proceeds with the invocation by
172 # executing the next interceptor in chain or invoking the
173 # actual RPC on the underlying Channel. It is the interceptor's
174 # responsibility to call it if it decides to move the RPC forward.
175 # The interceptor can use
176 # `response_iterator = continuation(client_call_details, request_iterator)`
177 # to continue with the RPC. `continuation` returns an object that is
178 # both a Call for the RPC and an iterator for response values.
179 # Drawing response values from the returned Call-iterator may
180 # raise RpcError indicating termination of the RPC with non-OK
181 # status.
182 # client_call_details: A ClientCallDetails object describing the
183 # outgoing RPC.
184 # request_iterator: An iterator that yields request values for the RPC.
185 #
186 # Returns:
187 # An object that is both a Call for the RPC and an iterator of
188 # response values. Drawing response values from the returned
189 # Call-iterator may raise RpcError indicating termination of
190 # the RPC with non-OK status. This object *should* also fulfill the
191 # Future interface, though it may not.
192 # """
193
194 def attach_traceparent(self, client_call_details: _ClientCallDetails, span: Span):
195 if not span.transaction:
196 return client_call_details
197 meta = list(client_call_details.metadata) if client_call_details.metadata else []
198 if constants.TRACEPARENT_HEADER_NAME not in meta:
199 traceparent = span.transaction.trace_parent.copy_from(span_id=span.id)
200 meta.extend(
201 (
202 (constants.TRACEPARENT_HEADER_NAME, traceparent.to_string()),
203 (constants.TRACESTATE_HEADER_NAME, traceparent.tracestate),
204 )
205 )
206 return client_call_details._replace(metadata=meta)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/contrib/grpc/client_interceptor.py b/elasticapm/contrib/grpc/client_interceptor.py
--- a/elasticapm/contrib/grpc/client_interceptor.py
+++ b/elasticapm/contrib/grpc/client_interceptor.py
@@ -94,6 +94,8 @@
with elasticapm.capture_span(
client_call_details.method, span_type="external", span_subtype="grpc", extra=self._context.copy(), leaf=True
) as span:
+ if not span:
+ return continuation(client_call_details, request)
client_call_details = self.attach_traceparent(client_call_details, span)
try:
response = continuation(client_call_details, request)
|
{"golden_diff": "diff --git a/elasticapm/contrib/grpc/client_interceptor.py b/elasticapm/contrib/grpc/client_interceptor.py\n--- a/elasticapm/contrib/grpc/client_interceptor.py\n+++ b/elasticapm/contrib/grpc/client_interceptor.py\n@@ -94,6 +94,8 @@\n with elasticapm.capture_span(\n client_call_details.method, span_type=\"external\", span_subtype=\"grpc\", extra=self._context.copy(), leaf=True\n ) as span:\n+ if not span:\n+ return continuation(client_call_details, request)\n client_call_details = self.attach_traceparent(client_call_details, span)\n try:\n response = continuation(client_call_details, request)\n", "issue": "[GRPC] Handle the null span case\n**Describe the bug**: ...\r\n\r\n**To Reproduce**\r\n\r\n1. Setup ElasticAPM in Django\r\n2. Integrate with Google PubSub client\r\n3. Try to publish message from your script\r\n4. Stacktrace:\r\n```\r\nException in thread Thread-CommitBatchPublisher:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/threading.py\", line 932, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/local/lib/python3.8/threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/google/cloud/pubsub_v1/publisher/_batch/thread.py\", line 274, in _commit\r\n response = self._client._gapic_publish(\r\n File \"/usr/local/lib/python3.8/site-packages/google/cloud/pubsub_v1/publisher/client.py\", line 272, in _gapic_publish\r\n return super().publish(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/google/pubsub_v1/services/publisher/client.py\", line 781, in publish\r\n response = rpc(\r\n File \"/usr/local/lib/python3.8/site-packages/google/api_core/gapic_v1/method.py\", line 113, in __call__\r\n return wrapped_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/google/api_core/retry.py\", line 349, in retry_wrapped_func\r\n return retry_target(\r\n File \"/usr/local/lib/python3.8/site-packages/google/api_core/retry.py\", line 191, in retry_target\r\n return target()\r\n File \"/usr/local/lib/python3.8/site-packages/google/api_core/timeout.py\", line 120, in func_with_timeout\r\n return func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/google/api_core/grpc_helpers.py\", line 72, in error_remapped_callable\r\n return callable_(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/grpc/_interceptor.py\", line 247, in __call__\r\n response, ignored_call = self._with_call(request,\r\n File \"/usr/local/lib/python3.8/site-packages/grpc/_interceptor.py\", line 287, in _with_call\r\n call = self._interceptor.intercept_unary_unary(continuation,\r\n File \"/usr/local/lib/python3.8/site-packages/elasticapm/contrib/grpc/client_interceptor.py\", line 97, in intercept_unary_unary\r\n client_call_details = self.attach_traceparent(client_call_details, span)\r\n File \"/usr/local/lib/python3.8/site-packages/elasticapm/contrib/grpc/client_interceptor.py\", line 195, in attach_traceparent\r\n if not span.transaction:\r\nAttributeError: 'NoneType' object has no attribute 'transaction'\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.8\r\n- Framework and version [e.g. 
Django 2.1]: Django 3.8\r\n- APM Server version: 7.15\r\n- Agent version: 6.14.0\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n- `requirements.txt`:\r\n <details>\r\n <summary>Click to expand</summary>\r\n\r\n ```\r\n google-cloud-pubsub==2.13.11\r\n ```\r\n </details>\r\n\r\n**Initial digging**\r\nLooks like the span is not being returned from the context manager, but the grpc code is not handling the `None` case. \r\n* [context manager](https://github.com/elastic/apm-agent-python/blob/main/elasticapm/traces.py#L1078)\r\n* [grpc code](https://github.com/elastic/apm-agent-python/blob/main/elasticapm/contrib/grpc/client_interceptor.py#L195)\r\n\r\nI can make a patch to handle the `None` case (and just return the client call details), but not sure if that is the right call here. Any guidance would be appreciated!\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2022, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom typing import Optional\n\nimport grpc\nfrom grpc._interceptor import _ClientCallDetails\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.traces import Span\nfrom elasticapm.utils import default_ports\n\n\nclass _ClientInterceptor(\n grpc.UnaryUnaryClientInterceptor,\n # grpc.UnaryStreamClientInterceptor,\n # grpc.StreamUnaryClientInterceptor,\n # grpc.StreamStreamClientInterceptor,\n):\n def __init__(self, host: Optional[str], port: Optional[str], secure: bool):\n self.host: str = host\n self.port: str = port\n self.secure: bool = secure\n schema = \"https\" if secure else \"http\"\n resource = f\"{schema}://{host}\"\n if port and int(port) != default_ports[schema]:\n resource += f\":{port}\"\n\n self._context = {\n \"http\": {\n \"url\": resource,\n },\n \"destination\": {\n \"address\": host,\n \"port\": port,\n },\n }\n\n def intercept_unary_unary(self, continuation, client_call_details, request):\n \"\"\"Intercepts a unary-unary invocation asynchronously.\n\n Args:\n continuation: A function that proceeds with the invocation by\n executing the next interceptor in chain or invoking the\n actual RPC on the underlying Channel. It is the interceptor's\n responsibility to call it if it decides to move the RPC forward.\n The interceptor can use\n `response_future = continuation(client_call_details, request)`\n to continue with the RPC. `continuation` returns an object that is\n both a Call for the RPC and a Future. In the event of RPC\n completion, the return Call-Future's result value will be\n the response message of the RPC. Should the event terminate\n with non-OK status, the returned Call-Future's exception value\n will be an RpcError.\n client_call_details: A ClientCallDetails object describing the\n outgoing RPC.\n request: The request value for the RPC.\n\n Returns:\n An object that is both a Call for the RPC and a Future.\n In the event of RPC completion, the return Call-Future's\n result value will be the response message of the RPC.\n Should the event terminate with non-OK status, the returned\n Call-Future's exception value will be an RpcError.\n \"\"\"\n with elasticapm.capture_span(\n client_call_details.method, span_type=\"external\", span_subtype=\"grpc\", extra=self._context.copy(), leaf=True\n ) as span:\n client_call_details = self.attach_traceparent(client_call_details, span)\n try:\n response = continuation(client_call_details, request)\n except grpc.RpcError:\n span.set_failure()\n raise\n\n return response\n\n # TODO: instrument other types of requests once the spec is ready\n\n # def intercept_unary_stream(self, continuation, client_call_details,\n # request):\n # \"\"\"Intercepts a unary-stream invocation.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. 
It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_iterator = continuation(client_call_details, request)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and an iterator for response values.\n # Drawing response values from the returned Call-iterator may\n # raise RpcError indicating termination of the RPC with non-OK\n # status.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request: The request value for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and an iterator of\n # response values. Drawing response values from the returned\n # Call-iterator may raise RpcError indicating termination of\n # the RPC with non-OK status. This object *should* also fulfill the\n # Future interface, though it may not.\n # \"\"\"\n # response_iterator = continuation(client_call_details, request)\n # return response_iterator\n #\n # def intercept_stream_unary(self, continuation, client_call_details,\n # request_iterator):\n # \"\"\"Intercepts a stream-unary invocation asynchronously.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_future = continuation(client_call_details, request_iterator)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and a Future. In the event of RPC completion,\n # the return Call-Future's result value will be the response message\n # of the RPC. Should the event terminate with non-OK status, the\n # returned Call-Future's exception value will be an RpcError.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request_iterator: An iterator that yields request values for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and a Future.\n # In the event of RPC completion, the return Call-Future's\n # result value will be the response message of the RPC.\n # Should the event terminate with non-OK status, the returned\n # Call-Future's exception value will be an RpcError.\n # \"\"\"\n #\n # def intercept_stream_stream(self, continuation, client_call_details,\n # request_iterator):\n # \"\"\"Intercepts a stream-stream invocation.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_iterator = continuation(client_call_details, request_iterator)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and an iterator for response values.\n # Drawing response values from the returned Call-iterator may\n # raise RpcError indicating termination of the RPC with non-OK\n # status.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request_iterator: An iterator that yields request values for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and an iterator of\n # response values. 
Drawing response values from the returned\n # Call-iterator may raise RpcError indicating termination of\n # the RPC with non-OK status. This object *should* also fulfill the\n # Future interface, though it may not.\n # \"\"\"\n\n def attach_traceparent(self, client_call_details: _ClientCallDetails, span: Span):\n if not span.transaction:\n return client_call_details\n meta = list(client_call_details.metadata) if client_call_details.metadata else []\n if constants.TRACEPARENT_HEADER_NAME not in meta:\n traceparent = span.transaction.trace_parent.copy_from(span_id=span.id)\n meta.extend(\n (\n (constants.TRACEPARENT_HEADER_NAME, traceparent.to_string()),\n (constants.TRACESTATE_HEADER_NAME, traceparent.tracestate),\n )\n )\n return client_call_details._replace(metadata=meta)\n", "path": "elasticapm/contrib/grpc/client_interceptor.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2022, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom typing import Optional\n\nimport grpc\nfrom grpc._interceptor import _ClientCallDetails\n\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.traces import Span\nfrom elasticapm.utils import default_ports\n\n\nclass _ClientInterceptor(\n grpc.UnaryUnaryClientInterceptor,\n # grpc.UnaryStreamClientInterceptor,\n # grpc.StreamUnaryClientInterceptor,\n # grpc.StreamStreamClientInterceptor,\n):\n def __init__(self, host: Optional[str], port: Optional[str], secure: bool):\n self.host: str = host\n self.port: str = port\n self.secure: bool = secure\n schema = \"https\" if secure else \"http\"\n resource = f\"{schema}://{host}\"\n if port and int(port) != default_ports[schema]:\n resource += f\":{port}\"\n\n self._context = {\n \"http\": {\n \"url\": resource,\n },\n \"destination\": {\n \"address\": host,\n \"port\": port,\n },\n }\n\n def intercept_unary_unary(self, continuation, client_call_details, request):\n \"\"\"Intercepts a unary-unary invocation asynchronously.\n\n Args:\n continuation: A function that proceeds with the invocation by\n executing the next interceptor in chain or invoking the\n actual RPC on the underlying Channel. It is the interceptor's\n responsibility to call it if it decides to move the RPC forward.\n The interceptor can use\n `response_future = continuation(client_call_details, request)`\n to continue with the RPC. `continuation` returns an object that is\n both a Call for the RPC and a Future. In the event of RPC\n completion, the return Call-Future's result value will be\n the response message of the RPC. Should the event terminate\n with non-OK status, the returned Call-Future's exception value\n will be an RpcError.\n client_call_details: A ClientCallDetails object describing the\n outgoing RPC.\n request: The request value for the RPC.\n\n Returns:\n An object that is both a Call for the RPC and a Future.\n In the event of RPC completion, the return Call-Future's\n result value will be the response message of the RPC.\n Should the event terminate with non-OK status, the returned\n Call-Future's exception value will be an RpcError.\n \"\"\"\n with elasticapm.capture_span(\n client_call_details.method, span_type=\"external\", span_subtype=\"grpc\", extra=self._context.copy(), leaf=True\n ) as span:\n if not span:\n return continuation(client_call_details, request)\n client_call_details = self.attach_traceparent(client_call_details, span)\n try:\n response = continuation(client_call_details, request)\n except grpc.RpcError:\n span.set_failure()\n raise\n\n return response\n\n # TODO: instrument other types of requests once the spec is ready\n\n # def intercept_unary_stream(self, continuation, client_call_details,\n # request):\n # \"\"\"Intercepts a unary-stream invocation.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. 
It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_iterator = continuation(client_call_details, request)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and an iterator for response values.\n # Drawing response values from the returned Call-iterator may\n # raise RpcError indicating termination of the RPC with non-OK\n # status.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request: The request value for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and an iterator of\n # response values. Drawing response values from the returned\n # Call-iterator may raise RpcError indicating termination of\n # the RPC with non-OK status. This object *should* also fulfill the\n # Future interface, though it may not.\n # \"\"\"\n # response_iterator = continuation(client_call_details, request)\n # return response_iterator\n #\n # def intercept_stream_unary(self, continuation, client_call_details,\n # request_iterator):\n # \"\"\"Intercepts a stream-unary invocation asynchronously.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_future = continuation(client_call_details, request_iterator)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and a Future. In the event of RPC completion,\n # the return Call-Future's result value will be the response message\n # of the RPC. Should the event terminate with non-OK status, the\n # returned Call-Future's exception value will be an RpcError.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request_iterator: An iterator that yields request values for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and a Future.\n # In the event of RPC completion, the return Call-Future's\n # result value will be the response message of the RPC.\n # Should the event terminate with non-OK status, the returned\n # Call-Future's exception value will be an RpcError.\n # \"\"\"\n #\n # def intercept_stream_stream(self, continuation, client_call_details,\n # request_iterator):\n # \"\"\"Intercepts a stream-stream invocation.\n #\n # Args:\n # continuation: A function that proceeds with the invocation by\n # executing the next interceptor in chain or invoking the\n # actual RPC on the underlying Channel. It is the interceptor's\n # responsibility to call it if it decides to move the RPC forward.\n # The interceptor can use\n # `response_iterator = continuation(client_call_details, request_iterator)`\n # to continue with the RPC. `continuation` returns an object that is\n # both a Call for the RPC and an iterator for response values.\n # Drawing response values from the returned Call-iterator may\n # raise RpcError indicating termination of the RPC with non-OK\n # status.\n # client_call_details: A ClientCallDetails object describing the\n # outgoing RPC.\n # request_iterator: An iterator that yields request values for the RPC.\n #\n # Returns:\n # An object that is both a Call for the RPC and an iterator of\n # response values. 
Drawing response values from the returned\n # Call-iterator may raise RpcError indicating termination of\n # the RPC with non-OK status. This object *should* also fulfill the\n # Future interface, though it may not.\n # \"\"\"\n\n def attach_traceparent(self, client_call_details: _ClientCallDetails, span: Span):\n if not span.transaction:\n return client_call_details\n meta = list(client_call_details.metadata) if client_call_details.metadata else []\n if constants.TRACEPARENT_HEADER_NAME not in meta:\n traceparent = span.transaction.trace_parent.copy_from(span_id=span.id)\n meta.extend(\n (\n (constants.TRACEPARENT_HEADER_NAME, traceparent.to_string()),\n (constants.TRACESTATE_HEADER_NAME, traceparent.tracestate),\n )\n )\n return client_call_details._replace(metadata=meta)\n", "path": "elasticapm/contrib/grpc/client_interceptor.py"}]}
| 3,766 | 150 |
gh_patches_debug_4912
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-3945
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
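For reference, a minimal standalone snippet (arbitrary example values, independent of the repository code below) shows the two idioms the issue contrasts: the module-level `from __future__ import division`, versus coercing one operand to float with `1.*x/y`.

```
# With the module-level future import, `/` is true division even on Python 2.
from __future__ import division

x, y = 3, 4

print(x / y)       # 0.75 (would be 0 in Python 2 without the future import)
print(1. * x / y)  # 0.75 -- the coercion workaround the issue asks to avoid
print(x // y)      # 0    -- explicit floor division remains available
```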
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/cli/v012/checkpoint.py`
Content:
```
1 import os
2 import sys
3 from typing import Dict
4
5 import click
6 from ruamel.yaml import YAML
7
8 from great_expectations import DataContext
9 from great_expectations.checkpoint import Checkpoint
10 from great_expectations.cli.v012 import toolkit
11 from great_expectations.cli.v012.mark import Mark as mark
12 from great_expectations.cli.v012.util import cli_message, cli_message_list
13 from great_expectations.core.expectation_suite import ExpectationSuite
14 from great_expectations.core.usage_statistics.util import send_usage_message
15 from great_expectations.data_context.types.base import DataContextConfigDefaults
16 from great_expectations.data_context.util import file_relative_path
17 from great_expectations.exceptions import InvalidTopLevelConfigKeyError
18 from great_expectations.util import lint_code
19 from great_expectations.validation_operators.types.validation_operator_result import (
20 ValidationOperatorResult,
21 )
22
23 try:
24 from sqlalchemy.exc import SQLAlchemyError
25 except ImportError:
26 SQLAlchemyError = RuntimeError
27
28
29 try:
30 from sqlalchemy.exc import SQLAlchemyError
31 except ImportError:
32 SQLAlchemyError = RuntimeError
33
34 yaml = YAML()
35 yaml.indent(mapping=2, sequence=4, offset=2)
36
37
38 """
39 --ge-feature-maturity-info--
40
41 id: checkpoint_command_line
42 title: LegacyCheckpoint - Command Line
43 icon:
44 short_description: Run a configured legacy checkpoint from a command line.
45 description: Run a configured legacy checkpoint from a command line in a Terminal shell.
46 how_to_guide_url: https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_run_a_checkpoint_in_terminal.html
47 maturity: Experimental
48 maturity_details:
49 api_stability: Unstable (expect changes to batch request; no checkpoint store)
50 implementation_completeness: Complete
51 unit_test_coverage: Complete
52 integration_infrastructure_test_coverage: N/A
53 documentation_completeness: Complete
54 bug_risk: Low
55
56 --ge-feature-maturity-info--
57 """
58
59
60 @click.group(short_help="Checkpoint operations")
61 def checkpoint():
62 """
63 Checkpoint operations
64
65 A checkpoint is a bundle of one or more batches of data with one or more
66 Expectation Suites.
67
68 A checkpoint can be as simple as one batch of data paired with one
69 Expectation Suite.
70
71 A checkpoint can be as complex as many batches of data across different
72 datasources paired with one or more Expectation Suites each.
73 """
74 pass
75
76
77 @checkpoint.command(name="new")
78 @click.argument("checkpoint")
79 @click.argument("suite")
80 @click.option("--datasource", default=None)
81 @click.option(
82 "--directory",
83 "-d",
84 default=None,
85 help="The project's great_expectations directory.",
86 )
87 @mark.cli_as_experimental
88 def checkpoint_new(checkpoint, suite, directory, datasource):
89 """Create a new checkpoint for easy deployments. (Experimental)"""
90 suite_name = suite
91 usage_event = "cli.checkpoint.new"
92 context = toolkit.load_data_context_with_error_handling(directory)
93
94 _verify_checkpoint_does_not_exist(context, checkpoint, usage_event)
95 suite: ExpectationSuite = toolkit.load_expectation_suite(
96 context, suite_name, usage_event
97 )
98 datasource = toolkit.select_datasource(context, datasource_name=datasource)
99 if datasource is None:
100 send_usage_message(
101 data_context=context,
102 event=usage_event,
103 api_version="v2",
104 success=False,
105 )
106 sys.exit(1)
107 _, _, _, batch_kwargs = toolkit.get_batch_kwargs(context, datasource.name)
108
109 _ = context.add_checkpoint(
110 name=checkpoint,
111 **{
112 "class_name": "LegacyCheckpoint",
113 "batches": [
114 {
115 "batch_kwargs": dict(batch_kwargs),
116 "expectation_suite_names": [suite.expectation_suite_name],
117 }
118 ],
119 },
120 )
121
122 cli_message(
123 f"""<green>A Checkpoint named `{checkpoint}` was added to your project!</green>
124 - To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`"""
125 )
126 send_usage_message(
127 data_context=context,
128 event=usage_event,
129 api_version="v2",
130 success=True,
131 )
132
133
134 def _verify_checkpoint_does_not_exist(
135 context: DataContext, checkpoint: str, usage_event: str
136 ) -> None:
137 try:
138 if checkpoint in context.list_checkpoints():
139 toolkit.exit_with_failure_message_and_stats(
140 context,
141 usage_event,
142 f"A checkpoint named `{checkpoint}` already exists. Please choose a new name.",
143 )
144 except InvalidTopLevelConfigKeyError as e:
145 toolkit.exit_with_failure_message_and_stats(
146 context, usage_event, f"<red>{e}</red>"
147 )
148
149
150 def _write_checkpoint_to_disk(
151 context: DataContext, checkpoint: Dict, checkpoint_name: str
152 ) -> str:
153 # TODO this should be the responsibility of the DataContext
154 checkpoint_dir = os.path.join(
155 context.root_directory,
156 DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
157 )
158 checkpoint_file = os.path.join(checkpoint_dir, f"{checkpoint_name}.yml")
159 os.makedirs(checkpoint_dir, exist_ok=True)
160 with open(checkpoint_file, "w") as f:
161 yaml.dump(checkpoint, f)
162 return checkpoint_file
163
164
165 def _load_checkpoint_yml_template() -> dict:
166 # TODO this should be the responsibility of the DataContext
167 template_file = file_relative_path(
168 __file__, os.path.join("..", "data_context", "checkpoint_template.yml")
169 )
170 with open(template_file) as f:
171 template = yaml.load(f)
172 return template
173
174
175 @checkpoint.command(name="list")
176 @click.option(
177 "--directory",
178 "-d",
179 default=None,
180 help="The project's great_expectations directory.",
181 )
182 @mark.cli_as_experimental
183 def checkpoint_list(directory):
184 """List configured checkpoints. (Experimental)"""
185 context = toolkit.load_data_context_with_error_handling(directory)
186 checkpoints = context.list_checkpoints()
187 if not checkpoints:
188 cli_message(
189 "No checkpoints found.\n"
190 " - Use the command `great_expectations checkpoint new` to create one."
191 )
192 send_usage_message(
193 data_context=context,
194 event="cli.checkpoint.list",
195 api_version="v2",
196 success=True,
197 )
198 sys.exit(0)
199
200 number_found = len(checkpoints)
201 plural = "s" if number_found > 1 else ""
202 message = f"Found {number_found} checkpoint{plural}."
203 pretty_list = [f" - <cyan>{cp}</cyan>" for cp in checkpoints]
204 cli_message_list(pretty_list, list_intro_string=message)
205 send_usage_message(
206 data_context=context,
207 event="cli.checkpoint.list",
208 api_version="v2",
209 success=True,
210 )
211
212
213 @checkpoint.command(name="run")
214 @click.argument("checkpoint")
215 @click.option(
216 "--directory",
217 "-d",
218 default=None,
219 help="The project's great_expectations directory.",
220 )
221 @mark.cli_as_experimental
222 def checkpoint_run(checkpoint, directory):
223 """Run a checkpoint. (Experimental)"""
224 usage_event = "cli.checkpoint.run"
225 context = toolkit.load_data_context_with_error_handling(
226 directory=directory, from_cli_upgrade_command=False
227 )
228
229 checkpoint: Checkpoint = toolkit.load_checkpoint(
230 context,
231 checkpoint,
232 usage_event,
233 )
234
235 try:
236 results = checkpoint.run()
237 except Exception as e:
238 toolkit.exit_with_failure_message_and_stats(
239 context, usage_event, f"<red>{e}</red>"
240 )
241
242 if not results["success"]:
243 cli_message("Validation failed!")
244 send_usage_message(
245 data_context=context,
246 event=usage_event,
247 api_version="v2",
248 success=True,
249 )
250 print_validation_operator_results_details(results)
251 sys.exit(1)
252
253 cli_message("Validation succeeded!")
254 send_usage_message(
255 data_context=context,
256 event=usage_event,
257 api_version="v2",
258 success=True,
259 )
260 print_validation_operator_results_details(results)
261 sys.exit(0)
262
263
264 def print_validation_operator_results_details(
265 results: ValidationOperatorResult,
266 ) -> None:
267 max_suite_display_width = 40
268 toolkit.cli_message(
269 f"""
270 {'Suite Name'.ljust(max_suite_display_width)} Status Expectations met"""
271 )
272 for id, result in results.run_results.items():
273 vr = result["validation_result"]
274 stats = vr.statistics
275 passed = stats["successful_expectations"]
276 evaluated = stats["evaluated_expectations"]
277 percentage_slug = (
278 f"{round(passed / evaluated * 100, 2) if evaluated > 0 else 100} %"
279 )
280 stats_slug = f"{passed} of {evaluated} ({percentage_slug})"
281 if vr.success:
282 status_slug = "<green>✔ Passed</green>"
283 else:
284 status_slug = "<red>✖ Failed</red>"
285 suite_name = str(vr.meta["expectation_suite_name"])
286 if len(suite_name) > max_suite_display_width:
287 suite_name = suite_name[0:max_suite_display_width]
288 suite_name = suite_name[:-1] + "…"
289 status_line = f"- {suite_name.ljust(max_suite_display_width)} {status_slug} {stats_slug}"
290 toolkit.cli_message(status_line)
291
292
293 @checkpoint.command(name="script")
294 @click.argument("checkpoint")
295 @click.option(
296 "--directory",
297 "-d",
298 default=None,
299 help="The project's great_expectations directory.",
300 )
301 @mark.cli_as_experimental
302 def checkpoint_script(checkpoint, directory):
303 """
304 Create a python script to run a checkpoint. (Experimental)
305
306 Checkpoints can be run directly without this script using the
307 `great_expectations checkpoint run` command.
308
309 This script is provided for those who wish to run checkpoints via python.
310 """
311 context = toolkit.load_data_context_with_error_handling(directory)
312 usage_event = "cli.checkpoint.script"
313
314 # Attempt to load the checkpoint and deal with errors
315 _ = toolkit.load_checkpoint(context, checkpoint, usage_event)
316
317 script_name = f"run_{checkpoint}.py"
318 script_path = os.path.join(
319 context.root_directory, context.GE_UNCOMMITTED_DIR, script_name
320 )
321
322 if os.path.isfile(script_path):
323 toolkit.exit_with_failure_message_and_stats(
324 context,
325 usage_event,
326 f"""<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>
327 - Existing file path: {script_path}""",
328 )
329
330 _write_checkpoint_script_to_disk(context.root_directory, checkpoint, script_path)
331 cli_message(
332 f"""<green>A python script was created that runs the checkpoint named: `{checkpoint}`</green>
333 - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`
334 - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`"""
335 )
336 send_usage_message(
337 data_context=context,
338 event=usage_event,
339 api_version="v2",
340 success=True,
341 )
342
343
344 def _load_script_template() -> str:
345 with open(file_relative_path(__file__, "checkpoint_script_template.py")) as f:
346 template = f.read()
347 return template
348
349
350 def _write_checkpoint_script_to_disk(
351 context_directory: str, checkpoint_name: str, script_path: str
352 ) -> None:
353 script_full_path = os.path.abspath(os.path.join(script_path))
354 template = _load_script_template().format(checkpoint_name, context_directory)
355 linted_code = lint_code(template)
356 with open(script_full_path, "w") as f:
357 f.write(linted_code)
358
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/cli/v012/checkpoint.py b/great_expectations/cli/v012/checkpoint.py
--- a/great_expectations/cli/v012/checkpoint.py
+++ b/great_expectations/cli/v012/checkpoint.py
@@ -121,7 +121,7 @@
cli_message(
f"""<green>A Checkpoint named `{checkpoint}` was added to your project!</green>
- - To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`"""
+ - To run this Checkpoint, run `great_expectations --v2-api checkpoint run {checkpoint}`"""
)
send_usage_message(
data_context=context,
|
{"golden_diff": "diff --git a/great_expectations/cli/v012/checkpoint.py b/great_expectations/cli/v012/checkpoint.py\n--- a/great_expectations/cli/v012/checkpoint.py\n+++ b/great_expectations/cli/v012/checkpoint.py\n@@ -121,7 +121,7 @@\n \n cli_message(\n f\"\"\"<green>A Checkpoint named `{checkpoint}` was added to your project!</green>\n- - To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`\"\"\"\n+ - To run this Checkpoint, run `great_expectations --v2-api checkpoint run {checkpoint}`\"\"\"\n )\n send_usage_message(\n data_context=context,\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import os\nimport sys\nfrom typing import Dict\n\nimport click\nfrom ruamel.yaml import YAML\n\nfrom great_expectations import DataContext\nfrom great_expectations.checkpoint import Checkpoint\nfrom great_expectations.cli.v012 import toolkit\nfrom great_expectations.cli.v012.mark import Mark as mark\nfrom great_expectations.cli.v012.util import cli_message, cli_message_list\nfrom great_expectations.core.expectation_suite import ExpectationSuite\nfrom great_expectations.core.usage_statistics.util import send_usage_message\nfrom great_expectations.data_context.types.base import DataContextConfigDefaults\nfrom great_expectations.data_context.util import file_relative_path\nfrom great_expectations.exceptions import InvalidTopLevelConfigKeyError\nfrom great_expectations.util import lint_code\nfrom great_expectations.validation_operators.types.validation_operator_result import (\n ValidationOperatorResult,\n)\n\ntry:\n from sqlalchemy.exc import SQLAlchemyError\nexcept ImportError:\n SQLAlchemyError = RuntimeError\n\n\ntry:\n from sqlalchemy.exc import SQLAlchemyError\nexcept ImportError:\n SQLAlchemyError = RuntimeError\n\nyaml = YAML()\nyaml.indent(mapping=2, sequence=4, offset=2)\n\n\n\"\"\"\n--ge-feature-maturity-info--\n\n id: checkpoint_command_line\n title: LegacyCheckpoint - Command Line\n icon:\n short_description: Run a configured legacy checkpoint from a command line.\n description: Run a configured legacy checkpoint from a command line in a Terminal shell.\n how_to_guide_url: https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_run_a_checkpoint_in_terminal.html\n maturity: Experimental\n maturity_details:\n api_stability: Unstable (expect changes to batch request; no checkpoint store)\n implementation_completeness: Complete\n unit_test_coverage: Complete\n integration_infrastructure_test_coverage: N/A\n documentation_completeness: Complete\n bug_risk: Low\n\n--ge-feature-maturity-info--\n\"\"\"\n\n\[email protected](short_help=\"Checkpoint operations\")\ndef checkpoint():\n \"\"\"\n Checkpoint operations\n\n A checkpoint is a bundle of one or more batches of data with one or more\n Expectation Suites.\n\n A checkpoint can be as simple as one batch of data paired with one\n Expectation Suite.\n\n A checkpoint can be as complex as many batches of data across different\n datasources paired with one or more Expectation Suites each.\n \"\"\"\n pass\n\n\[email protected](name=\"new\")\[email protected](\"checkpoint\")\[email protected](\"suite\")\[email protected](\"--datasource\", default=None)\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_new(checkpoint, suite, directory, datasource):\n \"\"\"Create a new 
checkpoint for easy deployments. (Experimental)\"\"\"\n suite_name = suite\n usage_event = \"cli.checkpoint.new\"\n context = toolkit.load_data_context_with_error_handling(directory)\n\n _verify_checkpoint_does_not_exist(context, checkpoint, usage_event)\n suite: ExpectationSuite = toolkit.load_expectation_suite(\n context, suite_name, usage_event\n )\n datasource = toolkit.select_datasource(context, datasource_name=datasource)\n if datasource is None:\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=False,\n )\n sys.exit(1)\n _, _, _, batch_kwargs = toolkit.get_batch_kwargs(context, datasource.name)\n\n _ = context.add_checkpoint(\n name=checkpoint,\n **{\n \"class_name\": \"LegacyCheckpoint\",\n \"batches\": [\n {\n \"batch_kwargs\": dict(batch_kwargs),\n \"expectation_suite_names\": [suite.expectation_suite_name],\n }\n ],\n },\n )\n\n cli_message(\n f\"\"\"<green>A Checkpoint named `{checkpoint}` was added to your project!</green>\n - To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`\"\"\"\n )\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n\n\ndef _verify_checkpoint_does_not_exist(\n context: DataContext, checkpoint: str, usage_event: str\n) -> None:\n try:\n if checkpoint in context.list_checkpoints():\n toolkit.exit_with_failure_message_and_stats(\n context,\n usage_event,\n f\"A checkpoint named `{checkpoint}` already exists. Please choose a new name.\",\n )\n except InvalidTopLevelConfigKeyError as e:\n toolkit.exit_with_failure_message_and_stats(\n context, usage_event, f\"<red>{e}</red>\"\n )\n\n\ndef _write_checkpoint_to_disk(\n context: DataContext, checkpoint: Dict, checkpoint_name: str\n) -> str:\n # TODO this should be the responsibility of the DataContext\n checkpoint_dir = os.path.join(\n context.root_directory,\n DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,\n )\n checkpoint_file = os.path.join(checkpoint_dir, f\"{checkpoint_name}.yml\")\n os.makedirs(checkpoint_dir, exist_ok=True)\n with open(checkpoint_file, \"w\") as f:\n yaml.dump(checkpoint, f)\n return checkpoint_file\n\n\ndef _load_checkpoint_yml_template() -> dict:\n # TODO this should be the responsibility of the DataContext\n template_file = file_relative_path(\n __file__, os.path.join(\"..\", \"data_context\", \"checkpoint_template.yml\")\n )\n with open(template_file) as f:\n template = yaml.load(f)\n return template\n\n\[email protected](name=\"list\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_list(directory):\n \"\"\"List configured checkpoints. 
(Experimental)\"\"\"\n context = toolkit.load_data_context_with_error_handling(directory)\n checkpoints = context.list_checkpoints()\n if not checkpoints:\n cli_message(\n \"No checkpoints found.\\n\"\n \" - Use the command `great_expectations checkpoint new` to create one.\"\n )\n send_usage_message(\n data_context=context,\n event=\"cli.checkpoint.list\",\n api_version=\"v2\",\n success=True,\n )\n sys.exit(0)\n\n number_found = len(checkpoints)\n plural = \"s\" if number_found > 1 else \"\"\n message = f\"Found {number_found} checkpoint{plural}.\"\n pretty_list = [f\" - <cyan>{cp}</cyan>\" for cp in checkpoints]\n cli_message_list(pretty_list, list_intro_string=message)\n send_usage_message(\n data_context=context,\n event=\"cli.checkpoint.list\",\n api_version=\"v2\",\n success=True,\n )\n\n\[email protected](name=\"run\")\[email protected](\"checkpoint\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_run(checkpoint, directory):\n \"\"\"Run a checkpoint. (Experimental)\"\"\"\n usage_event = \"cli.checkpoint.run\"\n context = toolkit.load_data_context_with_error_handling(\n directory=directory, from_cli_upgrade_command=False\n )\n\n checkpoint: Checkpoint = toolkit.load_checkpoint(\n context,\n checkpoint,\n usage_event,\n )\n\n try:\n results = checkpoint.run()\n except Exception as e:\n toolkit.exit_with_failure_message_and_stats(\n context, usage_event, f\"<red>{e}</red>\"\n )\n\n if not results[\"success\"]:\n cli_message(\"Validation failed!\")\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n print_validation_operator_results_details(results)\n sys.exit(1)\n\n cli_message(\"Validation succeeded!\")\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n print_validation_operator_results_details(results)\n sys.exit(0)\n\n\ndef print_validation_operator_results_details(\n results: ValidationOperatorResult,\n) -> None:\n max_suite_display_width = 40\n toolkit.cli_message(\n f\"\"\"\n{'Suite Name'.ljust(max_suite_display_width)} Status Expectations met\"\"\"\n )\n for id, result in results.run_results.items():\n vr = result[\"validation_result\"]\n stats = vr.statistics\n passed = stats[\"successful_expectations\"]\n evaluated = stats[\"evaluated_expectations\"]\n percentage_slug = (\n f\"{round(passed / evaluated * 100, 2) if evaluated > 0 else 100} %\"\n )\n stats_slug = f\"{passed} of {evaluated} ({percentage_slug})\"\n if vr.success:\n status_slug = \"<green>\u2714 Passed</green>\"\n else:\n status_slug = \"<red>\u2716 Failed</red>\"\n suite_name = str(vr.meta[\"expectation_suite_name\"])\n if len(suite_name) > max_suite_display_width:\n suite_name = suite_name[0:max_suite_display_width]\n suite_name = suite_name[:-1] + \"\u2026\"\n status_line = f\"- {suite_name.ljust(max_suite_display_width)} {status_slug} {stats_slug}\"\n toolkit.cli_message(status_line)\n\n\[email protected](name=\"script\")\[email protected](\"checkpoint\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_script(checkpoint, directory):\n \"\"\"\n Create a python script to run a checkpoint. 
(Experimental)\n\n Checkpoints can be run directly without this script using the\n `great_expectations checkpoint run` command.\n\n This script is provided for those who wish to run checkpoints via python.\n \"\"\"\n context = toolkit.load_data_context_with_error_handling(directory)\n usage_event = \"cli.checkpoint.script\"\n\n # Attempt to load the checkpoint and deal with errors\n _ = toolkit.load_checkpoint(context, checkpoint, usage_event)\n\n script_name = f\"run_{checkpoint}.py\"\n script_path = os.path.join(\n context.root_directory, context.GE_UNCOMMITTED_DIR, script_name\n )\n\n if os.path.isfile(script_path):\n toolkit.exit_with_failure_message_and_stats(\n context,\n usage_event,\n f\"\"\"<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>\n - Existing file path: {script_path}\"\"\",\n )\n\n _write_checkpoint_script_to_disk(context.root_directory, checkpoint, script_path)\n cli_message(\n f\"\"\"<green>A python script was created that runs the checkpoint named: `{checkpoint}`</green>\n - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`\n - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`\"\"\"\n )\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n\n\ndef _load_script_template() -> str:\n with open(file_relative_path(__file__, \"checkpoint_script_template.py\")) as f:\n template = f.read()\n return template\n\n\ndef _write_checkpoint_script_to_disk(\n context_directory: str, checkpoint_name: str, script_path: str\n) -> None:\n script_full_path = os.path.abspath(os.path.join(script_path))\n template = _load_script_template().format(checkpoint_name, context_directory)\n linted_code = lint_code(template)\n with open(script_full_path, \"w\") as f:\n f.write(linted_code)\n", "path": "great_expectations/cli/v012/checkpoint.py"}], "after_files": [{"content": "import os\nimport sys\nfrom typing import Dict\n\nimport click\nfrom ruamel.yaml import YAML\n\nfrom great_expectations import DataContext\nfrom great_expectations.checkpoint import Checkpoint\nfrom great_expectations.cli.v012 import toolkit\nfrom great_expectations.cli.v012.mark import Mark as mark\nfrom great_expectations.cli.v012.util import cli_message, cli_message_list\nfrom great_expectations.core.expectation_suite import ExpectationSuite\nfrom great_expectations.core.usage_statistics.util import send_usage_message\nfrom great_expectations.data_context.types.base import DataContextConfigDefaults\nfrom great_expectations.data_context.util import file_relative_path\nfrom great_expectations.exceptions import InvalidTopLevelConfigKeyError\nfrom great_expectations.util import lint_code\nfrom great_expectations.validation_operators.types.validation_operator_result import (\n ValidationOperatorResult,\n)\n\ntry:\n from sqlalchemy.exc import SQLAlchemyError\nexcept ImportError:\n SQLAlchemyError = RuntimeError\n\n\ntry:\n from sqlalchemy.exc import SQLAlchemyError\nexcept ImportError:\n SQLAlchemyError = RuntimeError\n\nyaml = YAML()\nyaml.indent(mapping=2, sequence=4, offset=2)\n\n\n\"\"\"\n--ge-feature-maturity-info--\n\n id: checkpoint_command_line\n title: LegacyCheckpoint - Command Line\n icon:\n short_description: Run a configured legacy checkpoint from a command line.\n description: Run a configured legacy checkpoint from a command line in a Terminal shell.\n how_to_guide_url: 
https://docs.greatexpectations.io/en/latest/guides/how_to_guides/validation/how_to_run_a_checkpoint_in_terminal.html\n maturity: Experimental\n maturity_details:\n api_stability: Unstable (expect changes to batch request; no checkpoint store)\n implementation_completeness: Complete\n unit_test_coverage: Complete\n integration_infrastructure_test_coverage: N/A\n documentation_completeness: Complete\n bug_risk: Low\n\n--ge-feature-maturity-info--\n\"\"\"\n\n\[email protected](short_help=\"Checkpoint operations\")\ndef checkpoint():\n \"\"\"\n Checkpoint operations\n\n A checkpoint is a bundle of one or more batches of data with one or more\n Expectation Suites.\n\n A checkpoint can be as simple as one batch of data paired with one\n Expectation Suite.\n\n A checkpoint can be as complex as many batches of data across different\n datasources paired with one or more Expectation Suites each.\n \"\"\"\n pass\n\n\[email protected](name=\"new\")\[email protected](\"checkpoint\")\[email protected](\"suite\")\[email protected](\"--datasource\", default=None)\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_new(checkpoint, suite, directory, datasource):\n \"\"\"Create a new checkpoint for easy deployments. (Experimental)\"\"\"\n suite_name = suite\n usage_event = \"cli.checkpoint.new\"\n context = toolkit.load_data_context_with_error_handling(directory)\n\n _verify_checkpoint_does_not_exist(context, checkpoint, usage_event)\n suite: ExpectationSuite = toolkit.load_expectation_suite(\n context, suite_name, usage_event\n )\n datasource = toolkit.select_datasource(context, datasource_name=datasource)\n if datasource is None:\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=False,\n )\n sys.exit(1)\n _, _, _, batch_kwargs = toolkit.get_batch_kwargs(context, datasource.name)\n\n _ = context.add_checkpoint(\n name=checkpoint,\n **{\n \"class_name\": \"LegacyCheckpoint\",\n \"batches\": [\n {\n \"batch_kwargs\": dict(batch_kwargs),\n \"expectation_suite_names\": [suite.expectation_suite_name],\n }\n ],\n },\n )\n\n cli_message(\n f\"\"\"<green>A Checkpoint named `{checkpoint}` was added to your project!</green>\n - To run this Checkpoint, run `great_expectations --v2-api checkpoint run {checkpoint}`\"\"\"\n )\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n\n\ndef _verify_checkpoint_does_not_exist(\n context: DataContext, checkpoint: str, usage_event: str\n) -> None:\n try:\n if checkpoint in context.list_checkpoints():\n toolkit.exit_with_failure_message_and_stats(\n context,\n usage_event,\n f\"A checkpoint named `{checkpoint}` already exists. 
Please choose a new name.\",\n )\n except InvalidTopLevelConfigKeyError as e:\n toolkit.exit_with_failure_message_and_stats(\n context, usage_event, f\"<red>{e}</red>\"\n )\n\n\ndef _write_checkpoint_to_disk(\n context: DataContext, checkpoint: Dict, checkpoint_name: str\n) -> str:\n # TODO this should be the responsibility of the DataContext\n checkpoint_dir = os.path.join(\n context.root_directory,\n DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,\n )\n checkpoint_file = os.path.join(checkpoint_dir, f\"{checkpoint_name}.yml\")\n os.makedirs(checkpoint_dir, exist_ok=True)\n with open(checkpoint_file, \"w\") as f:\n yaml.dump(checkpoint, f)\n return checkpoint_file\n\n\ndef _load_checkpoint_yml_template() -> dict:\n # TODO this should be the responsibility of the DataContext\n template_file = file_relative_path(\n __file__, os.path.join(\"..\", \"data_context\", \"checkpoint_template.yml\")\n )\n with open(template_file) as f:\n template = yaml.load(f)\n return template\n\n\[email protected](name=\"list\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_list(directory):\n \"\"\"List configured checkpoints. (Experimental)\"\"\"\n context = toolkit.load_data_context_with_error_handling(directory)\n checkpoints = context.list_checkpoints()\n if not checkpoints:\n cli_message(\n \"No checkpoints found.\\n\"\n \" - Use the command `great_expectations checkpoint new` to create one.\"\n )\n send_usage_message(\n data_context=context,\n event=\"cli.checkpoint.list\",\n api_version=\"v2\",\n success=True,\n )\n sys.exit(0)\n\n number_found = len(checkpoints)\n plural = \"s\" if number_found > 1 else \"\"\n message = f\"Found {number_found} checkpoint{plural}.\"\n pretty_list = [f\" - <cyan>{cp}</cyan>\" for cp in checkpoints]\n cli_message_list(pretty_list, list_intro_string=message)\n send_usage_message(\n data_context=context,\n event=\"cli.checkpoint.list\",\n api_version=\"v2\",\n success=True,\n )\n\n\[email protected](name=\"run\")\[email protected](\"checkpoint\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_run(checkpoint, directory):\n \"\"\"Run a checkpoint. 
(Experimental)\"\"\"\n usage_event = \"cli.checkpoint.run\"\n context = toolkit.load_data_context_with_error_handling(\n directory=directory, from_cli_upgrade_command=False\n )\n\n checkpoint: Checkpoint = toolkit.load_checkpoint(\n context,\n checkpoint,\n usage_event,\n )\n\n try:\n results = checkpoint.run()\n except Exception as e:\n toolkit.exit_with_failure_message_and_stats(\n context, usage_event, f\"<red>{e}</red>\"\n )\n\n if not results[\"success\"]:\n cli_message(\"Validation failed!\")\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n print_validation_operator_results_details(results)\n sys.exit(1)\n\n cli_message(\"Validation succeeded!\")\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n print_validation_operator_results_details(results)\n sys.exit(0)\n\n\ndef print_validation_operator_results_details(\n results: ValidationOperatorResult,\n) -> None:\n max_suite_display_width = 40\n toolkit.cli_message(\n f\"\"\"\n{'Suite Name'.ljust(max_suite_display_width)} Status Expectations met\"\"\"\n )\n for id, result in results.run_results.items():\n vr = result[\"validation_result\"]\n stats = vr.statistics\n passed = stats[\"successful_expectations\"]\n evaluated = stats[\"evaluated_expectations\"]\n percentage_slug = (\n f\"{round(passed / evaluated * 100, 2) if evaluated > 0 else 100} %\"\n )\n stats_slug = f\"{passed} of {evaluated} ({percentage_slug})\"\n if vr.success:\n status_slug = \"<green>\u2714 Passed</green>\"\n else:\n status_slug = \"<red>\u2716 Failed</red>\"\n suite_name = str(vr.meta[\"expectation_suite_name\"])\n if len(suite_name) > max_suite_display_width:\n suite_name = suite_name[0:max_suite_display_width]\n suite_name = suite_name[:-1] + \"\u2026\"\n status_line = f\"- {suite_name.ljust(max_suite_display_width)} {status_slug} {stats_slug}\"\n toolkit.cli_message(status_line)\n\n\[email protected](name=\"script\")\[email protected](\"checkpoint\")\[email protected](\n \"--directory\",\n \"-d\",\n default=None,\n help=\"The project's great_expectations directory.\",\n)\[email protected]_as_experimental\ndef checkpoint_script(checkpoint, directory):\n \"\"\"\n Create a python script to run a checkpoint. (Experimental)\n\n Checkpoints can be run directly without this script using the\n `great_expectations checkpoint run` command.\n\n This script is provided for those who wish to run checkpoints via python.\n \"\"\"\n context = toolkit.load_data_context_with_error_handling(directory)\n usage_event = \"cli.checkpoint.script\"\n\n # Attempt to load the checkpoint and deal with errors\n _ = toolkit.load_checkpoint(context, checkpoint, usage_event)\n\n script_name = f\"run_{checkpoint}.py\"\n script_path = os.path.join(\n context.root_directory, context.GE_UNCOMMITTED_DIR, script_name\n )\n\n if os.path.isfile(script_path):\n toolkit.exit_with_failure_message_and_stats(\n context,\n usage_event,\n f\"\"\"<red>Warning! 
A script named {script_name} already exists and this command will not overwrite it.</red>\n - Existing file path: {script_path}\"\"\",\n )\n\n _write_checkpoint_script_to_disk(context.root_directory, checkpoint, script_path)\n cli_message(\n f\"\"\"<green>A python script was created that runs the checkpoint named: `{checkpoint}`</green>\n - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`\n - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`\"\"\"\n )\n send_usage_message(\n data_context=context,\n event=usage_event,\n api_version=\"v2\",\n success=True,\n )\n\n\ndef _load_script_template() -> str:\n with open(file_relative_path(__file__, \"checkpoint_script_template.py\")) as f:\n template = f.read()\n return template\n\n\ndef _write_checkpoint_script_to_disk(\n context_directory: str, checkpoint_name: str, script_path: str\n) -> None:\n script_full_path = os.path.abspath(os.path.join(script_path))\n template = _load_script_template().format(checkpoint_name, context_directory)\n linted_code = lint_code(template)\n with open(script_full_path, \"w\") as f:\n f.write(linted_code)\n", "path": "great_expectations/cli/v012/checkpoint.py"}]}
| 3,759 | 155 |
gh_patches_debug_38702
|
rasdani/github-patches
|
git_diff
|
scalableminds__webknossos-libs-662
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Always use `spawn` in multiprocessing
I just debugged an issue where my multiprocessing worker would hang on Linux, but not on macOS. After 4 hours of debugging, I found out that `s3fs` seems to have issues with the `fork` start method in multiprocessing. See https://github.com/fsspec/s3fs/issues/464.
I know there have been other occasions where switching the start method was the solution. I wonder if there are any downsides to always using `spawn` (or `forkserver` where available) within the ProcessPoolExecutors from `cluster_tools`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cluster_tools/cluster_tools/__init__.py`
Content:
```
1 import logging
2 import multiprocessing
3 import os
4 import shutil
5 import tempfile
6 from concurrent import futures
7 from concurrent.futures import ProcessPoolExecutor
8 from functools import partial
9 from pathlib import Path
10
11 from . import pickling
12 from .multiprocessing_logging_handler import get_multiprocessing_logging_setup_fn
13 from .schedulers.kube import KubernetesExecutor
14 from .schedulers.pbs import PBSExecutor
15 from .schedulers.slurm import SlurmExecutor
16 from .util import enrich_future_with_uncaught_warning
17
18
19 def get_existent_kwargs_subset(whitelist, kwargs):
20 new_kwargs = {}
21 for arg_name in whitelist:
22 if arg_name in kwargs:
23 new_kwargs[arg_name] = kwargs[arg_name]
24
25 return new_kwargs
26
27
28 PROCESS_POOL_KWARGS_WHITELIST = ["max_workers", "mp_context", "initializer", "initargs"]
29
30
31 class WrappedProcessPoolExecutor(ProcessPoolExecutor):
32 """
33 Wraps the ProcessPoolExecutor to add various features:
34 - map_to_futures and map_unordered method
35 - pickling of job's output (see output_pickle_path_getter and output_pickle_path)
36 - job submission via pickling to circumvent bug in python < 3.8 (see MULTIPROCESSING_VIA_IO_TMP_DIR)
37 """
38
39 def __init__(self, **kwargs):
40 new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)
41
42 self.did_overwrite_start_method = False
43 if kwargs.get("start_method", None) is not None:
44 self.did_overwrite_start_method = True
45 self.old_start_method = multiprocessing.get_start_method()
46 start_method = kwargs["start_method"]
47 logging.info(
48 f"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}"
49 )
50 multiprocessing.set_start_method(start_method, force=True)
51
52 ProcessPoolExecutor.__init__(self, **new_kwargs)
53
54 def shutdown(self, *args, **kwargs):
55
56 super().shutdown(*args, **kwargs)
57
58 if self.did_overwrite_start_method:
59 logging.info(
60 f"Restoring start_method to original value: {self.old_start_method}."
61 )
62 multiprocessing.set_start_method(self.old_start_method, force=True)
63 self.old_start_method = None
64 self.did_overwrite_start_method = False
65
66 def submit(self, *args, **kwargs):
67
68 output_pickle_path = None
69 if "__cfut_options" in kwargs:
70 output_pickle_path = kwargs["__cfut_options"]["output_pickle_path"]
71 del kwargs["__cfut_options"]
72
73 if os.environ.get("MULTIPROCESSING_VIA_IO"):
74 # If MULTIPROCESSING_VIA_IO is set, _submit_via_io is used to
75 # workaround size constraints in pythons multiprocessing
76 # implementation. Also see https://github.com/python/cpython/pull/10305/files
77 # This should be fixed in python 3.8
78 submit_fn = self._submit_via_io
79 else:
80 submit_fn = super().submit
81
82 # Depending on the start_method and output_pickle_path, wrapper functions may need to be
83 # executed in the new process context, before the actual code is ran.
84 # These wrapper functions consume their arguments from *args, **kwargs and assume
85 # that the next argument will be another function that is then called.
86 # The call_stack holds all of these wrapper functions and their arguments in the correct order.
87 # For example, call_stack = [wrapper_fn_1, wrapper_fn_1_arg_1, wrapper_fn_2, actual_fn, actual_fn_arg_1]
88 # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.
89 call_stack = []
90
91 if multiprocessing.get_start_method() != "fork":
92 # If a start_method other than the default "fork" is used, logging needs to be re-setup,
93 # because the programming context is not inherited in those cases.
94 multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()
95 call_stack.extend(
96 [
97 WrappedProcessPoolExecutor._setup_logging_and_execute,
98 multiprocessing_logging_setup_fn,
99 ]
100 )
101
102 if output_pickle_path is not None:
103 call_stack.extend(
104 [
105 WrappedProcessPoolExecutor._execute_and_persist_function,
106 output_pickle_path,
107 ]
108 )
109
110 fut = submit_fn(*call_stack, *args, **kwargs)
111
112 enrich_future_with_uncaught_warning(fut)
113 return fut
114
115 def _submit_via_io(self, *args, **kwargs):
116
117 func = args[0]
118 args = args[1:]
119
120 opt_tmp_dir = os.environ.get("MULTIPROCESSING_VIA_IO_TMP_DIR")
121 if opt_tmp_dir is not None:
122 dirpath = tempfile.mkdtemp(dir=opt_tmp_dir)
123 else:
124 dirpath = tempfile.mkdtemp()
125
126 output_pickle_path = Path(dirpath) / "jobdescription.pickle"
127
128 with open(output_pickle_path, "wb") as file:
129 pickling.dump((func, args, kwargs), file)
130
131 future = super().submit(
132 WrappedProcessPoolExecutor._execute_via_io, output_pickle_path
133 )
134
135 future.add_done_callback(
136 partial(WrappedProcessPoolExecutor._remove_tmp_file, dirpath)
137 )
138
139 return future
140
141 @staticmethod
142 def _remove_tmp_file(path, _future):
143
144 shutil.rmtree(path)
145
146 @staticmethod
147 def _setup_logging_and_execute(multiprocessing_logging_setup_fn, *args, **kwargs):
148
149 func = args[0]
150 args = args[1:]
151
152 multiprocessing_logging_setup_fn()
153
154 return func(*args, **kwargs)
155
156 @staticmethod
157 def _execute_via_io(serialized_function_info_path):
158
159 with open(serialized_function_info_path, "rb") as file:
160 (func, args, kwargs) = pickling.load(file)
161 return func(*args, **kwargs)
162
163 @staticmethod
164 def _execute_and_persist_function(output_pickle_path, *args, **kwargs):
165
166 func = args[0]
167 args = args[1:]
168
169 result = func(*args, **kwargs)
170
171 with open(output_pickle_path, "wb") as file:
172 pickling.dump(result, file)
173
174 return result
175
176 def map_unordered(self, func, args):
177
178 futs = self.map_to_futures(func, args)
179
180 # Return a separate generator to avoid that map_unordered
181 # is executed lazily (otherwise, jobs would be submitted
182 # lazily, as well).
183 def result_generator():
184 for fut in futures.as_completed(futs):
185 yield fut.result()
186
187 return result_generator()
188
189 def map_to_futures(self, func, args, output_pickle_path_getter=None):
190
191 if output_pickle_path_getter is not None:
192 futs = [
193 self.submit(
194 func,
195 arg,
196 __cfut_options={
197 "output_pickle_path": output_pickle_path_getter(arg)
198 },
199 )
200 for arg in args
201 ]
202 else:
203 futs = [self.submit(func, arg) for arg in args]
204
205 return futs
206
207 def forward_log(self, fut):
208 """
209 Similar to the cluster executor, this method Takes a future from which the log file is forwarded to the active
210 process. This method blocks as long as the future is not done.
211 """
212
213 # Since the default behavior of process pool executors is to show the log in the main process
214 # we don't need to do anything except for blocking until the future is done.
215 return fut.result()
216
217
218 class SequentialExecutor(WrappedProcessPoolExecutor):
219 """
220 The same as WrappedProcessPoolExecutor, but always uses only one core. In essence,
221 this is a sequential executor approach, but it still makes use of the standard pool approach.
222 That way, switching between different executors should always work without any problems.
223 """
224
225 def __init__(self, **kwargs):
226 kwargs["max_workers"] = 1
227 WrappedProcessPoolExecutor.__init__(self, **kwargs)
228
229
230 class DebugSequentialExecutor(SequentialExecutor):
231 """
232 Only use for debugging purposes. This executor does not spawn new processes for its jobs. Therefore,
233 setting breakpoint()'s should be possible without context-related problems.
234 """
235
236 def submit(self, *args, **kwargs):
237
238 output_pickle_path = None
239 if "__cfut_options" in kwargs:
240 output_pickle_path = kwargs["__cfut_options"]["output_pickle_path"]
241 del kwargs["__cfut_options"]
242
243 if output_pickle_path is not None:
244 fut = self._blocking_submit(
245 WrappedProcessPoolExecutor._execute_and_persist_function,
246 output_pickle_path,
247 *args,
248 **kwargs,
249 )
250 else:
251 fut = self._blocking_submit(*args, **kwargs)
252
253 enrich_future_with_uncaught_warning(fut)
254 return fut
255
256 def _blocking_submit(self, *args, **kwargs):
257
258 func = args[0]
259 args = args[1:]
260
261 fut = futures.Future()
262 result = func(*args, **kwargs)
263 fut.set_result(result)
264
265 return fut
266
267
268 def pickle_identity(obj):
269 return pickling.loads(pickling.dumps(obj))
270
271
272 def pickle_identity_executor(func, *args, **kwargs):
273 result = func(*args, **kwargs)
274 return pickle_identity(result)
275
276
277 class PickleExecutor(WrappedProcessPoolExecutor):
278 """
279 The same as WrappedProcessPoolExecutor, but always pickles input and output of the jobs.
280 When using this executor for automated tests, it is ensured that using cluster executors in production
281 won't provoke pickling-related problems.
282 """
283
284 def submit(self, _func, *_args, **_kwargs):
285
286 (func, args, kwargs) = pickle_identity((_func, _args, _kwargs))
287 return super().submit(pickle_identity_executor, func, *args, **kwargs)
288
289
290 def noop():
291 return True
292
293
294 did_start_test_multiprocessing = False
295
296
297 def test_valid_multiprocessing():
298
299 msg = """
300 ###############################################################
301 An attempt has been made to start a new process before the
302 current process has finished its bootstrapping phase.
303
304 This probably means that you are not using fork to start your
305 child processes and you have forgotten to use the proper idiom
306 in the main module:
307
308 if __name__ == '__main__':
309 main()
310 ...
311 ###############################################################
312 """
313
314 with get_executor("multiprocessing") as executor:
315 try:
316 res_fut = executor.submit(noop)
317 assert res_fut.result() == True, msg
318 except RuntimeError as exc:
319 raise Exception(msg) from exc
320 except EOFError as exc:
321 raise Exception(msg) from exc
322
323
324 def get_executor(environment, **kwargs):
325
326 if environment == "slurm":
327 return SlurmExecutor(**kwargs)
328 elif environment == "pbs":
329 return PBSExecutor(**kwargs)
330 elif environment == "kubernetes":
331 return KubernetesExecutor(**kwargs)
332 elif environment == "multiprocessing":
333 global did_start_test_multiprocessing
334 if not did_start_test_multiprocessing:
335 did_start_test_multiprocessing = True
336 test_valid_multiprocessing()
337
338 return WrappedProcessPoolExecutor(**kwargs)
339 elif environment == "sequential":
340 return SequentialExecutor(**kwargs)
341 elif environment == "debug_sequential":
342 return DebugSequentialExecutor(**kwargs)
343 elif environment == "test_pickling":
344 return PickleExecutor(**kwargs)
345 raise Exception("Unknown executor: {}".format(environment))
346
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cluster_tools/cluster_tools/__init__.py b/cluster_tools/cluster_tools/__init__.py
--- a/cluster_tools/cluster_tools/__init__.py
+++ b/cluster_tools/cluster_tools/__init__.py
@@ -25,7 +25,7 @@
return new_kwargs
-PROCESS_POOL_KWARGS_WHITELIST = ["max_workers", "mp_context", "initializer", "initargs"]
+PROCESS_POOL_KWARGS_WHITELIST = ["max_workers", "initializer", "initargs"]
class WrappedProcessPoolExecutor(ProcessPoolExecutor):
@@ -37,31 +37,28 @@
"""
def __init__(self, **kwargs):
- new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)
+ assert (not "start_method" in kwargs or kwargs["start_method"] is None) or (
+ not "mp_context" in kwargs
+ ), "Cannot use both `start_method` and `mp_context` kwargs."
- self.did_overwrite_start_method = False
- if kwargs.get("start_method", None) is not None:
- self.did_overwrite_start_method = True
- self.old_start_method = multiprocessing.get_start_method()
- start_method = kwargs["start_method"]
- logging.info(
- f"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}"
- )
- multiprocessing.set_start_method(start_method, force=True)
+ new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)
- ProcessPoolExecutor.__init__(self, **new_kwargs)
+ mp_context = None
- def shutdown(self, *args, **kwargs):
+ if "mp_context" in kwargs:
+ mp_context = kwargs["mp_context"]
+ elif "start_method" in kwargs and kwargs["start_method"] is not None:
+ mp_context = multiprocessing.get_context(kwargs["start_method"])
+ elif "MULTIPROCESSING_DEFAULT_START_METHOD" in os.environ:
+ mp_context = multiprocessing.get_context(
+ os.environ["MULTIPROCESSING_DEFAULT_START_METHOD"]
+ )
+ else:
+ mp_context = multiprocessing.get_context("spawn")
- super().shutdown(*args, **kwargs)
+ new_kwargs["mp_context"] = mp_context
- if self.did_overwrite_start_method:
- logging.info(
- f"Restoring start_method to original value: {self.old_start_method}."
- )
- multiprocessing.set_start_method(self.old_start_method, force=True)
- self.old_start_method = None
- self.did_overwrite_start_method = False
+ ProcessPoolExecutor.__init__(self, **new_kwargs)
def submit(self, *args, **kwargs):
@@ -88,7 +85,7 @@
# where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.
call_stack = []
- if multiprocessing.get_start_method() != "fork":
+ if self._mp_context.get_start_method() != "fork":
# If a start_method other than the default "fork" is used, logging needs to be re-setup,
# because the programming context is not inherited in those cases.
multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()
|
{"golden_diff": "diff --git a/cluster_tools/cluster_tools/__init__.py b/cluster_tools/cluster_tools/__init__.py\n--- a/cluster_tools/cluster_tools/__init__.py\n+++ b/cluster_tools/cluster_tools/__init__.py\n@@ -25,7 +25,7 @@\n return new_kwargs\n \n \n-PROCESS_POOL_KWARGS_WHITELIST = [\"max_workers\", \"mp_context\", \"initializer\", \"initargs\"]\n+PROCESS_POOL_KWARGS_WHITELIST = [\"max_workers\", \"initializer\", \"initargs\"]\n \n \n class WrappedProcessPoolExecutor(ProcessPoolExecutor):\n@@ -37,31 +37,28 @@\n \"\"\"\n \n def __init__(self, **kwargs):\n- new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)\n+ assert (not \"start_method\" in kwargs or kwargs[\"start_method\"] is None) or (\n+ not \"mp_context\" in kwargs\n+ ), \"Cannot use both `start_method` and `mp_context` kwargs.\"\n \n- self.did_overwrite_start_method = False\n- if kwargs.get(\"start_method\", None) is not None:\n- self.did_overwrite_start_method = True\n- self.old_start_method = multiprocessing.get_start_method()\n- start_method = kwargs[\"start_method\"]\n- logging.info(\n- f\"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}\"\n- )\n- multiprocessing.set_start_method(start_method, force=True)\n+ new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)\n \n- ProcessPoolExecutor.__init__(self, **new_kwargs)\n+ mp_context = None\n \n- def shutdown(self, *args, **kwargs):\n+ if \"mp_context\" in kwargs:\n+ mp_context = kwargs[\"mp_context\"]\n+ elif \"start_method\" in kwargs and kwargs[\"start_method\"] is not None:\n+ mp_context = multiprocessing.get_context(kwargs[\"start_method\"])\n+ elif \"MULTIPROCESSING_DEFAULT_START_METHOD\" in os.environ:\n+ mp_context = multiprocessing.get_context(\n+ os.environ[\"MULTIPROCESSING_DEFAULT_START_METHOD\"]\n+ )\n+ else:\n+ mp_context = multiprocessing.get_context(\"spawn\")\n \n- super().shutdown(*args, **kwargs)\n+ new_kwargs[\"mp_context\"] = mp_context\n \n- if self.did_overwrite_start_method:\n- logging.info(\n- f\"Restoring start_method to original value: {self.old_start_method}.\"\n- )\n- multiprocessing.set_start_method(self.old_start_method, force=True)\n- self.old_start_method = None\n- self.did_overwrite_start_method = False\n+ ProcessPoolExecutor.__init__(self, **new_kwargs)\n \n def submit(self, *args, **kwargs):\n \n@@ -88,7 +85,7 @@\n # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.\n call_stack = []\n \n- if multiprocessing.get_start_method() != \"fork\":\n+ if self._mp_context.get_start_method() != \"fork\":\n # If a start_method other than the default \"fork\" is used, logging needs to be re-setup,\n # because the programming context is not inherited in those cases.\n multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()\n", "issue": "Always use `spawn` in multiprocessing\nI just debugged an issue where my multiprocessing worker would hang on Linux, but not on macOS. After 4 hours of debugging, I found out that `s3fs` seems to have issues with the `fork` start method in multiprocessing. See https://github.com/fsspec/s3fs/issues/464.\r\n\r\nI know there have been other occasions where switching the start method was the solution. 
I wonder if there are any downsides to always use `spawn` (or `forkserver` where available) within the ProcessPoolExecutors from `cluster_tools`?\n", "before_files": [{"content": "import logging\nimport multiprocessing\nimport os\nimport shutil\nimport tempfile\nfrom concurrent import futures\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom pathlib import Path\n\nfrom . import pickling\nfrom .multiprocessing_logging_handler import get_multiprocessing_logging_setup_fn\nfrom .schedulers.kube import KubernetesExecutor\nfrom .schedulers.pbs import PBSExecutor\nfrom .schedulers.slurm import SlurmExecutor\nfrom .util import enrich_future_with_uncaught_warning\n\n\ndef get_existent_kwargs_subset(whitelist, kwargs):\n new_kwargs = {}\n for arg_name in whitelist:\n if arg_name in kwargs:\n new_kwargs[arg_name] = kwargs[arg_name]\n\n return new_kwargs\n\n\nPROCESS_POOL_KWARGS_WHITELIST = [\"max_workers\", \"mp_context\", \"initializer\", \"initargs\"]\n\n\nclass WrappedProcessPoolExecutor(ProcessPoolExecutor):\n \"\"\"\n Wraps the ProcessPoolExecutor to add various features:\n - map_to_futures and map_unordered method\n - pickling of job's output (see output_pickle_path_getter and output_pickle_path)\n - job submission via pickling to circumvent bug in python < 3.8 (see MULTIPROCESSING_VIA_IO_TMP_DIR)\n \"\"\"\n\n def __init__(self, **kwargs):\n new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)\n\n self.did_overwrite_start_method = False\n if kwargs.get(\"start_method\", None) is not None:\n self.did_overwrite_start_method = True\n self.old_start_method = multiprocessing.get_start_method()\n start_method = kwargs[\"start_method\"]\n logging.info(\n f\"Overwriting start_method to {start_method}. Previous value: {self.old_start_method}\"\n )\n multiprocessing.set_start_method(start_method, force=True)\n\n ProcessPoolExecutor.__init__(self, **new_kwargs)\n\n def shutdown(self, *args, **kwargs):\n\n super().shutdown(*args, **kwargs)\n\n if self.did_overwrite_start_method:\n logging.info(\n f\"Restoring start_method to original value: {self.old_start_method}.\"\n )\n multiprocessing.set_start_method(self.old_start_method, force=True)\n self.old_start_method = None\n self.did_overwrite_start_method = False\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if os.environ.get(\"MULTIPROCESSING_VIA_IO\"):\n # If MULTIPROCESSING_VIA_IO is set, _submit_via_io is used to\n # workaround size constraints in pythons multiprocessing\n # implementation. 
Also see https://github.com/python/cpython/pull/10305/files\n # This should be fixed in python 3.8\n submit_fn = self._submit_via_io\n else:\n submit_fn = super().submit\n\n # Depending on the start_method and output_pickle_path, wrapper functions may need to be\n # executed in the new process context, before the actual code is ran.\n # These wrapper functions consume their arguments from *args, **kwargs and assume\n # that the next argument will be another function that is then called.\n # The call_stack holds all of these wrapper functions and their arguments in the correct order.\n # For example, call_stack = [wrapper_fn_1, wrapper_fn_1_arg_1, wrapper_fn_2, actual_fn, actual_fn_arg_1]\n # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.\n call_stack = []\n\n if multiprocessing.get_start_method() != \"fork\":\n # If a start_method other than the default \"fork\" is used, logging needs to be re-setup,\n # because the programming context is not inherited in those cases.\n multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._setup_logging_and_execute,\n multiprocessing_logging_setup_fn,\n ]\n )\n\n if output_pickle_path is not None:\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n ]\n )\n\n fut = submit_fn(*call_stack, *args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _submit_via_io(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n opt_tmp_dir = os.environ.get(\"MULTIPROCESSING_VIA_IO_TMP_DIR\")\n if opt_tmp_dir is not None:\n dirpath = tempfile.mkdtemp(dir=opt_tmp_dir)\n else:\n dirpath = tempfile.mkdtemp()\n\n output_pickle_path = Path(dirpath) / \"jobdescription.pickle\"\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump((func, args, kwargs), file)\n\n future = super().submit(\n WrappedProcessPoolExecutor._execute_via_io, output_pickle_path\n )\n\n future.add_done_callback(\n partial(WrappedProcessPoolExecutor._remove_tmp_file, dirpath)\n )\n\n return future\n\n @staticmethod\n def _remove_tmp_file(path, _future):\n\n shutil.rmtree(path)\n\n @staticmethod\n def _setup_logging_and_execute(multiprocessing_logging_setup_fn, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n multiprocessing_logging_setup_fn()\n\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_via_io(serialized_function_info_path):\n\n with open(serialized_function_info_path, \"rb\") as file:\n (func, args, kwargs) = pickling.load(file)\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_and_persist_function(output_pickle_path, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n result = func(*args, **kwargs)\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump(result, file)\n\n return result\n\n def map_unordered(self, func, args):\n\n futs = self.map_to_futures(func, args)\n\n # Return a separate generator to avoid that map_unordered\n # is executed lazily (otherwise, jobs would be submitted\n # lazily, as well).\n def result_generator():\n for fut in futures.as_completed(futs):\n yield fut.result()\n\n return result_generator()\n\n def map_to_futures(self, func, args, output_pickle_path_getter=None):\n\n if output_pickle_path_getter is not None:\n futs = [\n self.submit(\n func,\n arg,\n __cfut_options={\n \"output_pickle_path\": output_pickle_path_getter(arg)\n },\n )\n for arg in args\n ]\n else:\n futs = 
[self.submit(func, arg) for arg in args]\n\n return futs\n\n def forward_log(self, fut):\n \"\"\"\n Similar to the cluster executor, this method Takes a future from which the log file is forwarded to the active\n process. This method blocks as long as the future is not done.\n \"\"\"\n\n # Since the default behavior of process pool executors is to show the log in the main process\n # we don't need to do anything except for blocking until the future is done.\n return fut.result()\n\n\nclass SequentialExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always uses only one core. In essence,\n this is a sequential executor approach, but it still makes use of the standard pool approach.\n That way, switching between different executors should always work without any problems.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs[\"max_workers\"] = 1\n WrappedProcessPoolExecutor.__init__(self, **kwargs)\n\n\nclass DebugSequentialExecutor(SequentialExecutor):\n \"\"\"\n Only use for debugging purposes. This executor does not spawn new processes for its jobs. Therefore,\n setting breakpoint()'s should be possible without context-related problems.\n \"\"\"\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if output_pickle_path is not None:\n fut = self._blocking_submit(\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n *args,\n **kwargs,\n )\n else:\n fut = self._blocking_submit(*args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _blocking_submit(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n fut = futures.Future()\n result = func(*args, **kwargs)\n fut.set_result(result)\n\n return fut\n\n\ndef pickle_identity(obj):\n return pickling.loads(pickling.dumps(obj))\n\n\ndef pickle_identity_executor(func, *args, **kwargs):\n result = func(*args, **kwargs)\n return pickle_identity(result)\n\n\nclass PickleExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always pickles input and output of the jobs.\n When using this executor for automated tests, it is ensured that using cluster executors in production\n won't provoke pickling-related problems.\n \"\"\"\n\n def submit(self, _func, *_args, **_kwargs):\n\n (func, args, kwargs) = pickle_identity((_func, _args, _kwargs))\n return super().submit(pickle_identity_executor, func, *args, **kwargs)\n\n\ndef noop():\n return True\n\n\ndid_start_test_multiprocessing = False\n\n\ndef test_valid_multiprocessing():\n\n msg = \"\"\"\n ###############################################################\n An attempt has been made to start a new process before the\n current process has finished its bootstrapping phase.\n\n This probably means that you are not using fork to start your\n child processes and you have forgotten to use the proper idiom\n in the main module:\n\n if __name__ == '__main__':\n main()\n ...\n ###############################################################\n \"\"\"\n\n with get_executor(\"multiprocessing\") as executor:\n try:\n res_fut = executor.submit(noop)\n assert res_fut.result() == True, msg\n except RuntimeError as exc:\n raise Exception(msg) from exc\n except EOFError as exc:\n raise Exception(msg) from exc\n\n\ndef get_executor(environment, **kwargs):\n\n if environment == \"slurm\":\n return SlurmExecutor(**kwargs)\n 
elif environment == \"pbs\":\n return PBSExecutor(**kwargs)\n elif environment == \"kubernetes\":\n return KubernetesExecutor(**kwargs)\n elif environment == \"multiprocessing\":\n global did_start_test_multiprocessing\n if not did_start_test_multiprocessing:\n did_start_test_multiprocessing = True\n test_valid_multiprocessing()\n\n return WrappedProcessPoolExecutor(**kwargs)\n elif environment == \"sequential\":\n return SequentialExecutor(**kwargs)\n elif environment == \"debug_sequential\":\n return DebugSequentialExecutor(**kwargs)\n elif environment == \"test_pickling\":\n return PickleExecutor(**kwargs)\n raise Exception(\"Unknown executor: {}\".format(environment))\n", "path": "cluster_tools/cluster_tools/__init__.py"}], "after_files": [{"content": "import logging\nimport multiprocessing\nimport os\nimport shutil\nimport tempfile\nfrom concurrent import futures\nfrom concurrent.futures import ProcessPoolExecutor\nfrom functools import partial\nfrom pathlib import Path\n\nfrom . import pickling\nfrom .multiprocessing_logging_handler import get_multiprocessing_logging_setup_fn\nfrom .schedulers.kube import KubernetesExecutor\nfrom .schedulers.pbs import PBSExecutor\nfrom .schedulers.slurm import SlurmExecutor\nfrom .util import enrich_future_with_uncaught_warning\n\n\ndef get_existent_kwargs_subset(whitelist, kwargs):\n new_kwargs = {}\n for arg_name in whitelist:\n if arg_name in kwargs:\n new_kwargs[arg_name] = kwargs[arg_name]\n\n return new_kwargs\n\n\nPROCESS_POOL_KWARGS_WHITELIST = [\"max_workers\", \"initializer\", \"initargs\"]\n\n\nclass WrappedProcessPoolExecutor(ProcessPoolExecutor):\n \"\"\"\n Wraps the ProcessPoolExecutor to add various features:\n - map_to_futures and map_unordered method\n - pickling of job's output (see output_pickle_path_getter and output_pickle_path)\n - job submission via pickling to circumvent bug in python < 3.8 (see MULTIPROCESSING_VIA_IO_TMP_DIR)\n \"\"\"\n\n def __init__(self, **kwargs):\n assert (not \"start_method\" in kwargs or kwargs[\"start_method\"] is None) or (\n not \"mp_context\" in kwargs\n ), \"Cannot use both `start_method` and `mp_context` kwargs.\"\n\n new_kwargs = get_existent_kwargs_subset(PROCESS_POOL_KWARGS_WHITELIST, kwargs)\n\n mp_context = None\n\n if \"mp_context\" in kwargs:\n mp_context = kwargs[\"mp_context\"]\n elif \"start_method\" in kwargs and kwargs[\"start_method\"] is not None:\n mp_context = multiprocessing.get_context(kwargs[\"start_method\"])\n elif \"MULTIPROCESSING_DEFAULT_START_METHOD\" in os.environ:\n mp_context = multiprocessing.get_context(\n os.environ[\"MULTIPROCESSING_DEFAULT_START_METHOD\"]\n )\n else:\n mp_context = multiprocessing.get_context(\"spawn\")\n\n new_kwargs[\"mp_context\"] = mp_context\n\n ProcessPoolExecutor.__init__(self, **new_kwargs)\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if os.environ.get(\"MULTIPROCESSING_VIA_IO\"):\n # If MULTIPROCESSING_VIA_IO is set, _submit_via_io is used to\n # workaround size constraints in pythons multiprocessing\n # implementation. 
Also see https://github.com/python/cpython/pull/10305/files\n # This should be fixed in python 3.8\n submit_fn = self._submit_via_io\n else:\n submit_fn = super().submit\n\n # Depending on the start_method and output_pickle_path, wrapper functions may need to be\n # executed in the new process context, before the actual code is ran.\n # These wrapper functions consume their arguments from *args, **kwargs and assume\n # that the next argument will be another function that is then called.\n # The call_stack holds all of these wrapper functions and their arguments in the correct order.\n # For example, call_stack = [wrapper_fn_1, wrapper_fn_1_arg_1, wrapper_fn_2, actual_fn, actual_fn_arg_1]\n # where wrapper_fn_1 is called, which eventually calls wrapper_fn_2, which eventually calls actual_fn.\n call_stack = []\n\n if self._mp_context.get_start_method() != \"fork\":\n # If a start_method other than the default \"fork\" is used, logging needs to be re-setup,\n # because the programming context is not inherited in those cases.\n multiprocessing_logging_setup_fn = get_multiprocessing_logging_setup_fn()\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._setup_logging_and_execute,\n multiprocessing_logging_setup_fn,\n ]\n )\n\n if output_pickle_path is not None:\n call_stack.extend(\n [\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n ]\n )\n\n fut = submit_fn(*call_stack, *args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _submit_via_io(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n opt_tmp_dir = os.environ.get(\"MULTIPROCESSING_VIA_IO_TMP_DIR\")\n if opt_tmp_dir is not None:\n dirpath = tempfile.mkdtemp(dir=opt_tmp_dir)\n else:\n dirpath = tempfile.mkdtemp()\n\n output_pickle_path = Path(dirpath) / \"jobdescription.pickle\"\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump((func, args, kwargs), file)\n\n future = super().submit(\n WrappedProcessPoolExecutor._execute_via_io, output_pickle_path\n )\n\n future.add_done_callback(\n partial(WrappedProcessPoolExecutor._remove_tmp_file, dirpath)\n )\n\n return future\n\n @staticmethod\n def _remove_tmp_file(path, _future):\n\n shutil.rmtree(path)\n\n @staticmethod\n def _setup_logging_and_execute(multiprocessing_logging_setup_fn, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n multiprocessing_logging_setup_fn()\n\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_via_io(serialized_function_info_path):\n\n with open(serialized_function_info_path, \"rb\") as file:\n (func, args, kwargs) = pickling.load(file)\n return func(*args, **kwargs)\n\n @staticmethod\n def _execute_and_persist_function(output_pickle_path, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n result = func(*args, **kwargs)\n\n with open(output_pickle_path, \"wb\") as file:\n pickling.dump(result, file)\n\n return result\n\n def map_unordered(self, func, args):\n\n futs = self.map_to_futures(func, args)\n\n # Return a separate generator to avoid that map_unordered\n # is executed lazily (otherwise, jobs would be submitted\n # lazily, as well).\n def result_generator():\n for fut in futures.as_completed(futs):\n yield fut.result()\n\n return result_generator()\n\n def map_to_futures(self, func, args, output_pickle_path_getter=None):\n\n if output_pickle_path_getter is not None:\n futs = [\n self.submit(\n func,\n arg,\n __cfut_options={\n \"output_pickle_path\": output_pickle_path_getter(arg)\n },\n )\n for arg in args\n ]\n else:\n futs = 
[self.submit(func, arg) for arg in args]\n\n return futs\n\n def forward_log(self, fut):\n \"\"\"\n Similar to the cluster executor, this method Takes a future from which the log file is forwarded to the active\n process. This method blocks as long as the future is not done.\n \"\"\"\n\n # Since the default behavior of process pool executors is to show the log in the main process\n # we don't need to do anything except for blocking until the future is done.\n return fut.result()\n\n\nclass SequentialExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always uses only one core. In essence,\n this is a sequential executor approach, but it still makes use of the standard pool approach.\n That way, switching between different executors should always work without any problems.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs[\"max_workers\"] = 1\n WrappedProcessPoolExecutor.__init__(self, **kwargs)\n\n\nclass DebugSequentialExecutor(SequentialExecutor):\n \"\"\"\n Only use for debugging purposes. This executor does not spawn new processes for its jobs. Therefore,\n setting breakpoint()'s should be possible without context-related problems.\n \"\"\"\n\n def submit(self, *args, **kwargs):\n\n output_pickle_path = None\n if \"__cfut_options\" in kwargs:\n output_pickle_path = kwargs[\"__cfut_options\"][\"output_pickle_path\"]\n del kwargs[\"__cfut_options\"]\n\n if output_pickle_path is not None:\n fut = self._blocking_submit(\n WrappedProcessPoolExecutor._execute_and_persist_function,\n output_pickle_path,\n *args,\n **kwargs,\n )\n else:\n fut = self._blocking_submit(*args, **kwargs)\n\n enrich_future_with_uncaught_warning(fut)\n return fut\n\n def _blocking_submit(self, *args, **kwargs):\n\n func = args[0]\n args = args[1:]\n\n fut = futures.Future()\n result = func(*args, **kwargs)\n fut.set_result(result)\n\n return fut\n\n\ndef pickle_identity(obj):\n return pickling.loads(pickling.dumps(obj))\n\n\ndef pickle_identity_executor(func, *args, **kwargs):\n result = func(*args, **kwargs)\n return pickle_identity(result)\n\n\nclass PickleExecutor(WrappedProcessPoolExecutor):\n \"\"\"\n The same as WrappedProcessPoolExecutor, but always pickles input and output of the jobs.\n When using this executor for automated tests, it is ensured that using cluster executors in production\n won't provoke pickling-related problems.\n \"\"\"\n\n def submit(self, _func, *_args, **_kwargs):\n\n (func, args, kwargs) = pickle_identity((_func, _args, _kwargs))\n return super().submit(pickle_identity_executor, func, *args, **kwargs)\n\n\ndef noop():\n return True\n\n\ndid_start_test_multiprocessing = False\n\n\ndef test_valid_multiprocessing():\n\n msg = \"\"\"\n ###############################################################\n An attempt has been made to start a new process before the\n current process has finished its bootstrapping phase.\n\n This probably means that you are not using fork to start your\n child processes and you have forgotten to use the proper idiom\n in the main module:\n\n if __name__ == '__main__':\n main()\n ...\n ###############################################################\n \"\"\"\n\n with get_executor(\"multiprocessing\") as executor:\n try:\n res_fut = executor.submit(noop)\n assert res_fut.result() == True, msg\n except RuntimeError as exc:\n raise Exception(msg) from exc\n except EOFError as exc:\n raise Exception(msg) from exc\n\n\ndef get_executor(environment, **kwargs):\n\n if environment == \"slurm\":\n return SlurmExecutor(**kwargs)\n 
elif environment == \"pbs\":\n return PBSExecutor(**kwargs)\n elif environment == \"kubernetes\":\n return KubernetesExecutor(**kwargs)\n elif environment == \"multiprocessing\":\n global did_start_test_multiprocessing\n if not did_start_test_multiprocessing:\n did_start_test_multiprocessing = True\n test_valid_multiprocessing()\n\n return WrappedProcessPoolExecutor(**kwargs)\n elif environment == \"sequential\":\n return SequentialExecutor(**kwargs)\n elif environment == \"debug_sequential\":\n return DebugSequentialExecutor(**kwargs)\n elif environment == \"test_pickling\":\n return PickleExecutor(**kwargs)\n raise Exception(\"Unknown executor: {}\".format(environment))\n", "path": "cluster_tools/cluster_tools/__init__.py"}]}
| 3,821 | 737 |
gh_patches_debug_12607
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-285
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A confusing error occurs when GPU and CPU arrays are mixed on `numerical_grad`
In this case, an unreadable error occurs. We need to check that arrays are not mixed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/gradient_check.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5
6
7 def numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):
8 grads = tuple(numpy.zeros_like(x) for x in inputs)
9 for x, gx in zip(inputs, grads):
10 flat_x = x.ravel()
11 flat_gx = gx.ravel()
12 for i in six.moves.range(flat_x.size):
13 orig = flat_x[i]
14 flat_x[i] = orig + eps
15 ys1 = f()
16 flat_x[i] = orig - eps
17 ys2 = f()
18 flat_x[i] = orig
19
20 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
21 if gy is not None:
22 dot = float(sum(((y1 - y2) * gy).ravel()))
23 flat_gx[i] += dot / (2 * eps)
24
25 return grads
26
27
28 def numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):
29 grads = tuple(cuda.zeros_like(x) for x in inputs)
30 for x, gx in zip(inputs, grads):
31 x = x.ravel()
32 gx = gx.ravel()
33 x_cpu = x.get()
34 gx_cpu = gx.get()
35 for i in six.moves.range(x_cpu.size):
36 orig = x_cpu[i]
37 x_cpu[i] = orig + eps
38 x.set(x_cpu)
39 ys1 = f()
40 x_cpu[i] = orig - eps
41 x.set(x_cpu)
42 ys2 = f()
43 x_cpu[i] = orig
44 x.set(x_cpu)
45
46 for y1, y2, gy in zip(ys1, ys2, grad_outputs):
47 if gy is not None:
48 dot = sum(((y1 - y2) * gy).ravel()).get()
49 gx_cpu[i] += dot / (2 * eps)
50 gx.set(gx_cpu)
51
52 return grads
53
54
55 def numerical_grad(f, inputs, grad_outputs, eps=1e-3):
56 """Computes numerical gradient by finite differences.
57
58 This function is used to implement gradient check. For usage example, see
59 unit tests of :mod:`chainer.functions`.
60
61 Args:
62 f (function): Python function with no arguments that runs forward
63 computation and returns the result.
64 inputs (tuple of arrays): Tuple of arrays that should be treated as
65 inputs. Each element of them is slightly modified to realize
66 numerical gradient by finite differences.
67 grad_outputs (tuple of arrays): Tuple of arrays that are treated as
68 output gradients.
69 eps (float): Epsilon value of finite differences.
70
71 Returns:
72 tuple: Numerical gradient arrays corresponding to ``inputs``.
73
74 """
75 assert eps > 0
76 if any(isinstance(x, cuda.GPUArray) for x in inputs):
77 return numerical_grad_gpu(f, inputs, grad_outputs, eps)
78 return numerical_grad_cpu(f, inputs, grad_outputs, eps)
79
80
81 def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
82 """Asserts if some corresponding element of x and y differs too much.
83
84 This function can handle both CPU and GPU arrays simultaneously.
85
86 Args:
87 x: Left-hand-side array.
88 y: Right-hand-side array.
89 atol (float): Absolute tolerance.
90 rtol (float): Relative tolerance.
91 verbose (bool): If True, it outputs verbose messages on error.
92
93 """
94 x = cuda.to_cpu(x)
95 y = cuda.to_cpu(y)
96 try:
97 numpy.testing.assert_allclose(
98 x, y, atol=atol, rtol=rtol, verbose=verbose)
99 except Exception:
100 print('error:', numpy.abs(x - y).max())
101 raise
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py
--- a/chainer/gradient_check.py
+++ b/chainer/gradient_check.py
@@ -73,9 +73,18 @@
"""
assert eps > 0
- if any(isinstance(x, cuda.GPUArray) for x in inputs):
+ inputs = tuple(inputs)
+ grad_outputs = tuple(grad_outputs)
+ gpu = any(isinstance(x, cuda.GPUArray) for x in inputs + grad_outputs)
+
+ cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)
+
+ if gpu and cpu:
+ raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
+ elif gpu:
return numerical_grad_gpu(f, inputs, grad_outputs, eps)
- return numerical_grad_cpu(f, inputs, grad_outputs, eps)
+ else:
+ return numerical_grad_cpu(f, inputs, grad_outputs, eps)
def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):
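
For readers who want to see the intent of this patch in isolation, here is a minimal, self-contained sketch of the guard it adds. `FakeGPUArray` is a hypothetical stand-in for `cuda.GPUArray` so the snippet runs without Chainer or a GPU; the real patch performs the same check inside `numerical_grad`.

```python
import numpy as np


class FakeGPUArray:
    """Hypothetical stand-in for cuda.GPUArray (illustration only)."""


def check_not_mixed(arrays):
    """Mirror of the guard added by the patch: refuse mixed GPU/CPU inputs."""
    arrays = tuple(arrays)
    gpu = any(isinstance(x, FakeGPUArray) for x in arrays)
    cpu = any(isinstance(x, np.ndarray) for x in arrays)
    if gpu and cpu:
        raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')
    return 'gpu' if gpu else 'cpu'


print(check_not_mixed([np.zeros(3)]))               # -> cpu
try:
    check_not_mixed([np.zeros(3), FakeGPUArray()])  # mixed inputs
except RuntimeError as err:
    print(err)                                      # explicit, readable message
```

The point of the change is that mixing array types now fails with an explicit `RuntimeError` and a clear message, instead of an unrelated error raised deep inside the GPU or CPU code path.
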
|
{"golden_diff": "diff --git a/chainer/gradient_check.py b/chainer/gradient_check.py\n--- a/chainer/gradient_check.py\n+++ b/chainer/gradient_check.py\n@@ -73,9 +73,18 @@\n \n \"\"\"\n assert eps > 0\n- if any(isinstance(x, cuda.GPUArray) for x in inputs):\n+ inputs = tuple(inputs)\n+ grad_outputs = tuple(grad_outputs)\n+ gpu = any(isinstance(x, cuda.GPUArray) for x in inputs + grad_outputs)\n+\n+ cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)\n+\n+ if gpu and cpu:\n+ raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')\n+ elif gpu:\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n- return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n+ else:\n+ return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n \n \n def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n", "issue": "A confusing error occurs when GPU and CPU arrays are mixed on `numerical_grad`\nIn this case, unreadable error occurs. We nee to check if arrays are not mixed.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\ndef numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n flat_x = x.ravel()\n flat_gx = gx.ravel()\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = float(sum(((y1 - y2) * gy).ravel()))\n flat_gx[i] += dot / (2 * eps)\n\n return grads\n\n\ndef numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(cuda.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n x = x.ravel()\n gx = gx.ravel()\n x_cpu = x.get()\n gx_cpu = gx.get()\n for i in six.moves.range(x_cpu.size):\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n ys1 = f()\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n ys2 = f()\n x_cpu[i] = orig\n x.set(x_cpu)\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = sum(((y1 - y2) * gy).ravel()).get()\n gx_cpu[i] += dot / (2 * eps)\n gx.set(gx_cpu)\n\n return grads\n\n\ndef numerical_grad(f, inputs, grad_outputs, eps=1e-3):\n \"\"\"Computes numerical gradient by finite differences.\n\n This function is used to implement gradient check. For usage example, see\n unit tests of :mod:`chainer.functions`.\n\n Args:\n f (function): Python function with no arguments that runs forward\n computation and returns the result.\n inputs (tuple of arrays): Tuple of arrays that should be treated as\n inputs. 
Each element of them is slightly modified to realize\n numerical gradient by finite differences.\n grad_outputs (tuple of arrays): Tuple of arrays that are treated as\n output gradients.\n eps (float): Epsilon value of finite differences.\n\n Returns:\n tuple: Numerical gradient arrays corresponding to ``inputs``.\n\n \"\"\"\n assert eps > 0\n if any(isinstance(x, cuda.GPUArray) for x in inputs):\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If True, it outputs verbose messages on error.\n\n \"\"\"\n x = cuda.to_cpu(x)\n y = cuda.to_cpu(y)\n try:\n numpy.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n except Exception:\n print('error:', numpy.abs(x - y).max())\n raise\n", "path": "chainer/gradient_check.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\n\n\ndef numerical_grad_cpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(numpy.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n flat_x = x.ravel()\n flat_gx = gx.ravel()\n for i in six.moves.range(flat_x.size):\n orig = flat_x[i]\n flat_x[i] = orig + eps\n ys1 = f()\n flat_x[i] = orig - eps\n ys2 = f()\n flat_x[i] = orig\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = float(sum(((y1 - y2) * gy).ravel()))\n flat_gx[i] += dot / (2 * eps)\n\n return grads\n\n\ndef numerical_grad_gpu(f, inputs, grad_outputs, eps=1e-3):\n grads = tuple(cuda.zeros_like(x) for x in inputs)\n for x, gx in zip(inputs, grads):\n x = x.ravel()\n gx = gx.ravel()\n x_cpu = x.get()\n gx_cpu = gx.get()\n for i in six.moves.range(x_cpu.size):\n orig = x_cpu[i]\n x_cpu[i] = orig + eps\n x.set(x_cpu)\n ys1 = f()\n x_cpu[i] = orig - eps\n x.set(x_cpu)\n ys2 = f()\n x_cpu[i] = orig\n x.set(x_cpu)\n\n for y1, y2, gy in zip(ys1, ys2, grad_outputs):\n if gy is not None:\n dot = sum(((y1 - y2) * gy).ravel()).get()\n gx_cpu[i] += dot / (2 * eps)\n gx.set(gx_cpu)\n\n return grads\n\n\ndef numerical_grad(f, inputs, grad_outputs, eps=1e-3):\n \"\"\"Computes numerical gradient by finite differences.\n\n This function is used to implement gradient check. For usage example, see\n unit tests of :mod:`chainer.functions`.\n\n Args:\n f (function): Python function with no arguments that runs forward\n computation and returns the result.\n inputs (tuple of arrays): Tuple of arrays that should be treated as\n inputs. 
Each element of them is slightly modified to realize\n numerical gradient by finite differences.\n grad_outputs (tuple of arrays): Tuple of arrays that are treated as\n output gradients.\n eps (float): Epsilon value of finite differences.\n\n Returns:\n tuple: Numerical gradient arrays corresponding to ``inputs``.\n\n \"\"\"\n assert eps > 0\n inputs = tuple(inputs)\n grad_outputs = tuple(grad_outputs)\n gpu = any(isinstance(x, cuda.GPUArray) for x in inputs + grad_outputs)\n\n cpu = any(isinstance(x, numpy.ndarray) for x in inputs + grad_outputs)\n\n if gpu and cpu:\n raise RuntimeError('Do not mix GPU and CPU arrays in `numerical_grad`')\n elif gpu:\n return numerical_grad_gpu(f, inputs, grad_outputs, eps)\n else:\n return numerical_grad_cpu(f, inputs, grad_outputs, eps)\n\n\ndef assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n \"\"\"Asserts if some corresponding element of x and y differs too much.\n\n This function can handle both CPU and GPU arrays simultaneously.\n\n Args:\n x: Left-hand-side array.\n y: Right-hand-side array.\n atol (float): Absolute tolerance.\n rtol (float): Relative tolerance.\n verbose (bool): If True, it outputs verbose messages on error.\n\n \"\"\"\n x = cuda.to_cpu(x)\n y = cuda.to_cpu(y)\n try:\n numpy.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n except Exception:\n print('error:', numpy.abs(x - y).max())\n raise\n", "path": "chainer/gradient_check.py"}]}
| 1,312 | 240 |
gh_patches_debug_21649
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-1439
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Membership edit fails due to form errors
The error is due to a missing `comment` field in `MembershipForm`.
`MembershipForm.save()` should be placed in `MembershipCreateForm`, because that is where the `comment` field is declared and that is what should trigger the signal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `amy/fiscal/forms.py`
Content:
```
1 from django import forms
2 from django.core.validators import RegexValidator
3 from django.dispatch import receiver
4 from markdownx.fields import MarkdownxFormField
5
6 from workshops.forms import (
7 BootstrapHelper,
8 WidgetOverrideMixin,
9 form_saved_add_comment,
10 )
11 from workshops.models import (
12 Organization,
13 Membership,
14 Sponsorship,
15 )
16 # this is used instead of Django Autocomplete Light widgets
17 # see issue #1330: https://github.com/swcarpentry/amy/issues/1330
18 from workshops.fields import (
19 ModelSelect2,
20 )
21 from workshops.signals import create_comment_signal
22
23
24 # settings for Select2
25 # this makes it possible for autocomplete widget to fit in low-width sidebar
26 SIDEBAR_DAL_WIDTH = {
27 'data-width': '100%',
28 'width': 'style',
29 }
30
31
32 class OrganizationForm(forms.ModelForm):
33 domain = forms.CharField(
34 max_length=Organization._meta.get_field('domain').max_length,
35 validators=[
36 RegexValidator(
37 r'[^\w\.-]+', inverse_match=True,
38 message='Please enter only the domain (such as "math.esu.edu")'
39 ' without a leading "http://" or a trailing "/".')
40 ],
41 )
42
43 helper = BootstrapHelper(add_cancel_button=False,
44 duplicate_buttons_on_top=True)
45
46 class Meta:
47 model = Organization
48 fields = ['domain', 'fullname', 'country']
49
50
51 class OrganizationCreateForm(OrganizationForm):
52 comment = MarkdownxFormField(
53 label='Comment',
54 help_text='This will be added to comments after the organization '
55 'is created.',
56 widget=forms.Textarea,
57 required=False,
58 )
59
60 class Meta(OrganizationForm.Meta):
61 fields = OrganizationForm.Meta.fields.copy()
62 fields.append('comment')
63
64 def save(self, *args, **kwargs):
65 res = super().save(*args, **kwargs)
66
67 create_comment_signal.send(sender=self.__class__,
68 content_object=res,
69 comment=self.cleaned_data['comment'],
70 timestamp=None)
71
72 return res
73
74
75 class MembershipForm(forms.ModelForm):
76 helper = BootstrapHelper(add_cancel_button=False)
77
78 organization = forms.ModelChoiceField(
79 label='Organization',
80 required=True,
81 queryset=Organization.objects.all(),
82 widget=ModelSelect2(url='organization-lookup')
83 )
84
85 class Meta:
86 model = Membership
87 fields = [
88 'organization', 'variant', 'agreement_start', 'agreement_end',
89 'contribution_type', 'workshops_without_admin_fee_per_agreement',
90 'self_organized_workshops_per_agreement',
91 'seats_instructor_training',
92 'additional_instructor_training_seats',
93 ]
94
95 def save(self, *args, **kwargs):
96 res = super().save(*args, **kwargs)
97
98 create_comment_signal.send(sender=self.__class__,
99 content_object=res,
100 comment=self.cleaned_data['comment'],
101 timestamp=None)
102
103 return res
104
105
106 class MembershipCreateForm(MembershipForm):
107 comment = MarkdownxFormField(
108 label='Comment',
109 help_text='This will be added to comments after the membership is '
110 'created.',
111 widget=forms.Textarea,
112 required=False,
113 )
114
115 class Meta(MembershipForm.Meta):
116 fields = MembershipForm.Meta.fields.copy()
117 fields.append('comment')
118
119
120 class SponsorshipForm(WidgetOverrideMixin, forms.ModelForm):
121
122 helper = BootstrapHelper(submit_label='Add')
123
124 class Meta:
125 model = Sponsorship
126 fields = '__all__'
127 widgets = {
128 'organization': ModelSelect2(url='organization-lookup'),
129 'event': ModelSelect2(url='event-lookup'),
130 'contact': ModelSelect2(url='person-lookup'),
131 }
132
133
134 # ----------------------------------------------------------
135 # Signals
136
137 # adding @receiver decorator to the function defined in `workshops.forms`
138 form_saved_add_comment = receiver(
139 create_comment_signal,
140 sender=OrganizationCreateForm,
141 )(form_saved_add_comment)
142
143 # adding @receiver decorator to the function defined in `workshops.forms`
144 form_saved_add_comment = receiver(
145 create_comment_signal,
146 sender=MembershipCreateForm,
147 )(form_saved_add_comment)
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/amy/fiscal/forms.py b/amy/fiscal/forms.py
--- a/amy/fiscal/forms.py
+++ b/amy/fiscal/forms.py
@@ -92,16 +92,6 @@
'additional_instructor_training_seats',
]
- def save(self, *args, **kwargs):
- res = super().save(*args, **kwargs)
-
- create_comment_signal.send(sender=self.__class__,
- content_object=res,
- comment=self.cleaned_data['comment'],
- timestamp=None)
-
- return res
-
class MembershipCreateForm(MembershipForm):
comment = MarkdownxFormField(
@@ -116,6 +106,16 @@
fields = MembershipForm.Meta.fields.copy()
fields.append('comment')
+ def save(self, *args, **kwargs):
+ res = super().save(*args, **kwargs)
+
+ create_comment_signal.send(sender=self.__class__,
+ content_object=res,
+ comment=self.cleaned_data['comment'],
+ timestamp=None)
+
+ return res
+
class SponsorshipForm(WidgetOverrideMixin, forms.ModelForm):
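
As a plain-Python sketch of why the `save()` override belongs on the subclass that declares `comment` (hypothetical class names, no Django required):

```python
class MembershipFormSketch:
    """Hypothetical stand-in for the edit form; it declares no comment field."""

    def __init__(self, cleaned_data):
        self.cleaned_data = cleaned_data

    def save(self):
        # The base form must not reach for 'comment': edit forms never have it.
        return self.cleaned_data


class MembershipCreateFormSketch(MembershipFormSketch):
    """Hypothetical stand-in for the create form, which does declare comment."""

    def save(self):
        data = super().save()
        # Only here is the comment guaranteed to exist, so only here is it read.
        return data, data.get('comment', '')


print(MembershipFormSketch({'variant': 'gold'}).save())
print(MembershipCreateFormSketch({'variant': 'gold', 'comment': 'hi'}).save())
```

Before the patch, the shared base class read `self.cleaned_data['comment']`, which broke the membership edit view because that field is only declared on the create form.
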
|
{"golden_diff": "diff --git a/amy/fiscal/forms.py b/amy/fiscal/forms.py\n--- a/amy/fiscal/forms.py\n+++ b/amy/fiscal/forms.py\n@@ -92,16 +92,6 @@\n 'additional_instructor_training_seats',\n ]\n \n- def save(self, *args, **kwargs):\n- res = super().save(*args, **kwargs)\n-\n- create_comment_signal.send(sender=self.__class__,\n- content_object=res,\n- comment=self.cleaned_data['comment'],\n- timestamp=None)\n-\n- return res\n-\n \n class MembershipCreateForm(MembershipForm):\n comment = MarkdownxFormField(\n@@ -116,6 +106,16 @@\n fields = MembershipForm.Meta.fields.copy()\n fields.append('comment')\n \n+ def save(self, *args, **kwargs):\n+ res = super().save(*args, **kwargs)\n+\n+ create_comment_signal.send(sender=self.__class__,\n+ content_object=res,\n+ comment=self.cleaned_data['comment'],\n+ timestamp=None)\n+\n+ return res\n+\n \n class SponsorshipForm(WidgetOverrideMixin, forms.ModelForm):\n", "issue": "Membership edit fails due to form errors\nThe error is due to missing `comment` field in `MembershipForm`.\r\n\r\n`MembershipForm.save()` should be placed in `MembershipCreateForm`, because that's where `comment` field is and that's what should trigger the signal.\n", "before_files": [{"content": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom django.dispatch import receiver\nfrom markdownx.fields import MarkdownxFormField\n\nfrom workshops.forms import (\n BootstrapHelper,\n WidgetOverrideMixin,\n form_saved_add_comment,\n)\nfrom workshops.models import (\n Organization,\n Membership,\n Sponsorship,\n)\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n ModelSelect2,\n)\nfrom workshops.signals import create_comment_signal\n\n\n# settings for Select2\n# this makes it possible for autocomplete widget to fit in low-width sidebar\nSIDEBAR_DAL_WIDTH = {\n 'data-width': '100%',\n 'width': 'style',\n}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n r'[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n helper = BootstrapHelper(add_cancel_button=False,\n duplicate_buttons_on_top=True)\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country']\n\n\nclass OrganizationCreateForm(OrganizationForm):\n comment = MarkdownxFormField(\n label='Comment',\n help_text='This will be added to comments after the organization '\n 'is created.',\n widget=forms.Textarea,\n required=False,\n )\n\n class Meta(OrganizationForm.Meta):\n fields = OrganizationForm.Meta.fields.copy()\n fields.append('comment')\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data['comment'],\n timestamp=None)\n\n return res\n\n\nclass MembershipForm(forms.ModelForm):\n helper = BootstrapHelper(add_cancel_button=False)\n\n organization = forms.ModelChoiceField(\n label='Organization',\n required=True,\n queryset=Organization.objects.all(),\n widget=ModelSelect2(url='organization-lookup')\n )\n\n class Meta:\n model = Membership\n fields = [\n 'organization', 'variant', 'agreement_start', 'agreement_end',\n 'contribution_type', 'workshops_without_admin_fee_per_agreement',\n 
'self_organized_workshops_per_agreement',\n 'seats_instructor_training',\n 'additional_instructor_training_seats',\n ]\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data['comment'],\n timestamp=None)\n\n return res\n\n\nclass MembershipCreateForm(MembershipForm):\n comment = MarkdownxFormField(\n label='Comment',\n help_text='This will be added to comments after the membership is '\n 'created.',\n widget=forms.Textarea,\n required=False,\n )\n\n class Meta(MembershipForm.Meta):\n fields = MembershipForm.Meta.fields.copy()\n fields.append('comment')\n\n\nclass SponsorshipForm(WidgetOverrideMixin, forms.ModelForm):\n\n helper = BootstrapHelper(submit_label='Add')\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {\n 'organization': ModelSelect2(url='organization-lookup'),\n 'event': ModelSelect2(url='event-lookup'),\n 'contact': ModelSelect2(url='person-lookup'),\n }\n\n\n# ----------------------------------------------------------\n# Signals\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=OrganizationCreateForm,\n)(form_saved_add_comment)\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=MembershipCreateForm,\n)(form_saved_add_comment)\n", "path": "amy/fiscal/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.core.validators import RegexValidator\nfrom django.dispatch import receiver\nfrom markdownx.fields import MarkdownxFormField\n\nfrom workshops.forms import (\n BootstrapHelper,\n WidgetOverrideMixin,\n form_saved_add_comment,\n)\nfrom workshops.models import (\n Organization,\n Membership,\n Sponsorship,\n)\n# this is used instead of Django Autocomplete Light widgets\n# see issue #1330: https://github.com/swcarpentry/amy/issues/1330\nfrom workshops.fields import (\n ModelSelect2,\n)\nfrom workshops.signals import create_comment_signal\n\n\n# settings for Select2\n# this makes it possible for autocomplete widget to fit in low-width sidebar\nSIDEBAR_DAL_WIDTH = {\n 'data-width': '100%',\n 'width': 'style',\n}\n\n\nclass OrganizationForm(forms.ModelForm):\n domain = forms.CharField(\n max_length=Organization._meta.get_field('domain').max_length,\n validators=[\n RegexValidator(\n r'[^\\w\\.-]+', inverse_match=True,\n message='Please enter only the domain (such as \"math.esu.edu\")'\n ' without a leading \"http://\" or a trailing \"/\".')\n ],\n )\n\n helper = BootstrapHelper(add_cancel_button=False,\n duplicate_buttons_on_top=True)\n\n class Meta:\n model = Organization\n fields = ['domain', 'fullname', 'country']\n\n\nclass OrganizationCreateForm(OrganizationForm):\n comment = MarkdownxFormField(\n label='Comment',\n help_text='This will be added to comments after the organization '\n 'is created.',\n widget=forms.Textarea,\n required=False,\n )\n\n class Meta(OrganizationForm.Meta):\n fields = OrganizationForm.Meta.fields.copy()\n fields.append('comment')\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data['comment'],\n timestamp=None)\n\n return res\n\n\nclass MembershipForm(forms.ModelForm):\n helper = BootstrapHelper(add_cancel_button=False)\n\n organization = forms.ModelChoiceField(\n 
label='Organization',\n required=True,\n queryset=Organization.objects.all(),\n widget=ModelSelect2(url='organization-lookup')\n )\n\n class Meta:\n model = Membership\n fields = [\n 'organization', 'variant', 'agreement_start', 'agreement_end',\n 'contribution_type', 'workshops_without_admin_fee_per_agreement',\n 'self_organized_workshops_per_agreement',\n 'seats_instructor_training',\n 'additional_instructor_training_seats',\n ]\n\n\nclass MembershipCreateForm(MembershipForm):\n comment = MarkdownxFormField(\n label='Comment',\n help_text='This will be added to comments after the membership is '\n 'created.',\n widget=forms.Textarea,\n required=False,\n )\n\n class Meta(MembershipForm.Meta):\n fields = MembershipForm.Meta.fields.copy()\n fields.append('comment')\n\n def save(self, *args, **kwargs):\n res = super().save(*args, **kwargs)\n\n create_comment_signal.send(sender=self.__class__,\n content_object=res,\n comment=self.cleaned_data['comment'],\n timestamp=None)\n\n return res\n\n\nclass SponsorshipForm(WidgetOverrideMixin, forms.ModelForm):\n\n helper = BootstrapHelper(submit_label='Add')\n\n class Meta:\n model = Sponsorship\n fields = '__all__'\n widgets = {\n 'organization': ModelSelect2(url='organization-lookup'),\n 'event': ModelSelect2(url='event-lookup'),\n 'contact': ModelSelect2(url='person-lookup'),\n }\n\n\n# ----------------------------------------------------------\n# Signals\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=OrganizationCreateForm,\n)(form_saved_add_comment)\n\n# adding @receiver decorator to the function defined in `workshops.forms`\nform_saved_add_comment = receiver(\n create_comment_signal,\n sender=MembershipCreateForm,\n)(form_saved_add_comment)\n", "path": "amy/fiscal/forms.py"}]}
| 1,531 | 250 |
gh_patches_debug_41912
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-6181
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] xerces-c/3.2.3: xmlch-type should be a package option
<!--
Please don't forget to update the issue title.
Include all applicable information to help us reproduce your problem.
-->
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **xerces-c/3.2.3**
* Operating System+version: **Windows 10**
* Compiler+version: **Visual Studio 2019**
* Conan version: **conan 1.37.0**
* Python version: **Python 3.9**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Windows
os_build=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
build_type=Release
[options]
[build_requires]
*: cmake/3.19.6
[env]
```
According to the library [cmake-files](https://github.com/apache/xerces-c/blob/v3.2.3/cmake/XercesXMLCh.cmake) `xerces-c` supports `uint16_t`, `char16_t` and `wchar_t`, while the current [recipe supports only `uint16_t`](https://github.com/conan-io/conan-center-index/blob/c4f06bca2cf69dd5429f4867bced49872e4300a2/recipes/xerces-c/all/conanfile.py#L54).
The library I am working on uses `char16_t` for the `xmlch-type`, and the build fails because the `xerces-c` recipe provides only `uint16_t` for `xmlch-type`:
`error C2440: '<function-style-cast>': cannot convert from 'const XMLCh *' to 'std::u16string'`
We need `xmlch-type` as an option so that the user could choose any of the types supported by `xerces-c`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/xerces-c/all/conanfile.py`
Content:
```
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5
6 class XercesCConan(ConanFile):
7 name = "xerces-c"
8 description = "Xerces-C++ is a validating XML parser written in a portable subset of C++"
9 topics = ("conan", "xerces", "XML", "validation", "DOM", "SAX", "SAX2")
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "http://xerces.apache.org/xerces-c/index.html"
12 license = "Apache-2.0"
13 exports_sources = ["CMakeLists.txt"]
14 generators = "cmake"
15 settings = "os", "arch", "compiler", "build_type"
16 options = {"shared": [True, False], "fPIC": [True, False], "char_type": ["uint16_t", "wchar_t"]}
17 default_options = {"shared": False, "fPIC": True, "char_type": "uint16_t"}
18
19 _cmake = None
20
21 @property
22 def _source_subfolder(self):
23 return "source_subfolder"
24
25 @property
26 def _build_subfolder(self):
27 return "build_subfolder"
28
29 def validate(self):
30 if self.options.char_type == "wchar_t" and self.settings.os != "Windows":
31 raise ConanInvalidConfiguration("Option 'char_type=wchar_t' is only supported in Windows")
32
33
34 def config_options(self):
35 if self.settings.os == "Windows":
36 del self.options.fPIC
37
38 def configure(self):
39 if self.settings.os not in ("Windows", "Macos", "Linux"):
40 raise ConanInvalidConfiguration("OS is not supported")
41 if self.options.shared:
42 del self.options.fPIC
43
44 def source(self):
45 tools.get(**self.conan_data["sources"][self.version])
46 extracted_dir = self.name + "-" + self.version
47 os.rename(extracted_dir, self._source_subfolder)
48
49 def _configure_cmake(self):
50 if self._cmake:
51 return self._cmake
52 self._cmake = CMake(self)
53 # https://xerces.apache.org/xerces-c/build-3.html
54 self._cmake.definitions["network-accessor"] = {"Windows": "winsock",
55 "Macos": "cfurl",
56 "Linux": "socket"}.get(str(self.settings.os))
57 self._cmake.definitions["transcoder"] = {"Windows": "windows",
58 "Macos": "macosunicodeconverter",
59 "Linux": "gnuiconv"}.get(str(self.settings.os))
60 self._cmake.definitions["message-loader"] = "inmemory"
61
62 self._cmake.definitions["xmlch-type"] = self.options.char_type
63
64 self._cmake.definitions["mutex-manager"] = {"Windows": "windows",
65 "Macos": "posix",
66 "Linux": "posix"}.get(str(self.settings.os))
67 # avoid picking up system dependency
68 self._cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_CURL"] = True
69 self._cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_ICU"] = True
70 self._cmake.configure(build_folder=self._build_subfolder)
71 return self._cmake
72
73 def build(self):
74 cmake = self._configure_cmake()
75 cmake.build()
76
77 def package(self):
78 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
79 cmake = self._configure_cmake()
80 cmake.install()
81 # remove unneeded directories
82 tools.rmdir(os.path.join(self.package_folder, "share"))
83 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
84 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
85 tools.rmdir(os.path.join(self.package_folder, "cmake"))
86
87 def package_info(self):
88 self.cpp_info.libs = tools.collect_libs(self)
89 if self.settings.os == "Macos":
90 self.cpp_info.frameworks = ["CoreFoundation", "CoreServices"]
91 elif self.settings.os == "Linux":
92 self.cpp_info.system_libs.append("pthread")
93 self.cpp_info.names["cmake_find_package"] = "XercesC"
94 self.cpp_info.names["cmake_find_package_multi"] = "XercesC"
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/xerces-c/all/conanfile.py b/recipes/xerces-c/all/conanfile.py
--- a/recipes/xerces-c/all/conanfile.py
+++ b/recipes/xerces-c/all/conanfile.py
@@ -5,7 +5,9 @@
class XercesCConan(ConanFile):
name = "xerces-c"
- description = "Xerces-C++ is a validating XML parser written in a portable subset of C++"
+ description = (
+ "Xerces-C++ is a validating XML parser written in a portable subset of C++"
+ )
topics = ("conan", "xerces", "XML", "validation", "DOM", "SAX", "SAX2")
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://xerces.apache.org/xerces-c/index.html"
@@ -13,7 +15,11 @@
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
- options = {"shared": [True, False], "fPIC": [True, False], "char_type": ["uint16_t", "wchar_t"]}
+ options = {
+ "shared": [True, False],
+ "fPIC": [True, False],
+ "char_type": ["uint16_t", "char16_t", "wchar_t"],
+ }
default_options = {"shared": False, "fPIC": True, "char_type": "uint16_t"}
_cmake = None
@@ -25,11 +31,12 @@
@property
def _build_subfolder(self):
return "build_subfolder"
-
+
def validate(self):
if self.options.char_type == "wchar_t" and self.settings.os != "Windows":
- raise ConanInvalidConfiguration("Option 'char_type=wchar_t' is only supported in Windows")
-
+ raise ConanInvalidConfiguration(
+ "Option 'char_type=wchar_t' is only supported in Windows"
+ )
def config_options(self):
if self.settings.os == "Windows":
@@ -51,19 +58,25 @@
return self._cmake
self._cmake = CMake(self)
# https://xerces.apache.org/xerces-c/build-3.html
- self._cmake.definitions["network-accessor"] = {"Windows": "winsock",
- "Macos": "cfurl",
- "Linux": "socket"}.get(str(self.settings.os))
- self._cmake.definitions["transcoder"] = {"Windows": "windows",
- "Macos": "macosunicodeconverter",
- "Linux": "gnuiconv"}.get(str(self.settings.os))
+ self._cmake.definitions["network-accessor"] = {
+ "Windows": "winsock",
+ "Macos": "cfurl",
+ "Linux": "socket",
+ }.get(str(self.settings.os))
+ self._cmake.definitions["transcoder"] = {
+ "Windows": "windows",
+ "Macos": "macosunicodeconverter",
+ "Linux": "gnuiconv",
+ }.get(str(self.settings.os))
self._cmake.definitions["message-loader"] = "inmemory"
self._cmake.definitions["xmlch-type"] = self.options.char_type
- self._cmake.definitions["mutex-manager"] = {"Windows": "windows",
- "Macos": "posix",
- "Linux": "posix"}.get(str(self.settings.os))
+ self._cmake.definitions["mutex-manager"] = {
+ "Windows": "windows",
+ "Macos": "posix",
+ "Linux": "posix",
+ }.get(str(self.settings.os))
# avoid picking up system dependency
self._cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_CURL"] = True
self._cmake.definitions["CMAKE_DISABLE_FIND_PACKAGE_ICU"] = True
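
For consumers, the practical effect is that `char_type` can now be set to `char16_t`. A hypothetical consumer recipe (not part of conan-center-index) might request it as below, assuming Conan 1.x option syntax; the equivalent command-line form would be `-o xerces-c:char_type=char16_t`.

```python
from conans import ConanFile


class ConsumerConan(ConanFile):
    """Hypothetical recipe that consumes xerces-c with the new option value."""

    settings = "os", "arch", "compiler", "build_type"
    requires = "xerces-c/3.2.3"
    generators = "cmake"
    # 'char16_t' is only accepted once the option list in the patch includes it.
    default_options = {"xerces-c:char_type": "char16_t"}
```
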
|
{"golden_diff": "diff --git a/recipes/xerces-c/all/conanfile.py b/recipes/xerces-c/all/conanfile.py\n--- a/recipes/xerces-c/all/conanfile.py\n+++ b/recipes/xerces-c/all/conanfile.py\n@@ -5,7 +5,9 @@\n \n class XercesCConan(ConanFile):\n name = \"xerces-c\"\n- description = \"Xerces-C++ is a validating XML parser written in a portable subset of C++\"\n+ description = (\n+ \"Xerces-C++ is a validating XML parser written in a portable subset of C++\"\n+ )\n topics = (\"conan\", \"xerces\", \"XML\", \"validation\", \"DOM\", \"SAX\", \"SAX2\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://xerces.apache.org/xerces-c/index.html\"\n@@ -13,7 +15,11 @@\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n- options = {\"shared\": [True, False], \"fPIC\": [True, False], \"char_type\": [\"uint16_t\", \"wchar_t\"]}\n+ options = {\n+ \"shared\": [True, False],\n+ \"fPIC\": [True, False],\n+ \"char_type\": [\"uint16_t\", \"char16_t\", \"wchar_t\"],\n+ }\n default_options = {\"shared\": False, \"fPIC\": True, \"char_type\": \"uint16_t\"}\n \n _cmake = None\n@@ -25,11 +31,12 @@\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n- \n+\n def validate(self):\n if self.options.char_type == \"wchar_t\" and self.settings.os != \"Windows\":\n- raise ConanInvalidConfiguration(\"Option 'char_type=wchar_t' is only supported in Windows\")\n-\n+ raise ConanInvalidConfiguration(\n+ \"Option 'char_type=wchar_t' is only supported in Windows\"\n+ )\n \n def config_options(self):\n if self.settings.os == \"Windows\":\n@@ -51,19 +58,25 @@\n return self._cmake\n self._cmake = CMake(self)\n # https://xerces.apache.org/xerces-c/build-3.html\n- self._cmake.definitions[\"network-accessor\"] = {\"Windows\": \"winsock\",\n- \"Macos\": \"cfurl\",\n- \"Linux\": \"socket\"}.get(str(self.settings.os))\n- self._cmake.definitions[\"transcoder\"] = {\"Windows\": \"windows\",\n- \"Macos\": \"macosunicodeconverter\",\n- \"Linux\": \"gnuiconv\"}.get(str(self.settings.os))\n+ self._cmake.definitions[\"network-accessor\"] = {\n+ \"Windows\": \"winsock\",\n+ \"Macos\": \"cfurl\",\n+ \"Linux\": \"socket\",\n+ }.get(str(self.settings.os))\n+ self._cmake.definitions[\"transcoder\"] = {\n+ \"Windows\": \"windows\",\n+ \"Macos\": \"macosunicodeconverter\",\n+ \"Linux\": \"gnuiconv\",\n+ }.get(str(self.settings.os))\n self._cmake.definitions[\"message-loader\"] = \"inmemory\"\n \n self._cmake.definitions[\"xmlch-type\"] = self.options.char_type\n \n- self._cmake.definitions[\"mutex-manager\"] = {\"Windows\": \"windows\",\n- \"Macos\": \"posix\",\n- \"Linux\": \"posix\"}.get(str(self.settings.os))\n+ self._cmake.definitions[\"mutex-manager\"] = {\n+ \"Windows\": \"windows\",\n+ \"Macos\": \"posix\",\n+ \"Linux\": \"posix\",\n+ }.get(str(self.settings.os))\n # avoid picking up system dependency\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_CURL\"] = True\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_ICU\"] = True\n", "issue": "[package] xerces-c/3.2.3: xmlch-type should be a package option\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **xerces-c/3.2.3**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **Visual Studio 2019**\r\n * Conan version: **conan 1.37.0**\r\n * Python version: 
**Python 3.9**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n*: cmake/3.19.6\r\n[env]\r\n```\r\n\r\nAccording to the library [cmake-files](https://github.com/apache/xerces-c/blob/v3.2.3/cmake/XercesXMLCh.cmake) `xerces-c` supports `uint16_t`, `char16_t` and `wchar_t`, while the current [recipe supports only `uint16_t`](https://github.com/conan-io/conan-center-index/blob/c4f06bca2cf69dd5429f4867bced49872e4300a2/recipes/xerces-c/all/conanfile.py#L54).\r\n\r\nThe library, I am working on, uses `char16_t` for the `xmlch-type` and the build fails because the `xerces-c` recipe provides only `uint16_t` for `xmlch-type`:\r\n`error C2440: '<function-style-cast>': cannot convert from 'const XMLCh *' to 'std::u16string'`\r\n\r\nWe need `xmlch-type` as an option so that the user could choose any of the types supported by `xerces-c`.\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass XercesCConan(ConanFile):\n name = \"xerces-c\"\n description = \"Xerces-C++ is a validating XML parser written in a portable subset of C++\"\n topics = (\"conan\", \"xerces\", \"XML\", \"validation\", \"DOM\", \"SAX\", \"SAX2\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://xerces.apache.org/xerces-c/index.html\"\n license = \"Apache-2.0\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"char_type\": [\"uint16_t\", \"wchar_t\"]}\n default_options = {\"shared\": False, \"fPIC\": True, \"char_type\": \"uint16_t\"}\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n \n def validate(self):\n if self.options.char_type == \"wchar_t\" and self.settings.os != \"Windows\":\n raise ConanInvalidConfiguration(\"Option 'char_type=wchar_t' is only supported in Windows\")\n\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.settings.os not in (\"Windows\", \"Macos\", \"Linux\"):\n raise ConanInvalidConfiguration(\"OS is not supported\")\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n # https://xerces.apache.org/xerces-c/build-3.html\n self._cmake.definitions[\"network-accessor\"] = {\"Windows\": \"winsock\",\n \"Macos\": \"cfurl\",\n \"Linux\": \"socket\"}.get(str(self.settings.os))\n self._cmake.definitions[\"transcoder\"] = {\"Windows\": \"windows\",\n \"Macos\": \"macosunicodeconverter\",\n \"Linux\": \"gnuiconv\"}.get(str(self.settings.os))\n self._cmake.definitions[\"message-loader\"] = \"inmemory\"\n\n self._cmake.definitions[\"xmlch-type\"] = self.options.char_type\n\n self._cmake.definitions[\"mutex-manager\"] = {\"Windows\": \"windows\",\n \"Macos\": \"posix\",\n \"Linux\": 
\"posix\"}.get(str(self.settings.os))\n # avoid picking up system dependency\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_CURL\"] = True\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_ICU\"] = True\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n # remove unneeded directories\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Macos\":\n self.cpp_info.frameworks = [\"CoreFoundation\", \"CoreServices\"]\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n self.cpp_info.names[\"cmake_find_package\"] = \"XercesC\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"XercesC\"\n", "path": "recipes/xerces-c/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass XercesCConan(ConanFile):\n name = \"xerces-c\"\n description = (\n \"Xerces-C++ is a validating XML parser written in a portable subset of C++\"\n )\n topics = (\"conan\", \"xerces\", \"XML\", \"validation\", \"DOM\", \"SAX\", \"SAX2\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://xerces.apache.org/xerces-c/index.html\"\n license = \"Apache-2.0\"\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"char_type\": [\"uint16_t\", \"char16_t\", \"wchar_t\"],\n }\n default_options = {\"shared\": False, \"fPIC\": True, \"char_type\": \"uint16_t\"}\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def validate(self):\n if self.options.char_type == \"wchar_t\" and self.settings.os != \"Windows\":\n raise ConanInvalidConfiguration(\n \"Option 'char_type=wchar_t' is only supported in Windows\"\n )\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.settings.os not in (\"Windows\", \"Macos\", \"Linux\"):\n raise ConanInvalidConfiguration(\"OS is not supported\")\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n # https://xerces.apache.org/xerces-c/build-3.html\n self._cmake.definitions[\"network-accessor\"] = {\n \"Windows\": \"winsock\",\n \"Macos\": \"cfurl\",\n \"Linux\": \"socket\",\n }.get(str(self.settings.os))\n self._cmake.definitions[\"transcoder\"] = {\n \"Windows\": \"windows\",\n \"Macos\": \"macosunicodeconverter\",\n \"Linux\": \"gnuiconv\",\n }.get(str(self.settings.os))\n self._cmake.definitions[\"message-loader\"] = \"inmemory\"\n\n 
self._cmake.definitions[\"xmlch-type\"] = self.options.char_type\n\n self._cmake.definitions[\"mutex-manager\"] = {\n \"Windows\": \"windows\",\n \"Macos\": \"posix\",\n \"Linux\": \"posix\",\n }.get(str(self.settings.os))\n # avoid picking up system dependency\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_CURL\"] = True\n self._cmake.definitions[\"CMAKE_DISABLE_FIND_PACKAGE_ICU\"] = True\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n # remove unneeded directories\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Macos\":\n self.cpp_info.frameworks = [\"CoreFoundation\", \"CoreServices\"]\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n self.cpp_info.names[\"cmake_find_package\"] = \"XercesC\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"XercesC\"\n", "path": "recipes/xerces-c/all/conanfile.py"}]}
| 1,885 | 915 |
gh_patches_debug_59228
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-1835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Awkward v2 mixins not propagated upwards
### Version of Awkward Array
2.0.0rc1
### Description and code to reproduce
Coffea tests with awkwardv2 fail with: `E AttributeError: module 'awkward.behaviors' has no attribute 'mixin_class'`
`awkward.mixin_class` has moved to `awkward.behaviors.mixins.mixin_class`, along with all other mixin decorators.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/__init__.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 # NumPy-like alternatives
4 import awkward.nplikes
5
6 # shims for C++ (now everything is compiled into one 'awkward._ext' module)
7 import awkward._ext
8
9 # Compiled dynamic modules
10 import awkward._cpu_kernels
11 import awkward._libawkward
12
13 # layout classes; functionality that used to be in C++ (in Awkward 1.x)
14 import awkward.index
15 import awkward.identifier
16 import awkward.contents
17 import awkward.record
18 import awkward.types
19 import awkward.forms
20 import awkward._slicing
21 import awkward._broadcasting
22 import awkward._typetracer
23
24 # internal
25 import awkward._util
26 import awkward._errors
27 import awkward._lookup
28
29 # third-party connectors
30 import awkward._connect.numpy
31 import awkward._connect.numexpr
32 import awkward.numba
33 import awkward.jax
34
35 # high-level interface
36 from awkward.highlevel import Array
37 from awkward.highlevel import Record
38 from awkward.highlevel import ArrayBuilder
39
40 # behaviors
41 import awkward.behaviors.categorical
42 import awkward.behaviors.mixins
43 import awkward.behaviors.string
44
45 behavior = {}
46 awkward.behaviors.string.register(behavior) # noqa: F405
47 awkward.behaviors.categorical.register(behavior) # noqa: F405
48
49 # operations
50 from awkward.operations import *
51
52 # temporary shim to access v2 under _v2
53 import awkward._v2
54
55 # version
56 __version__ = awkward._ext.__version__
57 __all__ = [x for x in globals() if not x.startswith("_")]
58
59
60 def __dir__():
61 return __all__
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/awkward/__init__.py b/src/awkward/__init__.py
--- a/src/awkward/__init__.py
+++ b/src/awkward/__init__.py
@@ -39,8 +39,8 @@
# behaviors
import awkward.behaviors.categorical
-import awkward.behaviors.mixins
import awkward.behaviors.string
+from awkward.behaviors.mixins import mixin_class, mixin_class_method
behavior = {}
awkward.behaviors.string.register(behavior) # noqa: F405
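
Once the names are re-exported, the usual mixin example works again at the top level. A minimal sketch, adapted from the standard `Point` mixin illustration and assuming awkward 2.x with this change applied:

```python
import numpy as np
import awkward as ak

behavior = {}


@ak.mixin_class(behavior)          # available again as a top-level name
class Point:
    @property
    def mag(self):
        return np.sqrt(self.x ** 2 + self.y ** 2)


points = ak.Array(
    [[{"x": 3.0, "y": 4.0}], [{"x": 1.0, "y": 2.0}]],
    with_name="Point",
    behavior=behavior,
)
print(points.mag.tolist())         # approximately [[5.0], [2.236]]
```
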
|
{"golden_diff": "diff --git a/src/awkward/__init__.py b/src/awkward/__init__.py\n--- a/src/awkward/__init__.py\n+++ b/src/awkward/__init__.py\n@@ -39,8 +39,8 @@\n \n # behaviors\n import awkward.behaviors.categorical\n-import awkward.behaviors.mixins\n import awkward.behaviors.string\n+from awkward.behaviors.mixins import mixin_class, mixin_class_method\n \n behavior = {}\n awkward.behaviors.string.register(behavior) # noqa: F405\n", "issue": "Awkward v2 mixins not propagated upwards\n### Version of Awkward Array\r\n\r\n2.0.0rc1\r\n\r\n### Description and code to reproduce\r\n\r\nCoffea tests with awkwardv2 fail with: `E AttributeError: module 'awkward.behaviors' has no attribute 'mixin_class'`\r\n\r\n`awkward.mixin_class` has moved to `awkward.behaviors.mixins.mixin_class`, along with all other mixin decorators.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n# NumPy-like alternatives\nimport awkward.nplikes\n\n# shims for C++ (now everything is compiled into one 'awkward._ext' module)\nimport awkward._ext\n\n# Compiled dynamic modules\nimport awkward._cpu_kernels\nimport awkward._libawkward\n\n# layout classes; functionality that used to be in C++ (in Awkward 1.x)\nimport awkward.index\nimport awkward.identifier\nimport awkward.contents\nimport awkward.record\nimport awkward.types\nimport awkward.forms\nimport awkward._slicing\nimport awkward._broadcasting\nimport awkward._typetracer\n\n# internal\nimport awkward._util\nimport awkward._errors\nimport awkward._lookup\n\n# third-party connectors\nimport awkward._connect.numpy\nimport awkward._connect.numexpr\nimport awkward.numba\nimport awkward.jax\n\n# high-level interface\nfrom awkward.highlevel import Array\nfrom awkward.highlevel import Record\nfrom awkward.highlevel import ArrayBuilder\n\n# behaviors\nimport awkward.behaviors.categorical\nimport awkward.behaviors.mixins\nimport awkward.behaviors.string\n\nbehavior = {}\nawkward.behaviors.string.register(behavior) # noqa: F405\nawkward.behaviors.categorical.register(behavior) # noqa: F405\n\n# operations\nfrom awkward.operations import *\n\n# temporary shim to access v2 under _v2\nimport awkward._v2\n\n# version\n__version__ = awkward._ext.__version__\n__all__ = [x for x in globals() if not x.startswith(\"_\")]\n\n\ndef __dir__():\n return __all__\n", "path": "src/awkward/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\n# NumPy-like alternatives\nimport awkward.nplikes\n\n# shims for C++ (now everything is compiled into one 'awkward._ext' module)\nimport awkward._ext\n\n# Compiled dynamic modules\nimport awkward._cpu_kernels\nimport awkward._libawkward\n\n# layout classes; functionality that used to be in C++ (in Awkward 1.x)\nimport awkward.index\nimport awkward.identifier\nimport awkward.contents\nimport awkward.record\nimport awkward.types\nimport awkward.forms\nimport awkward._slicing\nimport awkward._broadcasting\nimport awkward._typetracer\n\n# internal\nimport awkward._util\nimport awkward._errors\nimport awkward._lookup\n\n# third-party connectors\nimport awkward._connect.numpy\nimport awkward._connect.numexpr\nimport awkward.numba\nimport awkward.jax\n\n# high-level interface\nfrom awkward.highlevel import Array\nfrom awkward.highlevel import Record\nfrom awkward.highlevel import ArrayBuilder\n\n# behaviors\nimport awkward.behaviors.categorical\nimport awkward.behaviors.string\nfrom awkward.behaviors.mixins import 
mixin_class, mixin_class_method\n\nbehavior = {}\nawkward.behaviors.string.register(behavior) # noqa: F405\nawkward.behaviors.categorical.register(behavior) # noqa: F405\n\n# operations\nfrom awkward.operations import *\n\n# temporary shim to access v2 under _v2\nimport awkward._v2\n\n# version\n__version__ = awkward._ext.__version__\n__all__ = [x for x in globals() if not x.startswith(\"_\")]\n\n\ndef __dir__():\n return __all__\n", "path": "src/awkward/__init__.py"}]}
| 831 | 115 |
gh_patches_debug_32164
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-3644
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sweepable is actually a recursive type
The current type signature of Sweepable is
```
Sweepable = Union[Dict[str, float], ParamResolver, Sweep, Iterable[
Union[Dict[str, float], ParamResolver, Sweep]], None]
```
But there is no logical reason to only allow one level of iteration over Sweep. And in fact, `cirq.to_sweeps` is a recursive function, so it already assumes that Sweepable is a recursive type. So as it stands, Sweepable should be defined as a recursive type.
But I would like to ask whether Sweepable really needs to be this recursive type, rather than just something that can be converted to a single Sweep. Do people actually use lists of sweeps, rather than just plain sweeps? It seems like a rather confusing thing to attempt, given that one would need to keep track of how results correspond to the individual parameter resolvers.
--- END ISSUE ---
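
To make the recursion concrete, the behaviour described in the issue can be observed directly. A small sketch, assuming a cirq version where `cirq.to_sweeps` is public (as the issue states):

```python
import cirq

a = cirq.Points('a', [0.0, 1.0])
b = cirq.Points('b', [2.0])

# The flat case matches the declared Sweepable type...
print(cirq.to_sweeps([a, b]))
# ...but a nested iterable is flattened too, because to_sweeps recurses,
# which is exactly what makes Sweepable a recursive type in practice.
print(cirq.to_sweeps([[a], b]))
```
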
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/study/sweepable.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Defines which types are Sweepable."""
16
17 from typing import Dict, Iterable, Iterator, List, Sequence, Union, cast
18 import warnings
19
20 from cirq._doc import document
21 from cirq.study.resolver import ParamResolver, ParamResolverOrSimilarType
22 from cirq.study.sweeps import ListSweep, Points, Sweep, UnitSweep, Zip, dict_to_product_sweep
23
24 SweepLike = Union[ParamResolverOrSimilarType, Sweep]
25 document(SweepLike, """An object similar to an iterable of parameter resolvers.""") # type: ignore
26
27 Sweepable = Union[SweepLike, Iterable[SweepLike]]
28 document(
29 Sweepable, # type: ignore
30 """An object or collection of objects representing a parameter sweep.""",
31 )
32
33
34 def to_resolvers(sweepable: Sweepable) -> Iterator[ParamResolver]:
35 """Convert a Sweepable to a list of ParamResolvers."""
36 for sweep in to_sweeps(sweepable):
37 yield from sweep
38
39
40 def to_sweeps(sweepable: Sweepable) -> List[Sweep]:
41 """Converts a Sweepable to a list of Sweeps."""
42 if sweepable is None:
43 return [UnitSweep]
44 if isinstance(sweepable, ParamResolver):
45 return [_resolver_to_sweep(sweepable)]
46 if isinstance(sweepable, Sweep):
47 return [sweepable]
48 if isinstance(sweepable, dict):
49 if any(isinstance(val, Sequence) for val in sweepable.values()):
50 warnings.warn(
51 'Implicit expansion of a dictionary into a Cartesian product '
52 'of sweeps is deprecated and will be removed in cirq 0.10. '
53 'Instead, expand the sweep explicitly using '
54 '`cirq.dict_to_product_sweep`.',
55 DeprecationWarning,
56 stacklevel=2,
57 )
58 product_sweep = dict_to_product_sweep(sweepable)
59 return [_resolver_to_sweep(resolver) for resolver in product_sweep]
60 if isinstance(sweepable, Iterable) and not isinstance(sweepable, str):
61 return [
62 sweep
63 for item in sweepable
64 for sweep in to_sweeps(cast(Union[Dict[str, float], ParamResolver, Sweep], item))
65 ]
66 raise TypeError(f'Unrecognized sweepable type: {type(sweepable)}.\nsweepable: {sweepable}')
67
68
69 def to_sweep(
70 sweep_or_resolver_list: Union[
71 'Sweep', ParamResolverOrSimilarType, Iterable[ParamResolverOrSimilarType]
72 ]
73 ) -> 'Sweep':
74 """Converts the argument into a ``cirq.Sweep``.
75
76 Args:
77 sweep_or_resolver_list: The object to try to turn into a
78 ``cirq.Sweep`` . A ``cirq.Sweep``, a single ``cirq.ParamResolver``,
79 or a list of ``cirq.ParamResolver`` s.
80
81 Returns:
82 A sweep equal to or containing the argument.
83 """
84 if isinstance(sweep_or_resolver_list, Sweep):
85 return sweep_or_resolver_list
86 if isinstance(sweep_or_resolver_list, (ParamResolver, dict)):
87 resolver = cast(ParamResolverOrSimilarType, sweep_or_resolver_list)
88 return ListSweep([resolver])
89 if isinstance(sweep_or_resolver_list, Iterable):
90 resolver_iter = cast(Iterable[ParamResolverOrSimilarType], sweep_or_resolver_list)
91 return ListSweep(resolver_iter)
92 raise TypeError('Unexpected sweep-like value: {}'.format(sweep_or_resolver_list))
93
94
95 def _resolver_to_sweep(resolver: ParamResolver) -> Sweep:
96 params = resolver.param_dict
97 if not params:
98 return UnitSweep
99 return Zip(*[Points(key, [cast(float, value)]) for key, value in params.items()])
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/study/sweepable.py b/cirq/study/sweepable.py
--- a/cirq/study/sweepable.py
+++ b/cirq/study/sweepable.py
@@ -16,17 +16,27 @@
from typing import Dict, Iterable, Iterator, List, Sequence, Union, cast
import warnings
+from typing_extensions import Protocol
from cirq._doc import document
from cirq.study.resolver import ParamResolver, ParamResolverOrSimilarType
from cirq.study.sweeps import ListSweep, Points, Sweep, UnitSweep, Zip, dict_to_product_sweep
SweepLike = Union[ParamResolverOrSimilarType, Sweep]
-document(SweepLike, """An object similar to an iterable of parameter resolvers.""") # type: ignore
+document(SweepLike, """An object similar to an iterable of parameter resolvers.""")
-Sweepable = Union[SweepLike, Iterable[SweepLike]]
+
+class _Sweepable(Protocol):
+ """An intermediate class allowing for recursive definition of Sweepable,
+ since recursive union definitions are not yet supported in mypy."""
+
+ def __iter__(self) -> Iterator[Union[SweepLike, '_Sweepable']]:
+ pass
+
+
+Sweepable = Union[SweepLike, _Sweepable]
document(
- Sweepable, # type: ignore
+ Sweepable,
"""An object or collection of objects representing a parameter sweep.""",
)
@@ -58,11 +68,7 @@
product_sweep = dict_to_product_sweep(sweepable)
return [_resolver_to_sweep(resolver) for resolver in product_sweep]
if isinstance(sweepable, Iterable) and not isinstance(sweepable, str):
- return [
- sweep
- for item in sweepable
- for sweep in to_sweeps(cast(Union[Dict[str, float], ParamResolver, Sweep], item))
- ]
+ return [sweep for item in sweepable for sweep in to_sweeps(item)]
raise TypeError(f'Unrecognized sweepable type: {type(sweepable)}.\nsweepable: {sweepable}')
|
{"golden_diff": "diff --git a/cirq/study/sweepable.py b/cirq/study/sweepable.py\n--- a/cirq/study/sweepable.py\n+++ b/cirq/study/sweepable.py\n@@ -16,17 +16,27 @@\n \n from typing import Dict, Iterable, Iterator, List, Sequence, Union, cast\n import warnings\n+from typing_extensions import Protocol\n \n from cirq._doc import document\n from cirq.study.resolver import ParamResolver, ParamResolverOrSimilarType\n from cirq.study.sweeps import ListSweep, Points, Sweep, UnitSweep, Zip, dict_to_product_sweep\n \n SweepLike = Union[ParamResolverOrSimilarType, Sweep]\n-document(SweepLike, \"\"\"An object similar to an iterable of parameter resolvers.\"\"\") # type: ignore\n+document(SweepLike, \"\"\"An object similar to an iterable of parameter resolvers.\"\"\")\n \n-Sweepable = Union[SweepLike, Iterable[SweepLike]]\n+\n+class _Sweepable(Protocol):\n+ \"\"\"An intermediate class allowing for recursive definition of Sweepable,\n+ since recursive union definitions are not yet supported in mypy.\"\"\"\n+\n+ def __iter__(self) -> Iterator[Union[SweepLike, '_Sweepable']]:\n+ pass\n+\n+\n+Sweepable = Union[SweepLike, _Sweepable]\n document(\n- Sweepable, # type: ignore\n+ Sweepable,\n \"\"\"An object or collection of objects representing a parameter sweep.\"\"\",\n )\n \n@@ -58,11 +68,7 @@\n product_sweep = dict_to_product_sweep(sweepable)\n return [_resolver_to_sweep(resolver) for resolver in product_sweep]\n if isinstance(sweepable, Iterable) and not isinstance(sweepable, str):\n- return [\n- sweep\n- for item in sweepable\n- for sweep in to_sweeps(cast(Union[Dict[str, float], ParamResolver, Sweep], item))\n- ]\n+ return [sweep for item in sweepable for sweep in to_sweeps(item)]\n raise TypeError(f'Unrecognized sweepable type: {type(sweepable)}.\\nsweepable: {sweepable}')\n", "issue": "Sweepable is actually a recursive type\nThe current type signature of Sweepable is\r\n```\r\nSweepable = Union[Dict[str, float], ParamResolver, Sweep, Iterable[\r\n Union[Dict[str, float], ParamResolver, Sweep]], None]\r\n```\r\nBut there is no logical reason to only allow one level of iteration over Sweep. And in fact, `cirq.to_sweeps` is a recursive function, so it already assumes that Sweepable is a recursive type. So as it stands, Sweepable should be defined as a recursive type.\r\n\r\nBut I would like to ask whether Sweepable really needs to be this recursive type, rather than just something that can be converted to a single Sweep. Do people actually use lists of sweeps, rather than just plain sweeps? 
It seems like a rather confusing thing to attempt, given that one would need to keep track of how results correspond to the individual parameter resolvers.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines which types are Sweepable.\"\"\"\n\nfrom typing import Dict, Iterable, Iterator, List, Sequence, Union, cast\nimport warnings\n\nfrom cirq._doc import document\nfrom cirq.study.resolver import ParamResolver, ParamResolverOrSimilarType\nfrom cirq.study.sweeps import ListSweep, Points, Sweep, UnitSweep, Zip, dict_to_product_sweep\n\nSweepLike = Union[ParamResolverOrSimilarType, Sweep]\ndocument(SweepLike, \"\"\"An object similar to an iterable of parameter resolvers.\"\"\") # type: ignore\n\nSweepable = Union[SweepLike, Iterable[SweepLike]]\ndocument(\n Sweepable, # type: ignore\n \"\"\"An object or collection of objects representing a parameter sweep.\"\"\",\n)\n\n\ndef to_resolvers(sweepable: Sweepable) -> Iterator[ParamResolver]:\n \"\"\"Convert a Sweepable to a list of ParamResolvers.\"\"\"\n for sweep in to_sweeps(sweepable):\n yield from sweep\n\n\ndef to_sweeps(sweepable: Sweepable) -> List[Sweep]:\n \"\"\"Converts a Sweepable to a list of Sweeps.\"\"\"\n if sweepable is None:\n return [UnitSweep]\n if isinstance(sweepable, ParamResolver):\n return [_resolver_to_sweep(sweepable)]\n if isinstance(sweepable, Sweep):\n return [sweepable]\n if isinstance(sweepable, dict):\n if any(isinstance(val, Sequence) for val in sweepable.values()):\n warnings.warn(\n 'Implicit expansion of a dictionary into a Cartesian product '\n 'of sweeps is deprecated and will be removed in cirq 0.10. '\n 'Instead, expand the sweep explicitly using '\n '`cirq.dict_to_product_sweep`.',\n DeprecationWarning,\n stacklevel=2,\n )\n product_sweep = dict_to_product_sweep(sweepable)\n return [_resolver_to_sweep(resolver) for resolver in product_sweep]\n if isinstance(sweepable, Iterable) and not isinstance(sweepable, str):\n return [\n sweep\n for item in sweepable\n for sweep in to_sweeps(cast(Union[Dict[str, float], ParamResolver, Sweep], item))\n ]\n raise TypeError(f'Unrecognized sweepable type: {type(sweepable)}.\\nsweepable: {sweepable}')\n\n\ndef to_sweep(\n sweep_or_resolver_list: Union[\n 'Sweep', ParamResolverOrSimilarType, Iterable[ParamResolverOrSimilarType]\n ]\n) -> 'Sweep':\n \"\"\"Converts the argument into a ``cirq.Sweep``.\n\n Args:\n sweep_or_resolver_list: The object to try to turn into a\n ``cirq.Sweep`` . 
A ``cirq.Sweep``, a single ``cirq.ParamResolver``,\n or a list of ``cirq.ParamResolver`` s.\n\n Returns:\n A sweep equal to or containing the argument.\n \"\"\"\n if isinstance(sweep_or_resolver_list, Sweep):\n return sweep_or_resolver_list\n if isinstance(sweep_or_resolver_list, (ParamResolver, dict)):\n resolver = cast(ParamResolverOrSimilarType, sweep_or_resolver_list)\n return ListSweep([resolver])\n if isinstance(sweep_or_resolver_list, Iterable):\n resolver_iter = cast(Iterable[ParamResolverOrSimilarType], sweep_or_resolver_list)\n return ListSweep(resolver_iter)\n raise TypeError('Unexpected sweep-like value: {}'.format(sweep_or_resolver_list))\n\n\ndef _resolver_to_sweep(resolver: ParamResolver) -> Sweep:\n params = resolver.param_dict\n if not params:\n return UnitSweep\n return Zip(*[Points(key, [cast(float, value)]) for key, value in params.items()])\n", "path": "cirq/study/sweepable.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines which types are Sweepable.\"\"\"\n\nfrom typing import Dict, Iterable, Iterator, List, Sequence, Union, cast\nimport warnings\nfrom typing_extensions import Protocol\n\nfrom cirq._doc import document\nfrom cirq.study.resolver import ParamResolver, ParamResolverOrSimilarType\nfrom cirq.study.sweeps import ListSweep, Points, Sweep, UnitSweep, Zip, dict_to_product_sweep\n\nSweepLike = Union[ParamResolverOrSimilarType, Sweep]\ndocument(SweepLike, \"\"\"An object similar to an iterable of parameter resolvers.\"\"\")\n\n\nclass _Sweepable(Protocol):\n \"\"\"An intermediate class allowing for recursive definition of Sweepable,\n since recursive union definitions are not yet supported in mypy.\"\"\"\n\n def __iter__(self) -> Iterator[Union[SweepLike, '_Sweepable']]:\n pass\n\n\nSweepable = Union[SweepLike, _Sweepable]\ndocument(\n Sweepable,\n \"\"\"An object or collection of objects representing a parameter sweep.\"\"\",\n)\n\n\ndef to_resolvers(sweepable: Sweepable) -> Iterator[ParamResolver]:\n \"\"\"Convert a Sweepable to a list of ParamResolvers.\"\"\"\n for sweep in to_sweeps(sweepable):\n yield from sweep\n\n\ndef to_sweeps(sweepable: Sweepable) -> List[Sweep]:\n \"\"\"Converts a Sweepable to a list of Sweeps.\"\"\"\n if sweepable is None:\n return [UnitSweep]\n if isinstance(sweepable, ParamResolver):\n return [_resolver_to_sweep(sweepable)]\n if isinstance(sweepable, Sweep):\n return [sweepable]\n if isinstance(sweepable, dict):\n if any(isinstance(val, Sequence) for val in sweepable.values()):\n warnings.warn(\n 'Implicit expansion of a dictionary into a Cartesian product '\n 'of sweeps is deprecated and will be removed in cirq 0.10. 
'\n 'Instead, expand the sweep explicitly using '\n '`cirq.dict_to_product_sweep`.',\n DeprecationWarning,\n stacklevel=2,\n )\n product_sweep = dict_to_product_sweep(sweepable)\n return [_resolver_to_sweep(resolver) for resolver in product_sweep]\n if isinstance(sweepable, Iterable) and not isinstance(sweepable, str):\n return [sweep for item in sweepable for sweep in to_sweeps(item)]\n raise TypeError(f'Unrecognized sweepable type: {type(sweepable)}.\\nsweepable: {sweepable}')\n\n\ndef to_sweep(\n sweep_or_resolver_list: Union[\n 'Sweep', ParamResolverOrSimilarType, Iterable[ParamResolverOrSimilarType]\n ]\n) -> 'Sweep':\n \"\"\"Converts the argument into a ``cirq.Sweep``.\n\n Args:\n sweep_or_resolver_list: The object to try to turn into a\n ``cirq.Sweep`` . A ``cirq.Sweep``, a single ``cirq.ParamResolver``,\n or a list of ``cirq.ParamResolver`` s.\n\n Returns:\n A sweep equal to or containing the argument.\n \"\"\"\n if isinstance(sweep_or_resolver_list, Sweep):\n return sweep_or_resolver_list\n if isinstance(sweep_or_resolver_list, (ParamResolver, dict)):\n resolver = cast(ParamResolverOrSimilarType, sweep_or_resolver_list)\n return ListSweep([resolver])\n if isinstance(sweep_or_resolver_list, Iterable):\n resolver_iter = cast(Iterable[ParamResolverOrSimilarType], sweep_or_resolver_list)\n return ListSweep(resolver_iter)\n raise TypeError('Unexpected sweep-like value: {}'.format(sweep_or_resolver_list))\n\n\ndef _resolver_to_sweep(resolver: ParamResolver) -> Sweep:\n params = resolver.param_dict\n if not params:\n return UnitSweep\n return Zip(*[Points(key, [cast(float, value)]) for key, value in params.items()])\n", "path": "cirq/study/sweepable.py"}]}
| 1,581 | 478 |
gh_patches_debug_10227
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1836
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide grouping separators by default in Number columns
## Current behavior
- We default to displaying grouping separators (e.g. `1,234`) in _all_ numbers.
- Users can turn off grouping separators by customizing their display options.
- This "on-by-default" behavior, feels a bit strange for columns like primary keys and years, where users typically don't see grouping separators.
- During import, Mathesar uses the `numeric` type by default -- even if all values within the import data can be cast to integers. @mathemancer explained that we made this decision early on in order to be flexible, and I think it's worth sticking with it. But it means that years don't get imported as integers by default, making it hard to take the approach we're heading towards with the discussion in #1527.
- The back end does not set the grouping separator display option during import. It only gets set via the front end if the user manually adjusts it.
## Desired behavior
- When the user has not configured any display options:
- Columns with a UI type of "Number" will be rendered without grouping separators (regardless of their Postgres type). This means that the user will need to manually turn them on sometimes.
- Columns with a UI type of "Money" will be rendered _with_ grouping separators. The user can turn them off if needed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/shared_serializers.py`
Content:
```
1 from django.core.exceptions import ImproperlyConfigured
2 from rest_framework import serializers
3
4 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
5 from mathesar.api.exceptions.validation_exceptions.exceptions import IncompatibleFractionDigitValuesAPIException
6 from mathesar.database.types import UIType, get_ui_type_from_db_type
7
8
9 class ReadOnlyPolymorphicSerializerMappingMixin:
10 """
11 This serializer mixin is helpful in serializing polymorphic models,
12 by switching to correct serializer based on the mapping field value.
13 """
14 default_serializer = None
15
16 def __new__(cls, *args, **kwargs):
17 if cls.serializers_mapping is None:
18 raise ImproperlyConfigured(
19 '`{cls}` is missing a '
20 '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)
21 )
22 return super().__new__(cls, *args, **kwargs)
23
24 def _init_serializer(self, serializer_cls, *args, **kwargs):
25 if callable(serializer_cls):
26 serializer = serializer_cls(*args, **kwargs)
27 serializer.parent = self
28 else:
29 serializer = serializer_cls
30 return serializer
31
32 def __init__(self, *args, **kwargs):
33 super().__init__(*args, **kwargs)
34 self.serializers_cls_mapping = {}
35 serializers_mapping = self.serializers_mapping
36 self.serializers_mapping = {}
37 if self.default_serializer is not None:
38 self.default_serializer = self._init_serializer(self.default_serializer, *args, **kwargs)
39 for identifier, serializer_cls in serializers_mapping.items():
40 serializer = self._init_serializer(serializer_cls, *args, **kwargs)
41 self.serializers_mapping[identifier] = serializer
42 self.serializers_cls_mapping[identifier] = serializer_cls
43
44 def get_serializer_class(self, identifier):
45 if identifier in self.serializers_mapping:
46 return self.serializers_mapping.get(identifier)
47 else:
48 return self.default_serializer
49
50 def to_representation(self, instance):
51 serializer = self.get_serializer_class(self.get_mapping_field(instance))
52 if serializer is not None:
53 return serializer.to_representation(instance)
54 else:
55 return instance
56
57 def get_mapping_field(self, data):
58 mapping_field = getattr(self, "mapping_field", None)
59 if mapping_field is None:
60 raise Exception(
61 "Add a `mapping_field` to be used as a identifier"
62 "or override this method to return a identifier to identify a proper serializer"
63 )
64 return mapping_field
65
66
67 class ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):
68 def to_internal_value(self, data):
69 serializer = self.get_serializer_class(self.get_mapping_field(data))
70 if serializer is not None:
71 return serializer.to_internal_value(data=data)
72 else:
73 data = {}
74 return data
75
76 def validate(self, attrs):
77 serializer = self.serializers_mapping.get(self.get_mapping_field(attrs))
78 if serializer is not None:
79 return serializer.validate(attrs)
80 return {}
81
82
83 class MonkeyPatchPartial:
84 """
85 Work around bug #3847 in djangorestframework by monkey-patching the partial
86 attribute of the root serializer during the call to validate_empty_values.
87 https://github.com/encode/django-rest-framework/issues/3847
88 """
89
90 def __init__(self, root):
91 self._root = root
92
93 def __enter__(self):
94 self._old = getattr(self._root, 'partial')
95 setattr(self._root, 'partial', False)
96
97 def __exit__(self, *args):
98 setattr(self._root, 'partial', self._old)
99
100
101 class OverrideRootPartialMixin:
102 """
103 This mixin is used to convert a serializer into a partial serializer,
104 based on the serializer `partial` property rather than the parent's `partial` property.
105 Refer to the issue
106 https://github.com/encode/django-rest-framework/issues/3847
107 """
108
109 def run_validation(self, *args, **kwargs):
110 if not self.partial:
111 with MonkeyPatchPartial(self.root):
112 return super().run_validation(*args, **kwargs)
113 return super().run_validation(*args, **kwargs)
114
115
116 class MathesarPolymorphicErrorMixin(MathesarErrorMessageMixin):
117 def get_serializer_fields(self, data):
118 return self.serializers_mapping[self.get_mapping_field(data)].fields
119
120
121 class BaseDisplayOptionsSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
122 pass
123
124
125 class CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):
126 TRUE = serializers.CharField()
127 FALSE = serializers.CharField()
128
129
130 # This is the key which will determine which display options serializer is used. Its value is
131 # supposed to be the column's DB type (a DatabaseType instance).
132 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'db_type'
133
134
135 class BooleanDisplayOptionSerializer(BaseDisplayOptionsSerializer):
136 input = serializers.ChoiceField(choices=[("dropdown", "dropdown"), ("checkbox", "checkbox")])
137 custom_labels = CustomBooleanLabelSerializer(required=False)
138
139
140 FRACTION_DIGITS_CONFIG = {
141 "required": False,
142 "allow_null": True,
143 "min_value": 0,
144 "max_value": 20
145 }
146 """
147 Max value of 20 is taken from [Intl.NumberFormat docs][1].
148
149 [1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat
150 """
151
152
153 class AbstractNumberDisplayOptionSerializer(BaseDisplayOptionsSerializer):
154 number_format = serializers.ChoiceField(
155 required=False,
156 allow_null=True,
157 choices=['english', 'german', 'french', 'hindi', 'swiss']
158 )
159
160 use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false', 'auto'], default='auto')
161 """
162 The choices here correspond to the options available for the `useGrouping`
163 property within the [Intl API][1]. True and False are encoded as strings
164 instead of booleans to maintain consistency with the Intl API and to keep
165 the type consistent. We did considering using an optional boolean but
166 decided a string would be better, especially if we want to support other
167 options eventually, like "min2".
168
169 [1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat
170 """
171
172 minimum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)
173 maximum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)
174
175 def _validate_fraction_digits(self, data):
176 minimum = data.get("minimum_fraction_digits")
177 maximum = data.get("maximum_fraction_digits")
178 if minimum is None or maximum is None:
179 # No errors if one of the fields is not set
180 return
181 if minimum > maximum:
182 raise IncompatibleFractionDigitValuesAPIException()
183
184 def validate(self, data):
185 self._validate_fraction_digits(data)
186 return data
187
188
189 class NumberDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):
190 show_as_percentage = serializers.BooleanField(default=False)
191
192
193 class MoneyDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):
194 currency_symbol = serializers.CharField()
195 currency_symbol_location = serializers.ChoiceField(choices=['after-minus', 'end-with-space'])
196
197
198 class TimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):
199 format = serializers.CharField(max_length=255)
200
201
202 class DateTimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):
203 time_format = serializers.CharField(max_length=255)
204 date_format = serializers.CharField(max_length=255)
205
206
207 class DurationDisplayOptionSerializer(BaseDisplayOptionsSerializer):
208 min = serializers.CharField(max_length=255)
209 max = serializers.CharField(max_length=255)
210 show_units = serializers.BooleanField()
211
212
213 class DisplayOptionsMappingSerializer(
214 OverrideRootPartialMixin,
215 MathesarPolymorphicErrorMixin,
216 ReadWritePolymorphicSerializerMappingMixin,
217 serializers.Serializer
218 ):
219 serializers_mapping = {
220 UIType.BOOLEAN: BooleanDisplayOptionSerializer,
221 UIType.NUMBER: NumberDisplayOptionSerializer,
222 UIType.DATETIME: DateTimeFormatDisplayOptionSerializer,
223 UIType.DATE: TimeFormatDisplayOptionSerializer,
224 UIType.TIME: TimeFormatDisplayOptionSerializer,
225 UIType.DURATION: DurationDisplayOptionSerializer,
226 UIType.MONEY: MoneyDisplayOptionSerializer,
227 }
228 default_serializer = BaseDisplayOptionsSerializer
229
230 def get_mapping_field(self, _):
231 return self._get_ui_type_of_column_being_serialized()
232
233 def _get_ui_type_of_column_being_serialized(self):
234 db_type = self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]
235 ui_type = get_ui_type_from_db_type(db_type)
236 return ui_type
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py
--- a/mathesar/api/serializers/shared_serializers.py
+++ b/mathesar/api/serializers/shared_serializers.py
@@ -157,7 +157,7 @@
choices=['english', 'german', 'french', 'hindi', 'swiss']
)
- use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false', 'auto'], default='auto')
+ use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false'], default='false')
"""
The choices here correspond to the options available for the `useGrouping`
property within the [Intl API][1]. True and False are encoded as strings
|
{"golden_diff": "diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py\n--- a/mathesar/api/serializers/shared_serializers.py\n+++ b/mathesar/api/serializers/shared_serializers.py\n@@ -157,7 +157,7 @@\n choices=['english', 'german', 'french', 'hindi', 'swiss']\n )\n \n- use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false', 'auto'], default='auto')\n+ use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false'], default='false')\n \"\"\"\n The choices here correspond to the options available for the `useGrouping`\n property within the [Intl API][1]. True and False are encoded as strings\n", "issue": "Hide grouping separators by default in Number columns\n## Current behavior\r\n\r\n- We default to displaying grouping separators (e.g. `1,234`) in _all_ numbers.\r\n- Users can turn off grouping separators by customizing their display options.\r\n- This \"on-by-default\" behavior, feels a bit strange for columns like primary keys and years, where users typically don't see grouping separators.\r\n- During import, Mathesar uses the `numeric` type by default -- even if all values within the import data can be cast to integers. @mathemancer explained that we made this decision early on in order to be flexible, and I think it's worth sticking with it. But it means that years don't get imported as integers by default, making it hard to take the approach were heading towards with the discussion in #1527.\r\n- The back end does not set the grouping separator display option during import. It only gets set via the front end if the user manually adjust it.\r\n\r\n## Desired behavior\r\n\r\n- When the user has not configured any display options:\r\n - Columns with a UI type of \"Number\" will be rendered without grouping separators (regardless of their Postgres type). This means that the user will need to manually turn them on sometimes.\r\n - Columns with a UI type of \"Money\" will be rendered _with_ grouping separators. 
The user can turn them off if needed.\r\n\r\n\n", "before_files": [{"content": "from django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncompatibleFractionDigitValuesAPIException\nfrom mathesar.database.types import UIType, get_ui_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n default_serializer = None\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return super().__new__(cls, *args, **kwargs)\n\n def _init_serializer(self, serializer_cls, *args, **kwargs):\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n return serializer\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n if self.default_serializer is not None:\n self.default_serializer = self._init_serializer(self.default_serializer, *args, **kwargs)\n for identifier, serializer_cls in serializers_mapping.items():\n serializer = self._init_serializer(serializer_cls, *args, **kwargs)\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def get_serializer_class(self, identifier):\n if identifier in self.serializers_mapping:\n return self.serializers_mapping.get(identifier)\n else:\n return self.default_serializer\n\n def to_representation(self, instance):\n serializer = self.get_serializer_class(self.get_mapping_field(instance))\n if serializer is not None:\n return serializer.to_representation(instance)\n else:\n return instance\n\n def get_mapping_field(self, data):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = self.get_serializer_class(self.get_mapping_field(data))\n if serializer is not None:\n return serializer.to_internal_value(data=data)\n else:\n data = {}\n return data\n\n def validate(self, attrs):\n serializer = self.serializers_mapping.get(self.get_mapping_field(attrs))\n if serializer is not None:\n return serializer.validate(attrs)\n return {}\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial 
serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass MathesarPolymorphicErrorMixin(MathesarErrorMessageMixin):\n def get_serializer_fields(self, data):\n return self.serializers_mapping[self.get_mapping_field(data)].fields\n\n\nclass BaseDisplayOptionsSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n pass\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\n# This is the key which will determine which display options serializer is used. Its value is\n# supposed to be the column's DB type (a DatabaseType instance).\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'db_type'\n\n\nclass BooleanDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", \"dropdown\"), (\"checkbox\", \"checkbox\")])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nFRACTION_DIGITS_CONFIG = {\n \"required\": False,\n \"allow_null\": True,\n \"min_value\": 0,\n \"max_value\": 20\n}\n\"\"\"\nMax value of 20 is taken from [Intl.NumberFormat docs][1].\n\n[1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat\n\"\"\"\n\n\nclass AbstractNumberDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n number_format = serializers.ChoiceField(\n required=False,\n allow_null=True,\n choices=['english', 'german', 'french', 'hindi', 'swiss']\n )\n\n use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false', 'auto'], default='auto')\n \"\"\"\n The choices here correspond to the options available for the `useGrouping`\n property within the [Intl API][1]. True and False are encoded as strings\n instead of booleans to maintain consistency with the Intl API and to keep\n the type consistent. 
We did considering using an optional boolean but\n decided a string would be better, especially if we want to support other\n options eventually, like \"min2\".\n\n [1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat\n \"\"\"\n\n minimum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)\n maximum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)\n\n def _validate_fraction_digits(self, data):\n minimum = data.get(\"minimum_fraction_digits\")\n maximum = data.get(\"maximum_fraction_digits\")\n if minimum is None or maximum is None:\n # No errors if one of the fields is not set\n return\n if minimum > maximum:\n raise IncompatibleFractionDigitValuesAPIException()\n\n def validate(self, data):\n self._validate_fraction_digits(data)\n return data\n\n\nclass NumberDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):\n show_as_percentage = serializers.BooleanField(default=False)\n\n\nclass MoneyDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):\n currency_symbol = serializers.CharField()\n currency_symbol_location = serializers.ChoiceField(choices=['after-minus', 'end-with-space'])\n\n\nclass TimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n format = serializers.CharField(max_length=255)\n\n\nclass DateTimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n time_format = serializers.CharField(max_length=255)\n date_format = serializers.CharField(max_length=255)\n\n\nclass DurationDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n min = serializers.CharField(max_length=255)\n max = serializers.CharField(max_length=255)\n show_units = serializers.BooleanField()\n\n\nclass DisplayOptionsMappingSerializer(\n OverrideRootPartialMixin,\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n UIType.BOOLEAN: BooleanDisplayOptionSerializer,\n UIType.NUMBER: NumberDisplayOptionSerializer,\n UIType.DATETIME: DateTimeFormatDisplayOptionSerializer,\n UIType.DATE: TimeFormatDisplayOptionSerializer,\n UIType.TIME: TimeFormatDisplayOptionSerializer,\n UIType.DURATION: DurationDisplayOptionSerializer,\n UIType.MONEY: MoneyDisplayOptionSerializer,\n }\n default_serializer = BaseDisplayOptionsSerializer\n\n def get_mapping_field(self, _):\n return self._get_ui_type_of_column_being_serialized()\n\n def _get_ui_type_of_column_being_serialized(self):\n db_type = self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]\n ui_type = get_ui_type_from_db_type(db_type)\n return ui_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}], "after_files": [{"content": "from django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncompatibleFractionDigitValuesAPIException\nfrom mathesar.database.types import UIType, get_ui_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n default_serializer = None\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return 
super().__new__(cls, *args, **kwargs)\n\n def _init_serializer(self, serializer_cls, *args, **kwargs):\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n return serializer\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n if self.default_serializer is not None:\n self.default_serializer = self._init_serializer(self.default_serializer, *args, **kwargs)\n for identifier, serializer_cls in serializers_mapping.items():\n serializer = self._init_serializer(serializer_cls, *args, **kwargs)\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def get_serializer_class(self, identifier):\n if identifier in self.serializers_mapping:\n return self.serializers_mapping.get(identifier)\n else:\n return self.default_serializer\n\n def to_representation(self, instance):\n serializer = self.get_serializer_class(self.get_mapping_field(instance))\n if serializer is not None:\n return serializer.to_representation(instance)\n else:\n return instance\n\n def get_mapping_field(self, data):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = self.get_serializer_class(self.get_mapping_field(data))\n if serializer is not None:\n return serializer.to_internal_value(data=data)\n else:\n data = {}\n return data\n\n def validate(self, attrs):\n serializer = self.serializers_mapping.get(self.get_mapping_field(attrs))\n if serializer is not None:\n return serializer.validate(attrs)\n return {}\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass MathesarPolymorphicErrorMixin(MathesarErrorMessageMixin):\n def get_serializer_fields(self, data):\n return self.serializers_mapping[self.get_mapping_field(data)].fields\n\n\nclass BaseDisplayOptionsSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n pass\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\n# This is the key 
which will determine which display options serializer is used. Its value is\n# supposed to be the column's DB type (a DatabaseType instance).\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'db_type'\n\n\nclass BooleanDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", \"dropdown\"), (\"checkbox\", \"checkbox\")])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nFRACTION_DIGITS_CONFIG = {\n \"required\": False,\n \"allow_null\": True,\n \"min_value\": 0,\n \"max_value\": 20\n}\n\"\"\"\nMax value of 20 is taken from [Intl.NumberFormat docs][1].\n\n[1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat\n\"\"\"\n\n\nclass AbstractNumberDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n number_format = serializers.ChoiceField(\n required=False,\n allow_null=True,\n choices=['english', 'german', 'french', 'hindi', 'swiss']\n )\n\n use_grouping = serializers.ChoiceField(required=False, choices=['true', 'false'], default='false')\n \"\"\"\n The choices here correspond to the options available for the `useGrouping`\n property within the [Intl API][1]. True and False are encoded as strings\n instead of booleans to maintain consistency with the Intl API and to keep\n the type consistent. We did considering using an optional boolean but\n decided a string would be better, especially if we want to support other\n options eventually, like \"min2\".\n\n [1]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/NumberFormat/NumberFormat\n \"\"\"\n\n minimum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)\n maximum_fraction_digits = serializers.IntegerField(**FRACTION_DIGITS_CONFIG)\n\n def _validate_fraction_digits(self, data):\n minimum = data.get(\"minimum_fraction_digits\")\n maximum = data.get(\"maximum_fraction_digits\")\n if minimum is None or maximum is None:\n # No errors if one of the fields is not set\n return\n if minimum > maximum:\n raise IncompatibleFractionDigitValuesAPIException()\n\n def validate(self, data):\n self._validate_fraction_digits(data)\n return data\n\n\nclass NumberDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):\n show_as_percentage = serializers.BooleanField(default=False)\n\n\nclass MoneyDisplayOptionSerializer(AbstractNumberDisplayOptionSerializer):\n currency_symbol = serializers.CharField()\n currency_symbol_location = serializers.ChoiceField(choices=['after-minus', 'end-with-space'])\n\n\nclass TimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n format = serializers.CharField(max_length=255)\n\n\nclass DateTimeFormatDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n time_format = serializers.CharField(max_length=255)\n date_format = serializers.CharField(max_length=255)\n\n\nclass DurationDisplayOptionSerializer(BaseDisplayOptionsSerializer):\n min = serializers.CharField(max_length=255)\n max = serializers.CharField(max_length=255)\n show_units = serializers.BooleanField()\n\n\nclass DisplayOptionsMappingSerializer(\n OverrideRootPartialMixin,\n MathesarPolymorphicErrorMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n UIType.BOOLEAN: BooleanDisplayOptionSerializer,\n UIType.NUMBER: NumberDisplayOptionSerializer,\n UIType.DATETIME: DateTimeFormatDisplayOptionSerializer,\n UIType.DATE: TimeFormatDisplayOptionSerializer,\n UIType.TIME: TimeFormatDisplayOptionSerializer,\n 
UIType.DURATION: DurationDisplayOptionSerializer,\n UIType.MONEY: MoneyDisplayOptionSerializer,\n }\n default_serializer = BaseDisplayOptionsSerializer\n\n def get_mapping_field(self, _):\n return self._get_ui_type_of_column_being_serialized()\n\n def _get_ui_type_of_column_being_serialized(self):\n db_type = self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY]\n ui_type = get_ui_type_from_db_type(db_type)\n return ui_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}]}
| 3,010 | 177 |
gh_patches_debug_32562
|
rasdani/github-patches
|
git_diff
|
CiviWiki__OpenCiviWiki-1469
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "Followers" tab to the user profile view
We recently removed some features from the user Profile view and now want to reimplement them using only Django.
This task will be to add a "Followers" tab to the user profile view. The tab should link to a sub-path of the user profile so it is clear to the viewer that they are viewing followers related to the user.
## Task
All of these tasks should be done in the `accounts` app.
- [ ] create a Django view called `UserFollowers` (in `accounts/views.py`)
- [ ] create a URL definition `profile/<str:username>/followers` that renders the `UserFollowers` view (in `accounts/urls.py`)
- [ ] ensure the `UserFollowers` view context contains a list of users that are following the relevant user (given via the `username` in the URL)
- note, it should be possible to get the followers via `user.profile.followers` or just `profile.followers`
- [ ] create a template to render the user followers - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/accounts/views.py`
Content:
```
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from accounts.authentication import account_activation_token, send_activation_email
8 from accounts.forms import ProfileEditForm, UserRegistrationForm
9 from accounts.models import Profile
10 from django.conf import settings
11 from django.contrib.auth import get_user_model, login
12 from django.contrib.auth import views as auth_views
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.auth.mixins import LoginRequiredMixin
15 from django.contrib.sites.shortcuts import get_current_site
16 from django.http import HttpResponseRedirect
17 from django.shortcuts import get_object_or_404, redirect
18 from django.template.response import TemplateResponse
19 from django.urls import reverse, reverse_lazy
20 from django.utils.encoding import force_str
21 from django.utils.http import urlsafe_base64_decode
22 from django.views import View
23 from django.views.generic.edit import FormView, UpdateView
24
25
26 class ProfileFollow(LoginRequiredMixin, View):
27 def get(self, request, *args, **kwargs):
28 # Prevent users from following themselves.
29 if request.user.username == kwargs["username"]:
30 pass
31 else:
32 following_profile = Profile.objects.get(user__username=kwargs["username"])
33
34 self.request.user.profile.following.add(following_profile)
35
36 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
37
38 return HttpResponseRedirect(redirect_to)
39
40
41 class ProfileUnfollow(LoginRequiredMixin, View):
42 def get(self, request, *args, **kwargs):
43 # Prevent users from following themselves.
44 if request.user.username == kwargs["username"]:
45 pass
46 else:
47 following_profile = Profile.objects.get(user__username=kwargs["username"])
48
49 self.request.user.profile.following.remove(following_profile)
50
51 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
52
53 return HttpResponseRedirect(redirect_to)
54
55
56 class RegisterView(FormView):
57 """
58 A form view that handles user registration.
59 """
60
61 template_name = "accounts/register/register.html"
62 form_class = UserRegistrationForm
63 success_url = "/"
64
65 def _create_user(self, form):
66 username = form.cleaned_data["username"]
67 password = form.cleaned_data["password"]
68 email = form.cleaned_data["email"]
69 user = get_user_model().objects.create_user(username, email, password)
70 return user
71
72 def _send_email(self, user):
73 domain = get_current_site(self.request).domain
74 send_activation_email(user, domain)
75
76 def _login(self, user):
77 login(self.request, user)
78
79 def form_valid(self, form):
80 user = self._create_user(form)
81
82 self._send_email(user)
83 self._login(user)
84
85 return super(RegisterView, self).form_valid(form)
86
87
88 class ProfileActivationView(View):
89 """
90 This shows different views to the user when they are verifying
91 their account based on whether they are already verified or not.
92 """
93
94 def get(self, request, uidb64, token):
95
96 try:
97 uid = force_str(urlsafe_base64_decode(uidb64))
98 user = get_user_model().objects.get(pk=uid)
99
100 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
101 user = None
102
103 redirect_link = {"href": "/", "label": "Back to Main"}
104
105 template_var = {
106 "link": redirect_link,
107 }
108
109 if user is not None and account_activation_token.check_token(user, token):
110 profile = user.profile
111
112 if profile.is_verified:
113 template_var["title"] = "Email Already Verified"
114 template_var["content"] = "You have already verified your email."
115 else:
116 profile.is_verified = True
117 profile.save()
118
119 template_var["title"] = "Email Verification Successful"
120 template_var["content"] = "Thank you for verifying your email."
121 else:
122 # invalid link
123 template_var["title"] = "Email Verification Error"
124 template_var["content"] = "Email could not be verified"
125
126 return TemplateResponse(request, "general_message.html", template_var)
127
128
129 class PasswordResetView(auth_views.PasswordResetView):
130 template_name = "accounts/users/password_reset.html"
131 email_template_name = "accounts/users/password_reset_email.html"
132 subject_template_name = "accounts/users/password_reset_subject.txt"
133 from_email = settings.EMAIL_HOST_USER
134 success_url = reverse_lazy("accounts_password_reset_done")
135
136
137 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
138 template_name = "accounts/users/password_reset_done.html"
139
140
141 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
142 template_name = "accounts/users/password_reset_confirm.html"
143 success_url = reverse_lazy("accounts_password_reset_complete")
144
145
146 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
147 template_name = "accounts/users/password_reset_complete.html"
148
149
150 class SettingsView(LoginRequiredMixin, UpdateView):
151 """A form view to edit Profile"""
152
153 login_url = "accounts_login"
154 form_class = ProfileEditForm
155 success_url = reverse_lazy("accounts_settings")
156 template_name = "accounts/settings.html"
157
158 def get_object(self, queryset=None):
159 return Profile.objects.get(user=self.request.user)
160
161 def get_initial(self):
162 profile = Profile.objects.get(user=self.request.user)
163 self.initial.update(
164 {
165 "username": profile.user.username,
166 "email": profile.user.email,
167 "first_name": profile.first_name or None,
168 "last_name": profile.last_name or None,
169 "about_me": profile.about_me or None,
170 "profile_image": profile.profile_image or None,
171 }
172 )
173 return super(SettingsView, self).get_initial()
174
175
176 class UserProfileView(LoginRequiredMixin, View):
177 """A view that shows profile for authorized users"""
178
179 def get(self, request, username=None):
180 profile = get_object_or_404(Profile, user__username=username)
181
182 return TemplateResponse(
183 request,
184 "account.html",
185 {
186 "profile": profile,
187 },
188 )
189
190
191 class ProfileFollowing(LoginRequiredMixin, View):
192 """
193 A view that shows list of profiles
194 that profile with given username is following
195 """
196
197 def get(self, request, username=None):
198 profile = get_object_or_404(Profile, user__username=username)
199
200 return TemplateResponse(
201 request,
202 "profile_following.html",
203 {
204 "profile": profile,
205 },
206 )
207
208
209 class UserCivis(LoginRequiredMixin, View):
210 """
211 A view that shows list of civis
212 that profile with given username created
213 """
214
215 def get(self, request, username=None):
216 profile = get_object_or_404(Profile, user__username=username)
217 user = profile.user
218 civis = user.civis.all()
219
220 return TemplateResponse(
221 request,
222 "user_civis.html",
223 {"profile": profile, "civis": civis},
224 )
225
226
227 @login_required
228 def expunge_user(request):
229 """
230 Delete User Information
231 """
232
233 user_model = get_user_model()
234 user = get_object_or_404(user_model, username=request.user.username)
235
236 profile = get_object_or_404(Profile, user=user)
237
238 # Expunge personally identifiable data in user
239 expunged_user_data = {
240 "is_active": False,
241 "email": "",
242 "first_name": "",
243 "last_name": "",
244 "username": f"expunged-{ user.id }",
245 }
246 user.__dict__.update(expunged_user_data)
247 user.save()
248
249 # Expunge personally identifiable data in profile
250 expunged_profile_data = {
251 "first_name": "",
252 "last_name": "",
253 "about_me": "",
254 }
255 profile.__dict__.update(expunged_profile_data)
256 profile.save()
257
258 return redirect("/")
259
```
Path: `project/accounts/urls.py`
Content:
```
1 from accounts.views import (
2 PasswordResetCompleteView,
3 PasswordResetConfirmView,
4 PasswordResetDoneView,
5 PasswordResetView,
6 ProfileActivationView,
7 ProfileFollow,
8 ProfileUnfollow,
9 RegisterView,
10 SettingsView,
11 ProfileFollowing,
12 UserProfileView,
13 UserCivis,
14 expunge_user,
15 )
16 from django.contrib.auth import views as auth_views
17 from django.urls import path
18
19 urlpatterns = [
20 path(
21 "login/",
22 auth_views.LoginView.as_view(template_name="accounts/register/login.html"),
23 name="accounts_login",
24 ),
25 path("logout/", auth_views.LogoutView.as_view(), name="accounts_logout"),
26 path("register/", RegisterView.as_view(), name="accounts_register"),
27 path("settings/", SettingsView.as_view(), name="accounts_settings"),
28 path(
29 "activate_account/<uidb64>/<token>/",
30 ProfileActivationView.as_view(),
31 name="accounts_activate",
32 ),
33 path("profile/<str:username>/", UserProfileView.as_view(), name="profile"),
34 path(
35 "profile/<str:username>/follow", ProfileFollow.as_view(), name="profile-follow"
36 ),
37 path(
38 "profile/<str:username>/unfollow",
39 ProfileUnfollow.as_view(),
40 name="profile-unfollow",
41 ),
42 path(
43 "profile/<str:username>/following",
44 ProfileFollowing.as_view(),
45 name="profile-following",
46 ),
47 path(
48 "profile/<str:username>/civis/",
49 UserCivis.as_view(),
50 name="user-civis",
51 ),
52 path(
53 "accounts/password_reset/",
54 PasswordResetView.as_view(),
55 name="accounts_password_reset",
56 ),
57 path(
58 "accounts/password_reset_done/",
59 PasswordResetDoneView.as_view(),
60 name="accounts_password_reset_done",
61 ),
62 path(
63 "accounts/password_reset_confirm/<uidb64>/<token>/",
64 PasswordResetConfirmView.as_view(),
65 name="accounts_password_reset_confirm",
66 ),
67 path(
68 "accounts/password_reset_complete/",
69 PasswordResetCompleteView.as_view(),
70 name="accounts_password_reset_complete",
71 ),
72 path("accounts/expunge/", expunge_user, name="expunge_user"),
73 ]
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
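The patch that resolves this record follows; it adds a `UserFollowers` view modeled on the `ProfileFollowing`/`UserCivis` views listed earlier and a `profile/<str:username>/followers` route. For the piece the task description asks for but the reference patch leaves implicit — handing the followers list to the template explicitly — a minimal sketch is shown here. It is hypothetical: it assumes the `Profile.followers` relation the task text mentions and is not part of the patch itself.

```python
from accounts.models import Profile
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.views import View


class UserFollowers(LoginRequiredMixin, View):
    """Sketch: like UserCivis above, but passing the followers queryset explicitly."""

    def get(self, request, username=None):
        profile = get_object_or_404(Profile, user__username=username)
        # Assumes the Profile.followers relation described in the task text.
        return TemplateResponse(
            request,
            "user_followers.html",
            {"profile": profile, "followers": profile.followers.all()},
        )
```

The reference patch below keeps the context to just the profile, presumably leaving the `user_followers.html` template to read `profile.followers` directly.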
|
diff --git a/project/accounts/urls.py b/project/accounts/urls.py
--- a/project/accounts/urls.py
+++ b/project/accounts/urls.py
@@ -8,6 +8,7 @@
ProfileUnfollow,
RegisterView,
SettingsView,
+ UserFollowers,
ProfileFollowing,
UserProfileView,
UserCivis,
@@ -34,6 +35,11 @@
path(
"profile/<str:username>/follow", ProfileFollow.as_view(), name="profile-follow"
),
+ path(
+ "profile/<str:username>/followers",
+ UserFollowers.as_view(),
+ name="user-followers",
+ ),
path(
"profile/<str:username>/unfollow",
ProfileUnfollow.as_view(),
diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -188,6 +188,22 @@
)
+
+class UserFollowers(LoginRequiredMixin, View):
+ """A view that shows the followers for authorized users"""
+
+ def get(self, request, username=None):
+ profile = get_object_or_404(Profile, user__username=username)
+
+ return TemplateResponse(
+ request,
+ "user_followers.html",
+ {
+ "profile": profile,
+ },
+ )
+
+
class ProfileFollowing(LoginRequiredMixin, View):
"""
A view that shows list of profiles
@@ -199,13 +215,16 @@
return TemplateResponse(
request,
+
"profile_following.html",
+
{
"profile": profile,
},
)
+
class UserCivis(LoginRequiredMixin, View):
"""
A view that shows list of civis
@@ -224,6 +243,7 @@
)
+
@login_required
def expunge_user(request):
"""
|
{"golden_diff": "diff --git a/project/accounts/urls.py b/project/accounts/urls.py\n--- a/project/accounts/urls.py\n+++ b/project/accounts/urls.py\n@@ -8,6 +8,7 @@\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n+ UserFollowers,\n ProfileFollowing,\n UserProfileView,\n UserCivis,\n@@ -34,6 +35,11 @@\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n+ path(\n+ \"profile/<str:username>/followers\",\n+ UserFollowers.as_view(),\n+ name=\"user-followers\",\n+ ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\ndiff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -188,6 +188,22 @@\n )\n \n \n+\n+class UserFollowers(LoginRequiredMixin, View):\n+ \"\"\"A view that shows the followers for authorized users\"\"\"\n+ \n+ def get(self, request, username=None):\n+ profile = get_object_or_404(Profile, user__username=username)\n+\n+ return TemplateResponse(\n+ request,\n+ \"user_followers.html\",\n+ {\n+ \"profile\": profile,\n+ },\n+ )\n+\n+\n class ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n@@ -199,13 +215,16 @@\n \n return TemplateResponse(\n request,\n+\n \"profile_following.html\",\n+\n {\n \"profile\": profile,\n },\n )\n \n \n+\n class UserCivis(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of civis\n@@ -224,6 +243,7 @@\n )\n \n \n+\n @login_required\n def expunge_user(request):\n \"\"\"\n", "issue": "Add \"Followers\" tab to the user profile view\nWe recently removed some features from the user Profile view and now want to reimplement them using only Django.\r\n\r\nThis task will be to add a \"Followers\" tab to the user profile view. 
The tab should link to a sub-path of the user profile so it is clear to the viewer that they are viewing followers related to the user.\r\n\r\n## Task\r\nAll of these tasks should be done in the `accounts` app.\r\n\r\n- [ ] create a Django view called `UserFollowers` (in `accounts/views.py`)\r\n- [ ] create a URL definition `profile/<str:username>/followers` that renders the `UserFollowers` view ( in `accounts/urls.py`)\r\n- [ ] ensure the `UserFollowers` view context contains a list of users that are following the relevant user (given via the `username` in the URL) \r\n - note, it should be possible to get the followers via `user.profile.followers` or just `profile.followers`\r\n- [ ] create a template to render the user followers - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)\r\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = 
self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n 
return TemplateResponse(\n request,\n \"profile_following.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass UserCivis(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of civis\n that profile with given username created\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"civis\": civis},\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n ProfileFollowing,\n UserProfileView,\n UserCivis,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"profile/<str:username>/following\",\n ProfileFollowing.as_view(),\n name=\"profile-following\",\n ),\n path(\n \"profile/<str:username>/civis/\",\n UserCivis.as_view(),\n name=\"user-civis\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}], "after_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, 
send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n 
template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\n\nclass UserFollowers(LoginRequiredMixin, View):\n \"\"\"A view that shows the followers for authorized users\"\"\"\n \n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"user_followers.html\",\n {\n \"profile\": profile,\n },\n )\n\n\nclass ProfileFollowing(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of profiles\n that profile with given username is following\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n\n \"profile_following.html\",\n\n {\n \"profile\": profile,\n },\n )\n\n\n\nclass UserCivis(LoginRequiredMixin, View):\n \"\"\"\n A view that shows list of civis\n that profile with given username created\n \"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n user = profile.user\n civis = user.civis.all()\n\n return TemplateResponse(\n request,\n \"user_civis.html\",\n {\"profile\": profile, \"civis\": civis},\n )\n\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n 
profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n UserFollowers,\n ProfileFollowing,\n UserProfileView,\n UserCivis,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/followers\",\n UserFollowers.as_view(),\n name=\"user-followers\",\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"profile/<str:username>/following\",\n ProfileFollowing.as_view(),\n name=\"profile-following\",\n ),\n path(\n \"profile/<str:username>/civis/\",\n UserCivis.as_view(),\n name=\"user-civis\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n \"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}]}
| 3,479 | 425 |
gh_patches_debug_32096 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5900
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
emr extend network-location feature
**Is your feature request related to a problem? Please describe.**
Require subnet/sg tag comparison for resource: emr
**Describe the solution you'd like**
Extend filter `subnet`, `security-group` and `network-location` for emr
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
--- END ISSUE ---
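For orientation before the code: the requested filters all reduce to reading network identifiers out of each cluster's `Ec2InstanceAttributes` block. A standalone sketch of that lookup is below; the field names are assumptions based on the EMR `DescribeCluster` response shape, not code taken from this repository.

```python
import jmespath

SUBNET_EXPR = "Ec2InstanceAttributes.RequestedEc2SubnetIds[]"
SG_EXPRS = (
    "Ec2InstanceAttributes.EmrManagedMasterSecurityGroup",
    "Ec2InstanceAttributes.EmrManagedSlaveSecurityGroup",
    "Ec2InstanceAttributes.ServiceAccessSecurityGroup",
    "Ec2InstanceAttributes.AdditionalMasterSecurityGroups[]",
    "Ec2InstanceAttributes.AdditionalSlaveSecurityGroups[]",
)


def emr_network_ids(cluster):
    """Collect subnet and security-group ids from one DescribeCluster payload."""
    subnet_ids = jmespath.search(SUBNET_EXPR, cluster) or []
    sg_ids = set()
    for expr in SG_EXPRS:
        found = jmespath.search(expr, cluster)
        if isinstance(found, list):
            sg_ids.update(found)
        elif isinstance(found, str):
            sg_ids.add(found)
    return subnet_ids, sorted(sg_ids)
```

A subnet filter only needs `RequestedEc2SubnetIds`, while a security-group filter has to merge the managed, service-access and additional group fields, which is why a set is used.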
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/emr.py`
Content:
```
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15 import time
16 import json
17
18 from c7n.actions import ActionRegistry, BaseAction
19 from c7n.exceptions import PolicyValidationError
20 from c7n.filters import FilterRegistry, MetricsFilter
21 from c7n.manager import resources
22 from c7n.query import QueryResourceManager, TypeInfo
23 from c7n.utils import (
24 local_session, type_schema, get_retry)
25 from c7n.tags import (
26 TagDelayedAction, RemoveTag, TagActionFilter, Tag)
27
28 filters = FilterRegistry('emr.filters')
29 actions = ActionRegistry('emr.actions')
30 log = logging.getLogger('custodian.emr')
31
32 filters.register('marked-for-op', TagActionFilter)
33
34
35 @resources.register('emr')
36 class EMRCluster(QueryResourceManager):
37 """Resource manager for Elastic MapReduce clusters
38 """
39
40 class resource_type(TypeInfo):
41 service = 'emr'
42 arn_type = 'emr'
43 permission_prefix = 'elasticmapreduce'
44 cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']
45 enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})
46 name = 'Name'
47 id = 'Id'
48 date = "Status.Timeline.CreationDateTime"
49 cfn_type = 'AWS::EMR::Cluster'
50
51 action_registry = actions
52 filter_registry = filters
53 retry = staticmethod(get_retry(('ThrottlingException',)))
54
55 def __init__(self, ctx, data):
56 super(EMRCluster, self).__init__(ctx, data)
57 self.queries = QueryFilter.parse(
58 self.data.get('query', [
59 {'ClusterStates': [
60 'running', 'bootstrapping', 'waiting']}]))
61
62 @classmethod
63 def get_permissions(cls):
64 return ("elasticmapreduce:ListClusters",
65 "elasticmapreduce:DescribeCluster")
66
67 def get_resources(self, ids):
68 # no filtering by id set supported at the api
69 client = local_session(self.session_factory).client('emr')
70 results = []
71 for jid in ids:
72 results.append(
73 client.describe_cluster(ClusterId=jid)['Cluster'])
74 return results
75
76 def resources(self, query=None):
77 q = self.consolidate_query_filter()
78 if q is not None:
79 query = query or {}
80 for i in range(0, len(q)):
81 query[q[i]['Name']] = q[i]['Values']
82 return super(EMRCluster, self).resources(query=query)
83
84 def consolidate_query_filter(self):
85 result = []
86 names = set()
87 # allow same name to be specified multiple times and append the queries
88 # under the same name
89 for q in self.queries:
90 query_filter = q.query()
91 if query_filter['Name'] in names:
92 for filt in result:
93 if query_filter['Name'] == filt['Name']:
94 filt['Values'].extend(query_filter['Values'])
95 else:
96 names.add(query_filter['Name'])
97 result.append(query_filter)
98 if 'ClusterStates' not in names:
99 # include default query
100 result.append(
101 {
102 'Name': 'ClusterStates',
103 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],
104 }
105 )
106 return result
107
108 def augment(self, resources):
109 client = local_session(
110 self.get_resource_manager('emr').session_factory).client('emr')
111 result = []
112 # remap for cwmetrics
113 for r in resources:
114 cluster = self.retry(
115 client.describe_cluster, ClusterId=r['Id'])['Cluster']
116 result.append(cluster)
117 return result
118
119
120 @EMRCluster.filter_registry.register('metrics')
121 class EMRMetrics(MetricsFilter):
122
123 def get_dimensions(self, resource):
124 # Job flow id is legacy name for cluster id
125 return [{'Name': 'JobFlowId', 'Value': resource['Id']}]
126
127
128 @actions.register('mark-for-op')
129 class TagDelayedAction(TagDelayedAction):
130 """Action to specify an action to occur at a later date
131
132 :example:
133
134 .. code-block:: yaml
135
136 policies:
137 - name: emr-mark-for-op
138 resource: emr
139 filters:
140 - "tag:Name": absent
141 actions:
142 - type: mark-for-op
143 tag: custodian_cleanup
144 op: terminate
145 days: 4
146 msg: "Cluster does not have required tags"
147 """
148
149
150 @actions.register('tag')
151 class TagTable(Tag):
152 """Action to create tag(s) on a resource
153
154 :example:
155
156 .. code-block:: yaml
157
158 policies:
159 - name: emr-tag-table
160 resource: emr
161 filters:
162 - "tag:target-tag": absent
163 actions:
164 - type: tag
165 key: target-tag
166 value: target-tag-value
167 """
168
169 permissions = ('elasticmapreduce:AddTags',)
170 batch_size = 1
171 retry = staticmethod(get_retry(('ThrottlingException',)))
172
173 def process_resource_set(self, client, resources, tags):
174 for r in resources:
175 self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)
176
177
178 @actions.register('remove-tag')
179 class UntagTable(RemoveTag):
180 """Action to remove tag(s) on a resource
181
182 :example:
183
184 .. code-block:: yaml
185
186 policies:
187 - name: emr-remove-tag
188 resource: emr
189 filters:
190 - "tag:target-tag": present
191 actions:
192 - type: remove-tag
193 tags: ["target-tag"]
194 """
195
196 concurrency = 2
197 batch_size = 5
198 permissions = ('elasticmapreduce:RemoveTags',)
199
200 def process_resource_set(self, client, resources, tag_keys):
201 for r in resources:
202 client.remove_tags(ResourceId=r['Id'], TagKeys=tag_keys)
203
204
205 @actions.register('terminate')
206 class Terminate(BaseAction):
207 """Action to terminate EMR cluster(s)
208
209 It is recommended to apply a filter to the terminate action to avoid
210 termination of all EMR clusters
211
212 :example:
213
214 .. code-block:: yaml
215
216 policies:
217 - name: emr-terminate
218 resource: emr
219 query:
220 - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]
221 actions:
222 - terminate
223 """
224
225 schema = type_schema('terminate', force={'type': 'boolean'})
226 permissions = ("elasticmapreduce:TerminateJobFlows",)
227 delay = 5
228
229 def process(self, emrs):
230 client = local_session(self.manager.session_factory).client('emr')
231 cluster_ids = [emr['Id'] for emr in emrs]
232 if self.data.get('force'):
233 client.set_termination_protection(
234 JobFlowIds=cluster_ids, TerminationProtected=False)
235 time.sleep(self.delay)
236 client.terminate_job_flows(JobFlowIds=cluster_ids)
237 self.log.info("Deleted emrs: %s", cluster_ids)
238 return emrs
239
240
241 # Valid EMR Query Filters
242 EMR_VALID_FILTERS = {'CreatedAfter', 'CreatedBefore', 'ClusterStates'}
243
244
245 class QueryFilter:
246
247 @classmethod
248 def parse(cls, data):
249 results = []
250 for d in data:
251 if not isinstance(d, dict):
252 raise PolicyValidationError(
253 "EMR Query Filter Invalid structure %s" % d)
254 results.append(cls(d).validate())
255 return results
256
257 def __init__(self, data):
258 self.data = data
259 self.key = None
260 self.value = None
261
262 def validate(self):
263 if not len(list(self.data.keys())) == 1:
264 raise PolicyValidationError(
265 "EMR Query Filter Invalid %s" % self.data)
266 self.key = list(self.data.keys())[0]
267 self.value = list(self.data.values())[0]
268
269 if self.key not in EMR_VALID_FILTERS and not self.key.startswith(
270 'tag:'):
271 raise PolicyValidationError(
272 "EMR Query Filter invalid filter name %s" % (self.data))
273
274 if self.value is None:
275 raise PolicyValidationError(
276 "EMR Query Filters must have a value, use tag-key"
277 " w/ tag name as value for tag present checks"
278 " %s" % self.data)
279 return self
280
281 def query(self):
282 value = self.value
283 if isinstance(self.value, str):
284 value = [self.value]
285
286 return {'Name': self.key, 'Values': value}
287
288
289 @resources.register('emr-security-configuration')
290 class EMRSecurityConfiguration(QueryResourceManager):
291 """Resource manager for EMR Security Configuration
292 """
293
294 class resource_type(TypeInfo):
295 service = 'emr'
296 arn_type = 'emr'
297 permission_prefix = 'elasticmapreduce'
298 enum_spec = ('list_security_configurations', 'SecurityConfigurations', None)
299 detail_spec = ('describe_security_configuration', 'Name', 'Name', None)
300 id = name = 'Name'
301 cfn_type = 'AWS::EMR::SecurityConfiguration'
302
303 permissions = ('elasticmapreduce:ListSecurityConfigurations',
304 'elasticmapreduce:DescribeSecurityConfiguration',)
305
306 def augment(self, resources):
307 resources = super().augment(resources)
308 for r in resources:
309 r['SecurityConfiguration'] = json.loads(r['SecurityConfiguration'])
310 return resources
311
312
313 @EMRSecurityConfiguration.action_registry.register('delete')
314 class DeleteEMRSecurityConfiguration(BaseAction):
315
316 schema = type_schema('delete')
317 permissions = ('elasticmapreduce:DeleteSecurityConfiguration',)
318
319 def process(self, resources):
320 client = local_session(self.manager.session_factory).client('emr')
321 for r in resources:
322 try:
323 client.delete_security_configuration(Name=r['Name'])
324 except client.exceptions.EntityNotFoundException:
325 continue
326
```
--- END FILES ---
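As a usage target, once `subnet`, `security-group` and `network-location` are registered for `emr`, a policy along these lines should load and run. It is written in the Python dict form c7n's tests use for policies; the names, keys and values are illustrative assumptions rather than anything taken from this record.

```python
# Illustrative policy data only; filter keys/values are assumptions.
policy_data = {
    "name": "emr-network-hygiene",
    "resource": "emr",
    "filters": [
        # Match clusters whose requested subnets auto-assign public IPs.
        {"type": "subnet", "key": "MapPublicIpOnLaunch", "value": True},
        # Compare a tag across the cluster's subnets and security groups.
        {"type": "network-location", "compare": ["subnet", "security-group"],
         "key": "tag:Team"},
    ],
}
```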
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/resources/emr.py b/c7n/resources/emr.py
--- a/c7n/resources/emr.py
+++ b/c7n/resources/emr.py
@@ -14,6 +14,7 @@
import logging
import time
import json
+import jmespath
from c7n.actions import ActionRegistry, BaseAction
from c7n.exceptions import PolicyValidationError
@@ -24,6 +25,7 @@
local_session, type_schema, get_retry)
from c7n.tags import (
TagDelayedAction, RemoveTag, TagActionFilter, Tag)
+import c7n.filters.vpc as net_filters
filters = FilterRegistry('emr.filters')
actions = ActionRegistry('emr.actions')
@@ -286,6 +288,37 @@
return {'Name': self.key, 'Values': value}
[email protected]('subnet')
+class SubnetFilter(net_filters.SubnetFilter):
+
+ RelatedIdsExpression = "Ec2InstanceAttributes.RequestedEc2SubnetIds[]"
+
+
[email protected]('security-group')
+class SecurityGroupFilter(net_filters.SecurityGroupFilter):
+
+ RelatedIdsExpression = ""
+ expressions = ('Ec2InstanceAttributes.EmrManagedMasterSecurityGroup',
+ 'Ec2InstanceAttributes.EmrManagedSlaveSecurityGroup',
+ 'Ec2InstanceAttributes.ServiceAccessSecurityGroup',
+ 'Ec2InstanceAttributes.AdditionalMasterSecurityGroups[]',
+ 'Ec2InstanceAttributes.AdditionalSlaveSecurityGroups[]')
+
+ def get_related_ids(self, resources):
+ sg_ids = set()
+ for r in resources:
+ for exp in self.expressions:
+ ids = jmespath.search(exp, r)
+ if isinstance(ids, list):
+ sg_ids.update(tuple(ids))
+ elif isinstance(ids, str):
+ sg_ids.add(ids)
+ return list(sg_ids)
+
+
+filters.register('network-location', net_filters.NetworkLocation)
+
+
@resources.register('emr-security-configuration')
class EMRSecurityConfiguration(QueryResourceManager):
"""Resource manager for EMR Security Configuration
|
{"golden_diff": "diff --git a/c7n/resources/emr.py b/c7n/resources/emr.py\n--- a/c7n/resources/emr.py\n+++ b/c7n/resources/emr.py\n@@ -14,6 +14,7 @@\n import logging\n import time\n import json\n+import jmespath\n \n from c7n.actions import ActionRegistry, BaseAction\n from c7n.exceptions import PolicyValidationError\n@@ -24,6 +25,7 @@\n local_session, type_schema, get_retry)\n from c7n.tags import (\n TagDelayedAction, RemoveTag, TagActionFilter, Tag)\n+import c7n.filters.vpc as net_filters\n \n filters = FilterRegistry('emr.filters')\n actions = ActionRegistry('emr.actions')\n@@ -286,6 +288,37 @@\n return {'Name': self.key, 'Values': value}\n \n \[email protected]('subnet')\n+class SubnetFilter(net_filters.SubnetFilter):\n+\n+ RelatedIdsExpression = \"Ec2InstanceAttributes.RequestedEc2SubnetIds[]\"\n+\n+\[email protected]('security-group')\n+class SecurityGroupFilter(net_filters.SecurityGroupFilter):\n+\n+ RelatedIdsExpression = \"\"\n+ expressions = ('Ec2InstanceAttributes.EmrManagedMasterSecurityGroup',\n+ 'Ec2InstanceAttributes.EmrManagedSlaveSecurityGroup',\n+ 'Ec2InstanceAttributes.ServiceAccessSecurityGroup',\n+ 'Ec2InstanceAttributes.AdditionalMasterSecurityGroups[]',\n+ 'Ec2InstanceAttributes.AdditionalSlaveSecurityGroups[]')\n+\n+ def get_related_ids(self, resources):\n+ sg_ids = set()\n+ for r in resources:\n+ for exp in self.expressions:\n+ ids = jmespath.search(exp, r)\n+ if isinstance(ids, list):\n+ sg_ids.update(tuple(ids))\n+ elif isinstance(ids, str):\n+ sg_ids.add(ids)\n+ return list(sg_ids)\n+\n+\n+filters.register('network-location', net_filters.NetworkLocation)\n+\n+\n @resources.register('emr-security-configuration')\n class EMRSecurityConfiguration(QueryResourceManager):\n \"\"\"Resource manager for EMR Security Configuration\n", "issue": "emr extend network-location feature\n**Is your feature request related to a problem? 
Please describe.**\r\nRequire subnet/sg tag comparison for resource: emr\r\n\r\n**Describe the solution you'd like**\r\nExtend filter `subnet`, `security-group` and `network-location` for emr\r\n\r\n**Describe alternatives you've considered**\r\nA clear and concise description of any alternative solutions or features you've considered.\r\n\r\n**Additional context**\r\nAdd any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport time\nimport json\n\nfrom c7n.actions import ActionRegistry, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import (\n local_session, type_schema, get_retry)\nfrom c7n.tags import (\n TagDelayedAction, RemoveTag, TagActionFilter, Tag)\n\nfilters = FilterRegistry('emr.filters')\nactions = ActionRegistry('emr.actions')\nlog = logging.getLogger('custodian.emr')\n\nfilters.register('marked-for-op', TagActionFilter)\n\n\[email protected]('emr')\nclass EMRCluster(QueryResourceManager):\n \"\"\"Resource manager for Elastic MapReduce clusters\n \"\"\"\n\n class resource_type(TypeInfo):\n service = 'emr'\n arn_type = 'emr'\n permission_prefix = 'elasticmapreduce'\n cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']\n enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})\n name = 'Name'\n id = 'Id'\n date = \"Status.Timeline.CreationDateTime\"\n cfn_type = 'AWS::EMR::Cluster'\n\n action_registry = actions\n filter_registry = filters\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, ctx, data):\n super(EMRCluster, self).__init__(ctx, data)\n self.queries = QueryFilter.parse(\n self.data.get('query', [\n {'ClusterStates': [\n 'running', 'bootstrapping', 'waiting']}]))\n\n @classmethod\n def get_permissions(cls):\n return (\"elasticmapreduce:ListClusters\",\n \"elasticmapreduce:DescribeCluster\")\n\n def get_resources(self, ids):\n # no filtering by id set supported at the api\n client = local_session(self.session_factory).client('emr')\n results = []\n for jid in ids:\n results.append(\n client.describe_cluster(ClusterId=jid)['Cluster'])\n return results\n\n def resources(self, query=None):\n q = self.consolidate_query_filter()\n if q is not None:\n query = query or {}\n for i in range(0, len(q)):\n query[q[i]['Name']] = q[i]['Values']\n return super(EMRCluster, self).resources(query=query)\n\n def consolidate_query_filter(self):\n result = []\n names = set()\n # allow same name to be specified multiple times and append the queries\n # under the same name\n for q in self.queries:\n query_filter = q.query()\n if query_filter['Name'] in names:\n for filt in result:\n if query_filter['Name'] == filt['Name']:\n filt['Values'].extend(query_filter['Values'])\n else:\n 
names.add(query_filter['Name'])\n result.append(query_filter)\n if 'ClusterStates' not in names:\n # include default query\n result.append(\n {\n 'Name': 'ClusterStates',\n 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],\n }\n )\n return result\n\n def augment(self, resources):\n client = local_session(\n self.get_resource_manager('emr').session_factory).client('emr')\n result = []\n # remap for cwmetrics\n for r in resources:\n cluster = self.retry(\n client.describe_cluster, ClusterId=r['Id'])['Cluster']\n result.append(cluster)\n return result\n\n\[email protected]_registry.register('metrics')\nclass EMRMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n # Job flow id is legacy name for cluster id\n return [{'Name': 'JobFlowId', 'Value': resource['Id']}]\n\n\[email protected]('mark-for-op')\nclass TagDelayedAction(TagDelayedAction):\n \"\"\"Action to specify an action to occur at a later date\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-mark-for-op\n resource: emr\n filters:\n - \"tag:Name\": absent\n actions:\n - type: mark-for-op\n tag: custodian_cleanup\n op: terminate\n days: 4\n msg: \"Cluster does not have required tags\"\n \"\"\"\n\n\[email protected]('tag')\nclass TagTable(Tag):\n \"\"\"Action to create tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-tag-table\n resource: emr\n filters:\n - \"tag:target-tag\": absent\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, client, resources, tags):\n for r in resources:\n self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n\n\[email protected]('remove-tag')\nclass UntagTable(RemoveTag):\n \"\"\"Action to remove tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-remove-tag\n resource: emr\n filters:\n - \"tag:target-tag\": present\n actions:\n - type: remove-tag\n tags: [\"target-tag\"]\n \"\"\"\n\n concurrency = 2\n batch_size = 5\n permissions = ('elasticmapreduce:RemoveTags',)\n\n def process_resource_set(self, client, resources, tag_keys):\n for r in resources:\n client.remove_tags(ResourceId=r['Id'], TagKeys=tag_keys)\n\n\[email protected]('terminate')\nclass Terminate(BaseAction):\n \"\"\"Action to terminate EMR cluster(s)\n\n It is recommended to apply a filter to the terminate action to avoid\n termination of all EMR clusters\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: emr-terminate\n resource: emr\n query:\n - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]\n actions:\n - terminate\n \"\"\"\n\n schema = type_schema('terminate', force={'type': 'boolean'})\n permissions = (\"elasticmapreduce:TerminateJobFlows\",)\n delay = 5\n\n def process(self, emrs):\n client = local_session(self.manager.session_factory).client('emr')\n cluster_ids = [emr['Id'] for emr in emrs]\n if self.data.get('force'):\n client.set_termination_protection(\n JobFlowIds=cluster_ids, TerminationProtected=False)\n time.sleep(self.delay)\n client.terminate_job_flows(JobFlowIds=cluster_ids)\n self.log.info(\"Deleted emrs: %s\", cluster_ids)\n return emrs\n\n\n# Valid EMR Query Filters\nEMR_VALID_FILTERS = {'CreatedAfter', 'CreatedBefore', 'ClusterStates'}\n\n\nclass QueryFilter:\n\n @classmethod\n def parse(cls, data):\n results = []\n for d in data:\n if not isinstance(d, dict):\n raise PolicyValidationError(\n \"EMR Query Filter Invalid structure %s\" % d)\n results.append(cls(d).validate())\n return results\n\n def __init__(self, data):\n self.data = data\n self.key = None\n self.value = None\n\n def validate(self):\n if not len(list(self.data.keys())) == 1:\n raise PolicyValidationError(\n \"EMR Query Filter Invalid %s\" % self.data)\n self.key = list(self.data.keys())[0]\n self.value = list(self.data.values())[0]\n\n if self.key not in EMR_VALID_FILTERS and not self.key.startswith(\n 'tag:'):\n raise PolicyValidationError(\n \"EMR Query Filter invalid filter name %s\" % (self.data))\n\n if self.value is None:\n raise PolicyValidationError(\n \"EMR Query Filters must have a value, use tag-key\"\n \" w/ tag name as value for tag present checks\"\n \" %s\" % self.data)\n return self\n\n def query(self):\n value = self.value\n if isinstance(self.value, str):\n value = [self.value]\n\n return {'Name': self.key, 'Values': value}\n\n\[email protected]('emr-security-configuration')\nclass EMRSecurityConfiguration(QueryResourceManager):\n \"\"\"Resource manager for EMR Security Configuration\n \"\"\"\n\n class resource_type(TypeInfo):\n service = 'emr'\n arn_type = 'emr'\n permission_prefix = 'elasticmapreduce'\n enum_spec = ('list_security_configurations', 'SecurityConfigurations', None)\n detail_spec = ('describe_security_configuration', 'Name', 'Name', None)\n id = name = 'Name'\n cfn_type = 'AWS::EMR::SecurityConfiguration'\n\n permissions = ('elasticmapreduce:ListSecurityConfigurations',\n 'elasticmapreduce:DescribeSecurityConfiguration',)\n\n def augment(self, resources):\n resources = super().augment(resources)\n for r in resources:\n r['SecurityConfiguration'] = json.loads(r['SecurityConfiguration'])\n return resources\n\n\[email protected]_registry.register('delete')\nclass DeleteEMRSecurityConfiguration(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('elasticmapreduce:DeleteSecurityConfiguration',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('emr')\n for r in resources:\n try:\n client.delete_security_configuration(Name=r['Name'])\n except client.exceptions.EntityNotFoundException:\n continue\n", "path": "c7n/resources/emr.py"}], "after_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by 
applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport time\nimport json\nimport jmespath\n\nfrom c7n.actions import ActionRegistry, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import FilterRegistry, MetricsFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo\nfrom c7n.utils import (\n local_session, type_schema, get_retry)\nfrom c7n.tags import (\n TagDelayedAction, RemoveTag, TagActionFilter, Tag)\nimport c7n.filters.vpc as net_filters\n\nfilters = FilterRegistry('emr.filters')\nactions = ActionRegistry('emr.actions')\nlog = logging.getLogger('custodian.emr')\n\nfilters.register('marked-for-op', TagActionFilter)\n\n\[email protected]('emr')\nclass EMRCluster(QueryResourceManager):\n \"\"\"Resource manager for Elastic MapReduce clusters\n \"\"\"\n\n class resource_type(TypeInfo):\n service = 'emr'\n arn_type = 'emr'\n permission_prefix = 'elasticmapreduce'\n cluster_states = ['WAITING', 'BOOTSTRAPPING', 'RUNNING', 'STARTING']\n enum_spec = ('list_clusters', 'Clusters', {'ClusterStates': cluster_states})\n name = 'Name'\n id = 'Id'\n date = \"Status.Timeline.CreationDateTime\"\n cfn_type = 'AWS::EMR::Cluster'\n\n action_registry = actions\n filter_registry = filters\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def __init__(self, ctx, data):\n super(EMRCluster, self).__init__(ctx, data)\n self.queries = QueryFilter.parse(\n self.data.get('query', [\n {'ClusterStates': [\n 'running', 'bootstrapping', 'waiting']}]))\n\n @classmethod\n def get_permissions(cls):\n return (\"elasticmapreduce:ListClusters\",\n \"elasticmapreduce:DescribeCluster\")\n\n def get_resources(self, ids):\n # no filtering by id set supported at the api\n client = local_session(self.session_factory).client('emr')\n results = []\n for jid in ids:\n results.append(\n client.describe_cluster(ClusterId=jid)['Cluster'])\n return results\n\n def resources(self, query=None):\n q = self.consolidate_query_filter()\n if q is not None:\n query = query or {}\n for i in range(0, len(q)):\n query[q[i]['Name']] = q[i]['Values']\n return super(EMRCluster, self).resources(query=query)\n\n def consolidate_query_filter(self):\n result = []\n names = set()\n # allow same name to be specified multiple times and append the queries\n # under the same name\n for q in self.queries:\n query_filter = q.query()\n if query_filter['Name'] in names:\n for filt in result:\n if query_filter['Name'] == filt['Name']:\n filt['Values'].extend(query_filter['Values'])\n else:\n names.add(query_filter['Name'])\n result.append(query_filter)\n if 'ClusterStates' not in names:\n # include default query\n result.append(\n {\n 'Name': 'ClusterStates',\n 'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],\n }\n )\n return result\n\n def augment(self, resources):\n client = local_session(\n self.get_resource_manager('emr').session_factory).client('emr')\n result = []\n # remap for cwmetrics\n for r in resources:\n cluster = self.retry(\n client.describe_cluster, ClusterId=r['Id'])['Cluster']\n result.append(cluster)\n return result\n\n\[email protected]_registry.register('metrics')\nclass EMRMetrics(MetricsFilter):\n\n def get_dimensions(self, resource):\n # Job flow id is legacy name for cluster id\n return 
[{'Name': 'JobFlowId', 'Value': resource['Id']}]\n\n\[email protected]('mark-for-op')\nclass TagDelayedAction(TagDelayedAction):\n \"\"\"Action to specify an action to occur at a later date\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-mark-for-op\n resource: emr\n filters:\n - \"tag:Name\": absent\n actions:\n - type: mark-for-op\n tag: custodian_cleanup\n op: terminate\n days: 4\n msg: \"Cluster does not have required tags\"\n \"\"\"\n\n\[email protected]('tag')\nclass TagTable(Tag):\n \"\"\"Action to create tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-tag-table\n resource: emr\n filters:\n - \"tag:target-tag\": absent\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('elasticmapreduce:AddTags',)\n batch_size = 1\n retry = staticmethod(get_retry(('ThrottlingException',)))\n\n def process_resource_set(self, client, resources, tags):\n for r in resources:\n self.retry(client.add_tags, ResourceId=r['Id'], Tags=tags)\n\n\[email protected]('remove-tag')\nclass UntagTable(RemoveTag):\n \"\"\"Action to remove tag(s) on a resource\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: emr-remove-tag\n resource: emr\n filters:\n - \"tag:target-tag\": present\n actions:\n - type: remove-tag\n tags: [\"target-tag\"]\n \"\"\"\n\n concurrency = 2\n batch_size = 5\n permissions = ('elasticmapreduce:RemoveTags',)\n\n def process_resource_set(self, client, resources, tag_keys):\n for r in resources:\n client.remove_tags(ResourceId=r['Id'], TagKeys=tag_keys)\n\n\[email protected]('terminate')\nclass Terminate(BaseAction):\n \"\"\"Action to terminate EMR cluster(s)\n\n It is recommended to apply a filter to the terminate action to avoid\n termination of all EMR clusters\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: emr-terminate\n resource: emr\n query:\n - ClusterStates: [STARTING, BOOTSTRAPPING, RUNNING, WAITING]\n actions:\n - terminate\n \"\"\"\n\n schema = type_schema('terminate', force={'type': 'boolean'})\n permissions = (\"elasticmapreduce:TerminateJobFlows\",)\n delay = 5\n\n def process(self, emrs):\n client = local_session(self.manager.session_factory).client('emr')\n cluster_ids = [emr['Id'] for emr in emrs]\n if self.data.get('force'):\n client.set_termination_protection(\n JobFlowIds=cluster_ids, TerminationProtected=False)\n time.sleep(self.delay)\n client.terminate_job_flows(JobFlowIds=cluster_ids)\n self.log.info(\"Deleted emrs: %s\", cluster_ids)\n return emrs\n\n\n# Valid EMR Query Filters\nEMR_VALID_FILTERS = {'CreatedAfter', 'CreatedBefore', 'ClusterStates'}\n\n\nclass QueryFilter:\n\n @classmethod\n def parse(cls, data):\n results = []\n for d in data:\n if not isinstance(d, dict):\n raise PolicyValidationError(\n \"EMR Query Filter Invalid structure %s\" % d)\n results.append(cls(d).validate())\n return results\n\n def __init__(self, data):\n self.data = data\n self.key = None\n self.value = None\n\n def validate(self):\n if not len(list(self.data.keys())) == 1:\n raise PolicyValidationError(\n \"EMR Query Filter Invalid %s\" % self.data)\n self.key = list(self.data.keys())[0]\n self.value = list(self.data.values())[0]\n\n if self.key not in EMR_VALID_FILTERS and not self.key.startswith(\n 'tag:'):\n raise PolicyValidationError(\n \"EMR Query Filter invalid filter name %s\" % (self.data))\n\n if self.value is None:\n raise PolicyValidationError(\n \"EMR Query Filters must have a value, use tag-key\"\n \" w/ tag name as value for tag present checks\"\n \" %s\" % self.data)\n return self\n\n def query(self):\n value = self.value\n if isinstance(self.value, str):\n value = [self.value]\n\n return {'Name': self.key, 'Values': value}\n\n\[email protected]('subnet')\nclass SubnetFilter(net_filters.SubnetFilter):\n\n RelatedIdsExpression = \"Ec2InstanceAttributes.RequestedEc2SubnetIds[]\"\n\n\[email protected]('security-group')\nclass SecurityGroupFilter(net_filters.SecurityGroupFilter):\n\n RelatedIdsExpression = \"\"\n expressions = ('Ec2InstanceAttributes.EmrManagedMasterSecurityGroup',\n 'Ec2InstanceAttributes.EmrManagedSlaveSecurityGroup',\n 'Ec2InstanceAttributes.ServiceAccessSecurityGroup',\n 'Ec2InstanceAttributes.AdditionalMasterSecurityGroups[]',\n 'Ec2InstanceAttributes.AdditionalSlaveSecurityGroups[]')\n\n def get_related_ids(self, resources):\n sg_ids = set()\n for r in resources:\n for exp in self.expressions:\n ids = jmespath.search(exp, r)\n if isinstance(ids, list):\n sg_ids.update(tuple(ids))\n elif isinstance(ids, str):\n sg_ids.add(ids)\n return list(sg_ids)\n\n\nfilters.register('network-location', net_filters.NetworkLocation)\n\n\[email protected]('emr-security-configuration')\nclass EMRSecurityConfiguration(QueryResourceManager):\n \"\"\"Resource manager for EMR Security Configuration\n \"\"\"\n\n class resource_type(TypeInfo):\n service = 'emr'\n arn_type = 'emr'\n permission_prefix = 'elasticmapreduce'\n enum_spec = ('list_security_configurations', 'SecurityConfigurations', None)\n detail_spec = ('describe_security_configuration', 'Name', 'Name', None)\n id = name = 'Name'\n cfn_type = 'AWS::EMR::SecurityConfiguration'\n\n permissions = ('elasticmapreduce:ListSecurityConfigurations',\n 'elasticmapreduce:DescribeSecurityConfiguration',)\n\n def augment(self, resources):\n resources = super().augment(resources)\n for r 
in resources:\n r['SecurityConfiguration'] = json.loads(r['SecurityConfiguration'])\n return resources\n\n\[email protected]_registry.register('delete')\nclass DeleteEMRSecurityConfiguration(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('elasticmapreduce:DeleteSecurityConfiguration',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('emr')\n for r in resources:\n try:\n client.delete_security_configuration(Name=r['Name'])\n except client.exceptions.EntityNotFoundException:\n continue\n", "path": "c7n/resources/emr.py"}]}
| 3,498 | 459 |
gh_patches_debug_20631
|
rasdani/github-patches
|
git_diff
|
facebookresearch__xformers-401
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make optional Triton requirement more robust
# 🐛 Bug
Triton is currently an optional dependency, see
https://github.com/facebookresearch/xformers/blob/51dd1192620271598ba71bb1351c1c5508720b31/xformers/__init__.py#L75-L82
But if the installed version doesn't respect certain constraints, users can have issues at import time, such as https://github.com/huggingface/diffusers/pull/532#issuecomment-1249527248
We should probably make the triton requirement a bit more robust
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xformers/__init__.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import logging
7
8 import torch
9
10 # Please update the doc version in docs/source/conf.py as well.
11 __version__ = "0.0.13.dev"
12
13 _is_sparse_available: bool = True
14 _is_triton_available: bool = torch.cuda.is_available()
15
16 # Set to true to utilize functorch
17 _is_functorch_available: bool = False
18
19
20 def _register_extensions():
21 import importlib
22 import os
23
24 import torch
25
26 # load the custom_op_library and register the custom ops
27 lib_dir = os.path.dirname(__file__)
28 if os.name == "nt":
29 # Register the main torchvision library location on the default DLL path
30 import ctypes
31 import sys
32
33 kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
34 with_load_library_flags = hasattr(kernel32, "AddDllDirectory")
35 prev_error_mode = kernel32.SetErrorMode(0x0001)
36
37 if with_load_library_flags:
38 kernel32.AddDllDirectory.restype = ctypes.c_void_p
39
40 if sys.version_info >= (3, 8):
41 os.add_dll_directory(lib_dir)
42 elif with_load_library_flags:
43 res = kernel32.AddDllDirectory(lib_dir)
44 if res is None:
45 err = ctypes.WinError(ctypes.get_last_error())
46 err.strerror += f' Error adding "{lib_dir}" to the DLL directories.'
47 raise err
48
49 kernel32.SetErrorMode(prev_error_mode)
50
51 loader_details = (
52 importlib.machinery.ExtensionFileLoader,
53 importlib.machinery.EXTENSION_SUFFIXES,
54 )
55
56 extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
57 ext_specs = extfinder.find_spec("_C")
58 if ext_specs is None:
59 raise ImportError
60 torch.ops.load_library(ext_specs.origin)
61
62
63 if _is_sparse_available:
64 try:
65 _register_extensions()
66 except (ImportError, OSError) as e:
67 print(e)
68 logging.warning(
69 f"WARNING: {e}\nNeed to compile C++ extensions to get sparse attention suport."
70 + " Please run python setup.py build develop"
71 )
72 _is_sparse_available = False
73
74
75 if _is_triton_available:
76 try:
77 from xformers.triton.softmax import softmax as triton_softmax # noqa
78 except ImportError as e:
79 logging.warning(
80 f"Triton is not available, some optimizations will not be enabled.\nError {e}"
81 )
82 _is_triton_available = False
83
84
85 if _is_functorch_available:
86 try:
87 from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa
88 except ImportError as e:
89 logging.warning(
90 f"Functorch is not available, some optimizations will not be enabled.\nError {e}"
91 )
92 _is_functorch_available = False
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xformers/__init__.py b/xformers/__init__.py
--- a/xformers/__init__.py
+++ b/xformers/__init__.py
@@ -75,9 +75,9 @@
if _is_triton_available:
try:
from xformers.triton.softmax import softmax as triton_softmax # noqa
- except ImportError as e:
+ except (ImportError, AttributeError) as e:
logging.warning(
- f"Triton is not available, some optimizations will not be enabled.\nError {e}"
+ f"A matching Triton is not available, some optimizations will not be enabled.\nError caught was: {e}"
)
_is_triton_available = False
@@ -87,6 +87,6 @@
from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa
except ImportError as e:
logging.warning(
- f"Functorch is not available, some optimizations will not be enabled.\nError {e}"
+ f"Functorch is not available, some optimizations will not be enabled.\nError caught was: {e}"
)
_is_functorch_available = False
|
{"golden_diff": "diff --git a/xformers/__init__.py b/xformers/__init__.py\n--- a/xformers/__init__.py\n+++ b/xformers/__init__.py\n@@ -75,9 +75,9 @@\n if _is_triton_available:\n try:\n from xformers.triton.softmax import softmax as triton_softmax # noqa\n- except ImportError as e:\n+ except (ImportError, AttributeError) as e:\n logging.warning(\n- f\"Triton is not available, some optimizations will not be enabled.\\nError {e}\"\n+ f\"A matching Triton is not available, some optimizations will not be enabled.\\nError caught was: {e}\"\n )\n _is_triton_available = False\n \n@@ -87,6 +87,6 @@\n from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa\n except ImportError as e:\n logging.warning(\n- f\"Functorch is not available, some optimizations will not be enabled.\\nError {e}\"\n+ f\"Functorch is not available, some optimizations will not be enabled.\\nError caught was: {e}\"\n )\n _is_functorch_available = False\n", "issue": "Make optional Triton requirement more robust\n# \ud83d\udc1b Bug\r\n\r\nTriton is currently an optional dependency, see\r\nhttps://github.com/facebookresearch/xformers/blob/51dd1192620271598ba71bb1351c1c5508720b31/xformers/__init__.py#L75-L82\r\n\r\nBut if the installed version doesn't respect certain constraints, users can have issues at import time, such as https://github.com/huggingface/diffusers/pull/532#issuecomment-1249527248\r\n\r\nWe should probably make the triton requirement a bit more robust\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport torch\n\n# Please update the doc version in docs/source/conf.py as well.\n__version__ = \"0.0.13.dev\"\n\n_is_sparse_available: bool = True\n_is_triton_available: bool = torch.cuda.is_available()\n\n# Set to true to utilize functorch\n_is_functorch_available: bool = False\n\n\ndef _register_extensions():\n import importlib\n import os\n\n import torch\n\n # load the custom_op_library and register the custom ops\n lib_dir = os.path.dirname(__file__)\n if os.name == \"nt\":\n # Register the main torchvision library location on the default DLL path\n import ctypes\n import sys\n\n kernel32 = ctypes.WinDLL(\"kernel32.dll\", use_last_error=True)\n with_load_library_flags = hasattr(kernel32, \"AddDllDirectory\")\n prev_error_mode = kernel32.SetErrorMode(0x0001)\n\n if with_load_library_flags:\n kernel32.AddDllDirectory.restype = ctypes.c_void_p\n\n if sys.version_info >= (3, 8):\n os.add_dll_directory(lib_dir)\n elif with_load_library_flags:\n res = kernel32.AddDllDirectory(lib_dir)\n if res is None:\n err = ctypes.WinError(ctypes.get_last_error())\n err.strerror += f' Error adding \"{lib_dir}\" to the DLL directories.'\n raise err\n\n kernel32.SetErrorMode(prev_error_mode)\n\n loader_details = (\n importlib.machinery.ExtensionFileLoader,\n importlib.machinery.EXTENSION_SUFFIXES,\n )\n\n extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)\n ext_specs = extfinder.find_spec(\"_C\")\n if ext_specs is None:\n raise ImportError\n torch.ops.load_library(ext_specs.origin)\n\n\nif _is_sparse_available:\n try:\n _register_extensions()\n except (ImportError, OSError) as e:\n print(e)\n logging.warning(\n f\"WARNING: {e}\\nNeed to compile C++ extensions to get sparse attention suport.\"\n + \" Please run python setup.py build develop\"\n )\n _is_sparse_available = False\n\n\nif 
_is_triton_available:\n try:\n from xformers.triton.softmax import softmax as triton_softmax # noqa\n except ImportError as e:\n logging.warning(\n f\"Triton is not available, some optimizations will not be enabled.\\nError {e}\"\n )\n _is_triton_available = False\n\n\nif _is_functorch_available:\n try:\n from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa\n except ImportError as e:\n logging.warning(\n f\"Functorch is not available, some optimizations will not be enabled.\\nError {e}\"\n )\n _is_functorch_available = False\n", "path": "xformers/__init__.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport torch\n\n# Please update the doc version in docs/source/conf.py as well.\n__version__ = \"0.0.13.dev\"\n\n_is_sparse_available: bool = True\n_is_triton_available: bool = torch.cuda.is_available()\n\n# Set to true to utilize functorch\n_is_functorch_available: bool = False\n\n\ndef _register_extensions():\n import importlib\n import os\n\n import torch\n\n # load the custom_op_library and register the custom ops\n lib_dir = os.path.dirname(__file__)\n if os.name == \"nt\":\n # Register the main torchvision library location on the default DLL path\n import ctypes\n import sys\n\n kernel32 = ctypes.WinDLL(\"kernel32.dll\", use_last_error=True)\n with_load_library_flags = hasattr(kernel32, \"AddDllDirectory\")\n prev_error_mode = kernel32.SetErrorMode(0x0001)\n\n if with_load_library_flags:\n kernel32.AddDllDirectory.restype = ctypes.c_void_p\n\n if sys.version_info >= (3, 8):\n os.add_dll_directory(lib_dir)\n elif with_load_library_flags:\n res = kernel32.AddDllDirectory(lib_dir)\n if res is None:\n err = ctypes.WinError(ctypes.get_last_error())\n err.strerror += f' Error adding \"{lib_dir}\" to the DLL directories.'\n raise err\n\n kernel32.SetErrorMode(prev_error_mode)\n\n loader_details = (\n importlib.machinery.ExtensionFileLoader,\n importlib.machinery.EXTENSION_SUFFIXES,\n )\n\n extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)\n ext_specs = extfinder.find_spec(\"_C\")\n if ext_specs is None:\n raise ImportError\n torch.ops.load_library(ext_specs.origin)\n\n\nif _is_sparse_available:\n try:\n _register_extensions()\n except (ImportError, OSError) as e:\n print(e)\n logging.warning(\n f\"WARNING: {e}\\nNeed to compile C++ extensions to get sparse attention suport.\"\n + \" Please run python setup.py build develop\"\n )\n _is_sparse_available = False\n\n\nif _is_triton_available:\n try:\n from xformers.triton.softmax import softmax as triton_softmax # noqa\n except (ImportError, AttributeError) as e:\n logging.warning(\n f\"A matching Triton is not available, some optimizations will not be enabled.\\nError caught was: {e}\"\n )\n _is_triton_available = False\n\n\nif _is_functorch_available:\n try:\n from xformers.components.nvfuser import NVFusedBiasActivationDropout # noqa\n except ImportError as e:\n logging.warning(\n f\"Functorch is not available, some optimizations will not be enabled.\\nError caught was: {e}\"\n )\n _is_functorch_available = False\n", "path": "xformers/__init__.py"}]}
| 1,264 | 266 |
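The patch above makes the optional Triton import tolerant of both a missing package and an installed-but-incompatible one by catching `AttributeError` alongside `ImportError`. As a rough, standalone sketch of that probing pattern (a generic helper, not code from the xformers repository):

```python
import importlib
import logging


def probe_optional_dependency(module_name: str) -> bool:
    """Return True if an optional dependency imports cleanly, else False.

    Catching AttributeError in addition to ImportError covers installed
    versions whose internals differ from what the importing code expects.
    """
    try:
        importlib.import_module(module_name)
        return True
    except (ImportError, AttributeError) as e:
        logging.warning("Optional dependency %r unavailable: %s", module_name, e)
        return False


# Feature flag in the spirit of _is_triton_available above.
_is_triton_available = probe_optional_dependency("triton")
```

In the actual fix, the `AttributeError` can surface while importing `xformers.triton.softmax` when the installed Triton's API does not match; the helper here simply folds both failure modes into a single boolean.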
gh_patches_debug_24131
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-958
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bootstrap script cannot be executed without setuptools already installed
The `bootstrap.py` script cannot be used to bootstrap from an environment without setuptools. The script imports `pip` which requires setuptools to be installed. This behavior was introduced in #933.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bootstrap.py`
Content:
```
1 """
2 If setuptools is not already installed in the environment, it's not possible
3 to invoke setuptools' own commands. This routine will bootstrap this local
4 environment by creating a minimal egg-info directory and then invoking the
5 egg-info command to flesh out the egg-info directory.
6 """
7
8 from __future__ import unicode_literals
9
10 import os
11 import io
12 import re
13 import contextlib
14 import tempfile
15 import shutil
16 import sys
17 import textwrap
18 import subprocess
19
20 import pip
21
22 minimal_egg_info = textwrap.dedent("""
23 [distutils.commands]
24 egg_info = setuptools.command.egg_info:egg_info
25
26 [distutils.setup_keywords]
27 include_package_data = setuptools.dist:assert_bool
28 install_requires = setuptools.dist:check_requirements
29 extras_require = setuptools.dist:check_extras
30 entry_points = setuptools.dist:check_entry_points
31
32 [egg_info.writers]
33 dependency_links.txt = setuptools.command.egg_info:overwrite_arg
34 entry_points.txt = setuptools.command.egg_info:write_entries
35 requires.txt = setuptools.command.egg_info:write_requirements
36 """)
37
38
39 def ensure_egg_info():
40 if os.path.exists('setuptools.egg-info'):
41 return
42 print("adding minimal entry_points")
43 build_egg_info()
44
45
46 def build_egg_info():
47 """
48 Build a minimal egg-info, enough to invoke egg_info
49 """
50
51 os.mkdir('setuptools.egg-info')
52 filename = 'setuptools.egg-info/entry_points.txt'
53 with io.open(filename, 'w', encoding='utf-8') as ep:
54 ep.write(minimal_egg_info)
55
56
57 def run_egg_info():
58 cmd = [sys.executable, 'setup.py', 'egg_info']
59 print("Regenerating egg_info")
60 subprocess.check_call(cmd)
61 print("...and again.")
62 subprocess.check_call(cmd)
63
64
65 def gen_deps():
66 with io.open('setup.py', encoding='utf-8') as strm:
67 text = strm.read()
68 pattern = r'install_requires=\[(.*?)\]'
69 match = re.search(pattern, text, flags=re.M|re.DOTALL)
70 reqs = eval(match.group(1).replace('\n', ''))
71 with io.open('requirements.txt', 'w', encoding='utf-8') as reqs_file:
72 reqs_file.write('\n'.join(reqs))
73
74
75 @contextlib.contextmanager
76 def install_deps():
77 "Just in time make the deps available"
78 gen_deps()
79 tmpdir = tempfile.mkdtemp()
80 args = [
81 'install',
82 '-t', tmpdir,
83 '-r', 'requirements.txt',
84 ]
85 pip.main(args)
86 os.environ['PYTHONPATH'] = tmpdir
87 try:
88 yield tmpdir
89 finally:
90 shutil.rmtree(tmpdir)
91
92
93 if __name__ == '__main__':
94 ensure_egg_info()
95 with install_deps():
96 run_egg_info()
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bootstrap.py b/bootstrap.py
--- a/bootstrap.py
+++ b/bootstrap.py
@@ -7,6 +7,7 @@
from __future__ import unicode_literals
+import argparse
import os
import io
import re
@@ -17,7 +18,6 @@
import textwrap
import subprocess
-import pip
minimal_egg_info = textwrap.dedent("""
[distutils.commands]
@@ -75,6 +75,7 @@
@contextlib.contextmanager
def install_deps():
"Just in time make the deps available"
+ import pip
gen_deps()
tmpdir = tempfile.mkdtemp()
args = [
@@ -91,6 +92,15 @@
if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='bootstrap setuptools')
+ parser.add_argument(
+ '--skip-dep-install', action='store_true',
+ help=("Do not attempt to install setuptools dependencies. These "
+ "should be provided in the environment in another manner."))
+ args = parser.parse_args()
ensure_egg_info()
- with install_deps():
+ if args.skip_dep_install:
run_egg_info()
+ else:
+ with install_deps():
+ run_egg_info()
|
{"golden_diff": "diff --git a/bootstrap.py b/bootstrap.py\n--- a/bootstrap.py\n+++ b/bootstrap.py\n@@ -7,6 +7,7 @@\n \n from __future__ import unicode_literals\n \n+import argparse\n import os\n import io\n import re\n@@ -17,7 +18,6 @@\n import textwrap\n import subprocess\n \n-import pip\n \n minimal_egg_info = textwrap.dedent(\"\"\"\n [distutils.commands]\n@@ -75,6 +75,7 @@\n @contextlib.contextmanager\n def install_deps():\n \"Just in time make the deps available\"\n+ import pip\n gen_deps()\n tmpdir = tempfile.mkdtemp()\n args = [\n@@ -91,6 +92,15 @@\n \n \n if __name__ == '__main__':\n+ parser = argparse.ArgumentParser(description='bootstrap setuptools')\n+ parser.add_argument(\n+ '--skip-dep-install', action='store_true',\n+ help=(\"Do not attempt to install setuptools dependencies. These \"\n+ \"should be provided in the environment in another manner.\"))\n+ args = parser.parse_args()\n ensure_egg_info()\n- with install_deps():\n+ if args.skip_dep_install:\n run_egg_info()\n+ else:\n+ with install_deps():\n+ run_egg_info()\n", "issue": "bootstrap script cannot be executed without setuptools already installed\nThe `bootstrap.py` script cannot be used to bootstrap from an environment without setuptools. The script imports `pip` which requires setuptools to be be installed. This behavior was introduced in #933.\n", "before_files": [{"content": "\"\"\"\nIf setuptools is not already installed in the environment, it's not possible\nto invoke setuptools' own commands. This routine will bootstrap this local\nenvironment by creating a minimal egg-info directory and then invoking the\negg-info command to flesh out the egg-info directory.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\nimport io\nimport re\nimport contextlib\nimport tempfile\nimport shutil\nimport sys\nimport textwrap\nimport subprocess\n\nimport pip\n\nminimal_egg_info = textwrap.dedent(\"\"\"\n [distutils.commands]\n egg_info = setuptools.command.egg_info:egg_info\n\n [distutils.setup_keywords]\n include_package_data = setuptools.dist:assert_bool\n install_requires = setuptools.dist:check_requirements\n extras_require = setuptools.dist:check_extras\n entry_points = setuptools.dist:check_entry_points\n\n [egg_info.writers]\n dependency_links.txt = setuptools.command.egg_info:overwrite_arg\n entry_points.txt = setuptools.command.egg_info:write_entries\n requires.txt = setuptools.command.egg_info:write_requirements\n \"\"\")\n\n\ndef ensure_egg_info():\n if os.path.exists('setuptools.egg-info'):\n return\n print(\"adding minimal entry_points\")\n build_egg_info()\n\n\ndef build_egg_info():\n \"\"\"\n Build a minimal egg-info, enough to invoke egg_info\n \"\"\"\n\n os.mkdir('setuptools.egg-info')\n filename = 'setuptools.egg-info/entry_points.txt'\n with io.open(filename, 'w', encoding='utf-8') as ep:\n ep.write(minimal_egg_info)\n\n\ndef run_egg_info():\n cmd = [sys.executable, 'setup.py', 'egg_info']\n print(\"Regenerating egg_info\")\n subprocess.check_call(cmd)\n print(\"...and again.\")\n subprocess.check_call(cmd)\n\n\ndef gen_deps():\n with io.open('setup.py', encoding='utf-8') as strm:\n text = strm.read()\n pattern = r'install_requires=\\[(.*?)\\]'\n match = re.search(pattern, text, flags=re.M|re.DOTALL)\n reqs = eval(match.group(1).replace('\\n', ''))\n with io.open('requirements.txt', 'w', encoding='utf-8') as reqs_file:\n reqs_file.write('\\n'.join(reqs))\n\n\[email protected]\ndef install_deps():\n \"Just in time make the deps available\"\n gen_deps()\n tmpdir = tempfile.mkdtemp()\n args = [\n 
'install',\n '-t', tmpdir,\n '-r', 'requirements.txt',\n ]\n pip.main(args)\n os.environ['PYTHONPATH'] = tmpdir\n try:\n yield tmpdir\n finally:\n shutil.rmtree(tmpdir)\n\n\nif __name__ == '__main__':\n ensure_egg_info()\n with install_deps():\n run_egg_info()\n", "path": "bootstrap.py"}], "after_files": [{"content": "\"\"\"\nIf setuptools is not already installed in the environment, it's not possible\nto invoke setuptools' own commands. This routine will bootstrap this local\nenvironment by creating a minimal egg-info directory and then invoking the\negg-info command to flesh out the egg-info directory.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport io\nimport re\nimport contextlib\nimport tempfile\nimport shutil\nimport sys\nimport textwrap\nimport subprocess\n\n\nminimal_egg_info = textwrap.dedent(\"\"\"\n [distutils.commands]\n egg_info = setuptools.command.egg_info:egg_info\n\n [distutils.setup_keywords]\n include_package_data = setuptools.dist:assert_bool\n install_requires = setuptools.dist:check_requirements\n extras_require = setuptools.dist:check_extras\n entry_points = setuptools.dist:check_entry_points\n\n [egg_info.writers]\n dependency_links.txt = setuptools.command.egg_info:overwrite_arg\n entry_points.txt = setuptools.command.egg_info:write_entries\n requires.txt = setuptools.command.egg_info:write_requirements\n \"\"\")\n\n\ndef ensure_egg_info():\n if os.path.exists('setuptools.egg-info'):\n return\n print(\"adding minimal entry_points\")\n build_egg_info()\n\n\ndef build_egg_info():\n \"\"\"\n Build a minimal egg-info, enough to invoke egg_info\n \"\"\"\n\n os.mkdir('setuptools.egg-info')\n filename = 'setuptools.egg-info/entry_points.txt'\n with io.open(filename, 'w', encoding='utf-8') as ep:\n ep.write(minimal_egg_info)\n\n\ndef run_egg_info():\n cmd = [sys.executable, 'setup.py', 'egg_info']\n print(\"Regenerating egg_info\")\n subprocess.check_call(cmd)\n print(\"...and again.\")\n subprocess.check_call(cmd)\n\n\ndef gen_deps():\n with io.open('setup.py', encoding='utf-8') as strm:\n text = strm.read()\n pattern = r'install_requires=\\[(.*?)\\]'\n match = re.search(pattern, text, flags=re.M|re.DOTALL)\n reqs = eval(match.group(1).replace('\\n', ''))\n with io.open('requirements.txt', 'w', encoding='utf-8') as reqs_file:\n reqs_file.write('\\n'.join(reqs))\n\n\[email protected]\ndef install_deps():\n \"Just in time make the deps available\"\n import pip\n gen_deps()\n tmpdir = tempfile.mkdtemp()\n args = [\n 'install',\n '-t', tmpdir,\n '-r', 'requirements.txt',\n ]\n pip.main(args)\n os.environ['PYTHONPATH'] = tmpdir\n try:\n yield tmpdir\n finally:\n shutil.rmtree(tmpdir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='bootstrap setuptools')\n parser.add_argument(\n '--skip-dep-install', action='store_true',\n help=(\"Do not attempt to install setuptools dependencies. These \"\n \"should be provided in the environment in another manner.\"))\n args = parser.parse_args()\n ensure_egg_info()\n if args.skip_dep_install:\n run_egg_info()\n else:\n with install_deps():\n run_egg_info()\n", "path": "bootstrap.py"}]}
| 1,120 | 276 |
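The bootstrap.py fix above defers the `pip` import into `install_deps()` and adds a `--skip-dep-install` flag, so merely running the script no longer needs pip (and therefore setuptools) at import time. A minimal, generic sketch of that lazy-dependency idea — using `python -m pip` via `subprocess` instead of the script's direct `pip.main()` call, since the latter is not a supported API in modern pip:

```python
import argparse
import subprocess
import sys


def install_deps(requirements: str, target_dir: str) -> None:
    # Heavy or optional machinery is only touched when installation is
    # actually requested, so importing this script does not require it.
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "-t", target_dir, "-r", requirements]
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="lazy dependency install sketch")
    parser.add_argument("--skip-dep-install", action="store_true")
    args = parser.parse_args()
    if not args.skip_dep_install:
        install_deps("requirements.txt", "build_deps")
```

The file paths here are placeholders; the point is only that the expensive import (or subprocess call) lives inside the code path that can be skipped.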
gh_patches_debug_14456
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-4185
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
View response in Firefox gives file not found
#### Problem Description
Hitting `v` on a flow opens up firefox pointing to a temp mitmproxy file but the file doesn't exist
#### Steps to reproduce the behavior:
1. Capture a request
2. Press v in the flow view of request and 2 for viewing response
3. Firefox opens up to view the response html page but only displays file not found e.g. `Firefox can’t find the file at /tmp/mproxynqb8uytn.html`. (Hitting `e` and editing the response works correctly and opens up the response as a tmp file in my editor)
#### System Information
Mitmproxy: 5.1.1
Python: 3.8.3
OpenSSL: OpenSSL 1.1.1g 21 Apr 2020
Platform: Linux-5.6.19-2-MANJARO-x86_64-with-glibc2.2.5
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/master.py`
Content:
```
1 import asyncio
2 import mailcap
3 import mimetypes
4 import os
5 import os.path
6 import shlex
7 import signal
8 import stat
9 import subprocess
10 import sys
11 import tempfile
12 import typing # noqa
13 import contextlib
14
15 import urwid
16
17 from mitmproxy import addons
18 from mitmproxy import master
19 from mitmproxy import log
20 from mitmproxy.addons import intercept
21 from mitmproxy.addons import eventstore
22 from mitmproxy.addons import readfile
23 from mitmproxy.addons import view
24 from mitmproxy.tools.console import consoleaddons
25 from mitmproxy.tools.console import defaultkeys
26 from mitmproxy.tools.console import keymap
27 from mitmproxy.tools.console import palettes
28 from mitmproxy.tools.console import signals
29 from mitmproxy.tools.console import window
30
31
32 class ConsoleMaster(master.Master):
33
34 def __init__(self, opts):
35 super().__init__(opts)
36
37 self.start_err: typing.Optional[log.LogEntry] = None
38
39 self.view: view.View = view.View()
40 self.events = eventstore.EventStore()
41 self.events.sig_add.connect(self.sig_add_log)
42
43 self.stream_path = None
44 self.keymap = keymap.Keymap(self)
45 defaultkeys.map(self.keymap)
46 self.options.errored.connect(self.options_error)
47
48 self.view_stack = []
49
50 signals.call_in.connect(self.sig_call_in)
51 self.addons.add(*addons.default_addons())
52 self.addons.add(
53 intercept.Intercept(),
54 self.view,
55 self.events,
56 consoleaddons.UnsupportedLog(),
57 readfile.ReadFile(),
58 consoleaddons.ConsoleAddon(self),
59 keymap.KeymapConfig(),
60 )
61
62 def sigint_handler(*args, **kwargs):
63 self.prompt_for_exit()
64
65 signal.signal(signal.SIGINT, sigint_handler)
66
67 self.window = None
68
69 def __setattr__(self, name, value):
70 super().__setattr__(name, value)
71 signals.update_settings.send(self)
72
73 def options_error(self, opts, exc):
74 signals.status_message.send(
75 message=str(exc),
76 expire=1
77 )
78
79 def prompt_for_exit(self):
80 signals.status_prompt_onekey.send(
81 self,
82 prompt = "Quit",
83 keys = (
84 ("yes", "y"),
85 ("no", "n"),
86 ),
87 callback = self.quit,
88 )
89
90 def sig_add_log(self, event_store, entry: log.LogEntry):
91 if log.log_tier(self.options.console_eventlog_verbosity) < log.log_tier(entry.level):
92 return
93 if entry.level in ("error", "warn", "alert"):
94 signals.status_message.send(
95 message = (
96 entry.level,
97 "{}: {}".format(entry.level.title(), str(entry.msg).lstrip())
98 ),
99 expire=5
100 )
101
102 def sig_call_in(self, sender, seconds, callback, args=()):
103 def cb(*_):
104 return callback(*args)
105 self.loop.set_alarm_in(seconds, cb)
106
107 @contextlib.contextmanager
108 def uistopped(self):
109 self.loop.stop()
110 try:
111 yield
112 finally:
113 self.loop.start()
114 self.loop.screen_size = None
115 self.loop.draw_screen()
116
117 def spawn_editor(self, data):
118 text = not isinstance(data, bytes)
119 fd, name = tempfile.mkstemp('', "mproxy", text=text)
120 with open(fd, "w" if text else "wb") as f:
121 f.write(data)
122 # if no EDITOR is set, assume 'vi'
123 c = os.environ.get("MITMPROXY_EDITOR") or os.environ.get("EDITOR") or "vi"
124 cmd = shlex.split(c)
125 cmd.append(name)
126 with self.uistopped():
127 try:
128 subprocess.call(cmd)
129 except:
130 signals.status_message.send(
131 message="Can't start editor: %s" % c
132 )
133 else:
134 with open(name, "r" if text else "rb") as f:
135 data = f.read()
136 os.unlink(name)
137 return data
138
139 def spawn_external_viewer(self, data, contenttype):
140 if contenttype:
141 contenttype = contenttype.split(";")[0]
142 ext = mimetypes.guess_extension(contenttype) or ""
143 else:
144 ext = ""
145 fd, name = tempfile.mkstemp(ext, "mproxy")
146 os.write(fd, data)
147 os.close(fd)
148
149 # read-only to remind the user that this is a view function
150 os.chmod(name, stat.S_IREAD)
151
152 cmd = None
153 shell = False
154
155 if contenttype:
156 c = mailcap.getcaps()
157 cmd, _ = mailcap.findmatch(c, contenttype, filename=name)
158 if cmd:
159 shell = True
160 if not cmd:
161 # hm which one should get priority?
162 c = os.environ.get("MITMPROXY_EDITOR") or os.environ.get("PAGER") or os.environ.get("EDITOR")
163 if not c:
164 c = "less"
165 cmd = shlex.split(c)
166 cmd.append(name)
167 with self.uistopped():
168 try:
169 subprocess.call(cmd, shell=shell)
170 except:
171 signals.status_message.send(
172 message="Can't start external viewer: %s" % " ".join(c)
173 )
174 os.unlink(name)
175
176 def set_palette(self, opts, updated):
177 self.ui.register_palette(
178 palettes.palettes[opts.console_palette].palette(
179 opts.console_palette_transparent
180 )
181 )
182 self.ui.clear()
183
184 def inject_key(self, key):
185 self.loop.process_input([key])
186
187 def run(self):
188 if not sys.stdout.isatty():
189 print("Error: mitmproxy's console interface requires a tty. "
190 "Please run mitmproxy in an interactive shell environment.", file=sys.stderr)
191 sys.exit(1)
192
193 self.ui = window.Screen()
194 self.ui.set_terminal_properties(256)
195 self.set_palette(self.options, None)
196 self.options.subscribe(
197 self.set_palette,
198 ["console_palette", "console_palette_transparent"]
199 )
200 self.loop = urwid.MainLoop(
201 urwid.SolidFill("x"),
202 event_loop=urwid.AsyncioEventLoop(loop=asyncio.get_event_loop()),
203 screen = self.ui,
204 handle_mouse = self.options.console_mouse,
205 )
206 self.window = window.Window(self)
207 self.loop.widget = self.window
208 self.window.refresh()
209
210 if self.start_err:
211 def display_err(*_):
212 self.sig_add_log(None, self.start_err)
213 self.start_err = None
214 self.loop.set_alarm_in(0.01, display_err)
215
216 super().run_loop(self.loop.run)
217
218 def overlay(self, widget, **kwargs):
219 self.window.set_overlay(widget, **kwargs)
220
221 def switch_view(self, name):
222 self.window.push(name)
223
224 def quit(self, a):
225 if a != "n":
226 self.shutdown()
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/tools/console/master.py b/mitmproxy/tools/console/master.py
--- a/mitmproxy/tools/console/master.py
+++ b/mitmproxy/tools/console/master.py
@@ -11,6 +11,7 @@
import tempfile
import typing # noqa
import contextlib
+import threading
import urwid
@@ -171,7 +172,9 @@
signals.status_message.send(
message="Can't start external viewer: %s" % " ".join(c)
)
- os.unlink(name)
+ # add a small delay before deletion so that the file is not removed before being loaded by the viewer
+ t = threading.Timer(1.0, os.unlink, args=[name])
+ t.start()
def set_palette(self, opts, updated):
self.ui.register_palette(
|
{"golden_diff": "diff --git a/mitmproxy/tools/console/master.py b/mitmproxy/tools/console/master.py\n--- a/mitmproxy/tools/console/master.py\n+++ b/mitmproxy/tools/console/master.py\n@@ -11,6 +11,7 @@\n import tempfile\n import typing # noqa\n import contextlib\n+import threading\n \n import urwid\n \n@@ -171,7 +172,9 @@\n signals.status_message.send(\n message=\"Can't start external viewer: %s\" % \" \".join(c)\n )\n- os.unlink(name)\n+ # add a small delay before deletion so that the file is not removed before being loaded by the viewer\n+ t = threading.Timer(1.0, os.unlink, args=[name])\n+ t.start()\n \n def set_palette(self, opts, updated):\n self.ui.register_palette(\n", "issue": "View response in Firefox gives file not found\n#### Problem Description\r\nHitting `v` on a flow opens up firefox pointing to a temp mitmproxy file but the file doesn't exist\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Capture a request \r\n2. Press v in the flow view of request and 2 for viewing response\r\n3. Firefox opens up to view the response html page but only displays file not found e.g. `Firefox can\u2019t find the file at /tmp/mproxynqb8uytn.html`. (Hitting `e` and editing the response works correctly and opens up the response as a tmp file in my editor)\r\n\r\n#### System Information\r\nMitmproxy: 5.1.1\r\nPython: 3.8.3\r\nOpenSSL: OpenSSL 1.1.1g 21 Apr 2020\r\nPlatform: Linux-5.6.19-2-MANJARO-x86_64-with-glibc2.2.5\r\n\n", "before_files": [{"content": "import asyncio\nimport mailcap\nimport mimetypes\nimport os\nimport os.path\nimport shlex\nimport signal\nimport stat\nimport subprocess\nimport sys\nimport tempfile\nimport typing # noqa\nimport contextlib\n\nimport urwid\n\nfrom mitmproxy import addons\nfrom mitmproxy import master\nfrom mitmproxy import log\nfrom mitmproxy.addons import intercept\nfrom mitmproxy.addons import eventstore\nfrom mitmproxy.addons import readfile\nfrom mitmproxy.addons import view\nfrom mitmproxy.tools.console import consoleaddons\nfrom mitmproxy.tools.console import defaultkeys\nfrom mitmproxy.tools.console import keymap\nfrom mitmproxy.tools.console import palettes\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console import window\n\n\nclass ConsoleMaster(master.Master):\n\n def __init__(self, opts):\n super().__init__(opts)\n\n self.start_err: typing.Optional[log.LogEntry] = None\n\n self.view: view.View = view.View()\n self.events = eventstore.EventStore()\n self.events.sig_add.connect(self.sig_add_log)\n\n self.stream_path = None\n self.keymap = keymap.Keymap(self)\n defaultkeys.map(self.keymap)\n self.options.errored.connect(self.options_error)\n\n self.view_stack = []\n\n signals.call_in.connect(self.sig_call_in)\n self.addons.add(*addons.default_addons())\n self.addons.add(\n intercept.Intercept(),\n self.view,\n self.events,\n consoleaddons.UnsupportedLog(),\n readfile.ReadFile(),\n consoleaddons.ConsoleAddon(self),\n keymap.KeymapConfig(),\n )\n\n def sigint_handler(*args, **kwargs):\n self.prompt_for_exit()\n\n signal.signal(signal.SIGINT, sigint_handler)\n\n self.window = None\n\n def __setattr__(self, name, value):\n super().__setattr__(name, value)\n signals.update_settings.send(self)\n\n def options_error(self, opts, exc):\n signals.status_message.send(\n message=str(exc),\n expire=1\n )\n\n def prompt_for_exit(self):\n signals.status_prompt_onekey.send(\n self,\n prompt = \"Quit\",\n keys = (\n (\"yes\", \"y\"),\n (\"no\", \"n\"),\n ),\n callback = self.quit,\n )\n\n def sig_add_log(self, event_store, entry: 
log.LogEntry):\n if log.log_tier(self.options.console_eventlog_verbosity) < log.log_tier(entry.level):\n return\n if entry.level in (\"error\", \"warn\", \"alert\"):\n signals.status_message.send(\n message = (\n entry.level,\n \"{}: {}\".format(entry.level.title(), str(entry.msg).lstrip())\n ),\n expire=5\n )\n\n def sig_call_in(self, sender, seconds, callback, args=()):\n def cb(*_):\n return callback(*args)\n self.loop.set_alarm_in(seconds, cb)\n\n @contextlib.contextmanager\n def uistopped(self):\n self.loop.stop()\n try:\n yield\n finally:\n self.loop.start()\n self.loop.screen_size = None\n self.loop.draw_screen()\n\n def spawn_editor(self, data):\n text = not isinstance(data, bytes)\n fd, name = tempfile.mkstemp('', \"mproxy\", text=text)\n with open(fd, \"w\" if text else \"wb\") as f:\n f.write(data)\n # if no EDITOR is set, assume 'vi'\n c = os.environ.get(\"MITMPROXY_EDITOR\") or os.environ.get(\"EDITOR\") or \"vi\"\n cmd = shlex.split(c)\n cmd.append(name)\n with self.uistopped():\n try:\n subprocess.call(cmd)\n except:\n signals.status_message.send(\n message=\"Can't start editor: %s\" % c\n )\n else:\n with open(name, \"r\" if text else \"rb\") as f:\n data = f.read()\n os.unlink(name)\n return data\n\n def spawn_external_viewer(self, data, contenttype):\n if contenttype:\n contenttype = contenttype.split(\";\")[0]\n ext = mimetypes.guess_extension(contenttype) or \"\"\n else:\n ext = \"\"\n fd, name = tempfile.mkstemp(ext, \"mproxy\")\n os.write(fd, data)\n os.close(fd)\n\n # read-only to remind the user that this is a view function\n os.chmod(name, stat.S_IREAD)\n\n cmd = None\n shell = False\n\n if contenttype:\n c = mailcap.getcaps()\n cmd, _ = mailcap.findmatch(c, contenttype, filename=name)\n if cmd:\n shell = True\n if not cmd:\n # hm which one should get priority?\n c = os.environ.get(\"MITMPROXY_EDITOR\") or os.environ.get(\"PAGER\") or os.environ.get(\"EDITOR\")\n if not c:\n c = \"less\"\n cmd = shlex.split(c)\n cmd.append(name)\n with self.uistopped():\n try:\n subprocess.call(cmd, shell=shell)\n except:\n signals.status_message.send(\n message=\"Can't start external viewer: %s\" % \" \".join(c)\n )\n os.unlink(name)\n\n def set_palette(self, opts, updated):\n self.ui.register_palette(\n palettes.palettes[opts.console_palette].palette(\n opts.console_palette_transparent\n )\n )\n self.ui.clear()\n\n def inject_key(self, key):\n self.loop.process_input([key])\n\n def run(self):\n if not sys.stdout.isatty():\n print(\"Error: mitmproxy's console interface requires a tty. 
\"\n \"Please run mitmproxy in an interactive shell environment.\", file=sys.stderr)\n sys.exit(1)\n\n self.ui = window.Screen()\n self.ui.set_terminal_properties(256)\n self.set_palette(self.options, None)\n self.options.subscribe(\n self.set_palette,\n [\"console_palette\", \"console_palette_transparent\"]\n )\n self.loop = urwid.MainLoop(\n urwid.SolidFill(\"x\"),\n event_loop=urwid.AsyncioEventLoop(loop=asyncio.get_event_loop()),\n screen = self.ui,\n handle_mouse = self.options.console_mouse,\n )\n self.window = window.Window(self)\n self.loop.widget = self.window\n self.window.refresh()\n\n if self.start_err:\n def display_err(*_):\n self.sig_add_log(None, self.start_err)\n self.start_err = None\n self.loop.set_alarm_in(0.01, display_err)\n\n super().run_loop(self.loop.run)\n\n def overlay(self, widget, **kwargs):\n self.window.set_overlay(widget, **kwargs)\n\n def switch_view(self, name):\n self.window.push(name)\n\n def quit(self, a):\n if a != \"n\":\n self.shutdown()\n", "path": "mitmproxy/tools/console/master.py"}], "after_files": [{"content": "import asyncio\nimport mailcap\nimport mimetypes\nimport os\nimport os.path\nimport shlex\nimport signal\nimport stat\nimport subprocess\nimport sys\nimport tempfile\nimport typing # noqa\nimport contextlib\nimport threading\n\nimport urwid\n\nfrom mitmproxy import addons\nfrom mitmproxy import master\nfrom mitmproxy import log\nfrom mitmproxy.addons import intercept\nfrom mitmproxy.addons import eventstore\nfrom mitmproxy.addons import readfile\nfrom mitmproxy.addons import view\nfrom mitmproxy.tools.console import consoleaddons\nfrom mitmproxy.tools.console import defaultkeys\nfrom mitmproxy.tools.console import keymap\nfrom mitmproxy.tools.console import palettes\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.tools.console import window\n\n\nclass ConsoleMaster(master.Master):\n\n def __init__(self, opts):\n super().__init__(opts)\n\n self.start_err: typing.Optional[log.LogEntry] = None\n\n self.view: view.View = view.View()\n self.events = eventstore.EventStore()\n self.events.sig_add.connect(self.sig_add_log)\n\n self.stream_path = None\n self.keymap = keymap.Keymap(self)\n defaultkeys.map(self.keymap)\n self.options.errored.connect(self.options_error)\n\n self.view_stack = []\n\n signals.call_in.connect(self.sig_call_in)\n self.addons.add(*addons.default_addons())\n self.addons.add(\n intercept.Intercept(),\n self.view,\n self.events,\n consoleaddons.UnsupportedLog(),\n readfile.ReadFile(),\n consoleaddons.ConsoleAddon(self),\n keymap.KeymapConfig(),\n )\n\n def sigint_handler(*args, **kwargs):\n self.prompt_for_exit()\n\n signal.signal(signal.SIGINT, sigint_handler)\n\n self.window = None\n\n def __setattr__(self, name, value):\n super().__setattr__(name, value)\n signals.update_settings.send(self)\n\n def options_error(self, opts, exc):\n signals.status_message.send(\n message=str(exc),\n expire=1\n )\n\n def prompt_for_exit(self):\n signals.status_prompt_onekey.send(\n self,\n prompt = \"Quit\",\n keys = (\n (\"yes\", \"y\"),\n (\"no\", \"n\"),\n ),\n callback = self.quit,\n )\n\n def sig_add_log(self, event_store, entry: log.LogEntry):\n if log.log_tier(self.options.console_eventlog_verbosity) < log.log_tier(entry.level):\n return\n if entry.level in (\"error\", \"warn\", \"alert\"):\n signals.status_message.send(\n message = (\n entry.level,\n \"{}: {}\".format(entry.level.title(), str(entry.msg).lstrip())\n ),\n expire=5\n )\n\n def sig_call_in(self, sender, seconds, callback, args=()):\n def cb(*_):\n return 
callback(*args)\n self.loop.set_alarm_in(seconds, cb)\n\n @contextlib.contextmanager\n def uistopped(self):\n self.loop.stop()\n try:\n yield\n finally:\n self.loop.start()\n self.loop.screen_size = None\n self.loop.draw_screen()\n\n def spawn_editor(self, data):\n text = not isinstance(data, bytes)\n fd, name = tempfile.mkstemp('', \"mproxy\", text=text)\n with open(fd, \"w\" if text else \"wb\") as f:\n f.write(data)\n # if no EDITOR is set, assume 'vi'\n c = os.environ.get(\"MITMPROXY_EDITOR\") or os.environ.get(\"EDITOR\") or \"vi\"\n cmd = shlex.split(c)\n cmd.append(name)\n with self.uistopped():\n try:\n subprocess.call(cmd)\n except:\n signals.status_message.send(\n message=\"Can't start editor: %s\" % c\n )\n else:\n with open(name, \"r\" if text else \"rb\") as f:\n data = f.read()\n os.unlink(name)\n return data\n\n def spawn_external_viewer(self, data, contenttype):\n if contenttype:\n contenttype = contenttype.split(\";\")[0]\n ext = mimetypes.guess_extension(contenttype) or \"\"\n else:\n ext = \"\"\n fd, name = tempfile.mkstemp(ext, \"mproxy\")\n os.write(fd, data)\n os.close(fd)\n\n # read-only to remind the user that this is a view function\n os.chmod(name, stat.S_IREAD)\n\n cmd = None\n shell = False\n\n if contenttype:\n c = mailcap.getcaps()\n cmd, _ = mailcap.findmatch(c, contenttype, filename=name)\n if cmd:\n shell = True\n if not cmd:\n # hm which one should get priority?\n c = os.environ.get(\"MITMPROXY_EDITOR\") or os.environ.get(\"PAGER\") or os.environ.get(\"EDITOR\")\n if not c:\n c = \"less\"\n cmd = shlex.split(c)\n cmd.append(name)\n with self.uistopped():\n try:\n subprocess.call(cmd, shell=shell)\n except:\n signals.status_message.send(\n message=\"Can't start external viewer: %s\" % \" \".join(c)\n )\n # add a small delay before deletion so that the file is not removed before being loaded by the viewer\n t = threading.Timer(1.0, os.unlink, args=[name])\n t.start()\n\n def set_palette(self, opts, updated):\n self.ui.register_palette(\n palettes.palettes[opts.console_palette].palette(\n opts.console_palette_transparent\n )\n )\n self.ui.clear()\n\n def inject_key(self, key):\n self.loop.process_input([key])\n\n def run(self):\n if not sys.stdout.isatty():\n print(\"Error: mitmproxy's console interface requires a tty. \"\n \"Please run mitmproxy in an interactive shell environment.\", file=sys.stderr)\n sys.exit(1)\n\n self.ui = window.Screen()\n self.ui.set_terminal_properties(256)\n self.set_palette(self.options, None)\n self.options.subscribe(\n self.set_palette,\n [\"console_palette\", \"console_palette_transparent\"]\n )\n self.loop = urwid.MainLoop(\n urwid.SolidFill(\"x\"),\n event_loop=urwid.AsyncioEventLoop(loop=asyncio.get_event_loop()),\n screen = self.ui,\n handle_mouse = self.options.console_mouse,\n )\n self.window = window.Window(self)\n self.loop.widget = self.window\n self.window.refresh()\n\n if self.start_err:\n def display_err(*_):\n self.sig_add_log(None, self.start_err)\n self.start_err = None\n self.loop.set_alarm_in(0.01, display_err)\n\n super().run_loop(self.loop.run)\n\n def overlay(self, widget, **kwargs):\n self.window.set_overlay(widget, **kwargs)\n\n def switch_view(self, name):\n self.window.push(name)\n\n def quit(self, a):\n if a != \"n\":\n self.shutdown()\n", "path": "mitmproxy/tools/console/master.py"}]}
| 2,540 | 182 |
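The mitmproxy change above stops deleting the temp file immediately after launching the external viewer and instead schedules the `os.unlink` on a one-second `threading.Timer`, giving the browser time to open the file. A small self-contained sketch of that delayed-cleanup pattern (generic code, not taken from mitmproxy):

```python
import os
import tempfile
import threading


def write_view_file(data: bytes, delay: float = 1.0) -> str:
    """Write data to a temp file and remove it after `delay` seconds."""
    fd, name = tempfile.mkstemp(suffix=".html")
    with os.fdopen(fd, "wb") as f:
        f.write(data)
    # Background timer: the viewer gets a window to load the file before it
    # is deleted; the non-daemon timer thread keeps the interpreter alive
    # until the unlink has run.
    t = threading.Timer(delay, os.unlink, args=[name])
    t.start()
    return name


path = write_view_file(b"<html><body>response preview</body></html>")
print("wrote", path)
```

The trade-off is the fixed delay: too short and a slow viewer still sees "file not found", too long and the temp file lingers; the upstream fix settles on one second.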
gh_patches_debug_28624
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-7500
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: '_OpNamespace' 'torchvision' object has no attribute 'roi_align'
Hi, I'm getting the following error while running the e2e tests in Torch-MLIR. I'm trying to get a small reproducible for the issue. Meanwhile, if someone is familiar with the error or knows what could be the probable reason behind this, it would be helpful to know. Some changes have been made here: https://github.com/pytorch/vision/commit/8f2e5c90ce0f55877eddb1f7fee8f8b48004849b, but I'm not sure how this relates to the below error.
```
Traceback (most recent call last):
38535 File "/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torch/_ops.py", line 705, in __getattr__
38536 op, overload_names = torch._C._jit_get_operation(qualified_op_name)
38537RuntimeError: No such operator torchvision::roi_align
38538
38539The above exception was the direct cause of the following exception:
38540
38541Traceback (most recent call last):
38542 File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
38543 return _run_code(code, main_globals, None,
38544 File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
38545 exec(code, run_globals)
38546 File "/main_checkout/torch-mlir/e2e_testing/main.py", line 34, in <module>
38547 register_all_tests()
38548 File "/main_checkout/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/__init__.py", line 18, in register_all_tests
38549 from . import vision_models
38550 File "/main_checkout/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/vision_models.py", line 7, in <module>
38551 import torchvision.models as models
38552 File "/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torchvision/__init__.py", line 6, in <module>
38553 from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
38554 File "/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torchvision/_meta_registrations.py", line 22, in <module>
38555 @register_meta(vision.roi_align.default)
38556 File "/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torch/_ops.py", line 709, in __getattr__
38557 raise AttributeError(
38558AttributeError: '_OpNamespace' 'torchvision' object has no attribute 'roi_align'
38559Error: Process completed with exit code 1.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/_meta_registrations.py`
Content:
```
1 import torch
2 import torch.library
3
4 # Ensure that torch.ops.torchvision is visible
5 import torchvision.extension # noqa: F401
6
7 from torch._prims_common import check
8
9 _meta_lib = torch.library.Library("torchvision", "IMPL", "Meta")
10
11 vision = torch.ops.torchvision
12
13
14 def register_meta(op):
15 def wrapper(fn):
16 _meta_lib.impl(op, fn)
17 return fn
18
19 return wrapper
20
21
22 @register_meta(vision.roi_align.default)
23 def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
24 check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
25 check(
26 input.dtype == rois.dtype,
27 lambda: (
28 "Expected tensor for input to have the same type as tensor for rois; "
29 f"but type {input.dtype} does not equal {rois.dtype}"
30 ),
31 )
32 num_rois = rois.size(0)
33 _, channels, height, width = input.size()
34 return input.new_empty((num_rois, channels, pooled_height, pooled_width))
35
36
37 @register_meta(vision._roi_align_backward.default)
38 def meta_roi_align_backward(
39 grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
40 ):
41 check(
42 grad.dtype == rois.dtype,
43 lambda: (
44 "Expected tensor for grad to have the same type as tensor for rois; "
45 f"but type {grad.dtype} does not equal {rois.dtype}"
46 ),
47 )
48 return grad.new_empty((batch_size, channels, height, width))
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/_meta_registrations.py b/torchvision/_meta_registrations.py
--- a/torchvision/_meta_registrations.py
+++ b/torchvision/_meta_registrations.py
@@ -1,3 +1,5 @@
+import functools
+
import torch
import torch.library
@@ -6,20 +8,22 @@
from torch._prims_common import check
-_meta_lib = torch.library.Library("torchvision", "IMPL", "Meta")
-vision = torch.ops.torchvision
[email protected]_cache(None)
+def get_meta_lib():
+ return torch.library.Library("torchvision", "IMPL", "Meta")
-def register_meta(op):
+def register_meta(op_name, overload_name="default"):
def wrapper(fn):
- _meta_lib.impl(op, fn)
+ if torchvision.extension._has_ops():
+ get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)
return fn
return wrapper
-@register_meta(vision.roi_align.default)
+@register_meta("roi_align")
def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
check(rois.size(1) == 5, lambda: "rois must have shape as Tensor[K, 5]")
check(
@@ -34,7 +38,7 @@
return input.new_empty((num_rois, channels, pooled_height, pooled_width))
-@register_meta(vision._roi_align_backward.default)
+@register_meta("_roi_align_backward")
def meta_roi_align_backward(
grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned
):
|
{"golden_diff": "diff --git a/torchvision/_meta_registrations.py b/torchvision/_meta_registrations.py\n--- a/torchvision/_meta_registrations.py\n+++ b/torchvision/_meta_registrations.py\n@@ -1,3 +1,5 @@\n+import functools\n+\n import torch\n import torch.library\n \n@@ -6,20 +8,22 @@\n \n from torch._prims_common import check\n \n-_meta_lib = torch.library.Library(\"torchvision\", \"IMPL\", \"Meta\")\n \n-vision = torch.ops.torchvision\[email protected]_cache(None)\n+def get_meta_lib():\n+ return torch.library.Library(\"torchvision\", \"IMPL\", \"Meta\")\n \n \n-def register_meta(op):\n+def register_meta(op_name, overload_name=\"default\"):\n def wrapper(fn):\n- _meta_lib.impl(op, fn)\n+ if torchvision.extension._has_ops():\n+ get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)\n return fn\n \n return wrapper\n \n \n-@register_meta(vision.roi_align.default)\n+@register_meta(\"roi_align\")\n def meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):\n check(rois.size(1) == 5, lambda: \"rois must have shape as Tensor[K, 5]\")\n check(\n@@ -34,7 +38,7 @@\n return input.new_empty((num_rois, channels, pooled_height, pooled_width))\n \n \n-@register_meta(vision._roi_align_backward.default)\n+@register_meta(\"_roi_align_backward\")\n def meta_roi_align_backward(\n grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned\n ):\n", "issue": "AttributeError: '_OpNamespace' 'torchvision' object has no attribute 'roi_align' \nHi, I'm getting the following error while running the e2e tests in Torch-MLIR. I'm trying to get a small reproducible for the issue. Meanwhile, if someone is familiar with the error or knows what could be the probable reason behind this, it would be helpful to know. Some changes have been made here: https://github.com/pytorch/vision/commit/8f2e5c90ce0f55877eddb1f7fee8f8b48004849b, but I'm not sure how this relates to the below error.\n\n```\nTraceback (most recent call last): \n\n38535 File \"/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torch/_ops.py\", line 705, in __getattr__ \n\n38536 op, overload_names = torch._C._jit_get_operation(qualified_op_name) \n\n38537RuntimeError: No such operator torchvision::roi_align \n\n38538 \n\n38539The above exception was the direct cause of the following exception: \n\n38540 \n\n38541Traceback (most recent call last): \n\n38542 File \"/usr/lib/python3.10/runpy.py\", line 196, in _run_module_as_main \n\n38543 return _run_code(code, main_globals, None, \n\n38544 File \"/usr/lib/python3.10/runpy.py\", line 86, in _run_code \n\n38545 exec(code, run_globals) \n\n38546 File \"/main_checkout/torch-mlir/e2e_testing/main.py\", line 34, in <module> \n\n38547 register_all_tests() \n\n38548 File \"/main_checkout/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/__init__.py\", line 18, in register_all_tests \n\n38549 from . 
import vision_models \n\n38550 File \"/main_checkout/torch-mlir/build/tools/torch-mlir/python_packages/torch_mlir/torch_mlir_e2e_test/test_suite/vision_models.py\", line 7, in <module> \n\n38551 import torchvision.models as models \n\n38552 File \"/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torchvision/__init__.py\", line 6, in <module> \n\n38553 from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils \n\n38554 File \"/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torchvision/_meta_registrations.py\", line 22, in <module> \n\n38555 @register_meta(vision.roi_align.default) \n\n38556 File \"/main_checkout/torch-mlir/docker_venv/lib/python3.10/site-packages/torch/_ops.py\", line 709, in __getattr__ \n\n38557 raise AttributeError( \n\n38558AttributeError: '_OpNamespace' 'torchvision' object has no attribute 'roi_align' \n\n38559Error: Process completed with exit code 1.\n```\n", "before_files": [{"content": "import torch\nimport torch.library\n\n# Ensure that torch.ops.torchvision is visible\nimport torchvision.extension # noqa: F401\n\nfrom torch._prims_common import check\n\n_meta_lib = torch.library.Library(\"torchvision\", \"IMPL\", \"Meta\")\n\nvision = torch.ops.torchvision\n\n\ndef register_meta(op):\n def wrapper(fn):\n _meta_lib.impl(op, fn)\n return fn\n\n return wrapper\n\n\n@register_meta(vision.roi_align.default)\ndef meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):\n check(rois.size(1) == 5, lambda: \"rois must have shape as Tensor[K, 5]\")\n check(\n input.dtype == rois.dtype,\n lambda: (\n \"Expected tensor for input to have the same type as tensor for rois; \"\n f\"but type {input.dtype} does not equal {rois.dtype}\"\n ),\n )\n num_rois = rois.size(0)\n _, channels, height, width = input.size()\n return input.new_empty((num_rois, channels, pooled_height, pooled_width))\n\n\n@register_meta(vision._roi_align_backward.default)\ndef meta_roi_align_backward(\n grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned\n):\n check(\n grad.dtype == rois.dtype,\n lambda: (\n \"Expected tensor for grad to have the same type as tensor for rois; \"\n f\"but type {grad.dtype} does not equal {rois.dtype}\"\n ),\n )\n return grad.new_empty((batch_size, channels, height, width))\n", "path": "torchvision/_meta_registrations.py"}], "after_files": [{"content": "import functools\n\nimport torch\nimport torch.library\n\n# Ensure that torch.ops.torchvision is visible\nimport torchvision.extension # noqa: F401\n\nfrom torch._prims_common import check\n\n\[email protected]_cache(None)\ndef get_meta_lib():\n return torch.library.Library(\"torchvision\", \"IMPL\", \"Meta\")\n\n\ndef register_meta(op_name, overload_name=\"default\"):\n def wrapper(fn):\n if torchvision.extension._has_ops():\n get_meta_lib().impl(getattr(getattr(torch.ops.torchvision, op_name), overload_name), fn)\n return fn\n\n return wrapper\n\n\n@register_meta(\"roi_align\")\ndef meta_roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):\n check(rois.size(1) == 5, lambda: \"rois must have shape as Tensor[K, 5]\")\n check(\n input.dtype == rois.dtype,\n lambda: (\n \"Expected tensor for input to have the same type as tensor for rois; \"\n f\"but type {input.dtype} does not equal {rois.dtype}\"\n ),\n )\n num_rois = rois.size(0)\n _, channels, height, width = input.size()\n return input.new_empty((num_rois, 
channels, pooled_height, pooled_width))\n\n\n@register_meta(\"_roi_align_backward\")\ndef meta_roi_align_backward(\n grad, rois, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width, sampling_ratio, aligned\n):\n check(\n grad.dtype == rois.dtype,\n lambda: (\n \"Expected tensor for grad to have the same type as tensor for rois; \"\n f\"but type {grad.dtype} does not equal {rois.dtype}\"\n ),\n )\n return grad.new_empty((batch_size, channels, height, width))\n", "path": "torchvision/_meta_registrations.py"}]}
| 1,488 | 389 |
gh_patches_debug_36095 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5902 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release SecureDrop 1.8.1
This is a tracking issue for the release of SecureDrop 1.8.1. This minor release will provide improvements to the restore functionality, and update OSSEC rules to support the transition to Ubuntu 20.04 Focal.
Tentatively scheduled as follows:
**String and feature freeze:** 2021-04-07
**Pre-release announcement:** 2021-04-08
**Release date:** 2021-04-14
**Release manager:** @kushaldas
**Deputy release manager:** @emkll
**Communications manager:** @eloquence / @emkll
_SecureDrop maintainers and testers:_ As you QA 1.8.1, please report back your testing results as comments on this ticket. File GitHub issues for any problems found, tag them "QA: Release", and associate them with the [1.8.1 milestone](https://github.com/freedomofpress/securedrop/milestone/71) for tracking (or ask a maintainer to do so).
Test debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey). An Ansible playbook testing the upgrade path is [here](https://gist.github.com/rmol/eb605e28e290e90f45347f3a2f1ad43e).
# [QA Matrix for 1.8.1](https://docs.google.com/spreadsheets/d/1JZZ6kozgfFSkMV2XWjajWoLDmABjnzUsmbMCOhUaAAg/edit?usp=sharing)
# [Test Plan for 1.8.1](https://github.com/freedomofpress/securedrop/wiki/1.8.1-Test-Plan)
# Prepare release candidate (1.8.1~rc1)
- [x] Cherry-pick changes to release/1.8.0 branch @zenmonkeykstop https://github.com/freedomofpress/securedrop/pull/5890
- [ ] Link to latest version of Tails, including release candidates, to test against during QA
- [x] Prepare 1.8.1~rc1 release changelog
- [x] Prepare 1.8.1~rc1 Tag https://github.com/freedomofpress/securedrop/tree/1.8.1-rc1
- [x] Build debs and put up `1.8.1~rc1` on test apt server: https://github.com/freedomofpress/securedrop-dev-packages-lfs/pull/102
- [x] Commit build logs to https://github.com/freedomofpress/build-logs/commit/639b9f5d2c0aea1b03415ee1724d1f602ac017ed
# Final release
- [x] Ensure builder in release branch is updated and/or update builder image
- [x] Push signed tag https://github.com/freedomofpress/securedrop/releases/tag/1.8.1
- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)
- [x] Build final Debian packages for 1.8.0 (and preserve build logs)
- [x] Commit package build logs to https://github.com/freedomofpress/build-logs/commit/03a90fa69bbb8256cbb034675b09ddd60e17f25c
- [x] Upload Debian packages to apt-qa server
- [x] Pre-Flight: Test that install and upgrade from 1.8.0 to 1.8.1 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs)
- [x] Flip apt QA server to prod status (merge to `main` in the LFS repo)
- [x] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo
- [x] Prepare release messaging
# Post release
- [x] Create GitHub release object https://github.com/freedomofpress/securedrop/releases/tag/1.8.0
- [x] Once release object is created, update versions in `securedrop-docs` and Wagtail
- [x] Build readthedocs and verify new docs show up on https://docs.securedrop.org
- [x] Publish announcements
- [ ] Merge changelog, https://github.com/freedomofpress/securedrop/pull/5899, and https://github.com/freedomofpress/securedrop/pull/5894/ back to `develop`
- [ ] Update upgrade testing boxes (might not be possible due to https://github.com/freedomofpress/securedrop/issues/5883)
- [x] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install_files/ansible-base/roles/restore/files/disable_v2.py`
Content:
```
1 #!/usr/bin/env python3
2 # To execute on prod:
3 # python3 disable_v2.py /etc/tor/torrc /etc/tor/torrc
4 # To execute for testing locally:
5 # python3 disable_v2.py /etc/tor/torrc /tmp/dumytorrc
6 import sys
7
8
9 def filter_v2(filename):
10 # Read the file
11 with open(filename) as f:
12 data = f.readlines()
13 # We will store the filtered lines to result
14 result = []
15
16 i = 0
17 while i < len(data):
18 line = data[i]
19 if line == "HiddenServiceDir /var/lib/tor/services/source\n":
20 i += 1
21 while data[i].strip() == "":
22 i += 1
23 line = data[i]
24 if line == "HiddenServiceVersion 2\n":
25 i += 1
26 line = data[i]
27 while data[i].strip() == "":
28 i += 1
29 line = data[i]
30 if line == "HiddenServicePort 80 127.0.0.1:80\n":
31 i += 1
32 continue
33 # Now check for journalist
34 if line == "HiddenServiceDir /var/lib/tor/services/journalist\n":
35 i += 1
36 while data[i].strip() == "":
37 i += 1
38 line = data[i]
39 if line == "HiddenServiceVersion 2\n":
40 i += 1
41 line = data[i]
42 while data[i].strip() == "":
43 i += 1
44 line = data[i]
45 if line == "HiddenServicePort 80 127.0.0.1:8080\n":
46 i += 1
47 line = data[i]
48 while data[i].strip() == "":
49 i += 1
50 line = data[i]
51 if line == "HiddenServiceAuthorizeClient stealth journalist\n":
52 i += 1
53 continue
54 # Now the v2 ssh access
55 if line == "HiddenServiceDir /var/lib/tor/services/ssh\n":
56 i += 1
57 while data[i].strip() == "":
58 i += 1
59 line = data[i]
60 if line == "HiddenServiceVersion 2\n":
61 i += 1
62 line = data[i]
63 while data[i].strip() == "":
64 i += 1
65 line = data[i]
66 if line == "HiddenServicePort 22 127.0.0.1:22\n":
67 i += 1
68 line = data[i]
69 while data[i].strip() == "":
70 i += 1
71 line = data[i]
72 if line == "HiddenServiceAuthorizeClient stealth admin\n":
73 i += 1
74 continue
75
76 result.append(line)
77 i += 1
78
79 # Now return the result
80 return result
81
82
83 if __name__ == "__main__":
84 filename = sys.argv[1]
85 outputfilename = sys.argv[2]
86 result = filter_v2(filename)
87 with open(outputfilename, "w") as fobj:
88 for line in result:
89 fobj.write(line)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/install_files/ansible-base/roles/restore/files/disable_v2.py b/install_files/ansible-base/roles/restore/files/disable_v2.py
deleted file mode 100644
--- a/install_files/ansible-base/roles/restore/files/disable_v2.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python3
-# To execute on prod:
-# python3 disable_v2.py /etc/tor/torrc /etc/tor/torrc
-# To execute for testing locally:
-# python3 disable_v2.py /etc/tor/torrc /tmp/dumytorrc
-import sys
-
-
-def filter_v2(filename):
- # Read the file
- with open(filename) as f:
- data = f.readlines()
- # We will store the filtered lines to result
- result = []
-
- i = 0
- while i < len(data):
- line = data[i]
- if line == "HiddenServiceDir /var/lib/tor/services/source\n":
- i += 1
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServiceVersion 2\n":
- i += 1
- line = data[i]
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServicePort 80 127.0.0.1:80\n":
- i += 1
- continue
- # Now check for journalist
- if line == "HiddenServiceDir /var/lib/tor/services/journalist\n":
- i += 1
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServiceVersion 2\n":
- i += 1
- line = data[i]
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServicePort 80 127.0.0.1:8080\n":
- i += 1
- line = data[i]
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServiceAuthorizeClient stealth journalist\n":
- i += 1
- continue
- # Now the v2 ssh access
- if line == "HiddenServiceDir /var/lib/tor/services/ssh\n":
- i += 1
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServiceVersion 2\n":
- i += 1
- line = data[i]
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServicePort 22 127.0.0.1:22\n":
- i += 1
- line = data[i]
- while data[i].strip() == "":
- i += 1
- line = data[i]
- if line == "HiddenServiceAuthorizeClient stealth admin\n":
- i += 1
- continue
-
- result.append(line)
- i += 1
-
- # Now return the result
- return result
-
-
-if __name__ == "__main__":
- filename = sys.argv[1]
- outputfilename = sys.argv[2]
- result = filter_v2(filename)
- with open(outputfilename, "w") as fobj:
- for line in result:
- fobj.write(line)
|
{"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/disable_v2.py b/install_files/ansible-base/roles/restore/files/disable_v2.py\ndeleted file mode 100644\n--- a/install_files/ansible-base/roles/restore/files/disable_v2.py\n+++ /dev/null\n@@ -1,89 +0,0 @@\n-#!/usr/bin/env python3\n-# To execute on prod:\n-# python3 disable_v2.py /etc/tor/torrc /etc/tor/torrc\n-# To execute for testing locally:\n-# python3 disable_v2.py /etc/tor/torrc /tmp/dumytorrc\n-import sys\n-\n-\n-def filter_v2(filename):\n- # Read the file\n- with open(filename) as f:\n- data = f.readlines()\n- # We will store the filtered lines to result\n- result = []\n-\n- i = 0\n- while i < len(data):\n- line = data[i]\n- if line == \"HiddenServiceDir /var/lib/tor/services/source\\n\":\n- i += 1\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServiceVersion 2\\n\":\n- i += 1\n- line = data[i]\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServicePort 80 127.0.0.1:80\\n\":\n- i += 1\n- continue\n- # Now check for journalist\n- if line == \"HiddenServiceDir /var/lib/tor/services/journalist\\n\":\n- i += 1\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServiceVersion 2\\n\":\n- i += 1\n- line = data[i]\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServicePort 80 127.0.0.1:8080\\n\":\n- i += 1\n- line = data[i]\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServiceAuthorizeClient stealth journalist\\n\":\n- i += 1\n- continue\n- # Now the v2 ssh access\n- if line == \"HiddenServiceDir /var/lib/tor/services/ssh\\n\":\n- i += 1\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServiceVersion 2\\n\":\n- i += 1\n- line = data[i]\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServicePort 22 127.0.0.1:22\\n\":\n- i += 1\n- line = data[i]\n- while data[i].strip() == \"\":\n- i += 1\n- line = data[i]\n- if line == \"HiddenServiceAuthorizeClient stealth admin\\n\":\n- i += 1\n- continue\n-\n- result.append(line)\n- i += 1\n-\n- # Now return the result\n- return result\n-\n-\n-if __name__ == \"__main__\":\n- filename = sys.argv[1]\n- outputfilename = sys.argv[2]\n- result = filter_v2(filename)\n- with open(outputfilename, \"w\") as fobj:\n- for line in result:\n- fobj.write(line)\n", "issue": "Release SecureDrop 1.8.1\nThis is a tracking issue for the release of SecureDrop 1.8.1. This minor release will provide improvements to the restore functionality, and update OSSEC rules to support the transition to Ubuntu 20.04 Focal.\r\n\r\nTentatively scheduled as follows:\r\n\r\n**String and feature freeze:** 2021-04-07\r\n**Pre-release announcement:** 2021-04-08\r\n**Release date:** 2021-04-14\r\n\r\n**Release manager:** @kushaldas \r\n**Deputy release manager:** @emkll \r\n**Communications manager:**: @eloquence / @emkll \r\n\r\n_SecureDrop maintainers and testers:_ As you QA 1.8.1, please report back your testing results as comments on this ticket. 
File GitHub issues for any problems found, tag them \"QA: Release\", and associate them with the [1.8.1 milestone](https://github.com/freedomofpress/securedrop/milestone/71) for tracking (or ask a maintainer to do so).\r\n\r\nTest debian packages will be posted on https://apt-test.freedom.press signed with [the test key](https://gist.githubusercontent.com/conorsch/ec4008b111bc3142fca522693f3cce7e/raw/2968621e8ad92db4505a31fcc5776422d7d26729/apt-test%2520apt%2520pubkey). An Ansible playbook testing the upgrade path is [here](https://gist.github.com/rmol/eb605e28e290e90f45347f3a2f1ad43e).\r\n\r\n# [QA Matrix for 1.8.1](https://docs.google.com/spreadsheets/d/1JZZ6kozgfFSkMV2XWjajWoLDmABjnzUsmbMCOhUaAAg/edit?usp=sharing)\r\n# [Test Plan for 1.8.1](https://github.com/freedomofpress/securedrop/wiki/1.8.1-Test-Plan)\r\n\r\n\r\n# Prepare release candidate (1.8.1~rc1)\r\n- [x] Cherry-pick changes to release/1.8.0 branch @zenmonkeykstop https://github.com/freedomofpress/securedrop/pull/5890\r\n- [ ] Link to latest version of Tails, including release candidates, to test against during QA\r\n- [x] Prepare 1.8.1~rc1 release changelog\r\n- [x] Prepare 1.8.1~rc1 Tag https://github.com/freedomofpress/securedrop/tree/1.8.1-rc1\r\n- [x] Build debs and put up `1.8.1~rc1` on test apt server: https://github.com/freedomofpress/securedrop-dev-packages-lfs/pull/102\r\n- [x] Commit build logs to https://github.com/freedomofpress/build-logs/commit/639b9f5d2c0aea1b03415ee1724d1f602ac017ed\r\n\r\n# Final release\r\n- [x] Ensure builder in release branch is updated and/or update builder image\r\n- [x] Push signed tag https://github.com/freedomofpress/securedrop/releases/tag/1.8.1\r\n- [x] Pre-Flight: Test updater logic in Tails (apt-qa tracks the `release` branch in the LFS repo)\r\n- [x] Build final Debian packages for 1.8.0 (and preserve build logs)\r\n- [x] Commit package build logs to https://github.com/freedomofpress/build-logs/commit/03a90fa69bbb8256cbb034675b09ddd60e17f25c\r\n- [x] Upload Debian packages to apt-qa server \r\n- [x] Pre-Flight: Test that install and upgrade from 1.8.0 to 1.8.1 works w/ prod repo debs (apt-qa.freedom.press polls the `release` branch in the LFS repo for the debs)\r\n- [x] Flip apt QA server to prod status (merge to `main` in the LFS repo)\r\n- [x] Merge Docs branch changes to ``main`` and verify new docs build in securedrop-docs repo\r\n- [x] Prepare release messaging\r\n\r\n# Post release\r\n- [x] Create GitHub release object https://github.com/freedomofpress/securedrop/releases/tag/1.8.0\r\n- [x] Once release object is created, update versions in `securedrop-docs` and Wagtail\r\n- [x] Build readthedocs and verify new docs show up on https://docs.securedrop.org\r\n- [x] Publish announcements\r\n- [ ] Merge changelog, https://github.com/freedomofpress/securedrop/pull/5899, and https://github.com/freedomofpress/securedrop/pull/5894/ back to `develop`\r\n- [ ] Update upgrade testing boxes (might not be possible due to https://github.com/freedomofpress/securedrop/issues/5883)\r\n- [x] Update roadmap wiki page: https://github.com/freedomofpress/securedrop/wiki/Development-Roadmap\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# To execute on prod:\n# python3 disable_v2.py /etc/tor/torrc /etc/tor/torrc\n# To execute for testing locally:\n# python3 disable_v2.py /etc/tor/torrc /tmp/dumytorrc\nimport sys\n\n\ndef filter_v2(filename):\n # Read the file\n with open(filename) as f:\n data = f.readlines()\n # We will store the filtered lines to result\n result = []\n\n i = 0\n 
while i < len(data):\n line = data[i]\n if line == \"HiddenServiceDir /var/lib/tor/services/source\\n\":\n i += 1\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServiceVersion 2\\n\":\n i += 1\n line = data[i]\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServicePort 80 127.0.0.1:80\\n\":\n i += 1\n continue\n # Now check for journalist\n if line == \"HiddenServiceDir /var/lib/tor/services/journalist\\n\":\n i += 1\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServiceVersion 2\\n\":\n i += 1\n line = data[i]\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServicePort 80 127.0.0.1:8080\\n\":\n i += 1\n line = data[i]\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServiceAuthorizeClient stealth journalist\\n\":\n i += 1\n continue\n # Now the v2 ssh access\n if line == \"HiddenServiceDir /var/lib/tor/services/ssh\\n\":\n i += 1\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServiceVersion 2\\n\":\n i += 1\n line = data[i]\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServicePort 22 127.0.0.1:22\\n\":\n i += 1\n line = data[i]\n while data[i].strip() == \"\":\n i += 1\n line = data[i]\n if line == \"HiddenServiceAuthorizeClient stealth admin\\n\":\n i += 1\n continue\n\n result.append(line)\n i += 1\n\n # Now return the result\n return result\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n outputfilename = sys.argv[2]\n result = filter_v2(filename)\n with open(outputfilename, \"w\") as fobj:\n for line in result:\n fobj.write(line)\n", "path": "install_files/ansible-base/roles/restore/files/disable_v2.py"}], "after_files": [{"content": null, "path": "install_files/ansible-base/roles/restore/files/disable_v2.py"}]}
| 2,361 | 856 |
gh_patches_debug_24760 | rasdani/github-patches | git_diff | urllib3__urllib3-2884 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `OSError: [Errno 98] Address already in use` in `bind_sockets` call in CI
We often get the following traceback in GitHub Actions:
```
==================================== ERRORS ====================================
______________ ERROR at setup of TestRetryAfter.test_retry_after _______________
Traceback (most recent call last):
File "/home/runner/work/urllib3/urllib3/dummyserver/testcase.py", line 141, in setup_class
cls._start_server()
File "/home/runner/work/urllib3/urllib3/dummyserver/testcase.py", line 129, in _start_server
app, cls.io_loop, cls.certs, cls.scheme, cls.host
File "/home/runner/work/urllib3/urllib3/dummyserver/server.py", line 182, in run_tornado_app
sockets = tornado.netutil.bind_sockets(None, address=host)
File "/home/runner/work/urllib3/urllib3/.nox/test-3-7/lib/python3.7/site-packages/tornado/netutil.py", line 161, in bind_sockets
sock.bind(sockaddr)
OSError: [Errno 98] Address already in use
```
I think we hit https://github.com/tornadoweb/tornado/issues/1860. When we request a socket with host `localhost` and port zero (`None` in Python), then Tornado gets a free IPv4 port and requests that same port in IPv6. But that port could easily be taken with IPv6, especially in crowded CI environments.
From Tornado's maintainer in the issue above:
> Yeah, putting the whole of bind_sockets in a retry loop seems reasonable, but it's a bit of a special case (we'd only want to retry when port 0 is requested). I'd accept a PR to do that, but my advice would be that whenever you use port 0 you bind to a specific IP address (i.e. either 127.0.0.1 or ::1) instead of a hostname. That will limit you to a single network interface too and avoid this problem.
The problem with requiring 127.0.0.1 is that we have tests where we use both `localhost` and `127.0.0.1` in order to get two hosts.
So I'd be in favor of putting the `bind_sockets` in a retry loop.
--- END ISSUE ---
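A minimal sketch of the retry approach suggested in the issue, assuming only `tornado.netutil.bind_sockets` and the standard `errno` module; the wrapper name and the attempt count are illustrative choices, not part of the project's API:

```python
import errno
import sys

import tornado.netutil


def bind_sockets_with_retry(host, attempts=10):
    """Bind an ephemeral port, retrying when the IPv6 bind races with IPv4."""
    for _ in range(attempts):
        try:
            # Port None (0) lets the OS pick a free IPv4 port; Tornado then
            # requests the same port number on IPv6, which may already be taken.
            return tornado.netutil.bind_sockets(None, address=host)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            print(f"Retrying bind_sockets({host}) after EADDRINUSE", file=sys.stderr)
    raise OSError(errno.EADDRINUSE, f"could not bind a free port on {host}")
```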
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dummyserver/server.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Dummy server used for unit testing.
5 """
6
7 from __future__ import annotations
8
9 import asyncio
10 import concurrent.futures
11 import contextlib
12 import logging
13 import os
14 import socket
15 import ssl
16 import sys
17 import threading
18 import typing
19 import warnings
20 from collections.abc import Coroutine, Generator
21 from datetime import datetime
22
23 import tornado.httpserver
24 import tornado.ioloop
25 import tornado.netutil
26 import tornado.web
27 import trustme
28 from cryptography.hazmat.backends import default_backend
29 from cryptography.hazmat.primitives import serialization
30
31 from urllib3.exceptions import HTTPWarning
32 from urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version
33
34 if typing.TYPE_CHECKING:
35 from typing_extensions import ParamSpec
36
37 P = ParamSpec("P")
38
39 log = logging.getLogger(__name__)
40
41 CERTS_PATH = os.path.join(os.path.dirname(__file__), "certs")
42 DEFAULT_CERTS: dict[str, typing.Any] = {
43 "certfile": os.path.join(CERTS_PATH, "server.crt"),
44 "keyfile": os.path.join(CERTS_PATH, "server.key"),
45 "cert_reqs": ssl.CERT_OPTIONAL,
46 "ca_certs": os.path.join(CERTS_PATH, "cacert.pem"),
47 "alpn_protocols": ALPN_PROTOCOLS,
48 }
49 DEFAULT_CA = os.path.join(CERTS_PATH, "cacert.pem")
50 DEFAULT_CA_KEY = os.path.join(CERTS_PATH, "cacert.key")
51
52
53 def _resolves_to_ipv6(host: str) -> bool:
54 """Returns True if the system resolves host to an IPv6 address by default."""
55 resolves_to_ipv6 = False
56 try:
57 for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):
58 af, _, _, _, _ = res
59 if af == socket.AF_INET6:
60 resolves_to_ipv6 = True
61 except socket.gaierror:
62 pass
63
64 return resolves_to_ipv6
65
66
67 def _has_ipv6(host: str) -> bool:
68 """Returns True if the system can bind an IPv6 address."""
69 sock = None
70 has_ipv6 = False
71
72 if socket.has_ipv6:
73 # has_ipv6 returns true if cPython was compiled with IPv6 support.
74 # It does not tell us if the system has IPv6 support enabled. To
75 # determine that we must bind to an IPv6 address.
76 # https://github.com/urllib3/urllib3/pull/611
77 # https://bugs.python.org/issue658327
78 try:
79 sock = socket.socket(socket.AF_INET6)
80 sock.bind((host, 0))
81 has_ipv6 = _resolves_to_ipv6("localhost")
82 except Exception:
83 pass
84
85 if sock:
86 sock.close()
87 return has_ipv6
88
89
90 # Some systems may have IPv6 support but DNS may not be configured
91 # properly. We can not count that localhost will resolve to ::1 on all
92 # systems. See https://github.com/urllib3/urllib3/pull/611 and
93 # https://bugs.python.org/issue18792
94 HAS_IPV6_AND_DNS = _has_ipv6("localhost")
95 HAS_IPV6 = _has_ipv6("::1")
96
97
98 # Different types of servers we have:
99
100
101 class NoIPv6Warning(HTTPWarning):
102 "IPv6 is not available"
103
104
105 class SocketServerThread(threading.Thread):
106 """
107 :param socket_handler: Callable which receives a socket argument for one
108 request.
109 :param ready_event: Event which gets set when the socket handler is
110 ready to receive requests.
111 """
112
113 USE_IPV6 = HAS_IPV6_AND_DNS
114
115 def __init__(
116 self,
117 socket_handler: typing.Callable[[socket.socket], None],
118 host: str = "localhost",
119 ready_event: threading.Event | None = None,
120 ) -> None:
121 super().__init__()
122 self.daemon = True
123
124 self.socket_handler = socket_handler
125 self.host = host
126 self.ready_event = ready_event
127
128 def _start_server(self) -> None:
129 if self.USE_IPV6:
130 sock = socket.socket(socket.AF_INET6)
131 else:
132 warnings.warn("No IPv6 support. Falling back to IPv4.", NoIPv6Warning)
133 sock = socket.socket(socket.AF_INET)
134 if sys.platform != "win32":
135 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
136 sock.bind((self.host, 0))
137 self.port = sock.getsockname()[1]
138
139 # Once listen() returns, the server socket is ready
140 sock.listen(1)
141
142 if self.ready_event:
143 self.ready_event.set()
144
145 self.socket_handler(sock)
146 sock.close()
147
148 def run(self) -> None:
149 self._start_server()
150
151
152 def ssl_options_to_context( # type: ignore[no-untyped-def]
153 keyfile=None,
154 certfile=None,
155 server_side=None,
156 cert_reqs=None,
157 ssl_version: str | int | None = None,
158 ca_certs=None,
159 do_handshake_on_connect=None,
160 suppress_ragged_eofs=None,
161 ciphers=None,
162 alpn_protocols=None,
163 ) -> ssl.SSLContext:
164 """Return an equivalent SSLContext based on ssl.wrap_socket args."""
165 ssl_version = resolve_ssl_version(ssl_version)
166 cert_none = resolve_cert_reqs("CERT_NONE")
167 if cert_reqs is None:
168 cert_reqs = cert_none
169 else:
170 cert_reqs = resolve_cert_reqs(cert_reqs)
171
172 ctx = ssl.SSLContext(ssl_version)
173 ctx.load_cert_chain(certfile, keyfile)
174 ctx.verify_mode = cert_reqs
175 if ctx.verify_mode != cert_none:
176 ctx.load_verify_locations(cafile=ca_certs)
177 if alpn_protocols and hasattr(ctx, "set_alpn_protocols"):
178 try:
179 ctx.set_alpn_protocols(alpn_protocols)
180 except NotImplementedError:
181 pass
182 return ctx
183
184
185 def run_tornado_app(
186 app: tornado.web.Application,
187 certs: dict[str, typing.Any] | None,
188 scheme: str,
189 host: str,
190 ) -> tuple[tornado.httpserver.HTTPServer, int]:
191 # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll
192 # just construct the datetime object directly.
193 app.last_req = datetime(1970, 1, 1) # type: ignore[attr-defined]
194
195 if scheme == "https":
196 assert certs is not None
197 ssl_opts = ssl_options_to_context(**certs)
198 http_server = tornado.httpserver.HTTPServer(app, ssl_options=ssl_opts)
199 else:
200 http_server = tornado.httpserver.HTTPServer(app)
201
202 sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]
203 port = sockets[0].getsockname()[1]
204 http_server.add_sockets(sockets)
205 return http_server, port
206
207
208 def get_unreachable_address() -> tuple[str, int]:
209 # reserved as per rfc2606
210 return ("something.invalid", 54321)
211
212
213 def encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:
214 private_key = serialization.load_pem_private_key(
215 private_key_pem.bytes(), password=None, backend=default_backend()
216 )
217 encrypted_key = private_key.private_bytes(
218 serialization.Encoding.PEM,
219 serialization.PrivateFormat.TraditionalOpenSSL,
220 serialization.BestAvailableEncryption(password),
221 )
222 return trustme.Blob(encrypted_key)
223
224
225 R = typing.TypeVar("R")
226
227
228 def _run_and_close_tornado(
229 async_fn: typing.Callable[P, Coroutine[typing.Any, typing.Any, R]],
230 *args: P.args,
231 **kwargs: P.kwargs,
232 ) -> R:
233 tornado_loop = None
234
235 async def inner_fn() -> R:
236 nonlocal tornado_loop
237 tornado_loop = tornado.ioloop.IOLoop.current()
238 return await async_fn(*args, **kwargs)
239
240 try:
241 return asyncio.run(inner_fn())
242 finally:
243 tornado_loop.close(all_fds=True) # type: ignore[union-attr]
244
245
246 @contextlib.contextmanager
247 def run_loop_in_thread() -> Generator[tornado.ioloop.IOLoop, None, None]:
248 loop_started: concurrent.futures.Future[
249 tuple[tornado.ioloop.IOLoop, asyncio.Event]
250 ] = concurrent.futures.Future()
251 with concurrent.futures.ThreadPoolExecutor(
252 1, thread_name_prefix="test IOLoop"
253 ) as tpe:
254
255 async def run() -> None:
256 io_loop = tornado.ioloop.IOLoop.current()
257 stop_event = asyncio.Event()
258 loop_started.set_result((io_loop, stop_event))
259 await stop_event.wait()
260
261 # run asyncio.run in a thread and collect exceptions from *either*
262 # the loop failing to start, or failing to close
263 ran = tpe.submit(_run_and_close_tornado, run) # type: ignore[arg-type]
264 for f in concurrent.futures.as_completed((loop_started, ran)): # type: ignore[misc]
265 if f is loop_started:
266 io_loop, stop_event = loop_started.result()
267 try:
268 yield io_loop
269 finally:
270 io_loop.add_callback(stop_event.set)
271
272 elif f is ran:
273 # if this is the first iteration the loop failed to start
274 # if it's the second iteration the loop has finished or
275 # the loop failed to close and we need to raise the exception
276 ran.result()
277 return
278
279
280 def main() -> int:
281 # For debugging dummyserver itself - python -m dummyserver.server
282 from .handlers import TestingApp
283
284 host = "127.0.0.1"
285
286 async def amain() -> int:
287 app = tornado.web.Application([(r".*", TestingApp)])
288 server, port = run_tornado_app(app, None, "http", host)
289
290 print(f"Listening on http://{host}:{port}")
291 await asyncio.Event().wait()
292 return 0
293
294 return asyncio.run(amain())
295
296
297 if __name__ == "__main__":
298 sys.exit(main())
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dummyserver/server.py b/dummyserver/server.py
--- a/dummyserver/server.py
+++ b/dummyserver/server.py
@@ -9,6 +9,7 @@
import asyncio
import concurrent.futures
import contextlib
+import errno
import logging
import os
import socket
@@ -199,7 +200,24 @@
else:
http_server = tornado.httpserver.HTTPServer(app)
- sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]
+ # When we request a socket with host localhost and port zero (None in Python), then
+ # Tornado gets a free IPv4 port and requests that same port in IPv6. But that port
+ # could easily be taken with IPv6, especially in crowded CI environments. For this
+ # reason we put bind_sockets in a retry loop. Full details:
+ # * https://github.com/urllib3/urllib3/issues/2171
+ # * https://github.com/tornadoweb/tornado/issues/1860
+ for i in range(10):
+ try:
+ sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]
+ except OSError as e:
+ if e.errno == errno.EADDRINUSE:
+ # TODO this should be a warning if there's a way for pytest to print it
+ print(
+ f"Retrying bind_sockets({host}) after EADDRINUSE", file=sys.stderr
+ )
+ continue
+ break
+
port = sockets[0].getsockname()[1]
http_server.add_sockets(sockets)
return http_server, port
|
{"golden_diff": "diff --git a/dummyserver/server.py b/dummyserver/server.py\n--- a/dummyserver/server.py\n+++ b/dummyserver/server.py\n@@ -9,6 +9,7 @@\n import asyncio\n import concurrent.futures\n import contextlib\n+import errno\n import logging\n import os\n import socket\n@@ -199,7 +200,24 @@\n else:\n http_server = tornado.httpserver.HTTPServer(app)\n \n- sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]\n+ # When we request a socket with host localhost and port zero (None in Python), then\n+ # Tornado gets a free IPv4 port and requests that same port in IPv6. But that port\n+ # could easily be taken with IPv6, especially in crowded CI environments. For this\n+ # reason we put bind_sockets in a retry loop. Full details:\n+ # * https://github.com/urllib3/urllib3/issues/2171\n+ # * https://github.com/tornadoweb/tornado/issues/1860\n+ for i in range(10):\n+ try:\n+ sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]\n+ except OSError as e:\n+ if e.errno == errno.EADDRINUSE:\n+ # TODO this should be a warning if there's a way for pytest to print it\n+ print(\n+ f\"Retrying bind_sockets({host}) after EADDRINUSE\", file=sys.stderr\n+ )\n+ continue\n+ break\n+\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n", "issue": "Fix `OSError: [Errno 98] Address already in use` in `bind_sockets` call in CI\nWe often get the following traceback in GitHub Actions:\r\n\r\n```\r\n==================================== ERRORS ====================================\r\n______________ ERROR at setup of TestRetryAfter.test_retry_after _______________\r\nTraceback (most recent call last):\r\n File \"/home/runner/work/urllib3/urllib3/dummyserver/testcase.py\", line 141, in setup_class\r\n cls._start_server()\r\n File \"/home/runner/work/urllib3/urllib3/dummyserver/testcase.py\", line 129, in _start_server\r\n app, cls.io_loop, cls.certs, cls.scheme, cls.host\r\n File \"/home/runner/work/urllib3/urllib3/dummyserver/server.py\", line 182, in run_tornado_app\r\n sockets = tornado.netutil.bind_sockets(None, address=host)\r\n File \"/home/runner/work/urllib3/urllib3/.nox/test-3-7/lib/python3.7/site-packages/tornado/netutil.py\", line 161, in bind_sockets\r\n sock.bind(sockaddr)\r\nOSError: [Errno 98] Address already in use\r\n```\r\n\r\nI think we hit https://github.com/tornadoweb/tornado/issues/1860. When we request a socket with host `localhost` and port zero (`None` in Python), then Tornado gets a free IPv4 port and requests that same port in IPv6. But that port could easily be taken with IPv6, especially in crowded CI environments.\r\n\r\nFrom Tornado's maintainer in issue above:\r\n\r\n> Yeah, putting the whole of bind_sockets in a retry loop seems reasonable, but it's a bit of a special case (we'd only want to retry when port 0 is requested). I'd accept a PR to do that, but my advice would be that whenever you use port 0 you bind to a specific IP address (i.e. either 127.0.0.1 or ::1) instead of a hostname. 
That will limit you to a single network interface too and avoid this problem.\r\n\r\nThe problem with requiring 127.0.0.1 is that we have tests where we use both `localhost` and `127.0.0.1` in order to get two hosts.\r\n\r\nSo I'd be in favor of putting the `bind_sockets` in a retry loop.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport concurrent.futures\nimport contextlib\nimport logging\nimport os\nimport socket\nimport ssl\nimport sys\nimport threading\nimport typing\nimport warnings\nfrom collections.abc import Coroutine, Generator\nfrom datetime import datetime\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.netutil\nimport tornado.web\nimport trustme\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\n\nfrom urllib3.exceptions import HTTPWarning\nfrom urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version\n\nif typing.TYPE_CHECKING:\n from typing_extensions import ParamSpec\n\n P = ParamSpec(\"P\")\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS: dict[str, typing.Any] = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n \"alpn_protocols\": ALPN_PROTOCOLS,\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _resolves_to_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system resolves host to an IPv6 address by default.\"\"\"\n resolves_to_ipv6 = False\n try:\n for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n af, _, _, _, _ = res\n if af == socket.AF_INET6:\n resolves_to_ipv6 = True\n except socket.gaierror:\n pass\n\n return resolves_to_ipv6\n\n\ndef _has_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system can bind an IPv6 address.\"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. 
See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(\n self,\n socket_handler: typing.Callable[[socket.socket], None],\n host: str = \"localhost\",\n ready_event: threading.Event | None = None,\n ) -> None:\n super().__init__()\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self) -> None:\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n sock.close()\n\n def run(self) -> None:\n self._start_server()\n\n\ndef ssl_options_to_context( # type: ignore[no-untyped-def]\n keyfile=None,\n certfile=None,\n server_side=None,\n cert_reqs=None,\n ssl_version: str | int | None = None,\n ca_certs=None,\n do_handshake_on_connect=None,\n suppress_ragged_eofs=None,\n ciphers=None,\n alpn_protocols=None,\n) -> ssl.SSLContext:\n \"\"\"Return an equivalent SSLContext based on ssl.wrap_socket args.\"\"\"\n ssl_version = resolve_ssl_version(ssl_version)\n cert_none = resolve_cert_reqs(\"CERT_NONE\")\n if cert_reqs is None:\n cert_reqs = cert_none\n else:\n cert_reqs = resolve_cert_reqs(cert_reqs)\n\n ctx = ssl.SSLContext(ssl_version)\n ctx.load_cert_chain(certfile, keyfile)\n ctx.verify_mode = cert_reqs\n if ctx.verify_mode != cert_none:\n ctx.load_verify_locations(cafile=ca_certs)\n if alpn_protocols and hasattr(ctx, \"set_alpn_protocols\"):\n try:\n ctx.set_alpn_protocols(alpn_protocols)\n except NotImplementedError:\n pass\n return ctx\n\n\ndef run_tornado_app(\n app: tornado.web.Application,\n certs: dict[str, typing.Any] | None,\n scheme: str,\n host: str,\n) -> tuple[tornado.httpserver.HTTPServer, int]:\n # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll\n # just construct the datetime object directly.\n app.last_req = datetime(1970, 1, 1) # type: ignore[attr-defined]\n\n if scheme == \"https\":\n assert certs is not None\n ssl_opts = ssl_options_to_context(**certs)\n http_server = tornado.httpserver.HTTPServer(app, ssl_options=ssl_opts)\n else:\n http_server = tornado.httpserver.HTTPServer(app)\n\n sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n\n\ndef get_unreachable_address() -> tuple[str, int]:\n # reserved as per rfc2606\n return (\"something.invalid\", 54321)\n\n\ndef encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = 
private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n\n\nR = typing.TypeVar(\"R\")\n\n\ndef _run_and_close_tornado(\n async_fn: typing.Callable[P, Coroutine[typing.Any, typing.Any, R]],\n *args: P.args,\n **kwargs: P.kwargs,\n) -> R:\n tornado_loop = None\n\n async def inner_fn() -> R:\n nonlocal tornado_loop\n tornado_loop = tornado.ioloop.IOLoop.current()\n return await async_fn(*args, **kwargs)\n\n try:\n return asyncio.run(inner_fn())\n finally:\n tornado_loop.close(all_fds=True) # type: ignore[union-attr]\n\n\[email protected]\ndef run_loop_in_thread() -> Generator[tornado.ioloop.IOLoop, None, None]:\n loop_started: concurrent.futures.Future[\n tuple[tornado.ioloop.IOLoop, asyncio.Event]\n ] = concurrent.futures.Future()\n with concurrent.futures.ThreadPoolExecutor(\n 1, thread_name_prefix=\"test IOLoop\"\n ) as tpe:\n\n async def run() -> None:\n io_loop = tornado.ioloop.IOLoop.current()\n stop_event = asyncio.Event()\n loop_started.set_result((io_loop, stop_event))\n await stop_event.wait()\n\n # run asyncio.run in a thread and collect exceptions from *either*\n # the loop failing to start, or failing to close\n ran = tpe.submit(_run_and_close_tornado, run) # type: ignore[arg-type]\n for f in concurrent.futures.as_completed((loop_started, ran)): # type: ignore[misc]\n if f is loop_started:\n io_loop, stop_event = loop_started.result()\n try:\n yield io_loop\n finally:\n io_loop.add_callback(stop_event.set)\n\n elif f is ran:\n # if this is the first iteration the loop failed to start\n # if it's the second iteration the loop has finished or\n # the loop failed to close and we need to raise the exception\n ran.result()\n return\n\n\ndef main() -> int:\n # For debugging dummyserver itself - python -m dummyserver.server\n from .handlers import TestingApp\n\n host = \"127.0.0.1\"\n\n async def amain() -> int:\n app = tornado.web.Application([(r\".*\", TestingApp)])\n server, port = run_tornado_app(app, None, \"http\", host)\n\n print(f\"Listening on http://{host}:{port}\")\n await asyncio.Event().wait()\n return 0\n\n return asyncio.run(amain())\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "dummyserver/server.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport concurrent.futures\nimport contextlib\nimport errno\nimport logging\nimport os\nimport socket\nimport ssl\nimport sys\nimport threading\nimport typing\nimport warnings\nfrom collections.abc import Coroutine, Generator\nfrom datetime import datetime\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.netutil\nimport tornado.web\nimport trustme\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\n\nfrom urllib3.exceptions import HTTPWarning\nfrom urllib3.util import ALPN_PROTOCOLS, resolve_cert_reqs, resolve_ssl_version\n\nif typing.TYPE_CHECKING:\n from typing_extensions import ParamSpec\n\n P = ParamSpec(\"P\")\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), \"certs\")\nDEFAULT_CERTS: dict[str, typing.Any] = {\n \"certfile\": os.path.join(CERTS_PATH, \"server.crt\"),\n \"keyfile\": os.path.join(CERTS_PATH, \"server.key\"),\n \"cert_reqs\": ssl.CERT_OPTIONAL,\n \"ca_certs\": os.path.join(CERTS_PATH, \"cacert.pem\"),\n 
\"alpn_protocols\": ALPN_PROTOCOLS,\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, \"cacert.pem\")\nDEFAULT_CA_KEY = os.path.join(CERTS_PATH, \"cacert.key\")\n\n\ndef _resolves_to_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system resolves host to an IPv6 address by default.\"\"\"\n resolves_to_ipv6 = False\n try:\n for res in socket.getaddrinfo(host, None, socket.AF_UNSPEC):\n af, _, _, _, _ = res\n if af == socket.AF_INET6:\n resolves_to_ipv6 = True\n except socket.gaierror:\n pass\n\n return resolves_to_ipv6\n\n\ndef _has_ipv6(host: str) -> bool:\n \"\"\"Returns True if the system can bind an IPv6 address.\"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/urllib3/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = _resolves_to_ipv6(\"localhost\")\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\n\n# Some systems may have IPv6 support but DNS may not be configured\n# properly. We can not count that localhost will resolve to ::1 on all\n# systems. See https://github.com/urllib3/urllib3/pull/611 and\n# https://bugs.python.org/issue18792\nHAS_IPV6_AND_DNS = _has_ipv6(\"localhost\")\nHAS_IPV6 = _has_ipv6(\"::1\")\n\n\n# Different types of servers we have:\n\n\nclass NoIPv6Warning(HTTPWarning):\n \"IPv6 is not available\"\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_event: Event which gets set when the socket handler is\n ready to receive requests.\n \"\"\"\n\n USE_IPV6 = HAS_IPV6_AND_DNS\n\n def __init__(\n self,\n socket_handler: typing.Callable[[socket.socket], None],\n host: str = \"localhost\",\n ready_event: threading.Event | None = None,\n ) -> None:\n super().__init__()\n self.daemon = True\n\n self.socket_handler = socket_handler\n self.host = host\n self.ready_event = ready_event\n\n def _start_server(self) -> None:\n if self.USE_IPV6:\n sock = socket.socket(socket.AF_INET6)\n else:\n warnings.warn(\"No IPv6 support. 
Falling back to IPv4.\", NoIPv6Warning)\n sock = socket.socket(socket.AF_INET)\n if sys.platform != \"win32\":\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, 0))\n self.port = sock.getsockname()[1]\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_event:\n self.ready_event.set()\n\n self.socket_handler(sock)\n sock.close()\n\n def run(self) -> None:\n self._start_server()\n\n\ndef ssl_options_to_context( # type: ignore[no-untyped-def]\n keyfile=None,\n certfile=None,\n server_side=None,\n cert_reqs=None,\n ssl_version: str | int | None = None,\n ca_certs=None,\n do_handshake_on_connect=None,\n suppress_ragged_eofs=None,\n ciphers=None,\n alpn_protocols=None,\n) -> ssl.SSLContext:\n \"\"\"Return an equivalent SSLContext based on ssl.wrap_socket args.\"\"\"\n ssl_version = resolve_ssl_version(ssl_version)\n cert_none = resolve_cert_reqs(\"CERT_NONE\")\n if cert_reqs is None:\n cert_reqs = cert_none\n else:\n cert_reqs = resolve_cert_reqs(cert_reqs)\n\n ctx = ssl.SSLContext(ssl_version)\n ctx.load_cert_chain(certfile, keyfile)\n ctx.verify_mode = cert_reqs\n if ctx.verify_mode != cert_none:\n ctx.load_verify_locations(cafile=ca_certs)\n if alpn_protocols and hasattr(ctx, \"set_alpn_protocols\"):\n try:\n ctx.set_alpn_protocols(alpn_protocols)\n except NotImplementedError:\n pass\n return ctx\n\n\ndef run_tornado_app(\n app: tornado.web.Application,\n certs: dict[str, typing.Any] | None,\n scheme: str,\n host: str,\n) -> tuple[tornado.httpserver.HTTPServer, int]:\n # We can't use fromtimestamp(0) because of CPython issue 29097, so we'll\n # just construct the datetime object directly.\n app.last_req = datetime(1970, 1, 1) # type: ignore[attr-defined]\n\n if scheme == \"https\":\n assert certs is not None\n ssl_opts = ssl_options_to_context(**certs)\n http_server = tornado.httpserver.HTTPServer(app, ssl_options=ssl_opts)\n else:\n http_server = tornado.httpserver.HTTPServer(app)\n\n # When we request a socket with host localhost and port zero (None in Python), then\n # Tornado gets a free IPv4 port and requests that same port in IPv6. But that port\n # could easily be taken with IPv6, especially in crowded CI environments. For this\n # reason we put bind_sockets in a retry loop. 
Full details:\n # * https://github.com/urllib3/urllib3/issues/2171\n # * https://github.com/tornadoweb/tornado/issues/1860\n for i in range(10):\n try:\n sockets = tornado.netutil.bind_sockets(None, address=host) # type: ignore[arg-type]\n except OSError as e:\n if e.errno == errno.EADDRINUSE:\n # TODO this should be a warning if there's a way for pytest to print it\n print(\n f\"Retrying bind_sockets({host}) after EADDRINUSE\", file=sys.stderr\n )\n continue\n break\n\n port = sockets[0].getsockname()[1]\n http_server.add_sockets(sockets)\n return http_server, port\n\n\ndef get_unreachable_address() -> tuple[str, int]:\n # reserved as per rfc2606\n return (\"something.invalid\", 54321)\n\n\ndef encrypt_key_pem(private_key_pem: trustme.Blob, password: bytes) -> trustme.Blob:\n private_key = serialization.load_pem_private_key(\n private_key_pem.bytes(), password=None, backend=default_backend()\n )\n encrypted_key = private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.TraditionalOpenSSL,\n serialization.BestAvailableEncryption(password),\n )\n return trustme.Blob(encrypted_key)\n\n\nR = typing.TypeVar(\"R\")\n\n\ndef _run_and_close_tornado(\n async_fn: typing.Callable[P, Coroutine[typing.Any, typing.Any, R]],\n *args: P.args,\n **kwargs: P.kwargs,\n) -> R:\n tornado_loop = None\n\n async def inner_fn() -> R:\n nonlocal tornado_loop\n tornado_loop = tornado.ioloop.IOLoop.current()\n return await async_fn(*args, **kwargs)\n\n try:\n return asyncio.run(inner_fn())\n finally:\n tornado_loop.close(all_fds=True) # type: ignore[union-attr]\n\n\[email protected]\ndef run_loop_in_thread() -> Generator[tornado.ioloop.IOLoop, None, None]:\n loop_started: concurrent.futures.Future[\n tuple[tornado.ioloop.IOLoop, asyncio.Event]\n ] = concurrent.futures.Future()\n with concurrent.futures.ThreadPoolExecutor(\n 1, thread_name_prefix=\"test IOLoop\"\n ) as tpe:\n\n async def run() -> None:\n io_loop = tornado.ioloop.IOLoop.current()\n stop_event = asyncio.Event()\n loop_started.set_result((io_loop, stop_event))\n await stop_event.wait()\n\n # run asyncio.run in a thread and collect exceptions from *either*\n # the loop failing to start, or failing to close\n ran = tpe.submit(_run_and_close_tornado, run) # type: ignore[arg-type]\n for f in concurrent.futures.as_completed((loop_started, ran)): # type: ignore[misc]\n if f is loop_started:\n io_loop, stop_event = loop_started.result()\n try:\n yield io_loop\n finally:\n io_loop.add_callback(stop_event.set)\n\n elif f is ran:\n # if this is the first iteration the loop failed to start\n # if it's the second iteration the loop has finished or\n # the loop failed to close and we need to raise the exception\n ran.result()\n return\n\n\ndef main() -> int:\n # For debugging dummyserver itself - python -m dummyserver.server\n from .handlers import TestingApp\n\n host = \"127.0.0.1\"\n\n async def amain() -> int:\n app = tornado.web.Application([(r\".*\", TestingApp)])\n server, port = run_tornado_app(app, None, \"http\", host)\n\n print(f\"Listening on http://{host}:{port}\")\n await asyncio.Event().wait()\n return 0\n\n return asyncio.run(amain())\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "dummyserver/server.py"}]}
| 3,831 | 394 |
gh_patches_debug_41403 | rasdani/github-patches | git_diff | Pyomo__pyomo-498 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Appropriate behavior for activate() and deactivate() for nested disjunctions?
```
from pyomo.environ import *
m = ConcreteModel()
m.d1 = Disjunct()
m.d2 = Disjunct()
m.d1.sub1 = Disjunct()
m.d1.sub2 = Disjunct()
m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])
m.disj = Disjunction(expr=[m.d1, m.d2])
```
What should happen when disjuncts containing nested disjunctions are deactivated, e.g. via `m.d1.deactivate()`?
The reclassifier hack complains about `m.d1.sub1` and `m.d1.sub2` not being expanded.
--- END ISSUE ---
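A rough sketch of one way to answer the question, assuming Pyomo's `parent_block()`, `type()`, and `indicator_var` attributes: treat a disjunct as safe to reclassify when it sits under a deactivated `Block`, or under a deactivated `Disjunct` whose indicator variable is fixed to 0. The free-standing helper name is a placeholder; inside the reclassify transformation this check would be a method:

```python
from pyomo.core.base import Block
from pyomo.gdp import Disjunct


def disjunct_on_active_block(disjunct):
    """Return False if the disjunct lives under a deactivated Block, or under a
    Disjunct that was deactivated with its indicator variable fixed to 0."""
    parent = disjunct.parent_block()
    while parent is not None:
        if parent.type() is Block and not parent.active:
            return False
        if (parent.type() is Disjunct and not parent.active
                and parent.indicator_var.fixed
                and parent.indicator_var.value == 0):
            return False
        # Step up one level in the hierarchy
        parent = parent.parent_block()
    return True
```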
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/gdp/plugins/gdp_var_mover.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """Collection of GDP-related hacks.
12
13 Hacks for dealing with the fact that solver writers may sometimes fail to
14 detect variables inside of Disjuncts or deactivated Blocks.
15 """
16
17 import logging
18 import textwrap
19 from pyomo.common.plugin import alias
20 from pyomo.core.base import Transformation, Block, Constraint
21 from pyomo.gdp import Disjunct
22
23 from six import itervalues
24
25 logger = logging.getLogger('pyomo.gdp')
26
27
28 class HACK_GDP_Var_Mover(Transformation):
29 """Move indicator vars to top block.
30
31 HACK: this will move all indicator variables on the model to the top block
32 so the writers can find them.
33
34 """
35
36 alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
37
38 def _apply_to(self, instance, **kwds):
39 assert not kwds
40 count = 0
41 disjunct_generator = instance.component_data_objects(
42 Disjunct, descend_into=(Block, Disjunct))
43 for disjunct in disjunct_generator:
44 count += 1
45 var = disjunct.indicator_var
46 var.doc = "%s(Moved from %s)" % (
47 var.doc + " " if var.doc else "", var.name, )
48 disjunct.del_component(var)
49 instance.add_component("_gdp_moved_IV_%s" % (count,), var)
50
51
52 class HACK_GDP_Disjunct_Reclassifier(Transformation):
53 """Reclassify Disjuncts to Blocks.
54
55 HACK: this will reclassify all Disjuncts to Blocks so the current writers
56 can find the variables
57
58 """
59
60 alias('gdp.reclassify',
61 doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
62
63 def _apply_to(self, instance, **kwds):
64 assert not kwds
65 disjunct_generator = instance.component_objects(
66 Disjunct, descend_into=(Block, Disjunct))
67 for disjunct_component in disjunct_generator:
68 for disjunct in itervalues(disjunct_component._data):
69 if disjunct.active:
70 logger.error("""
71 Reclassifying active Disjunct "%s" as a Block. This
72 is generally an error as it indicates that the model
73 was not completely relaxed before applying the
74 gdp.reclassify transformation""" % (disjunct.name,))
75
76 # Reclassify this disjunct as a block
77 disjunct_component.parent_block().reclassify_component_type(
78 disjunct_component, Block)
79 disjunct_component._activate_without_unfixing_indicator()
80
81 # Deactivate all constraints. Note that we only need to
82 # descend into blocks: we will catch disjuncts in the outer
83 # loop.
84 #
85 # Note that we defer this until AFTER we reactivate the
86 # block, as the component_objects generator will not
87 # return anything when active=True and the block is
88 # deactivated.
89 for disjunct in itervalues(disjunct_component._data):
90 cons_in_disjunct = disjunct.component_objects(
91 Constraint, descend_into=Block, active=True)
92 for con in cons_in_disjunct:
93 con.deactivate()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyomo/gdp/plugins/gdp_var_mover.py b/pyomo/gdp/plugins/gdp_var_mover.py
--- a/pyomo/gdp/plugins/gdp_var_mover.py
+++ b/pyomo/gdp/plugins/gdp_var_mover.py
@@ -18,7 +18,9 @@
import textwrap
from pyomo.common.plugin import alias
from pyomo.core.base import Transformation, Block, Constraint
-from pyomo.gdp import Disjunct
+from pyomo.gdp import Disjunct, GDP_Error
+from pyomo.core import TraversalStrategy
+from pyomo.common.deprecation import deprecated
from six import itervalues
@@ -35,6 +37,8 @@
alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
+ @deprecated(msg="The gdp.varmover transformation has been deprecated in "
+ "favor of the gdp.reclassify transformation.")
def _apply_to(self, instance, **kwds):
assert not kwds
count = 0
@@ -61,13 +65,19 @@
doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
def _apply_to(self, instance, **kwds):
- assert not kwds
+ assert not kwds # no keywords expected to the transformation
disjunct_generator = instance.component_objects(
- Disjunct, descend_into=(Block, Disjunct))
+ Disjunct, descend_into=(Block, Disjunct),
+ descent_order=TraversalStrategy.PostfixDFS)
for disjunct_component in disjunct_generator:
+ # Check that the disjuncts being reclassified are all relaxed or
+ # are not on an active block.
for disjunct in itervalues(disjunct_component._data):
- if disjunct.active:
- logger.error("""
+ if (disjunct.active and
+ self._disjunct_not_relaxed(disjunct) and
+ self._disjunct_on_active_block(disjunct) and
+ self._disjunct_not_fixed_true(disjunct)):
+ raise GDP_Error("""
Reclassifying active Disjunct "%s" as a Block. This
is generally an error as it indicates that the model
was not completely relaxed before applying the
@@ -91,3 +101,31 @@
Constraint, descend_into=Block, active=True)
for con in cons_in_disjunct:
con.deactivate()
+
+ def _disjunct_not_fixed_true(self, disjunct):
+ # Return true if the disjunct indicator variable is not fixed to True
+ return not (disjunct.indicator_var.fixed and
+ disjunct.indicator_var.value == 1)
+
+ def _disjunct_not_relaxed(self, disjunct):
+ # Return True if the disjunct was not relaxed by a transformation.
+ return not getattr(
+ disjunct, '_gdp_transformation_info', {}).get('relaxed', False)
+
+ def _disjunct_on_active_block(self, disjunct):
+ # Check first to make sure that the disjunct is not a
+ # descendent of an inactive Block or fixed and deactivated
+ # Disjunct, before raising a warning.
+ parent_block = disjunct.parent_block()
+ while parent_block is not None:
+ if parent_block.type() is Block and not parent_block.active:
+ return False
+ elif (parent_block.type() is Disjunct and not parent_block.active
+ and parent_block.indicator_var.value == 0
+ and parent_block.indicator_var.fixed):
+ return False
+ else:
+ # Step up one level in the hierarchy
+ parent_block = parent_block.parent_block()
+ continue
+ return True
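The diff above hardens the reclassifier: the complaint becomes a `GDP_Error`, and new helpers skip disjuncts that are fixed, already relaxed, or sitting under an inactive block. A short sketch of the nested-disjunction model this guards against, taken from this record's issue report and applied through pyomo's `TransformationFactory` registry (an assumption about the usual invocation path; the file registers the alias `'gdp.reclassify'` above):

```python
from pyomo.environ import ConcreteModel, TransformationFactory
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.d1 = Disjunct()
m.d2 = Disjunct()
m.d1.sub1 = Disjunct()
m.d1.sub2 = Disjunct()
m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])
m.disj = Disjunction(expr=[m.d1, m.d2])

m.d1.deactivate()
# The nested disjuncts m.d1.sub1 / m.d1.sub2 are never relaxed, so the pre-patch
# reclassifier logs an error about them; the patched version first checks whether
# each disjunct is fixed, already relaxed, or parented by an inactive block before
# raising GDP_Error.
TransformationFactory('gdp.reclassify').apply_to(m)
```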
|
{"golden_diff": "diff --git a/pyomo/gdp/plugins/gdp_var_mover.py b/pyomo/gdp/plugins/gdp_var_mover.py\n--- a/pyomo/gdp/plugins/gdp_var_mover.py\n+++ b/pyomo/gdp/plugins/gdp_var_mover.py\n@@ -18,7 +18,9 @@\n import textwrap\n from pyomo.common.plugin import alias\n from pyomo.core.base import Transformation, Block, Constraint\n-from pyomo.gdp import Disjunct\n+from pyomo.gdp import Disjunct, GDP_Error\n+from pyomo.core import TraversalStrategy\n+from pyomo.common.deprecation import deprecated\n \n from six import itervalues\n \n@@ -35,6 +37,8 @@\n \n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n \n+ @deprecated(msg=\"The gdp.varmover transformation has been deprecated in \"\n+ \"favor of the gdp.reclassify transformation.\")\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n@@ -61,13 +65,19 @@\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n \n def _apply_to(self, instance, **kwds):\n- assert not kwds\n+ assert not kwds # no keywords expected to the transformation\n disjunct_generator = instance.component_objects(\n- Disjunct, descend_into=(Block, Disjunct))\n+ Disjunct, descend_into=(Block, Disjunct),\n+ descent_order=TraversalStrategy.PostfixDFS)\n for disjunct_component in disjunct_generator:\n+ # Check that the disjuncts being reclassified are all relaxed or\n+ # are not on an active block.\n for disjunct in itervalues(disjunct_component._data):\n- if disjunct.active:\n- logger.error(\"\"\"\n+ if (disjunct.active and\n+ self._disjunct_not_relaxed(disjunct) and\n+ self._disjunct_on_active_block(disjunct) and\n+ self._disjunct_not_fixed_true(disjunct)):\n+ raise GDP_Error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n@@ -91,3 +101,31 @@\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n+\n+ def _disjunct_not_fixed_true(self, disjunct):\n+ # Return true if the disjunct indicator variable is not fixed to True\n+ return not (disjunct.indicator_var.fixed and\n+ disjunct.indicator_var.value == 1)\n+\n+ def _disjunct_not_relaxed(self, disjunct):\n+ # Return True if the disjunct was not relaxed by a transformation.\n+ return not getattr(\n+ disjunct, '_gdp_transformation_info', {}).get('relaxed', False)\n+\n+ def _disjunct_on_active_block(self, disjunct):\n+ # Check first to make sure that the disjunct is not a\n+ # descendent of an inactive Block or fixed and deactivated\n+ # Disjunct, before raising a warning.\n+ parent_block = disjunct.parent_block()\n+ while parent_block is not None:\n+ if parent_block.type() is Block and not parent_block.active:\n+ return False\n+ elif (parent_block.type() is Disjunct and not parent_block.active\n+ and parent_block.indicator_var.value == 0\n+ and parent_block.indicator_var.fixed):\n+ return False\n+ else:\n+ # Step up one level in the hierarchy\n+ parent_block = parent_block.parent_block()\n+ continue\n+ return True\n", "issue": "Appropriate behavior for activate() and deactivate() for nested disjunctions?\n```\r\nfrom pyomo.environ import *\r\nm = ConcreteModel()\r\nm.d1 = Disjunct()\r\nm.d2 = Disjunct()\r\nm.d1.sub1 = Disjunct()\r\nm.d1.sub2 = Disjunct()\r\nm.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])\r\nm.disj = Disjunction(expr=[m.d1, m.d2])\r\n```\r\n\r\nWhat should happen when disjuncts containing nested disjunctions are deactivated? 
`m.d1.deactivate()`.\r\nThe reclassifier hack complains about `m.d1.sub1` and `m.d1.sub2` not being expanded.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"Collection of GDP-related hacks.\n\nHacks for dealing with the fact that solver writers may sometimes fail to\ndetect variables inside of Disjuncts or deactivated Blocks.\n\"\"\"\n\nimport logging\nimport textwrap\nfrom pyomo.common.plugin import alias\nfrom pyomo.core.base import Transformation, Block, Constraint\nfrom pyomo.gdp import Disjunct\n\nfrom six import itervalues\n\nlogger = logging.getLogger('pyomo.gdp')\n\n\nclass HACK_GDP_Var_Mover(Transformation):\n \"\"\"Move indicator vars to top block.\n\n HACK: this will move all indicator variables on the model to the top block\n so the writers can find them.\n\n \"\"\"\n\n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n disjunct_generator = instance.component_data_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct in disjunct_generator:\n count += 1\n var = disjunct.indicator_var\n var.doc = \"%s(Moved from %s)\" % (\n var.doc + \" \" if var.doc else \"\", var.name, )\n disjunct.del_component(var)\n instance.add_component(\"_gdp_moved_IV_%s\" % (count,), var)\n\n\nclass HACK_GDP_Disjunct_Reclassifier(Transformation):\n \"\"\"Reclassify Disjuncts to Blocks.\n\n HACK: this will reclassify all Disjuncts to Blocks so the current writers\n can find the variables\n\n \"\"\"\n\n alias('gdp.reclassify',\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds\n disjunct_generator = instance.component_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct_component in disjunct_generator:\n for disjunct in itervalues(disjunct_component._data):\n if disjunct.active:\n logger.error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n gdp.reclassify transformation\"\"\" % (disjunct.name,))\n\n # Reclassify this disjunct as a block\n disjunct_component.parent_block().reclassify_component_type(\n disjunct_component, Block)\n disjunct_component._activate_without_unfixing_indicator()\n\n # Deactivate all constraints. 
Note that we only need to\n # descend into blocks: we will catch disjuncts in the outer\n # loop.\n #\n # Note that we defer this until AFTER we reactivate the\n # block, as the component_objects generator will not\n # return anything when active=True and the block is\n # deactivated.\n for disjunct in itervalues(disjunct_component._data):\n cons_in_disjunct = disjunct.component_objects(\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n", "path": "pyomo/gdp/plugins/gdp_var_mover.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"Collection of GDP-related hacks.\n\nHacks for dealing with the fact that solver writers may sometimes fail to\ndetect variables inside of Disjuncts or deactivated Blocks.\n\"\"\"\n\nimport logging\nimport textwrap\nfrom pyomo.common.plugin import alias\nfrom pyomo.core.base import Transformation, Block, Constraint\nfrom pyomo.gdp import Disjunct, GDP_Error\nfrom pyomo.core import TraversalStrategy\nfrom pyomo.common.deprecation import deprecated\n\nfrom six import itervalues\n\nlogger = logging.getLogger('pyomo.gdp')\n\n\nclass HACK_GDP_Var_Mover(Transformation):\n \"\"\"Move indicator vars to top block.\n\n HACK: this will move all indicator variables on the model to the top block\n so the writers can find them.\n\n \"\"\"\n\n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n @deprecated(msg=\"The gdp.varmover transformation has been deprecated in \"\n \"favor of the gdp.reclassify transformation.\")\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n disjunct_generator = instance.component_data_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct in disjunct_generator:\n count += 1\n var = disjunct.indicator_var\n var.doc = \"%s(Moved from %s)\" % (\n var.doc + \" \" if var.doc else \"\", var.name, )\n disjunct.del_component(var)\n instance.add_component(\"_gdp_moved_IV_%s\" % (count,), var)\n\n\nclass HACK_GDP_Disjunct_Reclassifier(Transformation):\n \"\"\"Reclassify Disjuncts to Blocks.\n\n HACK: this will reclassify all Disjuncts to Blocks so the current writers\n can find the variables\n\n \"\"\"\n\n alias('gdp.reclassify',\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds # no keywords expected to the transformation\n disjunct_generator = instance.component_objects(\n Disjunct, descend_into=(Block, Disjunct),\n descent_order=TraversalStrategy.PostfixDFS)\n for disjunct_component in disjunct_generator:\n # Check that the disjuncts being reclassified are all relaxed or\n # are not on an active block.\n for disjunct in itervalues(disjunct_component._data):\n if (disjunct.active and\n self._disjunct_not_relaxed(disjunct) and\n self._disjunct_on_active_block(disjunct) and\n self._disjunct_not_fixed_true(disjunct)):\n raise GDP_Error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. 
This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n gdp.reclassify transformation\"\"\" % (disjunct.name,))\n\n # Reclassify this disjunct as a block\n disjunct_component.parent_block().reclassify_component_type(\n disjunct_component, Block)\n disjunct_component._activate_without_unfixing_indicator()\n\n # Deactivate all constraints. Note that we only need to\n # descend into blocks: we will catch disjuncts in the outer\n # loop.\n #\n # Note that we defer this until AFTER we reactivate the\n # block, as the component_objects generator will not\n # return anything when active=True and the block is\n # deactivated.\n for disjunct in itervalues(disjunct_component._data):\n cons_in_disjunct = disjunct.component_objects(\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n\n def _disjunct_not_fixed_true(self, disjunct):\n # Return true if the disjunct indicator variable is not fixed to True\n return not (disjunct.indicator_var.fixed and\n disjunct.indicator_var.value == 1)\n\n def _disjunct_not_relaxed(self, disjunct):\n # Return True if the disjunct was not relaxed by a transformation.\n return not getattr(\n disjunct, '_gdp_transformation_info', {}).get('relaxed', False)\n\n def _disjunct_on_active_block(self, disjunct):\n # Check first to make sure that the disjunct is not a\n # descendent of an inactive Block or fixed and deactivated\n # Disjunct, before raising a warning.\n parent_block = disjunct.parent_block()\n while parent_block is not None:\n if parent_block.type() is Block and not parent_block.active:\n return False\n elif (parent_block.type() is Disjunct and not parent_block.active\n and parent_block.indicator_var.value == 0\n and parent_block.indicator_var.fixed):\n return False\n else:\n # Step up one level in the hierarchy\n parent_block = parent_block.parent_block()\n continue\n return True\n", "path": "pyomo/gdp/plugins/gdp_var_mover.py"}]}
| 1,401 | 864 |
gh_patches_debug_14547
|
rasdani/github-patches
|
git_diff
|
inducer__relate-548
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After pretend-facility, impersonate disappears from menu
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `course/templatetags/coursetags.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 __copyright__ = "Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner"
4
5 __license__ = """
6 Permission is hereby granted, free of charge, to any person obtaining a copy
7 of this software and associated documentation files (the "Software"), to deal
8 in the Software without restriction, including without limitation the rights
9 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 copies of the Software, and to permit persons to whom the Software is
11 furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in
14 all copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 THE SOFTWARE.
23 """
24
25 from django.template import Library, Node, TemplateSyntaxError
26 from django.utils import translation
27
28 register = Library()
29
30
31 # {{{ get language_code in JS traditional naming format
32
33 class GetCurrentLanguageJsFmtNode(Node):
34 def __init__(self, variable):
35 self.variable = variable
36
37 def render(self, context):
38 lang_name = (
39 translation.to_locale(translation.get_language()).replace("_", "-"))
40 context[self.variable] = lang_name
41 return ''
42
43
44 @register.tag("get_current_js_lang_name")
45 def do_get_current_js_lang_name(parser, token):
46 """
47 This will store the current language in the context, in js lang format.
48 This is different with built-in do_get_current_language, which returns
49 languange name like "en-us", "zh-hans". This method return lang name
50 "en-US", "zh-Hans", with the country code capitallized if country code
51 has 2 characters, and capitalize first if country code has more than 2
52 characters.
53
54 Usage::
55
56 {% get_current_language_js_lang_format as language %}
57
58 This will fetch the currently active language name with js tradition and
59 put it's value into the ``language`` context variable.
60 """
61 # token.split_contents() isn't useful here because this tag doesn't
62 # accept variable as arguments
63 args = token.contents.split()
64 if len(args) != 3 or args[1] != 'as':
65 raise TemplateSyntaxError("'get_current_js_lang_name' requires "
66 "'as variable' (got %r)" % args)
67 return GetCurrentLanguageJsFmtNode(args[2])
68
69
70 @register.filter(name='js_lang_fallback')
71 def js_lang_fallback(lang_name, js_name=None):
72 """
73 Return the fallback lang name for js files.
74 :param a :class:`str:`
75 :param js_name: a :class:`str:`, optional.
76 :return: a :class:`str:`
77 """
78
79 # The mapping is crap, we use a special case table to fix it.
80 if js_name == "fullcalendar":
81 known_fallback_mapping = {
82 "zh-hans": "zh-cn",
83 "zh-hant": "zh-tw"}
84 return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()
85
86 return lang_name
87
88 # }}}
89
90
91 # {{{ filter for participation.has_permission()
92
93 @register.filter(name='has_permission')
94 def has_permission(participation, arg):
95 """
96 Check if a participation instance has specific permission.
97 :param participation: a :class:`participation:` instance
98 :param arg: String, with permission and arguments separated by comma
99 :return: a :class:`bool`
100 """
101 has_pperm = False
102 try:
103 arg_list = [s.strip() for s in arg.split(",")]
104 perm = arg_list[0]
105 argument = None
106 if len(arg_list) > 1:
107 argument = arg_list[1]
108 has_pperm = participation.has_permission(perm, argument)
109 except Exception:
110 # fail silently
111 pass
112
113 return has_pperm
114
115 # }}}
116
117
118 @register.filter(name='commit_message_as_html')
119 def commit_message_as_html(commit_sha, repo):
120 from course.versioning import _get_commit_message_as_html
121 return _get_commit_message_as_html(repo, commit_sha)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/course/templatetags/coursetags.py b/course/templatetags/coursetags.py
--- a/course/templatetags/coursetags.py
+++ b/course/templatetags/coursetags.py
@@ -115,6 +115,28 @@
# }}}
[email protected](name='may_set_fake_time')
+def may_set_fake_time(user):
+ """
+ Check if a user may set fake time.
+ :param user: a :class:`accounts.User:` instance
+ :return: a :class:`bool`
+ """
+ from course.views import may_set_fake_time as msf
+ return msf(user)
+
+
[email protected](name='may_set_pretend_facility')
+def may_set_pretend_facility(user):
+ """
+ Check if a user may set pretend_facility
+ :param user: a :class:`accounts.User:` instance
+ :return: a :class:`bool`
+ """
+ from course.views import may_set_pretend_facility as mspf
+ return mspf(user)
+
+
@register.filter(name='commit_message_as_html')
def commit_message_as_html(commit_sha, repo):
from course.versioning import _get_commit_message_as_html
|
{"golden_diff": "diff --git a/course/templatetags/coursetags.py b/course/templatetags/coursetags.py\n--- a/course/templatetags/coursetags.py\n+++ b/course/templatetags/coursetags.py\n@@ -115,6 +115,28 @@\n # }}}\n \n \[email protected](name='may_set_fake_time')\n+def may_set_fake_time(user):\n+ \"\"\"\n+ Check if a user may set fake time.\n+ :param user: a :class:`accounts.User:` instance\n+ :return: a :class:`bool`\n+ \"\"\"\n+ from course.views import may_set_fake_time as msf\n+ return msf(user)\n+\n+\[email protected](name='may_set_pretend_facility')\n+def may_set_pretend_facility(user):\n+ \"\"\"\n+ Check if a user may set pretend_facility\n+ :param user: a :class:`accounts.User:` instance\n+ :return: a :class:`bool`\n+ \"\"\"\n+ from course.views import may_set_pretend_facility as mspf\n+ return mspf(user)\n+\n+\n @register.filter(name='commit_message_as_html')\n def commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n", "issue": "After pretend-facility, impersonate disappears from menu\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n__copyright__ = \"Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom django.template import Library, Node, TemplateSyntaxError\nfrom django.utils import translation\n\nregister = Library()\n\n\n# {{{ get language_code in JS traditional naming format\n\nclass GetCurrentLanguageJsFmtNode(Node):\n def __init__(self, variable):\n self.variable = variable\n\n def render(self, context):\n lang_name = (\n translation.to_locale(translation.get_language()).replace(\"_\", \"-\"))\n context[self.variable] = lang_name\n return ''\n\n\[email protected](\"get_current_js_lang_name\")\ndef do_get_current_js_lang_name(parser, token):\n \"\"\"\n This will store the current language in the context, in js lang format.\n This is different with built-in do_get_current_language, which returns\n languange name like \"en-us\", \"zh-hans\". 
This method return lang name\n \"en-US\", \"zh-Hans\", with the country code capitallized if country code\n has 2 characters, and capitalize first if country code has more than 2\n characters.\n\n Usage::\n\n {% get_current_language_js_lang_format as language %}\n\n This will fetch the currently active language name with js tradition and\n put it's value into the ``language`` context variable.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't\n # accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != 'as':\n raise TemplateSyntaxError(\"'get_current_js_lang_name' requires \"\n \"'as variable' (got %r)\" % args)\n return GetCurrentLanguageJsFmtNode(args[2])\n\n\[email protected](name='js_lang_fallback')\ndef js_lang_fallback(lang_name, js_name=None):\n \"\"\"\n Return the fallback lang name for js files.\n :param a :class:`str:`\n :param js_name: a :class:`str:`, optional.\n :return: a :class:`str:`\n \"\"\"\n\n # The mapping is crap, we use a special case table to fix it.\n if js_name == \"fullcalendar\":\n known_fallback_mapping = {\n \"zh-hans\": \"zh-cn\",\n \"zh-hant\": \"zh-tw\"}\n return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()\n\n return lang_name\n\n# }}}\n\n\n# {{{ filter for participation.has_permission()\n\[email protected](name='has_permission')\ndef has_permission(participation, arg):\n \"\"\"\n Check if a participation instance has specific permission.\n :param participation: a :class:`participation:` instance\n :param arg: String, with permission and arguments separated by comma\n :return: a :class:`bool`\n \"\"\"\n has_pperm = False\n try:\n arg_list = [s.strip() for s in arg.split(\",\")]\n perm = arg_list[0]\n argument = None\n if len(arg_list) > 1:\n argument = arg_list[1]\n has_pperm = participation.has_permission(perm, argument)\n except Exception:\n # fail silently\n pass\n\n return has_pperm\n\n# }}}\n\n\[email protected](name='commit_message_as_html')\ndef commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n return _get_commit_message_as_html(repo, commit_sha)\n", "path": "course/templatetags/coursetags.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n__copyright__ = \"Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom django.template import Library, Node, TemplateSyntaxError\nfrom django.utils import translation\n\nregister = Library()\n\n\n# {{{ get language_code in JS traditional naming format\n\nclass GetCurrentLanguageJsFmtNode(Node):\n def __init__(self, variable):\n self.variable = variable\n\n def render(self, context):\n lang_name = (\n translation.to_locale(translation.get_language()).replace(\"_\", \"-\"))\n context[self.variable] = lang_name\n return ''\n\n\[email protected](\"get_current_js_lang_name\")\ndef do_get_current_js_lang_name(parser, token):\n \"\"\"\n This will store the current language in the context, in js lang format.\n This is different with built-in do_get_current_language, which returns\n languange name like \"en-us\", \"zh-hans\". This method return lang name\n \"en-US\", \"zh-Hans\", with the country code capitallized if country code\n has 2 characters, and capitalize first if country code has more than 2\n characters.\n\n Usage::\n\n {% get_current_language_js_lang_format as language %}\n\n This will fetch the currently active language name with js tradition and\n put it's value into the ``language`` context variable.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't\n # accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != 'as':\n raise TemplateSyntaxError(\"'get_current_js_lang_name' requires \"\n \"'as variable' (got %r)\" % args)\n return GetCurrentLanguageJsFmtNode(args[2])\n\n\[email protected](name='js_lang_fallback')\ndef js_lang_fallback(lang_name, js_name=None):\n \"\"\"\n Return the fallback lang name for js files.\n :param a :class:`str:`\n :param js_name: a :class:`str:`, optional.\n :return: a :class:`str:`\n \"\"\"\n\n # The mapping is crap, we use a special case table to fix it.\n if js_name == \"fullcalendar\":\n known_fallback_mapping = {\n \"zh-hans\": \"zh-cn\",\n \"zh-hant\": \"zh-tw\"}\n return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()\n\n return lang_name\n\n# }}}\n\n\n# {{{ filter for participation.has_permission()\n\[email protected](name='has_permission')\ndef has_permission(participation, arg):\n \"\"\"\n Check if a participation instance has specific permission.\n :param participation: a :class:`participation:` instance\n :param arg: String, with permission and arguments separated by comma\n :return: a :class:`bool`\n \"\"\"\n has_pperm = False\n try:\n arg_list = [s.strip() for s in arg.split(\",\")]\n perm = arg_list[0]\n argument = None\n if len(arg_list) > 1:\n argument = arg_list[1]\n has_pperm = participation.has_permission(perm, argument)\n except Exception:\n # fail silently\n pass\n\n return has_pperm\n\n# }}}\n\n\[email protected](name='may_set_fake_time')\ndef may_set_fake_time(user):\n \"\"\"\n Check if a user may set fake time.\n :param user: a :class:`accounts.User:` instance\n :return: a :class:`bool`\n \"\"\"\n from course.views import may_set_fake_time as msf\n return msf(user)\n\n\[email protected](name='may_set_pretend_facility')\ndef may_set_pretend_facility(user):\n \"\"\"\n Check if a user may set pretend_facility\n :param user: a :class:`accounts.User:` instance\n :return: a :class:`bool`\n \"\"\"\n from course.views import may_set_pretend_facility as 
mspf\n return mspf(user)\n\n\[email protected](name='commit_message_as_html')\ndef commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n return _get_commit_message_as_html(repo, commit_sha)\n", "path": "course/templatetags/coursetags.py"}]}
| 1,503 | 290 |
gh_patches_debug_7752
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-29188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "View stream subscribers" page
We should add a "View stream subscribers" page (below "Create a stream" under Streams & topics). It should document the right sidebar UI in #26717 (currently test-deployed on CZO), as well as how to see subscribers via settings.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/markdown/tabbed_sections.py`
Content:
```
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7 from typing_extensions import override
8
9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
10
11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")
14
15 TABBED_SECTION_TEMPLATE = """
16 <div class="tabbed-section {tab_class}" markdown="1">
17 {nav_bar}
18 <div class="blocks">
19 {blocks}
20 </div>
21 </div>
22 """.strip()
23
24 NAV_BAR_TEMPLATE = """
25 <ul class="nav">
26 {tabs}
27 </ul>
28 """.strip()
29
30 NAV_LIST_ITEM_TEMPLATE = """
31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li>
32 """.strip()
33
34 DIV_TAB_CONTENT_TEMPLATE = """
35 <div data-tab-key="{data_tab_key}" markdown="1">
36 {content}
37 </div>
38 """.strip()
39
40 # If adding new entries here, also check if you need to update
41 # tabbed-instructions.js
42 TAB_SECTION_LABELS = {
43 "desktop-web": "Desktop/Web",
44 "ios": "iOS",
45 "android": "Android",
46 "mac": "macOS",
47 "windows": "Windows",
48 "linux": "Linux",
49 "most-systems": "Most systems",
50 "linux-with-apt": "Linux with APT",
51 "python": "Python",
52 "js": "JavaScript",
53 "curl": "curl",
54 "zulip-send": "zulip-send",
55 "web": "Web",
56 "desktop": "Desktop",
57 "mobile": "Mobile",
58 "mm-default": "Default installation",
59 "mm-cloud": "Cloud instance",
60 "mm-docker": "Docker",
61 "mm-gitlab-omnibus": "GitLab Omnibus",
62 "mm-self-hosting-cloud-export": "Self hosting (cloud export)",
63 "require-invitations": "Require invitations",
64 "allow-anyone-to-join": "Allow anyone to join",
65 "restrict-by-email-domain": "Restrict by email domain",
66 "zoom": "Zoom",
67 "jitsi-meet": "Jitsi Meet",
68 "bigbluebutton": "BigBlueButton",
69 "disable": "Disabled",
70 "chrome": "Chrome",
71 "firefox": "Firefox",
72 "desktop-app": "Desktop app",
73 "system-proxy-settings": "System proxy settings",
74 "custom-proxy-settings": "Custom proxy settings",
75 "stream": "From a stream view",
76 "not-stream": "From other views",
77 "via-recent-conversations": "Via recent conversations",
78 "via-inbox-view": "Via inbox view",
79 "via-left-sidebar": "Via left sidebar",
80 "instructions-for-all-platforms": "Instructions for all platforms",
81 "public-streams": "Public streams",
82 "private-streams": "Private streams",
83 "web-public-streams": "Web-public streams",
84 "via-user-card": "Via user card",
85 "via-user-profile": "Via user profile",
86 "via-organization-settings": "Via organization settings",
87 "via-personal-settings": "Via personal settings",
88 "via-stream-settings": "Via stream settings",
89 "default-subdomain": "Default subdomain",
90 "custom-subdomain": "Custom subdomain",
91 "zulip-cloud-standard": "Zulip Cloud Standard",
92 "zulip-cloud-plus": "Zulip Cloud Plus",
93 "request-sponsorship": "Request sponsorship",
94 "request-education-pricing": "Request education pricing",
95 "zulip-cloud": "Zulip Cloud",
96 "self-hosting": "Self hosting",
97 "okta": "Okta",
98 "onelogin": "OneLogin",
99 "azuread": "AzureAD",
100 "keycloak": "Keycloak",
101 "auth0": "Auth0",
102 "logged-in": "If you are logged in",
103 "logged-out": "If you are logged out",
104 "user": "User",
105 "bot": "Bot",
106 "on-sign-up": "On sign-up",
107 "via-paste": "Via paste",
108 "via-drag-and-drop": "Via drag-and-drop",
109 "via-markdown": "Via Markdown",
110 "via-compose-box-buttons": "Via compose box buttons",
111 "stream-compose": "Compose to a stream",
112 "dm-compose": "Compose a DM",
113 "v8": "Zulip Server 8.0+",
114 "v6": "Zulip Server 6.0+",
115 "v4": "Zulip Server 4.0+",
116 "all-versions": "All versions",
117 "for-a-bot": "For a bot",
118 "for-yourself": "For yourself",
119 }
120
121
122 class TabbedSectionsGenerator(Extension):
123 @override
124 def extendMarkdown(self, md: markdown.Markdown) -> None:
125 md.preprocessors.register(
126 TabbedSectionsPreprocessor(md, self.getConfigs()),
127 "tabbed_sections",
128 PREPROCESSOR_PRIORITES["tabbed_sections"],
129 )
130
131
132 class TabbedSectionsPreprocessor(Preprocessor):
133 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
134 super().__init__(md)
135
136 @override
137 def run(self, lines: List[str]) -> List[str]:
138 tab_section = self.parse_tabs(lines)
139 while tab_section:
140 if "tabs" in tab_section:
141 tab_class = "has-tabs"
142 else:
143 tab_class = "no-tabs"
144 tab_section["tabs"] = [
145 {
146 "tab_key": "instructions-for-all-platforms",
147 "start": tab_section["start_tabs_index"],
148 }
149 ]
150 nav_bar = self.generate_nav_bar(tab_section)
151 content_blocks = self.generate_content_blocks(tab_section, lines)
152 rendered_tabs = TABBED_SECTION_TEMPLATE.format(
153 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
154 )
155
156 start = tab_section["start_tabs_index"]
157 end = tab_section["end_tabs_index"] + 1
158 lines = [*lines[:start], rendered_tabs, *lines[end:]]
159 tab_section = self.parse_tabs(lines)
160 return lines
161
162 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
163 tab_content_blocks = []
164 for index, tab in enumerate(tab_section["tabs"]):
165 start_index = tab["start"] + 1
166 try:
167 # If there are more tabs, we can use the starting index
168 # of the next tab as the ending index of the previous one
169 end_index = tab_section["tabs"][index + 1]["start"]
170 except IndexError:
171 # Otherwise, just use the end of the entire section
172 end_index = tab_section["end_tabs_index"]
173
174 content = "\n".join(lines[start_index:end_index]).strip()
175 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
176 data_tab_key=tab["tab_key"],
177 # Wrapping the content in two newlines is necessary here.
178 # If we don't do this, the inner Markdown does not get
179 # rendered properly.
180 content=f"\n{content}\n",
181 )
182 tab_content_blocks.append(tab_content_block)
183 return "\n".join(tab_content_blocks)
184
185 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
186 li_elements = []
187 for tab in tab_section["tabs"]:
188 tab_key = tab.get("tab_key")
189 tab_label = TAB_SECTION_LABELS.get(tab_key)
190 if tab_label is None:
191 raise ValueError(
192 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
193 )
194
195 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)
196 li_elements.append(li)
197
198 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
199
200 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
201 block: Dict[str, Any] = {}
202 for index, line in enumerate(lines):
203 start_match = START_TABBED_SECTION_REGEX.search(line)
204 if start_match:
205 block["start_tabs_index"] = index
206
207 tab_content_match = TAB_CONTENT_REGEX.search(line)
208 if tab_content_match:
209 block.setdefault("tabs", [])
210 tab = {"start": index, "tab_key": tab_content_match.group(1)}
211 block["tabs"].append(tab)
212
213 end_match = END_TABBED_SECTION_REGEX.search(line)
214 if end_match:
215 block["end_tabs_index"] = index
216 break
217 return block
218
219
220 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
221 return TabbedSectionsGenerator(**kwargs)
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -77,6 +77,7 @@
"via-recent-conversations": "Via recent conversations",
"via-inbox-view": "Via inbox view",
"via-left-sidebar": "Via left sidebar",
+ "via-right-sidebar": "Via right sidebar",
"instructions-for-all-platforms": "Instructions for all platforms",
"public-streams": "Public streams",
"private-streams": "Private streams",
|
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -77,6 +77,7 @@\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n+ \"via-right-sidebar\": \"Via right sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n", "issue": "Add \"View stream subscribers\" page\nWe should add a \"View stream subscribers\" page (below \"Create a stream\" under Streams & topics). It should document the right sidebar UI in #26717 (currently test-deployed on CZO), as well as how to see subscribers via settings.\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"most-systems\": \"Most systems\",\n \"linux-with-apt\": \"Linux with APT\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public 
streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n \"for-a-bot\": \"For a bot\",\n \"for-yourself\": \"For yourself\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return 
\"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"most-systems\": \"Most systems\",\n \"linux-with-apt\": \"Linux with APT\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": 
\"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"via-right-sidebar\": \"Via right sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n \"for-a-bot\": \"For a bot\",\n \"for-yourself\": \"For yourself\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # 
Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
| 2,829 | 142 |
gh_patches_debug_23065
|
rasdani/github-patches
|
git_diff
|
modoboa__modoboa-1658
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SPF parser failure
Parser can fail because of extra space characters.
```python
File "/srv/modoboa/instance/manage.py", line 25, in <module>
execute_from_command_line(sys.argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py", line 53, in run_from_argv
return super(SubcommandCommand, self).run_from_argv(argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py", line 86, in handle
return command.run_from_argv(argv)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py", line 275, in handle
self.check_domain(domain, **options)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py", line 210, in check_domain
domain, "spf", ttl)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py", line 37, in get_or_create_for_domain
record.check_syntax(ttl)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py", line 74, in check_syntax
result = func(self.value)
File "/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/lib.py", line 173, in check_spf_syntax
if part[0] in ["+", "-", "~", "?"]:
IndexError: string index out of range
```
--- END ISSUE ---
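For context, a minimal stand-alone reproduction of the failure described above (the record value is made up; the point is that `str.split(" ")` keeps empty strings when the record contains consecutive spaces, and indexing such an empty part is what raises the `IndexError` in the traceback):
```python
# check_spf_syntax() splits the record on single spaces; consecutive spaces
# therefore yield empty strings, and part[0] on an empty string raises IndexError.
record = "v=spf1 ip4:192.0.2.0/24  -all"   # hypothetical record with a double space
parts = record.split(" ")[1:]
print(parts)           # ['ip4:192.0.2.0/24', '', '-all']
empty_part = parts[1]
print(empty_part[0])   # IndexError: string index out of range
```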
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/dnstools/lib.py`
Content:
```
1 """dnstools library."""
2
3 from __future__ import unicode_literals
4
5 import ipaddress
6
7 from django.utils.translation import ugettext as _
8
9 from modoboa.admin import lib as admin_lib
10
11 from . import constants
12
13
14 def get_spf_record(domain):
15 """Return SPF record for domain (if any)."""
16 records = admin_lib.get_dns_records(domain, "TXT")
17 if records is None:
18 return None
19 for record in records:
20 value = str(record).strip('"')
21 if value.startswith("v=spf1"):
22 return value
23 return None
24
25
26 def get_dkim_record(domain, selector):
27 """Return DKIM records form domain (if any)."""
28 name = "{}._domainkey.{}".format(selector, domain)
29 records = admin_lib.get_dns_records(name, "TXT")
30 if records is None:
31 return None
32 for record in records:
33 value = str(record).strip('"')
34 if value.startswith("v=DKIM1"):
35 return value
36 return None
37
38
39 def get_dmarc_record(domain):
40 """Return DMARC record for domain (if any)."""
41 name = "_dmarc.{}".format(domain)
42 records = admin_lib.get_dns_records(name, "TXT")
43 if records is None:
44 return None
45 for record in records:
46 value = str(record).strip('"')
47 if value.startswith("v=DMARC1"):
48 return value
49 return None
50
51
52 def _get_simple_record(name):
53 """We just want to know if name is declared."""
54 for rdtype in ["A", "CNAME", "AAAA"]:
55 records = admin_lib.get_dns_records(name, rdtype)
56 if records is not None:
57 break
58 else:
59 return None
60 for record in records:
61 value = str(record).strip('"')
62 break
63 return value
64
65
66 def get_autoconfig_record(domain):
67 """Return autoconfig record for domain (if any)."""
68 return _get_simple_record("autoconfig.{}".format(domain))
69
70
71 def get_autodiscover_record(domain):
72 """Return autodiscover record for domain (if any)."""
73 return _get_simple_record("autodiscover.{}".format(domain))
74
75
76 class DNSSyntaxError(Exception):
77 """Custom exception for DNS errors."""
78
79 pass
80
81
82 def check_spf_ip4(value):
83 """Check syntax of ip4 mechanism."""
84 parts = value.split(":")
85 if len(parts) != 2:
86 raise DNSSyntaxError(_("Wrong ip4 mechanism syntax"))
87 try:
88 ipaddress.ip_network(parts[1], False)
89 except ValueError:
90 raise DNSSyntaxError(_("Wrong IPv4 address format"))
91
92
93 def check_spf_ip6(value):
94 """Check syntax of ip6 mechanism."""
95 if not value.startswith("ip6:"):
96 raise DNSSyntaxError(_("Wrong ip6 mechanism syntax"))
97 value = value.replace("ip6:", "")
98 try:
99 ipaddress.ip_network(value, False)
100 except ValueError:
101 raise DNSSyntaxError(_("Wrong IPv6 address format"))
102
103
104 def _check_domain_and_mask(value, mechanism):
105 """Check for valid domain / mask."""
106 domain = None
107 mask = None
108 if ":" in value:
109 mechanism, domain = value.split(":")
110 if "/" in domain:
111 domain, mask = domain.split("/")
112 elif "/" in value:
113 mechanism, mask = value.split("/")
114 else:
115 raise DNSSyntaxError(
116 _("Invalid syntax for {} mechanism").format(mechanism))
117 if mask and (not mask.isdigit() or int(mask) > 32):
118 raise DNSSyntaxError(_("Invalid mask found {}").format(mask))
119
120
121 def check_spf_a(value):
122 """Check syntax of a mechanism."""
123 if value == "a":
124 return
125 _check_domain_and_mask(value, "a")
126
127
128 def check_spf_mx(value):
129 """Check syntax of mx mechanism."""
130 if value == "mx":
131 return
132 _check_domain_and_mask(value, "mx")
133
134
135 def _check_simple(value, mechanism):
136 """Simple check."""
137 if value == mechanism:
138 return
139 parts = value.split(":")
140 if len(parts) != 2:
141 raise DNSSyntaxError(
142 _("Invalid syntax for {} mechanism").format(mechanism))
143
144
145 def check_spf_ptr(value):
146 """Check syntax of ptr mechanism."""
147 _check_simple(value, "ptr")
148
149
150 def check_spf_exists(value):
151 """Check syntax of ptr mechanism."""
152 _check_simple(value, "exists")
153
154
155 def check_spf_include(value):
156 """Check syntax of include mechanism."""
157 _check_simple(value, "include")
158
159
160 def check_spf_syntax(record):
161 """Check if record has a valid SPF syntax."""
162 if not record.startswith("v=spf1"):
163 raise DNSSyntaxError(_("Not an SPF record"))
164 parts = record.split(" ")[1:]
165 modifiers = []
166 mechanisms = []
167 for part in parts:
168 qualifier = None
169 if part[0] in ["+", "-", "~", "?"]:
170 qualifier = part[0]
171 part = part[1:]
172 if part == "all":
173 continue
174 for mechanism in constants.SPF_MECHANISMS:
175 if part.startswith(mechanism):
176 globals()["check_spf_{}".format(mechanism)](part)
177 mechanisms.append(mechanism)
178 break
179 else:
180 # Look for modifier
181 modifier = part.split("=")
182 if len(modifier) != 2:
183 raise DNSSyntaxError(_("Unknown mechanism {}").format(part))
184 if modifier[0] not in ["redirect", "exp"]:
185 raise DNSSyntaxError(_("Unknown modifier {}").format(
186 modifier[0]))
187 if modifier[0] in modifiers:
188 raise DNSSyntaxError(_("Duplicate modifier {} found").format(
189 modifier[0]))
190 modifiers.append(modifier[0])
191 if not len(mechanisms) and not len(modifiers):
192 raise DNSSyntaxError(_("No mechanism found"))
193 return None
194
195
196 def check_dkim_syntax(record):
197 """Check if record has a valid DKIM syntax."""
198 if not record.startswith("v=DKIM1"):
199 raise DNSSyntaxError(_("Not a valid DKIM record"))
200 key = None
201 for tag in record.split(";")[1:]:
202 tag = tag.strip(" ")
203 parts = tag.split("=", 1)
204 if len(parts) != 2:
205 raise DNSSyntaxError(_("Invalid tag {}").format(tag))
206 name = parts[0].strip(" ")
207 if name == "p":
208 key = "".join(part.strip('"') for part in parts[1].split(" "))
209 if key is None:
210 raise DNSSyntaxError(_("No key found in record"))
211 return key
212
213
214 def check_dmarc_tag_string_value(tag, value):
215 """Check if value is valid for tag."""
216 tdef = constants.DMARC_TAGS[tag]
217 error = _("Wrong value {} for tag {}").format(value, tag)
218 if "values" in tdef and value not in tdef["values"]:
219 raise DNSSyntaxError(error)
220 elif "regex" in tdef and tdef["regex"].match(value) is None:
221 raise DNSSyntaxError(error)
222
223
224 def check_dmarc_tag(tag, value):
225 """Check if tag is valid."""
226 tdef = constants.DMARC_TAGS[tag]
227 ttype = tdef.get("type", "string")
228 if ttype == "list":
229 for svalue in value.split(","):
230 check_dmarc_tag_string_value(tag, svalue)
231 elif ttype == "int":
232 error = _("Wrong value {} for tag {}:").format(value, tag)
233 try:
234 value = int(value)
235 except ValueError:
236 raise DNSSyntaxError(error + _(" not an integer"))
237 if "min_value" in tdef and value < tdef["min_value"]:
238 raise DNSSyntaxError(
239 error + _(" less than {}").format(tdef["min_value"]))
240 if "max_value" in tdef and value > tdef["max_value"]:
241 raise DNSSyntaxError(
242 error + _(" greater than {}").format(tdef["max_value"]))
243 else:
244 check_dmarc_tag_string_value(tag, value)
245
246
247 def check_dmarc_syntax(record):
248 """Check if record has a valid DMARC syntax."""
249 if not record.startswith("v=DMARC1"):
250 raise DNSSyntaxError(_("Not a valid DMARC record"))
251 tags = {}
252 for tag in record.split(";")[1:]:
253 tag = tag.strip(" ")
254 parts = tag.split("=")
255 if len(parts) != 2:
256 raise DNSSyntaxError(_("Invalid tag {}").format(tag))
257 name = parts[0].strip(" ")
258 if name not in constants.DMARC_TAGS:
259 raise DNSSyntaxError(_("Unknown tag {}").format(name))
260 value = parts[1].strip(" ")
261 check_dmarc_tag(name, value)
262 tags[name] = value
263 if "p" not in tags:
264 raise DNSSyntaxError(_("Missing required p tag"))
265
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modoboa/dnstools/lib.py b/modoboa/dnstools/lib.py
--- a/modoboa/dnstools/lib.py
+++ b/modoboa/dnstools/lib.py
@@ -165,6 +165,8 @@
modifiers = []
mechanisms = []
for part in parts:
+ if part == "":
+ continue
qualifier = None
if part[0] in ["+", "-", "~", "?"]:
qualifier = part[0]
@@ -200,6 +202,8 @@
key = None
for tag in record.split(";")[1:]:
tag = tag.strip(" ")
+ if tag == "":
+ continue
parts = tag.split("=", 1)
if len(parts) != 2:
raise DNSSyntaxError(_("Invalid tag {}").format(tag))
@@ -250,6 +254,8 @@
raise DNSSyntaxError(_("Not a valid DMARC record"))
tags = {}
for tag in record.split(";")[1:]:
+ if tag == "":
+ continue
tag = tag.strip(" ")
parts = tag.split("=")
if len(parts) != 2:
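A possible regression test for the patch above, sketched under the assumption of a pytest-style suite in which `modoboa.dnstools.lib` is importable with Django settings already configured (test names and placement are illustrative, not the project's actual layout):
```python
import pytest

from modoboa.dnstools.lib import DNSSyntaxError, check_spf_syntax


def test_spf_record_with_extra_spaces():
    # Empty parts produced by consecutive spaces are now skipped instead of crashing.
    check_spf_syntax("v=spf1 ip4:192.0.2.0/24  include:_spf.example.com  -all")


def test_spf_record_with_only_spaces_is_rejected():
    # A record with no mechanisms must still be rejected after skipping empty parts.
    with pytest.raises(DNSSyntaxError):
        check_spf_syntax("v=spf1  ")
```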
|
{"golden_diff": "diff --git a/modoboa/dnstools/lib.py b/modoboa/dnstools/lib.py\n--- a/modoboa/dnstools/lib.py\n+++ b/modoboa/dnstools/lib.py\n@@ -165,6 +165,8 @@\n modifiers = []\n mechanisms = []\n for part in parts:\n+ if part == \"\":\n+ continue\n qualifier = None\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\n qualifier = part[0]\n@@ -200,6 +202,8 @@\n key = None\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n+ if tag == \"\":\n+ continue\n parts = tag.split(\"=\", 1)\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n@@ -250,6 +254,8 @@\n raise DNSSyntaxError(_(\"Not a valid DMARC record\"))\n tags = {}\n for tag in record.split(\";\")[1:]:\n+ if tag == \"\":\n+ continue\n tag = tag.strip(\" \")\n parts = tag.split(\"=\")\n if len(parts) != 2:\n", "issue": "SPF parser failure\nParser can fail because of extra space characters.\r\n```python\r\n File \"/srv/modoboa/instance/manage.py\", line 25, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 364, in execute_from_command_line\r\n utility.execute()\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 356, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py\", line 53, in run_from_argv\r\n return super(SubcommandCommand, self).run_from_argv(argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/subcommand/base.py\", line 86, in handle\r\n return command.run_from_argv(argv)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py\", line 275, in handle\r\n self.check_domain(domain, **options)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/admin/management/commands/subcommands/_mx.py\", line 210, in check_domain\r\n domain, \"spf\", ttl)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py\", line 37, in get_or_create_for_domain\r\n record.check_syntax(ttl)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/models.py\", line 74, in check_syntax\r\n result = func(self.value)\r\n File \"/srv/modoboa/env/local/lib/python2.7/site-packages/modoboa/dnstools/lib.py\", line 173, in check_spf_syntax\r\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\r\nIndexError: string index out of range\r\n```\n", "before_files": [{"content": "\"\"\"dnstools library.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport ipaddress\n\nfrom django.utils.translation import ugettext as _\n\nfrom modoboa.admin import lib as admin_lib\n\nfrom . 
import constants\n\n\ndef get_spf_record(domain):\n \"\"\"Return SPF record for domain (if any).\"\"\"\n records = admin_lib.get_dns_records(domain, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=spf1\"):\n return value\n return None\n\n\ndef get_dkim_record(domain, selector):\n \"\"\"Return DKIM records form domain (if any).\"\"\"\n name = \"{}._domainkey.{}\".format(selector, domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DKIM1\"):\n return value\n return None\n\n\ndef get_dmarc_record(domain):\n \"\"\"Return DMARC record for domain (if any).\"\"\"\n name = \"_dmarc.{}\".format(domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DMARC1\"):\n return value\n return None\n\n\ndef _get_simple_record(name):\n \"\"\"We just want to know if name is declared.\"\"\"\n for rdtype in [\"A\", \"CNAME\", \"AAAA\"]:\n records = admin_lib.get_dns_records(name, rdtype)\n if records is not None:\n break\n else:\n return None\n for record in records:\n value = str(record).strip('\"')\n break\n return value\n\n\ndef get_autoconfig_record(domain):\n \"\"\"Return autoconfig record for domain (if any).\"\"\"\n return _get_simple_record(\"autoconfig.{}\".format(domain))\n\n\ndef get_autodiscover_record(domain):\n \"\"\"Return autodiscover record for domain (if any).\"\"\"\n return _get_simple_record(\"autodiscover.{}\".format(domain))\n\n\nclass DNSSyntaxError(Exception):\n \"\"\"Custom exception for DNS errors.\"\"\"\n\n pass\n\n\ndef check_spf_ip4(value):\n \"\"\"Check syntax of ip4 mechanism.\"\"\"\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Wrong ip4 mechanism syntax\"))\n try:\n ipaddress.ip_network(parts[1], False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv4 address format\"))\n\n\ndef check_spf_ip6(value):\n \"\"\"Check syntax of ip6 mechanism.\"\"\"\n if not value.startswith(\"ip6:\"):\n raise DNSSyntaxError(_(\"Wrong ip6 mechanism syntax\"))\n value = value.replace(\"ip6:\", \"\")\n try:\n ipaddress.ip_network(value, False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv6 address format\"))\n\n\ndef _check_domain_and_mask(value, mechanism):\n \"\"\"Check for valid domain / mask.\"\"\"\n domain = None\n mask = None\n if \":\" in value:\n mechanism, domain = value.split(\":\")\n if \"/\" in domain:\n domain, mask = domain.split(\"/\")\n elif \"/\" in value:\n mechanism, mask = value.split(\"/\")\n else:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n if mask and (not mask.isdigit() or int(mask) > 32):\n raise DNSSyntaxError(_(\"Invalid mask found {}\").format(mask))\n\n\ndef check_spf_a(value):\n \"\"\"Check syntax of a mechanism.\"\"\"\n if value == \"a\":\n return\n _check_domain_and_mask(value, \"a\")\n\n\ndef check_spf_mx(value):\n \"\"\"Check syntax of mx mechanism.\"\"\"\n if value == \"mx\":\n return\n _check_domain_and_mask(value, \"mx\")\n\n\ndef _check_simple(value, mechanism):\n \"\"\"Simple check.\"\"\"\n if value == mechanism:\n return\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n\n\ndef check_spf_ptr(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n 
_check_simple(value, \"ptr\")\n\n\ndef check_spf_exists(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n _check_simple(value, \"exists\")\n\n\ndef check_spf_include(value):\n \"\"\"Check syntax of include mechanism.\"\"\"\n _check_simple(value, \"include\")\n\n\ndef check_spf_syntax(record):\n \"\"\"Check if record has a valid SPF syntax.\"\"\"\n if not record.startswith(\"v=spf1\"):\n raise DNSSyntaxError(_(\"Not an SPF record\"))\n parts = record.split(\" \")[1:]\n modifiers = []\n mechanisms = []\n for part in parts:\n qualifier = None\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\n qualifier = part[0]\n part = part[1:]\n if part == \"all\":\n continue\n for mechanism in constants.SPF_MECHANISMS:\n if part.startswith(mechanism):\n globals()[\"check_spf_{}\".format(mechanism)](part)\n mechanisms.append(mechanism)\n break\n else:\n # Look for modifier\n modifier = part.split(\"=\")\n if len(modifier) != 2:\n raise DNSSyntaxError(_(\"Unknown mechanism {}\").format(part))\n if modifier[0] not in [\"redirect\", \"exp\"]:\n raise DNSSyntaxError(_(\"Unknown modifier {}\").format(\n modifier[0]))\n if modifier[0] in modifiers:\n raise DNSSyntaxError(_(\"Duplicate modifier {} found\").format(\n modifier[0]))\n modifiers.append(modifier[0])\n if not len(mechanisms) and not len(modifiers):\n raise DNSSyntaxError(_(\"No mechanism found\"))\n return None\n\n\ndef check_dkim_syntax(record):\n \"\"\"Check if record has a valid DKIM syntax.\"\"\"\n if not record.startswith(\"v=DKIM1\"):\n raise DNSSyntaxError(_(\"Not a valid DKIM record\"))\n key = None\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n parts = tag.split(\"=\", 1)\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name == \"p\":\n key = \"\".join(part.strip('\"') for part in parts[1].split(\" \"))\n if key is None:\n raise DNSSyntaxError(_(\"No key found in record\"))\n return key\n\n\ndef check_dmarc_tag_string_value(tag, value):\n \"\"\"Check if value is valid for tag.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n error = _(\"Wrong value {} for tag {}\").format(value, tag)\n if \"values\" in tdef and value not in tdef[\"values\"]:\n raise DNSSyntaxError(error)\n elif \"regex\" in tdef and tdef[\"regex\"].match(value) is None:\n raise DNSSyntaxError(error)\n\n\ndef check_dmarc_tag(tag, value):\n \"\"\"Check if tag is valid.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n ttype = tdef.get(\"type\", \"string\")\n if ttype == \"list\":\n for svalue in value.split(\",\"):\n check_dmarc_tag_string_value(tag, svalue)\n elif ttype == \"int\":\n error = _(\"Wrong value {} for tag {}:\").format(value, tag)\n try:\n value = int(value)\n except ValueError:\n raise DNSSyntaxError(error + _(\" not an integer\"))\n if \"min_value\" in tdef and value < tdef[\"min_value\"]:\n raise DNSSyntaxError(\n error + _(\" less than {}\").format(tdef[\"min_value\"]))\n if \"max_value\" in tdef and value > tdef[\"max_value\"]:\n raise DNSSyntaxError(\n error + _(\" greater than {}\").format(tdef[\"max_value\"]))\n else:\n check_dmarc_tag_string_value(tag, value)\n\n\ndef check_dmarc_syntax(record):\n \"\"\"Check if record has a valid DMARC syntax.\"\"\"\n if not record.startswith(\"v=DMARC1\"):\n raise DNSSyntaxError(_(\"Not a valid DMARC record\"))\n tags = {}\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n parts = tag.split(\"=\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name not in 
constants.DMARC_TAGS:\n raise DNSSyntaxError(_(\"Unknown tag {}\").format(name))\n value = parts[1].strip(\" \")\n check_dmarc_tag(name, value)\n tags[name] = value\n if \"p\" not in tags:\n raise DNSSyntaxError(_(\"Missing required p tag\"))\n", "path": "modoboa/dnstools/lib.py"}], "after_files": [{"content": "\"\"\"dnstools library.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport ipaddress\n\nfrom django.utils.translation import ugettext as _\n\nfrom modoboa.admin import lib as admin_lib\n\nfrom . import constants\n\n\ndef get_spf_record(domain):\n \"\"\"Return SPF record for domain (if any).\"\"\"\n records = admin_lib.get_dns_records(domain, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=spf1\"):\n return value\n return None\n\n\ndef get_dkim_record(domain, selector):\n \"\"\"Return DKIM records form domain (if any).\"\"\"\n name = \"{}._domainkey.{}\".format(selector, domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DKIM1\"):\n return value\n return None\n\n\ndef get_dmarc_record(domain):\n \"\"\"Return DMARC record for domain (if any).\"\"\"\n name = \"_dmarc.{}\".format(domain)\n records = admin_lib.get_dns_records(name, \"TXT\")\n if records is None:\n return None\n for record in records:\n value = str(record).strip('\"')\n if value.startswith(\"v=DMARC1\"):\n return value\n return None\n\n\ndef _get_simple_record(name):\n \"\"\"We just want to know if name is declared.\"\"\"\n for rdtype in [\"A\", \"CNAME\", \"AAAA\"]:\n records = admin_lib.get_dns_records(name, rdtype)\n if records is not None:\n break\n else:\n return None\n for record in records:\n value = str(record).strip('\"')\n break\n return value\n\n\ndef get_autoconfig_record(domain):\n \"\"\"Return autoconfig record for domain (if any).\"\"\"\n return _get_simple_record(\"autoconfig.{}\".format(domain))\n\n\ndef get_autodiscover_record(domain):\n \"\"\"Return autodiscover record for domain (if any).\"\"\"\n return _get_simple_record(\"autodiscover.{}\".format(domain))\n\n\nclass DNSSyntaxError(Exception):\n \"\"\"Custom exception for DNS errors.\"\"\"\n\n pass\n\n\ndef check_spf_ip4(value):\n \"\"\"Check syntax of ip4 mechanism.\"\"\"\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Wrong ip4 mechanism syntax\"))\n try:\n ipaddress.ip_network(parts[1], False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv4 address format\"))\n\n\ndef check_spf_ip6(value):\n \"\"\"Check syntax of ip6 mechanism.\"\"\"\n if not value.startswith(\"ip6:\"):\n raise DNSSyntaxError(_(\"Wrong ip6 mechanism syntax\"))\n value = value.replace(\"ip6:\", \"\")\n try:\n ipaddress.ip_network(value, False)\n except ValueError:\n raise DNSSyntaxError(_(\"Wrong IPv6 address format\"))\n\n\ndef _check_domain_and_mask(value, mechanism):\n \"\"\"Check for valid domain / mask.\"\"\"\n domain = None\n mask = None\n if \":\" in value:\n mechanism, domain = value.split(\":\")\n if \"/\" in domain:\n domain, mask = domain.split(\"/\")\n elif \"/\" in value:\n mechanism, mask = value.split(\"/\")\n else:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n if mask and (not mask.isdigit() or int(mask) > 32):\n raise DNSSyntaxError(_(\"Invalid mask found {}\").format(mask))\n\n\ndef check_spf_a(value):\n \"\"\"Check syntax of a mechanism.\"\"\"\n if value == \"a\":\n 
return\n _check_domain_and_mask(value, \"a\")\n\n\ndef check_spf_mx(value):\n \"\"\"Check syntax of mx mechanism.\"\"\"\n if value == \"mx\":\n return\n _check_domain_and_mask(value, \"mx\")\n\n\ndef _check_simple(value, mechanism):\n \"\"\"Simple check.\"\"\"\n if value == mechanism:\n return\n parts = value.split(\":\")\n if len(parts) != 2:\n raise DNSSyntaxError(\n _(\"Invalid syntax for {} mechanism\").format(mechanism))\n\n\ndef check_spf_ptr(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n _check_simple(value, \"ptr\")\n\n\ndef check_spf_exists(value):\n \"\"\"Check syntax of ptr mechanism.\"\"\"\n _check_simple(value, \"exists\")\n\n\ndef check_spf_include(value):\n \"\"\"Check syntax of include mechanism.\"\"\"\n _check_simple(value, \"include\")\n\n\ndef check_spf_syntax(record):\n \"\"\"Check if record has a valid SPF syntax.\"\"\"\n if not record.startswith(\"v=spf1\"):\n raise DNSSyntaxError(_(\"Not an SPF record\"))\n parts = record.split(\" \")[1:]\n modifiers = []\n mechanisms = []\n for part in parts:\n if part == \"\":\n continue\n qualifier = None\n if part[0] in [\"+\", \"-\", \"~\", \"?\"]:\n qualifier = part[0]\n part = part[1:]\n if part == \"all\":\n continue\n for mechanism in constants.SPF_MECHANISMS:\n if part.startswith(mechanism):\n globals()[\"check_spf_{}\".format(mechanism)](part)\n mechanisms.append(mechanism)\n break\n else:\n # Look for modifier\n modifier = part.split(\"=\")\n if len(modifier) != 2:\n raise DNSSyntaxError(_(\"Unknown mechanism {}\").format(part))\n if modifier[0] not in [\"redirect\", \"exp\"]:\n raise DNSSyntaxError(_(\"Unknown modifier {}\").format(\n modifier[0]))\n if modifier[0] in modifiers:\n raise DNSSyntaxError(_(\"Duplicate modifier {} found\").format(\n modifier[0]))\n modifiers.append(modifier[0])\n if not len(mechanisms) and not len(modifiers):\n raise DNSSyntaxError(_(\"No mechanism found\"))\n return None\n\n\ndef check_dkim_syntax(record):\n \"\"\"Check if record has a valid DKIM syntax.\"\"\"\n if not record.startswith(\"v=DKIM1\"):\n raise DNSSyntaxError(_(\"Not a valid DKIM record\"))\n key = None\n for tag in record.split(\";\")[1:]:\n tag = tag.strip(\" \")\n if tag == \"\":\n continue\n parts = tag.split(\"=\", 1)\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name == \"p\":\n key = \"\".join(part.strip('\"') for part in parts[1].split(\" \"))\n if key is None:\n raise DNSSyntaxError(_(\"No key found in record\"))\n return key\n\n\ndef check_dmarc_tag_string_value(tag, value):\n \"\"\"Check if value is valid for tag.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n error = _(\"Wrong value {} for tag {}\").format(value, tag)\n if \"values\" in tdef and value not in tdef[\"values\"]:\n raise DNSSyntaxError(error)\n elif \"regex\" in tdef and tdef[\"regex\"].match(value) is None:\n raise DNSSyntaxError(error)\n\n\ndef check_dmarc_tag(tag, value):\n \"\"\"Check if tag is valid.\"\"\"\n tdef = constants.DMARC_TAGS[tag]\n ttype = tdef.get(\"type\", \"string\")\n if ttype == \"list\":\n for svalue in value.split(\",\"):\n check_dmarc_tag_string_value(tag, svalue)\n elif ttype == \"int\":\n error = _(\"Wrong value {} for tag {}:\").format(value, tag)\n try:\n value = int(value)\n except ValueError:\n raise DNSSyntaxError(error + _(\" not an integer\"))\n if \"min_value\" in tdef and value < tdef[\"min_value\"]:\n raise DNSSyntaxError(\n error + _(\" less than {}\").format(tdef[\"min_value\"]))\n if \"max_value\" in tdef and value > tdef[\"max_value\"]:\n 
raise DNSSyntaxError(\n error + _(\" greater than {}\").format(tdef[\"max_value\"]))\n else:\n check_dmarc_tag_string_value(tag, value)\n\n\ndef check_dmarc_syntax(record):\n \"\"\"Check if record has a valid DMARC syntax.\"\"\"\n if not record.startswith(\"v=DMARC1\"):\n raise DNSSyntaxError(_(\"Not a valid DMARC record\"))\n tags = {}\n for tag in record.split(\";\")[1:]:\n if tag == \"\":\n continue\n tag = tag.strip(\" \")\n parts = tag.split(\"=\")\n if len(parts) != 2:\n raise DNSSyntaxError(_(\"Invalid tag {}\").format(tag))\n name = parts[0].strip(\" \")\n if name not in constants.DMARC_TAGS:\n raise DNSSyntaxError(_(\"Unknown tag {}\").format(name))\n value = parts[1].strip(\" \")\n check_dmarc_tag(name, value)\n tags[name] = value\n if \"p\" not in tags:\n raise DNSSyntaxError(_(\"Missing required p tag\"))\n", "path": "modoboa/dnstools/lib.py"}]}
| 3,553 | 262 |
gh_patches_debug_38706
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-1991
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Interface's name isn't necessary to be ended with a digit
Remove the step to convert to integer as it doesn't affect the result.
--- END ISSUE ---
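For context, the code this refers to is in `get_working_ifaces()` (shown in the file listing that follows), which builds its sort key with `int(ifname[-1])` and therefore assumes every interface name ends in its unit digit. A self-contained illustration of why that assumption breaks (the interface names are only examples):
```python
# FreeBSD epair devices, for instance, come in pairs named epair0a / epair0b,
# so the last character is not always a digit and int(ifname[-1]) raises ValueError.
for ifname in ("em0", "epair0a", "epair0b"):
    try:
        print(ifname, "->", int(ifname[-1]))
    except ValueError as err:
        print(ifname, "->", err)   # invalid literal for int() with base 10: 'a'
```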
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/arch/bpf/core.py`
Content:
```
1 # Guillaume Valadon <[email protected]>
2
3 """
4 Scapy *BSD native support - core
5 """
6
7 from __future__ import absolute_import
8 from scapy.config import conf
9 from scapy.error import Scapy_Exception, warning
10 from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
11 from scapy.arch.common import get_if, compile_filter
12 from scapy.consts import LOOPBACK_NAME
13
14 from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
15
16 import os
17 import socket
18 import fcntl
19 import struct
20
21 from ctypes import cdll, cast, pointer
22 from ctypes import c_int, c_ulong, c_char_p
23 from ctypes.util import find_library
24 from scapy.modules.six.moves import range
25
26
27 # ctypes definitions
28
29 LIBC = cdll.LoadLibrary(find_library("libc"))
30 LIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]
31 LIBC.ioctl.restype = c_int
32
33
34 # Addresses manipulation functions
35
36 def get_if_raw_addr(ifname):
37 """Returns the IPv4 address configured on 'ifname', packed with inet_pton.""" # noqa: E501
38
39 # Get ifconfig output
40 try:
41 fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
42 except OSError as msg:
43 warning("Failed to execute ifconfig: (%s)", msg)
44 return b"\0\0\0\0"
45
46 # Get IPv4 addresses
47 addresses = [l for l in fd if l.find("inet ") >= 0]
48 if not addresses:
49 warning("No IPv4 address found on %s !", ifname)
50 return b"\0\0\0\0"
51
52 # Pack the first address
53 address = addresses[0].split(' ')[1]
54 if '/' in address: # NetBSD 8.0
55 address = address.split("/")[0]
56 return socket.inet_pton(socket.AF_INET, address)
57
58
59 def get_if_raw_hwaddr(ifname):
60 """Returns the packed MAC address configured on 'ifname'."""
61
62 NULL_MAC_ADDRESS = b'\x00' * 6
63
64 # Handle the loopback interface separately
65 if ifname == LOOPBACK_NAME:
66 return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)
67
68 # Get ifconfig output
69 try:
70 fd = os.popen("%s %s" % (conf.prog.ifconfig, ifname))
71 except OSError as msg:
72 raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
73
74 # Get MAC addresses
75 addresses = [l for l in fd.readlines() if l.find("ether") >= 0 or
76 l.find("lladdr") >= 0 or
77 l.find("address") >= 0]
78 if not addresses:
79 raise Scapy_Exception("No MAC address found on %s !" % ifname)
80
81 # Pack and return the MAC address
82 mac = addresses[0].split(' ')[1]
83 mac = [chr(int(b, 16)) for b in mac.split(':')]
84 return (ARPHDR_ETHER, ''.join(mac))
85
86
87 # BPF specific functions
88
89 def get_dev_bpf():
90 """Returns an opened BPF file object"""
91
92 # Get the first available BPF handle
93 for bpf in range(256):
94 try:
95 fd = os.open("/dev/bpf%i" % bpf, os.O_RDWR)
96 return (fd, bpf)
97 except OSError:
98 continue
99
100 raise Scapy_Exception("No /dev/bpf handle is available !")
101
102
103 def attach_filter(fd, bpf_filter, iface):
104 """Attach a BPF filter to the BPF file descriptor"""
105 bp = compile_filter(bpf_filter, iface)
106 # Assign the BPF program to the interface
107 ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))
108 if ret < 0:
109 raise Scapy_Exception("Can't attach the BPF filter !")
110
111
112 # Interface manipulation functions
113
114 def get_if_list():
115 """Returns a list containing all network interfaces."""
116
117 # Get ifconfig output
118 try:
119 fd = os.popen("%s -a" % conf.prog.ifconfig)
120 except OSError as msg:
121 raise Scapy_Exception("Failed to execute ifconfig: (%s)" % msg)
122
123 # Get interfaces
124 interfaces = [line[:line.find(':')] for line in fd.readlines()
125 if ": flags" in line.lower()]
126 return interfaces
127
128
129 def get_working_ifaces():
130 """
131 Returns an ordered list of interfaces that could be used with BPF.
132 Note: the order mimics pcap_findalldevs() behavior
133 """
134
135 # Only root is allowed to perform the following ioctl() call
136 if os.getuid() != 0:
137 return []
138
139 # Test all network interfaces
140 interfaces = []
141 for ifname in get_if_list():
142
143 # Unlike pcap_findalldevs(), we do not care of loopback interfaces.
144 if ifname == LOOPBACK_NAME:
145 continue
146
147 # Get interface flags
148 try:
149 result = get_if(ifname, SIOCGIFFLAGS)
150 except IOError:
151 warning("ioctl(SIOCGIFFLAGS) failed on %s !", ifname)
152 continue
153
154 # Convert flags
155 ifflags = struct.unpack("16xH14x", result)[0]
156 if ifflags & 0x1: # IFF_UP
157
158 # Get a BPF handle
159 fd, _ = get_dev_bpf()
160 if fd is None:
161 raise Scapy_Exception("No /dev/bpf are available !")
162
163 # Check if the interface can be used
164 try:
165 fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x", ifname.encode())) # noqa: E501
166 interfaces.append((ifname, int(ifname[-1])))
167 except IOError:
168 pass
169
170 # Close the file descriptor
171 os.close(fd)
172
173 # Sort to mimic pcap_findalldevs() order
174 interfaces.sort(key=lambda elt: elt[1])
175
176 return interfaces
177
178
179 def get_working_if():
180 """Returns the first interface than can be used with BPF"""
181
182 ifaces = get_working_ifaces()
183 if not ifaces:
184 # A better interface will be selected later using the routing table
185 return LOOPBACK_NAME
186 return ifaces[0][0]
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scapy/arch/bpf/core.py b/scapy/arch/bpf/core.py
--- a/scapy/arch/bpf/core.py
+++ b/scapy/arch/bpf/core.py
@@ -5,22 +5,22 @@
"""
from __future__ import absolute_import
-from scapy.config import conf
-from scapy.error import Scapy_Exception, warning
-from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
-from scapy.arch.common import get_if, compile_filter
-from scapy.consts import LOOPBACK_NAME
-
-from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
+from ctypes import cdll, cast, pointer
+from ctypes import c_int, c_ulong, c_char_p
+from ctypes.util import find_library
+import fcntl
import os
+import re
import socket
-import fcntl
import struct
-from ctypes import cdll, cast, pointer
-from ctypes import c_int, c_ulong, c_char_p
-from ctypes.util import find_library
+from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF
+from scapy.arch.common import get_if, compile_filter
+from scapy.config import conf
+from scapy.consts import LOOPBACK_NAME
+from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER
+from scapy.error import Scapy_Exception, warning
from scapy.modules.six.moves import range
@@ -126,6 +126,9 @@
return interfaces
+_IFNUM = re.compile("([0-9]*)([ab]?)$")
+
+
def get_working_ifaces():
"""
Returns an ordered list of interfaces that could be used with BPF.
@@ -156,24 +159,27 @@
if ifflags & 0x1: # IFF_UP
# Get a BPF handle
- fd, _ = get_dev_bpf()
+ fd = get_dev_bpf()[0]
if fd is None:
raise Scapy_Exception("No /dev/bpf are available !")
# Check if the interface can be used
try:
- fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x", ifname.encode())) # noqa: E501
- interfaces.append((ifname, int(ifname[-1])))
+ fcntl.ioctl(fd, BIOCSETIF, struct.pack("16s16x",
+ ifname.encode()))
except IOError:
pass
-
- # Close the file descriptor
- os.close(fd)
+ else:
+ ifnum, ifab = _IFNUM.search(ifname).groups()
+ interfaces.append((ifname, int(ifnum) if ifnum else -1, ifab))
+ finally:
+ # Close the file descriptor
+ os.close(fd)
# Sort to mimic pcap_findalldevs() order
- interfaces.sort(key=lambda elt: elt[1])
+ interfaces.sort(key=lambda elt: (elt[1], elt[2], elt[0]))
- return interfaces
+ return [iface[0] for iface in interfaces]
def get_working_if():
@@ -183,4 +189,4 @@
if not ifaces:
# A better interface will be selected later using the routing table
return LOOPBACK_NAME
- return ifaces[0][0]
+ return ifaces[0]
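A small illustration of how the patched sort key behaves: the `_IFNUM` regex splits off the trailing unit number and the optional `a`/`b` suffix, so names without a trailing digit no longer raise and ordering still mimics `pcap_findalldevs()` (interface names below are examples only):
```python
import re

_IFNUM = re.compile("([0-9]*)([ab]?)$")

for name in ("em1", "em0", "epair0b", "epair0a", "bridge"):
    ifnum, ifab = _IFNUM.search(name).groups()
    key = (int(ifnum) if ifnum else -1, ifab, name)
    print(name, key)
# Sorting on these keys puts 'bridge' (no unit number) first, then em0,
# then epair0a before epair0b, and em1 last.
```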
|
{"golden_diff": "diff --git a/scapy/arch/bpf/core.py b/scapy/arch/bpf/core.py\n--- a/scapy/arch/bpf/core.py\n+++ b/scapy/arch/bpf/core.py\n@@ -5,22 +5,22 @@\n \"\"\"\n \n from __future__ import absolute_import\n-from scapy.config import conf\n-from scapy.error import Scapy_Exception, warning\n-from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\n-from scapy.arch.common import get_if, compile_filter\n-from scapy.consts import LOOPBACK_NAME\n-\n-from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n \n+from ctypes import cdll, cast, pointer\n+from ctypes import c_int, c_ulong, c_char_p\n+from ctypes.util import find_library\n+import fcntl\n import os\n+import re\n import socket\n-import fcntl\n import struct\n \n-from ctypes import cdll, cast, pointer\n-from ctypes import c_int, c_ulong, c_char_p\n-from ctypes.util import find_library\n+from scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n+from scapy.arch.common import get_if, compile_filter\n+from scapy.config import conf\n+from scapy.consts import LOOPBACK_NAME\n+from scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\n+from scapy.error import Scapy_Exception, warning\n from scapy.modules.six.moves import range\n \n \n@@ -126,6 +126,9 @@\n return interfaces\n \n \n+_IFNUM = re.compile(\"([0-9]*)([ab]?)$\")\n+\n+\n def get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n@@ -156,24 +159,27 @@\n if ifflags & 0x1: # IFF_UP\n \n # Get a BPF handle\n- fd, _ = get_dev_bpf()\n+ fd = get_dev_bpf()[0]\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n \n # Check if the interface can be used\n try:\n- fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode())) # noqa: E501\n- interfaces.append((ifname, int(ifname[-1])))\n+ fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\",\n+ ifname.encode()))\n except IOError:\n pass\n-\n- # Close the file descriptor\n- os.close(fd)\n+ else:\n+ ifnum, ifab = _IFNUM.search(ifname).groups()\n+ interfaces.append((ifname, int(ifnum) if ifnum else -1, ifab))\n+ finally:\n+ # Close the file descriptor\n+ os.close(fd)\n \n # Sort to mimic pcap_findalldevs() order\n- interfaces.sort(key=lambda elt: elt[1])\n+ interfaces.sort(key=lambda elt: (elt[1], elt[2], elt[0]))\n \n- return interfaces\n+ return [iface[0] for iface in interfaces]\n \n \n def get_working_if():\n@@ -183,4 +189,4 @@\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n- return ifaces[0][0]\n+ return ifaces[0]\n", "issue": "Interface's name isn't necessary to be ended with a digit\nRemove the step to convert to integer as it doesn't affect the result.\n", "before_files": [{"content": "# Guillaume Valadon <[email protected]>\n\n\"\"\"\nScapy *BSD native support - core\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom scapy.config import conf\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\nfrom scapy.arch.common import get_if, compile_filter\nfrom scapy.consts import LOOPBACK_NAME\n\nfrom scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\n\nimport os\nimport socket\nimport fcntl\nimport struct\n\nfrom ctypes import cdll, cast, pointer\nfrom ctypes import c_int, c_ulong, c_char_p\nfrom ctypes.util import find_library\nfrom scapy.modules.six.moves import range\n\n\n# ctypes definitions\n\nLIBC = cdll.LoadLibrary(find_library(\"libc\"))\nLIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]\nLIBC.ioctl.restype = c_int\n\n\n# 
Addresses manipulation functions\n\ndef get_if_raw_addr(ifname):\n \"\"\"Returns the IPv4 address configured on 'ifname', packed with inet_pton.\"\"\" # noqa: E501\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n warning(\"Failed to execute ifconfig: (%s)\", msg)\n return b\"\\0\\0\\0\\0\"\n\n # Get IPv4 addresses\n addresses = [l for l in fd if l.find(\"inet \") >= 0]\n if not addresses:\n warning(\"No IPv4 address found on %s !\", ifname)\n return b\"\\0\\0\\0\\0\"\n\n # Pack the first address\n address = addresses[0].split(' ')[1]\n if '/' in address: # NetBSD 8.0\n address = address.split(\"/\")[0]\n return socket.inet_pton(socket.AF_INET, address)\n\n\ndef get_if_raw_hwaddr(ifname):\n \"\"\"Returns the packed MAC address configured on 'ifname'.\"\"\"\n\n NULL_MAC_ADDRESS = b'\\x00' * 6\n\n # Handle the loopback interface separately\n if ifname == LOOPBACK_NAME:\n return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get MAC addresses\n addresses = [l for l in fd.readlines() if l.find(\"ether\") >= 0 or\n l.find(\"lladdr\") >= 0 or\n l.find(\"address\") >= 0]\n if not addresses:\n raise Scapy_Exception(\"No MAC address found on %s !\" % ifname)\n\n # Pack and return the MAC address\n mac = addresses[0].split(' ')[1]\n mac = [chr(int(b, 16)) for b in mac.split(':')]\n return (ARPHDR_ETHER, ''.join(mac))\n\n\n# BPF specific functions\n\ndef get_dev_bpf():\n \"\"\"Returns an opened BPF file object\"\"\"\n\n # Get the first available BPF handle\n for bpf in range(256):\n try:\n fd = os.open(\"/dev/bpf%i\" % bpf, os.O_RDWR)\n return (fd, bpf)\n except OSError:\n continue\n\n raise Scapy_Exception(\"No /dev/bpf handle is available !\")\n\n\ndef attach_filter(fd, bpf_filter, iface):\n \"\"\"Attach a BPF filter to the BPF file descriptor\"\"\"\n bp = compile_filter(bpf_filter, iface)\n # Assign the BPF program to the interface\n ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))\n if ret < 0:\n raise Scapy_Exception(\"Can't attach the BPF filter !\")\n\n\n# Interface manipulation functions\n\ndef get_if_list():\n \"\"\"Returns a list containing all network interfaces.\"\"\"\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s -a\" % conf.prog.ifconfig)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get interfaces\n interfaces = [line[:line.find(':')] for line in fd.readlines()\n if \": flags\" in line.lower()]\n return interfaces\n\n\ndef get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n Note: the order mimics pcap_findalldevs() behavior\n \"\"\"\n\n # Only root is allowed to perform the following ioctl() call\n if os.getuid() != 0:\n return []\n\n # Test all network interfaces\n interfaces = []\n for ifname in get_if_list():\n\n # Unlike pcap_findalldevs(), we do not care of loopback interfaces.\n if ifname == LOOPBACK_NAME:\n continue\n\n # Get interface flags\n try:\n result = get_if(ifname, SIOCGIFFLAGS)\n except IOError:\n warning(\"ioctl(SIOCGIFFLAGS) failed on %s !\", ifname)\n continue\n\n # Convert flags\n ifflags = struct.unpack(\"16xH14x\", result)[0]\n if ifflags & 0x1: # IFF_UP\n\n # Get a BPF handle\n fd, _ = get_dev_bpf()\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n\n # Check if the 
interface can be used\n try:\n fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\", ifname.encode())) # noqa: E501\n interfaces.append((ifname, int(ifname[-1])))\n except IOError:\n pass\n\n # Close the file descriptor\n os.close(fd)\n\n # Sort to mimic pcap_findalldevs() order\n interfaces.sort(key=lambda elt: elt[1])\n\n return interfaces\n\n\ndef get_working_if():\n \"\"\"Returns the first interface than can be used with BPF\"\"\"\n\n ifaces = get_working_ifaces()\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n return ifaces[0][0]\n", "path": "scapy/arch/bpf/core.py"}], "after_files": [{"content": "# Guillaume Valadon <[email protected]>\n\n\"\"\"\nScapy *BSD native support - core\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom ctypes import cdll, cast, pointer\nfrom ctypes import c_int, c_ulong, c_char_p\nfrom ctypes.util import find_library\nimport fcntl\nimport os\nimport re\nimport socket\nimport struct\n\nfrom scapy.arch.bpf.consts import BIOCSETF, SIOCGIFFLAGS, BIOCSETIF\nfrom scapy.arch.common import get_if, compile_filter\nfrom scapy.config import conf\nfrom scapy.consts import LOOPBACK_NAME\nfrom scapy.data import ARPHDR_LOOPBACK, ARPHDR_ETHER\nfrom scapy.error import Scapy_Exception, warning\nfrom scapy.modules.six.moves import range\n\n\n# ctypes definitions\n\nLIBC = cdll.LoadLibrary(find_library(\"libc\"))\nLIBC.ioctl.argtypes = [c_int, c_ulong, c_char_p]\nLIBC.ioctl.restype = c_int\n\n\n# Addresses manipulation functions\n\ndef get_if_raw_addr(ifname):\n \"\"\"Returns the IPv4 address configured on 'ifname', packed with inet_pton.\"\"\" # noqa: E501\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n warning(\"Failed to execute ifconfig: (%s)\", msg)\n return b\"\\0\\0\\0\\0\"\n\n # Get IPv4 addresses\n addresses = [l for l in fd if l.find(\"inet \") >= 0]\n if not addresses:\n warning(\"No IPv4 address found on %s !\", ifname)\n return b\"\\0\\0\\0\\0\"\n\n # Pack the first address\n address = addresses[0].split(' ')[1]\n if '/' in address: # NetBSD 8.0\n address = address.split(\"/\")[0]\n return socket.inet_pton(socket.AF_INET, address)\n\n\ndef get_if_raw_hwaddr(ifname):\n \"\"\"Returns the packed MAC address configured on 'ifname'.\"\"\"\n\n NULL_MAC_ADDRESS = b'\\x00' * 6\n\n # Handle the loopback interface separately\n if ifname == LOOPBACK_NAME:\n return (ARPHDR_LOOPBACK, NULL_MAC_ADDRESS)\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s %s\" % (conf.prog.ifconfig, ifname))\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get MAC addresses\n addresses = [l for l in fd.readlines() if l.find(\"ether\") >= 0 or\n l.find(\"lladdr\") >= 0 or\n l.find(\"address\") >= 0]\n if not addresses:\n raise Scapy_Exception(\"No MAC address found on %s !\" % ifname)\n\n # Pack and return the MAC address\n mac = addresses[0].split(' ')[1]\n mac = [chr(int(b, 16)) for b in mac.split(':')]\n return (ARPHDR_ETHER, ''.join(mac))\n\n\n# BPF specific functions\n\ndef get_dev_bpf():\n \"\"\"Returns an opened BPF file object\"\"\"\n\n # Get the first available BPF handle\n for bpf in range(256):\n try:\n fd = os.open(\"/dev/bpf%i\" % bpf, os.O_RDWR)\n return (fd, bpf)\n except OSError:\n continue\n\n raise Scapy_Exception(\"No /dev/bpf handle is available !\")\n\n\ndef attach_filter(fd, bpf_filter, iface):\n \"\"\"Attach a BPF filter to the BPF file descriptor\"\"\"\n bp = compile_filter(bpf_filter, 
iface)\n # Assign the BPF program to the interface\n ret = LIBC.ioctl(c_int(fd), BIOCSETF, cast(pointer(bp), c_char_p))\n if ret < 0:\n raise Scapy_Exception(\"Can't attach the BPF filter !\")\n\n\n# Interface manipulation functions\n\ndef get_if_list():\n \"\"\"Returns a list containing all network interfaces.\"\"\"\n\n # Get ifconfig output\n try:\n fd = os.popen(\"%s -a\" % conf.prog.ifconfig)\n except OSError as msg:\n raise Scapy_Exception(\"Failed to execute ifconfig: (%s)\" % msg)\n\n # Get interfaces\n interfaces = [line[:line.find(':')] for line in fd.readlines()\n if \": flags\" in line.lower()]\n return interfaces\n\n\n_IFNUM = re.compile(\"([0-9]*)([ab]?)$\")\n\n\ndef get_working_ifaces():\n \"\"\"\n Returns an ordered list of interfaces that could be used with BPF.\n Note: the order mimics pcap_findalldevs() behavior\n \"\"\"\n\n # Only root is allowed to perform the following ioctl() call\n if os.getuid() != 0:\n return []\n\n # Test all network interfaces\n interfaces = []\n for ifname in get_if_list():\n\n # Unlike pcap_findalldevs(), we do not care of loopback interfaces.\n if ifname == LOOPBACK_NAME:\n continue\n\n # Get interface flags\n try:\n result = get_if(ifname, SIOCGIFFLAGS)\n except IOError:\n warning(\"ioctl(SIOCGIFFLAGS) failed on %s !\", ifname)\n continue\n\n # Convert flags\n ifflags = struct.unpack(\"16xH14x\", result)[0]\n if ifflags & 0x1: # IFF_UP\n\n # Get a BPF handle\n fd = get_dev_bpf()[0]\n if fd is None:\n raise Scapy_Exception(\"No /dev/bpf are available !\")\n\n # Check if the interface can be used\n try:\n fcntl.ioctl(fd, BIOCSETIF, struct.pack(\"16s16x\",\n ifname.encode()))\n except IOError:\n pass\n else:\n ifnum, ifab = _IFNUM.search(ifname).groups()\n interfaces.append((ifname, int(ifnum) if ifnum else -1, ifab))\n finally:\n # Close the file descriptor\n os.close(fd)\n\n # Sort to mimic pcap_findalldevs() order\n interfaces.sort(key=lambda elt: (elt[1], elt[2], elt[0]))\n\n return [iface[0] for iface in interfaces]\n\n\ndef get_working_if():\n \"\"\"Returns the first interface than can be used with BPF\"\"\"\n\n ifaces = get_working_ifaces()\n if not ifaces:\n # A better interface will be selected later using the routing table\n return LOOPBACK_NAME\n return ifaces[0]\n", "path": "scapy/arch/bpf/core.py"}]}
| 2,188 | 782 |
gh_patches_debug_30616
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-897
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing fiat currency does not change manually tracked balances value column title
## Problem Definition
Seen in v1.4.0 by @cryptomole1
Changing the FIAT currency via the drop-down menu if the user has manually tracked balances changes the value but not the title of the column. The title of the column is stuck to being `USD Value`.
I can confirm the problem, just tested it.
## Task
- Fix it
- Write a unit test
--- END ISSUE ---
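For context, the backend side shown in the listing below always computes and names the field `usd_value`, so whichever component renders the table is responsible both for converting that number into the selected fiat currency and for the column title. Since the task also asks for a unit test, here is a sketch of one around the valuation step, with an assumed `database` fixture and a mocked price lookup (the real rotki test helpers and fixtures may differ):
```python
from unittest.mock import patch

from rotkehlchen.balances.manual import get_manually_tracked_balances
from rotkehlchen.fval import FVal


def test_manually_tracked_balances_usd_value(database):  # `database` fixture is assumed
    with patch(
        'rotkehlchen.inquirer.Inquirer.find_usd_price',
        return_value=FVal('1.5'),
    ):
        entries = get_manually_tracked_balances(database)
    for entry in entries:
        # usd_value should be price * amount, as computed in manual.py
        assert entry.usd_value == FVal('1.5') * entry.amount
```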
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/balances/manual.py`
Content:
```
1 from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
2
3 from rotkehlchen.assets.asset import Asset
4 from rotkehlchen.errors import InputError
5 from rotkehlchen.fval import FVal
6 from rotkehlchen.inquirer import Inquirer
7 from rotkehlchen.typing import Location
8
9 if TYPE_CHECKING:
10 from rotkehlchen.db.dbhandler import DBHandler
11
12
13 class ManuallyTrackedBalance(NamedTuple):
14 asset: Asset
15 label: str
16 amount: FVal
17 location: Location
18 tags: Optional[List[str]]
19
20
21 class ManuallyTrackedBalanceWithValue(NamedTuple):
22 # NamedTuples can't use inheritance. Make sure this has same fields as
23 # ManuallyTrackedBalance until usd_value
24 asset: Asset
25 label: str
26 amount: FVal
27 location: Location
28 tags: Optional[List[str]]
29 usd_value: FVal
30
31
32 def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
33 """Gets the manually tracked balances
34
35 May raise:
36 - RemoteError if there is a problem querying for the current price of an asset
37 """
38 balances = db.get_manually_tracked_balances()
39 balances_with_value = []
40 for entry in balances:
41 price = Inquirer().find_usd_price(entry.asset)
42 # https://github.com/python/mypy/issues/2582 --> for the type ignore below
43 balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
44 **entry._asdict(),
45 usd_value=price * entry.amount,
46 ))
47
48 return balances_with_value
49
50
51 def add_manually_tracked_balances(
52 db: 'DBHandler',
53 data: List[ManuallyTrackedBalance],
54 ) -> None:
55 """Adds manually tracked balances
56
57 May raise:
58 - InputError if any of the given balance entry labels already exist in the DB
59 - TagConstraintError if any of the given manually tracked balances contain unknown tags.
60 """
61 if len(data) == 0:
62 raise InputError('Empty list of manually tracked balances to add was given')
63 db.ensure_tags_exist(
64 given_data=data,
65 action='adding',
66 data_type='manually tracked balances',
67 )
68 db.add_manually_tracked_balances(data=data)
69
70
71 def edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:
72 """Edits manually tracked balances
73
74 May raise:
75 - InputError if the given balances list is empty or if
76 any of the balance entry labels to edit do not exist in the DB.
77 - TagConstraintError if any of the given balance data contain unknown tags.
78 """
79 if len(data) == 0:
80 raise InputError('Empty list of manually tracked balances to edit was given')
81 db.ensure_tags_exist(
82 given_data=data,
83 action='editing',
84 data_type='manually tracked balances',
85 )
86 db.edit_manually_tracked_balances(data)
87
88
89 def account_for_manually_tracked_balances(
90 db: 'DBHandler',
91 balances: Dict[str, Any],
92 ) -> Dict[str, Any]:
93 """Given the big balances mapping adds to it all manually tracked balances"""
94 manually_tracked_balances = get_manually_tracked_balances(db)
95 for m_entry in manually_tracked_balances:
96 location_str = str(m_entry.location)
97 if location_str not in balances:
98 balances[location_str] = {}
99 balances[location_str][m_entry.asset.identifier] = {
100 'amount': m_entry.amount,
101 'usd_value': m_entry.usd_value,
102 }
103 else:
104 if m_entry.asset.identifier not in balances[location_str]:
105 balances[location_str][m_entry.asset.identifier] = {
106 'amount': m_entry.amount,
107 'usd_value': m_entry.usd_value,
108 }
109 else:
110 old_amount = balances[location_str][m_entry.asset.identifier]['amount']
111 old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']
112 balances[location_str][m_entry.asset.identifier] = {
113 'amount': old_amount + m_entry.amount,
114 'usd_value': old_usd_value + m_entry.usd_value,
115 }
116
117 return balances
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py
--- a/rotkehlchen/balances/manual.py
+++ b/rotkehlchen/balances/manual.py
@@ -1,10 +1,11 @@
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from rotkehlchen.assets.asset import Asset
-from rotkehlchen.errors import InputError
+from rotkehlchen.constants.misc import ZERO
+from rotkehlchen.errors import InputError, RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen.inquirer import Inquirer
-from rotkehlchen.typing import Location
+from rotkehlchen.typing import Location, Price
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
@@ -30,15 +31,18 @@
def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
- """Gets the manually tracked balances
-
- May raise:
- - RemoteError if there is a problem querying for the current price of an asset
- """
+ """Gets the manually tracked balances"""
balances = db.get_manually_tracked_balances()
balances_with_value = []
for entry in balances:
- price = Inquirer().find_usd_price(entry.asset)
+ try:
+ price = Inquirer().find_usd_price(entry.asset)
+ except RemoteError as e:
+ db.msg_aggregator.add_warning(
+ f'Could not find price for {entry.asset.identifier} during '
+ f'manually tracked balance querying due to {str(e)}',
+ )
+ price = Price(ZERO)
# https://github.com/python/mypy/issues/2582 --> for the type ignore below
balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
**entry._asdict(),
|
{"golden_diff": "diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py\n--- a/rotkehlchen/balances/manual.py\n+++ b/rotkehlchen/balances/manual.py\n@@ -1,10 +1,11 @@\n from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n \n from rotkehlchen.assets.asset import Asset\n-from rotkehlchen.errors import InputError\n+from rotkehlchen.constants.misc import ZERO\n+from rotkehlchen.errors import InputError, RemoteError\n from rotkehlchen.fval import FVal\n from rotkehlchen.inquirer import Inquirer\n-from rotkehlchen.typing import Location\n+from rotkehlchen.typing import Location, Price\n \n if TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n@@ -30,15 +31,18 @@\n \n \n def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n- \"\"\"Gets the manually tracked balances\n-\n- May raise:\n- - RemoteError if there is a problem querying for the current price of an asset\n- \"\"\"\n+ \"\"\"Gets the manually tracked balances\"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n- price = Inquirer().find_usd_price(entry.asset)\n+ try:\n+ price = Inquirer().find_usd_price(entry.asset)\n+ except RemoteError as e:\n+ db.msg_aggregator.add_warning(\n+ f'Could not find price for {entry.asset.identifier} during '\n+ f'manually tracked balance querying due to {str(e)}',\n+ )\n+ price = Price(ZERO)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n", "issue": "Changing fiat currency does not change manually tracked balances value column title\n## Problem Definition\r\n\r\nSeen in v1.4.0 by @cryptomole1\r\n\r\nChanging the FIAT currency via the drop-down menu if the user has manually tracked balances changes the value but not the title of the column. The title of the column is stuck to being `USD Value`.\r\n\r\nI can confirm the problem, just tested it.\r\n\r\n## Task\r\n\r\n- Fix it\r\n- Write a unit test\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.errors import InputError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.typing import Location\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\nclass ManuallyTrackedBalance(NamedTuple):\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n\n\nclass ManuallyTrackedBalanceWithValue(NamedTuple):\n # NamedTuples can't use inheritance. 
Make sure this has same fields as\n # ManuallyTrackedBalance until usd_value\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n usd_value: FVal\n\n\ndef get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n \"\"\"Gets the manually tracked balances\n\n May raise:\n - RemoteError if there is a problem querying for the current price of an asset\n \"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n price = Inquirer().find_usd_price(entry.asset)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n usd_value=price * entry.amount,\n ))\n\n return balances_with_value\n\n\ndef add_manually_tracked_balances(\n db: 'DBHandler',\n data: List[ManuallyTrackedBalance],\n) -> None:\n \"\"\"Adds manually tracked balances\n\n May raise:\n - InputError if any of the given balance entry labels already exist in the DB\n - TagConstraintError if any of the given manually tracked balances contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to add was given')\n db.ensure_tags_exist(\n given_data=data,\n action='adding',\n data_type='manually tracked balances',\n )\n db.add_manually_tracked_balances(data=data)\n\n\ndef edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:\n \"\"\"Edits manually tracked balances\n\n May raise:\n - InputError if the given balances list is empty or if\n any of the balance entry labels to edit do not exist in the DB.\n - TagConstraintError if any of the given balance data contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to edit was given')\n db.ensure_tags_exist(\n given_data=data,\n action='editing',\n data_type='manually tracked balances',\n )\n db.edit_manually_tracked_balances(data)\n\n\ndef account_for_manually_tracked_balances(\n db: 'DBHandler',\n balances: Dict[str, Any],\n) -> Dict[str, Any]:\n \"\"\"Given the big balances mapping adds to it all manually tracked balances\"\"\"\n manually_tracked_balances = get_manually_tracked_balances(db)\n for m_entry in manually_tracked_balances:\n location_str = str(m_entry.location)\n if location_str not in balances:\n balances[location_str] = {}\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n if m_entry.asset.identifier not in balances[location_str]:\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n old_amount = balances[location_str][m_entry.asset.identifier]['amount']\n old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': old_amount + m_entry.amount,\n 'usd_value': old_usd_value + m_entry.usd_value,\n }\n\n return balances\n", "path": "rotkehlchen/balances/manual.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.constants.misc import ZERO\nfrom rotkehlchen.errors import InputError, RemoteError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.typing import Location, Price\n\nif TYPE_CHECKING:\n from 
rotkehlchen.db.dbhandler import DBHandler\n\n\nclass ManuallyTrackedBalance(NamedTuple):\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n\n\nclass ManuallyTrackedBalanceWithValue(NamedTuple):\n # NamedTuples can't use inheritance. Make sure this has same fields as\n # ManuallyTrackedBalance until usd_value\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n usd_value: FVal\n\n\ndef get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n \"\"\"Gets the manually tracked balances\"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n try:\n price = Inquirer().find_usd_price(entry.asset)\n except RemoteError as e:\n db.msg_aggregator.add_warning(\n f'Could not find price for {entry.asset.identifier} during '\n f'manually tracked balance querying due to {str(e)}',\n )\n price = Price(ZERO)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n usd_value=price * entry.amount,\n ))\n\n return balances_with_value\n\n\ndef add_manually_tracked_balances(\n db: 'DBHandler',\n data: List[ManuallyTrackedBalance],\n) -> None:\n \"\"\"Adds manually tracked balances\n\n May raise:\n - InputError if any of the given balance entry labels already exist in the DB\n - TagConstraintError if any of the given manually tracked balances contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to add was given')\n db.ensure_tags_exist(\n given_data=data,\n action='adding',\n data_type='manually tracked balances',\n )\n db.add_manually_tracked_balances(data=data)\n\n\ndef edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:\n \"\"\"Edits manually tracked balances\n\n May raise:\n - InputError if the given balances list is empty or if\n any of the balance entry labels to edit do not exist in the DB.\n - TagConstraintError if any of the given balance data contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to edit was given')\n db.ensure_tags_exist(\n given_data=data,\n action='editing',\n data_type='manually tracked balances',\n )\n db.edit_manually_tracked_balances(data)\n\n\ndef account_for_manually_tracked_balances(\n db: 'DBHandler',\n balances: Dict[str, Any],\n) -> Dict[str, Any]:\n \"\"\"Given the big balances mapping adds to it all manually tracked balances\"\"\"\n manually_tracked_balances = get_manually_tracked_balances(db)\n for m_entry in manually_tracked_balances:\n location_str = str(m_entry.location)\n if location_str not in balances:\n balances[location_str] = {}\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n if m_entry.asset.identifier not in balances[location_str]:\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n old_amount = balances[location_str][m_entry.asset.identifier]['amount']\n old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': old_amount + m_entry.amount,\n 'usd_value': old_usd_value + m_entry.usd_value,\n }\n\n return balances\n", "path": "rotkehlchen/balances/manual.py"}]}
| 1,551 | 439 |
gh_patches_debug_31127
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bring back the list of orders in the admin panel
As I recall, we used to have a list of orders in the admin panel, but it was broken and ripped out several times. Bring it back.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/admin.py`
Content:
```
1 from django.contrib import admin
2 from django.contrib.admin.widgets import FilteredSelectMultiple
3 from django.contrib.redirects.models import Redirect
4 from django.db import models as django_models
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.translation import ugettext_lazy as _
8
9 from django_select2.forms import ModelSelect2Widget
10
11 from pages.models import CustomPage, FlatPage, PageTemplate
12 from generic_admin import inlines, models, sites
13
14 from shopelectro import models as se_models
15 from shopelectro.views.admin import TableEditor
16
17
18 class SEAdminSite(sites.SiteWithTableEditor):
19
20 site_header = 'Shopelectro administration'
21 table_editor_view = TableEditor
22
23
24 def prepare_has_filter_queryset(value, db_table, queryset):
25 if not value:
26 return
27
28 query = '{}__tags__isnull'.format(db_table)
29
30 # Use brackets, because `Explicit is better than implicit`.
31 return queryset.filter(**{query: value != 'yes'})
32
33
34 class HasTagsFilter(admin.SimpleListFilter):
35
36 product_model = se_models.Product
37 title = _('has tags')
38 parameter_name = 'has_tags'
39
40 def lookups(self, request, model_admin):
41 return (
42 ('yes', _('Has tags')),
43 ('no', _('Has no tags')),
44 )
45
46 def queryset(self, request, queryset):
47 return prepare_has_filter_queryset(
48 self.value(),
49 self.product_model._meta.db_table,
50 queryset
51 )
52
53
54 class HasCategoryFilter(admin.SimpleListFilter):
55
56 product_model = se_models.Product
57 title = _('has category')
58 parameter_name = 'has_category'
59
60 def lookups(self, request, model_admin):
61 return (
62 ('yes', _('Has category')),
63 ('no', _('Has no category')),
64 )
65
66 def queryset(self, request, queryset):
67 return prepare_has_filter_queryset(
68 self.value(),
69 self.product_model._meta.db_table,
70 queryset
71 )
72
73
74 class TagInline(admin.StackedInline):
75
76 model = se_models.Tag
77 extra = 0
78
79
80 class CategoryInline(inlines.CategoryInline):
81
82 model = se_models.Category
83
84 fieldsets = ((None, {
85 'classes': ('primary-chars', ),
86 'fields': (
87 ('name', 'id'),
88 ('parent', 'correct_parent_id'),
89 )
90 }),)
91
92
93 class ProductInline(inlines.ProductInline):
94
95 model = se_models.Product
96
97 formfield_overrides = {
98 django_models.ManyToManyField: {
99 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)
100 },
101 }
102
103 fieldsets = ((None, {
104 'classes': ('primary-chars', ),
105 'fields': (
106 ('name', 'id'),
107 ('category', 'correct_category_id'),
108 ('price', 'in_stock'),
109 'is_popular',
110 ('purchase_price', 'wholesale_small'),
111 ('wholesale_medium', 'wholesale_large'),
112 'tags',
113 )
114 }),)
115
116 def formfield_for_dbfield(self, db_field, **kwargs):
117 if db_field.name == 'category':
118 kwargs['widget'] = ModelSelect2Widget(
119 model=se_models.Category,
120 search_fields=[
121 'name__icontains',
122 'pk__startswith',
123 ],
124 )
125 return super(ProductInline, self).formfield_for_dbfield(
126 db_field,
127 **kwargs,
128 )
129
130
131 class CategoryPageAdmin(models.CategoryPageAdmin):
132
133 add = False
134 delete = False
135 inlines = [CategoryInline, inlines.ImageInline]
136
137 def get_queryset(self, request):
138 return (
139 super(CategoryPageAdmin, self)
140 .get_queryset(request)
141 .select_related('shopelectro_category')
142 )
143
144
145 class ProductPageAdmin(models.ProductPageAdmin):
146
147 add = False
148 delete = False
149 category_page_model = se_models.CategoryPage
150 list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]
151 inlines = [ProductInline, inlines.ImageInline]
152 search_fields = [
153 'shopelectro_product__vendor_code', 'name', 'slug',
154 ]
155
156 def model_id(self, obj):
157 return obj.model.vendor_code
158
159 model_id.short_description = _('Vendor code')
160 model_id.admin_order_field = 'shopelectro_product__vendor_code'
161
162 def get_queryset(self, request):
163 return (
164 super(ProductPageAdmin, self)
165 .get_queryset(request)
166 .select_related('shopelectro_product')
167 )
168
169
170 class ProductFeedbackPageAdmin(admin.ModelAdmin):
171
172 add = False
173 delete = False
174 list_filter = ['rating']
175 list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']
176
177 def links(self, obj):
178 return format_html(
179 '''
180             <a href="{url}" class="field-link" title="Посмотреть на сайте" target="_blank">
181 <i class="fa fa-link" aria-hidden="true"></i>
182 </a>
183 '''.format(url=obj.product.url))
184
185 links.short_description = _('Link')
186
187 def get_queryset(self, request):
188 return (
189 super(ProductFeedbackPageAdmin, self)
190 .get_queryset(request)
191 .select_related('product')
192 )
193
194
195 class TagGroupAdmin(admin.ModelAdmin):
196
197 list_display = ['id', 'name', 'position', 'count_tags']
198 list_display_links = ['name']
199
200 inlines = [TagInline]
201
202 def get_queryset(self, request):
203 return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')
204
205 def count_tags(self, obj):
206 return obj.tags.count()
207
208
209 class TagAdmin(admin.ModelAdmin):
210
211 search_fields = ['id', 'name']
212 list_display = ['id', 'name', 'position', 'custom_group']
213 list_display_links = ['name']
214
215 def get_queryset(self, request):
216 return super(TagAdmin, self).get_queryset(request).prefetch_related('group')
217
218 def custom_group(self, obj):
219 group = obj.group
220
221 return format_html(
222 '<a href="{url}">{group}</a>',
223 group=group,
224 url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))
225 )
226
227 custom_group.admin_order_field = 'group'
228 custom_group.short_description = _('Group')
229
230
231 se_admin = SEAdminSite(name='se_admin')
232 se_admin.register(CustomPage, models.CustomPageAdmin)
233 se_admin.register(FlatPage, models.FlatPageAdmin)
234 se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)
235
236 se_admin.register(se_models.CategoryPage, CategoryPageAdmin)
237 se_admin.register(se_models.ProductPage, ProductPageAdmin)
238 se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)
239 se_admin.register(se_models.TagGroup, TagGroupAdmin)
240 se_admin.register(se_models.Tag, TagAdmin)
241
242 se_admin.register(Redirect)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/admin.py b/shopelectro/admin.py
--- a/shopelectro/admin.py
+++ b/shopelectro/admin.py
@@ -8,8 +8,9 @@
from django_select2.forms import ModelSelect2Widget
+from ecommerce.models import Position
from pages.models import CustomPage, FlatPage, PageTemplate
-from generic_admin import inlines, models, sites
+from generic_admin import inlines, mixins, models, sites
from shopelectro import models as se_models
from shopelectro.views.admin import TableEditor
@@ -128,6 +129,11 @@
)
+class PositionInline(admin.StackedInline):
+
+ model = Position
+
+
class CategoryPageAdmin(models.CategoryPageAdmin):
add = False
@@ -228,12 +234,21 @@
custom_group.short_description = _('Group')
+class OrderAdmin(mixins.PermissionsControl):
+
+ add = False
+ inlines = [PositionInline]
+ list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']
+ search_fields = ['name', 'email', 'phone']
+ list_display_links = ['name']
+
se_admin = SEAdminSite(name='se_admin')
se_admin.register(CustomPage, models.CustomPageAdmin)
se_admin.register(FlatPage, models.FlatPageAdmin)
se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)
se_admin.register(se_models.CategoryPage, CategoryPageAdmin)
+se_admin.register(se_models.Order, OrderAdmin)
se_admin.register(se_models.ProductPage, ProductPageAdmin)
se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)
se_admin.register(se_models.TagGroup, TagGroupAdmin)
|
{"golden_diff": "diff --git a/shopelectro/admin.py b/shopelectro/admin.py\n--- a/shopelectro/admin.py\n+++ b/shopelectro/admin.py\n@@ -8,8 +8,9 @@\n \n from django_select2.forms import ModelSelect2Widget\n \n+from ecommerce.models import Position\n from pages.models import CustomPage, FlatPage, PageTemplate\n-from generic_admin import inlines, models, sites\n+from generic_admin import inlines, mixins, models, sites\n \n from shopelectro import models as se_models\n from shopelectro.views.admin import TableEditor\n@@ -128,6 +129,11 @@\n )\n \n \n+class PositionInline(admin.StackedInline):\n+\n+ model = Position\n+\n+\n class CategoryPageAdmin(models.CategoryPageAdmin):\n \n add = False\n@@ -228,12 +234,21 @@\n custom_group.short_description = _('Group')\n \n \n+class OrderAdmin(mixins.PermissionsControl):\n+\n+ add = False\n+ inlines = [PositionInline]\n+ list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']\n+ search_fields = ['name', 'email', 'phone']\n+ list_display_links = ['name']\n+\n se_admin = SEAdminSite(name='se_admin')\n se_admin.register(CustomPage, models.CustomPageAdmin)\n se_admin.register(FlatPage, models.FlatPageAdmin)\n se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n \n se_admin.register(se_models.CategoryPage, CategoryPageAdmin)\n+se_admin.register(se_models.Order, OrderAdmin)\n se_admin.register(se_models.ProductPage, ProductPageAdmin)\n se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\n se_admin.register(se_models.TagGroup, TagGroupAdmin)\n", "issue": "\u0412\u0435\u0440\u043d\u0438 \u0441\u043f\u0438\u0441\u043e\u043a \u0437\u0430\u043a\u0430\u0437\u043e\u0432 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0443\n\u041f\u043e\u043c\u043d\u0438\u0442\u0441\u044f \u0443 \u043d\u0430\u0441 \u0431\u044b\u043b \u0441\u043f\u0438\u0441\u043e\u043a \u0437\u0430\u043a\u0430\u0437\u043e\u0432 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435, \u043d\u043e \u0435\u0433\u043e \u043d\u0435\u0441\u043a\u043e\u043b\u044c\u043a\u043e \u0440\u0430\u0437 \u043b\u043e\u043c\u0430\u043b\u0438 \u0438 \u0432\u044b\u043f\u0438\u043b\u0438\u0432\u0430\u043b\u0438. 
\u0412\u043e\u0437\u0432\u0440\u0430\u0449\u0430\u0439.\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = False\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = 
[ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom ecommerce.models import Position\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, mixins, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = 
'{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass PositionInline(admin.StackedInline):\n\n model = Position\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = False\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = [ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" 
target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nclass OrderAdmin(mixins.PermissionsControl):\n\n add = False\n inlines = [PositionInline]\n list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.Order, OrderAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}]}
| 2,426 | 388 |
gh_patches_debug_25844
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ArrayDataSource get_data() returns scalar 0.0 when _data is None
The `ArrayDataSource` is meant to represent a 1D data structure stored in an array. For some reason, when no data value has been set, the `get_data()` method returns a scalar `0.0` value.
It would probably make most sense for the value to be `empty(shape=(0,))` (i.e. a 1D array of length 0).
Changing this may break existing code which expects empty `ArrayDataSources` to return a scalar 0. On the flip side, this will potentially simplify the logic for `get_bounds()` and similar.
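For illustration, a minimal sketch of the two return conventions (the helper functions below are illustrative stand-ins for `get_data()`, not part of chaco's API; only the `else` branches differ):

```python
import numpy as np

def get_data_current(data):
    # mirrors today's behaviour: hand back a scalar when nothing is set
    return data if data is not None else 0.0

def get_data_proposed(data):
    # proposed behaviour: always return a 1D ndarray, possibly of length 0
    return data if data is not None else np.empty(shape=(0,))

print(get_data_current(None))          # 0.0 -> a float; len() on it raises TypeError
proposed = get_data_proposed(None)
print(proposed.shape, len(proposed))   # (0,) 0 -> callers keep array semantics
```

With the empty-array convention, the "no data" case can be handled like any other zero-length array instead of special-casing a float.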
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/array_data_source.py`
Content:
```
1 """ Defines the ArrayDataSource class."""
2
3 # Major library imports
4 from numpy import array, isfinite, ones, ndarray
5 import numpy as np
6
7 # Enthought library imports
8 from traits.api import Any, Constant, Int, Tuple
9
10 # Chaco imports
11 from .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait
12 from .abstract_data_source import AbstractDataSource
13
14
15 def bounded_nanargmin(arr):
16 """Find the index of the minimum value, ignoring NaNs.
17
18 If all NaNs, return 0.
19 """
20 # Different versions of numpy behave differently in the all-NaN case, so we
21 # catch this condition in two different ways.
22 try:
23 if np.issubdtype(arr.dtype, np.floating):
24 min = np.nanargmin(arr)
25 elif np.issubdtype(arr.dtype, np.number):
26 min = np.argmin(arr)
27 else:
28 min = 0
29 except ValueError:
30 return 0
31 if isfinite(min):
32 return min
33 else:
34 return 0
35
36
37 def bounded_nanargmax(arr):
38 """Find the index of the maximum value, ignoring NaNs.
39
40 If all NaNs, return -1.
41 """
42 try:
43 if np.issubdtype(arr.dtype, np.floating):
44 max = np.nanargmax(arr)
45 elif np.issubdtype(arr.dtype, np.number):
46 max = np.argmax(arr)
47 else:
48 max = -1
49 except ValueError:
50 return -1
51 if isfinite(max):
52 return max
53 else:
54 return -1
55
56
57 class ArrayDataSource(AbstractDataSource):
58 """A data source representing a single, continuous array of numerical data.
59
60 This class does not listen to the array for value changes; if you need that
61 behavior, create a subclass that hooks up the appropriate listeners.
62 """
63
64 # ------------------------------------------------------------------------
65 # AbstractDataSource traits
66 # ------------------------------------------------------------------------
67
68 #: The dimensionality of the indices into this data source (overrides
69 #: AbstractDataSource).
70 index_dimension = Constant("scalar")
71
72 #: The dimensionality of the value at each index point (overrides
73 #: AbstractDataSource).
74 value_dimension = Constant("scalar")
75
76 #: The sort order of the data.
77 #: This is a specialized optimization for 1-D arrays, but it's an important
78 #: one that's used everywhere.
79 sort_order = SortOrderTrait
80
81 # ------------------------------------------------------------------------
82 # Private traits
83 # ------------------------------------------------------------------------
84
85 # The data array itself.
86 _data = NumericalSequenceTrait
87
88 # Cached values of min and max as long as **_data** doesn't change.
89 _cached_bounds = Tuple
90
91 # Not necessary, since this is not a filter, but provided for convenience.
92 _cached_mask = Any
93
94 # The index of the (first) minimum value in self._data
95 # FIXME: This is an Any instead of an Int trait because of how Traits
96 # typechecks numpy.int64 on 64-bit Windows systems.
97 _min_index = Any
98
99 # The index of the (first) maximum value in self._data
100 # FIXME: This is an Any instead of an Int trait because of how Traits
101 # typechecks numpy.int64 on 64-bit Windows systems.
102 _max_index = Any
103
104 # ------------------------------------------------------------------------
105 # Public methods
106 # ------------------------------------------------------------------------
107
108 def __init__(self, data=array([]), sort_order="none", **kw):
109 AbstractDataSource.__init__(self, **kw)
110 self.set_data(data, sort_order)
111
112 def set_data(self, newdata, sort_order=None):
113 """Sets the data, and optionally the sort order, for this data source.
114
115 Parameters
116 ----------
117 newdata : array
118 The data to use.
119 sort_order : SortOrderTrait
120 The sort order of the data
121 """
122 self._data = newdata
123 if sort_order is not None:
124 self.sort_order = sort_order
125 self._compute_bounds()
126 self.data_changed = True
127
128 def set_mask(self, mask):
129 """Sets the mask for this data source."""
130 self._cached_mask = mask
131 self.data_changed = True
132
133 def remove_mask(self):
134 """Removes the mask on this data source."""
135 self._cached_mask = None
136 self.data_changed = True
137
138 # ------------------------------------------------------------------------
139 # AbstractDataSource interface
140 # ------------------------------------------------------------------------
141
142 def get_data(self):
143 """Returns the data for this data source, or 0.0 if it has no data.
144
145 Implements AbstractDataSource.
146 """
147 if self._data is not None:
148 return self._data
149 else:
150 return 0.0
151
152 def get_data_mask(self):
153 """get_data_mask() -> (data_array, mask_array)
154
155 Implements AbstractDataSource.
156 """
157 if self._cached_mask is None:
158 return self._data, ones(len(self._data), dtype=bool)
159 else:
160 return self._data, self._cached_mask
161
162 def is_masked(self):
163 """is_masked() -> bool
164
165 Implements AbstractDataSource.
166 """
167 if self._cached_mask is not None:
168 return True
169 else:
170 return False
171
172 def get_size(self):
173 """get_size() -> int
174
175 Implements AbstractDataSource.
176 """
177 if self._data is not None:
178 return len(self._data)
179 else:
180 return 0
181
182 def get_bounds(self):
183 """Returns the minimum and maximum values of the data source's data.
184
185 Implements AbstractDataSource.
186 """
187 if (
188 self._cached_bounds is None
189 or self._cached_bounds == ()
190 or self._cached_bounds == 0.0
191 ):
192 self._compute_bounds()
193 return self._cached_bounds
194
195 def reverse_map(self, pt, index=0, outside_returns_none=True):
196 """Returns the index of *pt* in the data source.
197
198 Parameters
199 ----------
200 pt : scalar value
201 value to find
202 index
203 ignored for data series with 1-D indices
204 outside_returns_none : Boolean
205 Whether the method returns None if *pt* is outside the range of
206 the data source; if False, the method returns the value of the
207 bound that *pt* is outside of.
208 """
209 if self.sort_order == "none":
210 raise NotImplementedError
211
212 # index is ignored for dataseries with 1-dimensional indices
213 minval, maxval = self._cached_bounds
214 if pt < minval:
215 if outside_returns_none:
216 return None
217 else:
218 return self._min_index
219 elif pt > maxval:
220 if outside_returns_none:
221 return None
222 else:
223 return self._max_index
224 else:
225 return reverse_map_1d(self._data, pt, self.sort_order)
226
227 # ------------------------------------------------------------------------
228 # Private methods
229 # ------------------------------------------------------------------------
230
231 def _compute_bounds(self, data=None):
232 """Computes the minimum and maximum values of self._data.
233
234 If a data array is passed in, then that is used instead of self._data.
235 This behavior is useful for subclasses.
236 """
237 # TODO: as an optimization, perhaps create and cache a sorted
238 # version of the dataset?
239
240 if data is None:
241 # Several sources weren't setting the _data attribute, so we
242 # go through the interface. This seems like the correct thing
243 # to do anyway... right?
244 # data = self._data
245 data = self.get_data()
246
247 data_len = 0
248 try:
249 data_len = len(data)
250 except Exception:
251 pass
252 if data_len == 0:
253 self._min_index = 0
254 self._max_index = 0
255 self._cached_bounds = (0.0, 0.0)
256 elif data_len == 1:
257 self._min_index = 0
258 self._max_index = 0
259 self._cached_bounds = (data[0], data[0])
260 else:
261 if self.sort_order == "ascending":
262 self._min_index = 0
263 self._max_index = -1
264 elif self.sort_order == "descending":
265 self._min_index = -1
266 self._max_index = 0
267 else:
268 # ignore NaN values. This is probably a little slower,
269 # but also much safer.
270
271 # data might be an array of strings or objects that
272 # can't have argmin calculated on them.
273 try:
274 # the data may be in a subclass of numpy.array, viewing
275 # the data as a ndarray will remove side effects of
276 # the subclasses, such as different operator behaviors
277 self._min_index = bounded_nanargmin(data.view(ndarray))
278 self._max_index = bounded_nanargmax(data.view(ndarray))
279 except (TypeError, IndexError, NotImplementedError):
280 # For strings and objects, we punt... These show up in
281 # label-ish data sources.
282 self._cached_bounds = (0.0, 0.0)
283
284 self._cached_bounds = (
285 data[self._min_index],
286 data[self._max_index],
287 )
288
289 # ------------------------------------------------------------------------
290 # Event handlers
291 # ------------------------------------------------------------------------
292
293 def _metadata_changed(self, event):
294 self.metadata_changed = True
295
296 def _metadata_items_changed(self, event):
297 self.metadata_changed = True
298
299 # ------------------------------------------------------------------------
300 # Persistence-related methods
301 # ------------------------------------------------------------------------
302
303 def __getstate__(self):
304 state = self.__dict__.copy()
305 if not self.persist_data:
306 state.pop("_data", None)
307 state.pop("_cached_mask", None)
308 state.pop("_cached_bounds", None)
309 state.pop("_min_index", None)
310 state.pop("_max_index", None)
311 return state
312
313 def _post_load(self):
314 super(ArrayDataSource, self)._post_load()
315 self._cached_bounds = ()
316 self._cached_mask = None
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py
--- a/chaco/array_data_source.py
+++ b/chaco/array_data_source.py
@@ -1,7 +1,7 @@
""" Defines the ArrayDataSource class."""
# Major library imports
-from numpy import array, isfinite, ones, ndarray
+from numpy import array, empty, isfinite, ones, ndarray
import numpy as np
# Enthought library imports
@@ -147,7 +147,7 @@
if self._data is not None:
return self._data
else:
- return 0.0
+ return empty(shape=(0,))
def get_data_mask(self):
"""get_data_mask() -> (data_array, mask_array)
@@ -238,17 +238,10 @@
# version of the dataset?
if data is None:
- # Several sources weren't setting the _data attribute, so we
- # go through the interface. This seems like the correct thing
- # to do anyway... right?
- # data = self._data
data = self.get_data()
- data_len = 0
- try:
- data_len = len(data)
- except Exception:
- pass
+ data_len = len(data)
+
if data_len == 0:
self._min_index = 0
self._max_index = 0
|
{"golden_diff": "diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py\n--- a/chaco/array_data_source.py\n+++ b/chaco/array_data_source.py\n@@ -1,7 +1,7 @@\n \"\"\" Defines the ArrayDataSource class.\"\"\"\n \n # Major library imports\n-from numpy import array, isfinite, ones, ndarray\n+from numpy import array, empty, isfinite, ones, ndarray\n import numpy as np\n \n # Enthought library imports\n@@ -147,7 +147,7 @@\n if self._data is not None:\n return self._data\n else:\n- return 0.0\n+ return empty(shape=(0,))\n \n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n@@ -238,17 +238,10 @@\n # version of the dataset?\n \n if data is None:\n- # Several sources weren't setting the _data attribute, so we\n- # go through the interface. This seems like the correct thing\n- # to do anyway... right?\n- # data = self._data\n data = self.get_data()\n \n- data_len = 0\n- try:\n- data_len = len(data)\n- except Exception:\n- pass\n+ data_len = len(data)\n+\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n", "issue": "ArrayDataSource get_data() returns scalar 0.0 when _data is None\nThe `ArrayDataSource` is meant to represent a 1D data structure stored in an array. For some reason, when no data value has been set, the `get_data()` method returns a scalar `0.0` value.\n\nIt would probably make most sense for the value to be `empty(shape=(0,))` (ie. a 1D array of length 0).\n\nChanging this may break existing code which expects empty `ArrayDataSources` to return a scalar 0. On the flip side, this will potentially simplify the logic for `get_bounds()` and similar.\n\n", "before_files": [{"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The 
dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return 0.0\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n 
Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n # Several sources weren't setting the _data attribute, so we\n # go through the interface. This seems like the correct thing\n # to do anyway... right?\n # data = self._data\n data = self.get_data()\n\n data_len = 0\n try:\n data_len = len(data)\n except Exception:\n pass\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... 
These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = self.__dict__.copy()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super(ArrayDataSource, self)._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n", "path": "chaco/array_data_source.py"}], "after_files": [{"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, empty, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = 
NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return empty(shape=(0,))\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if 
outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n data = self.get_data()\n\n data_len = len(data)\n\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = self.__dict__.copy()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super(ArrayDataSource, self)._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n", "path": "chaco/array_data_source.py"}]}
| 3,396 | 320 |
gh_patches_debug_29682
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-990
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add requirement about projection units : should be meters
http://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid
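For illustration, a minimal sketch (assuming a PostGIS database reachable through Django's default connection; the helper name is hypothetical) of how the unit of a configured SRID can be looked up in `spatial_ref_sys`:
```
from django.db import connection

def srid_is_metric(srid):
    """Return True if the WKT definition of ``srid`` declares its unit in meters."""
    cursor = connection.cursor()
    cursor.execute("SELECT srtext FROM spatial_ref_sys WHERE srid = %s", [srid])
    row = cursor.fetchone()
    if row is None:
        return False
    # Metric projections carry a clause such as UNIT["metre", 1] in their WKT.
    return "metre" in row[0].lower() or "meter" in row[0].lower()
```
A startup check could then refuse to run (e.g. raise `ImproperlyConfigured`) when this returns False for the configured SRID.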
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/common/__init__.py`
Content:
```
1 """
2
3 Geotrek startup script.
4
5 This is executed only once at startup.
6
7 """
8 from south.signals import post_migrate
9 from django.conf import settings
10 from django.db.models.signals import post_syncdb
11
12 from mapentity.helpers import api_bbox
13
14 from geotrek.common.utils.postgresql import load_sql_files
15
16
17 """
18 http://djangosnippets.org/snippets/2311/
19 Ensure South will update our custom SQL during a call to `migrate`.
20 """
21
22 def run_initial_sql_post_migrate(sender, **kwargs):
23 app_label = kwargs.get('app')
24 load_sql_files(app_label)
25
26
27 def run_initial_sql_post_syncdb(sender, **kwargs):
28 app = kwargs.get('app')
29 models_module = app.__name__
30 app_label = models_module.rsplit('.')[-2]
31 load_sql_files(app_label)
32
33
34 if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
35 post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
36 # During tests, the signal is received twice unfortunately
37 # https://code.djangoproject.com/ticket/17977
38 else:
39 post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
40
41
42 """
43 Computed client-side setting.
44 """
45 settings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py
--- a/geotrek/common/__init__.py
+++ b/geotrek/common/__init__.py
@@ -5,9 +5,11 @@
This is executed only once at startup.
"""
-from south.signals import post_migrate
+from south.signals import pre_migrate, post_migrate
from django.conf import settings
-from django.db.models.signals import post_syncdb
+from django.db import connection
+from django.db.models.signals import pre_syncdb, post_syncdb
+from django.core.exceptions import ImproperlyConfigured
from mapentity.helpers import api_bbox
@@ -31,11 +33,26 @@
load_sql_files(app_label)
+def check_srid_has_meter_unit(sender, **kwargs):
+ if not hasattr(check_srid_has_meter_unit, '_checked'):
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT * FROM spatial_ref_sys
+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;""", [settings.SRID])
+ results = cursor.fetchall()
+ if len(results) == 0:
+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID
+ raise ImproperlyConfigured(err_msg)
+ check_srid_has_meter_unit._checked = True
+
+
if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
# During tests, the signal is received twice unfortunately
# https://code.djangoproject.com/ticket/17977
else:
+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
|
{"golden_diff": "diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py\n--- a/geotrek/common/__init__.py\n+++ b/geotrek/common/__init__.py\n@@ -5,9 +5,11 @@\n This is executed only once at startup.\n \n \"\"\"\n-from south.signals import post_migrate\n+from south.signals import pre_migrate, post_migrate\n from django.conf import settings\n-from django.db.models.signals import post_syncdb\n+from django.db import connection\n+from django.db.models.signals import pre_syncdb, post_syncdb\n+from django.core.exceptions import ImproperlyConfigured\n \n from mapentity.helpers import api_bbox\n \n@@ -31,11 +33,26 @@\n load_sql_files(app_label)\n \n \n+def check_srid_has_meter_unit(sender, **kwargs):\n+ if not hasattr(check_srid_has_meter_unit, '_checked'):\n+ cursor = connection.cursor()\n+ cursor.execute(\"\"\"\n+ SELECT * FROM spatial_ref_sys\n+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;\"\"\", [settings.SRID])\n+ results = cursor.fetchall()\n+ if len(results) == 0:\n+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID\n+ raise ImproperlyConfigured(err_msg)\n+ check_srid_has_meter_unit._checked = True\n+\n+\n if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\n else:\n+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n", "issue": "Add requirement about projection units : should be meters\nhttp://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid\n\n", "before_files": [{"content": "\"\"\"\n\n Geotrek startup script.\n\n This is executed only once at startup.\n\n\"\"\"\nfrom south.signals import post_migrate\nfrom django.conf import settings\nfrom django.db.models.signals import post_syncdb\n\nfrom mapentity.helpers import api_bbox\n\nfrom geotrek.common.utils.postgresql import load_sql_files\n\n\n\"\"\"\n http://djangosnippets.org/snippets/2311/\n Ensure South will update our custom SQL during a call to `migrate`.\n\"\"\"\n\ndef run_initial_sql_post_migrate(sender, **kwargs):\n app_label = kwargs.get('app')\n load_sql_files(app_label)\n\n\ndef run_initial_sql_post_syncdb(sender, **kwargs):\n app = kwargs.get('app')\n models_module = app.__name__\n app_label = models_module.rsplit('.')[-2]\n load_sql_files(app_label)\n\n\nif settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\nelse:\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n\n\n\"\"\"\n Computed client-side setting.\n\"\"\"\nsettings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)\n", "path": "geotrek/common/__init__.py"}], "after_files": [{"content": "\"\"\"\n\n Geotrek startup script.\n\n This is executed only once at startup.\n\n\"\"\"\nfrom south.signals import pre_migrate, post_migrate\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.models.signals import pre_syncdb, post_syncdb\nfrom django.core.exceptions import 
ImproperlyConfigured\n\nfrom mapentity.helpers import api_bbox\n\nfrom geotrek.common.utils.postgresql import load_sql_files\n\n\n\"\"\"\n http://djangosnippets.org/snippets/2311/\n Ensure South will update our custom SQL during a call to `migrate`.\n\"\"\"\n\ndef run_initial_sql_post_migrate(sender, **kwargs):\n app_label = kwargs.get('app')\n load_sql_files(app_label)\n\n\ndef run_initial_sql_post_syncdb(sender, **kwargs):\n app = kwargs.get('app')\n models_module = app.__name__\n app_label = models_module.rsplit('.')[-2]\n load_sql_files(app_label)\n\n\ndef check_srid_has_meter_unit(sender, **kwargs):\n if not hasattr(check_srid_has_meter_unit, '_checked'):\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT * FROM spatial_ref_sys\n WHERE srtext ILIKE '%%meter%%' AND srid=%s;\"\"\", [settings.SRID])\n results = cursor.fetchall()\n if len(results) == 0:\n err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID\n raise ImproperlyConfigured(err_msg)\n check_srid_has_meter_unit._checked = True\n\n\nif settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\nelse:\n pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n\n\n\"\"\"\n Computed client-side setting.\n\"\"\"\nsettings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)\n", "path": "geotrek/common/__init__.py"}]}
| 693 | 442 |
gh_patches_debug_8892
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-2788
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The captcha does not work under Python 3
@morxa [reported](https://github.com/fedora-infra/bodhi/issues/2700#issuecomment-441245759) that the captcha was not working in Fedora's Bodhi deployment:

It turns out that it does work if Bodhi is run with Python 2, but fails under Python 3. My guess is that there is a ```str```/```bytes``` mismatch somewhere that we need to find and fix.
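For illustration, a minimal standalone sketch (not Bodhi's actual code path) of the kind of mismatch involved: `Fernet.encrypt()` and `base64.urlsafe_b64encode()` both return `bytes`, so interpolating the result straight into a URL behaves differently on Python 3:
```
import base64
import cryptography.fernet

secret = cryptography.fernet.Fernet.generate_key()
engine = cryptography.fernet.Fernet(secret)

cipherkey = base64.urlsafe_b64encode(engine.encrypt(u"2 + 2 =".encode("utf-8")))

# Python 2: the token is interpolated as-is and the URL works.
# Python 3: the bytes repr leaks in, producing /captcha/b'...'.jpg
print("/captcha/%s.jpg" % cipherkey)

# Decoding first gives the intended URL on both versions.
print("/captcha/%s.jpg" % cipherkey.decode("utf-8"))
```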
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/captcha.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright Β© 2014-2018 Red Hat, Inc.
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20 #
21 # Authors: Ralph Bean <[email protected]>
22 """Define utilities and a view pertaining to captcha images for unauthenticated users."""
23
24 from __future__ import division
25 import base64
26 import math
27 import random
28 import binascii
29
30 from PIL import Image, ImageDraw, ImageFont
31 from pyramid.httpexceptions import HTTPGone, HTTPNotFound
32 from pyramid.view import view_config
33 import cryptography.fernet
34 import six
35
36
37 def math_generator(plainkey, settings):
38 """
39 Given a plainkey, return its expected value.
40
41 Args:
42 plainkey (basestring or None): The key to encrypt as ciphertext. If None, a random key is
43 generated.
44 settings (bodhi.server.config.BodhiConfig): Bodhi's settings, unsued.
45 Returns:
46 tuple: A 2-tuple is returned. The first element is the plainkey, and the second is its
47 encrypted value.
48 Raises:
49 ValueError: If the plainkey is not valid.
50 """
51 if not plainkey:
52 x = random.randint(1, 100)
53 y = random.randint(1, 100)
54 plainkey = "%i + %i =" % (x, y)
55
56 tokens = plainkey.split()
57 if not len(tokens) == 4:
58 raise ValueError("%s is an invalid plainkey" % plainkey)
59
60 if tokens[1] != '+' or tokens[3] != '=':
61 raise ValueError("%s is an invalid plainkey" % plainkey)
62
63 x, y = int(tokens[0]), int(tokens[2])
64
65 value = six.text_type(x + y)
66 return plainkey, value
67
68
69 def jpeg_generator(plainkey, settings):
70 """
71 Generate an image with plainkey written in it.
72
73 Args:
74 plainkey (basestring): The text to include in the generated image.
75 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
76 Returns:
77 PIL.Image.Image: An image containing the given text.
78 """
79 image_size = image_width, image_height = (
80 settings.get('captcha.image_width'),
81 settings.get('captcha.image_height'),
82 )
83 font_path = settings.get('captcha.font_path')
84 font_size = settings.get('captcha.font_size')
85 font_color = settings.get('captcha.font_color')
86 background_color = settings.get('captcha.background_color')
87 padding = settings.get('captcha.padding')
88
89 img = Image.new('RGB', image_size, color=background_color)
90
91 font = ImageFont.truetype(font_path, font_size)
92 width, height = font.getsize(plainkey)
93
94 draw = ImageDraw.Draw(img)
95 position = (
96 random.randint(padding, (image_width - width - padding)),
97 random.randint(padding, (image_height - height - padding)))
98 draw.text(position, plainkey, font=font, fill=font_color)
99
100 # Make it crazy!
101 img = warp_image(img)
102
103 return img
104
105
106 def warp_image(image):
107 """
108 Apply some random bending operations to the given image.
109
110 This function attempts to make it harder for bots to read the text inside the image, while
111 allowing humans to read it.
112
113 Args:
114 image (PIL.Image.Image): The image to warp.
115 Returns:
116 PIL.Image.Image: A warped transformation of the given image.
117 """
118 r = 10 # individually warp a bunch of 10x10 tiles.
119 mesh_x = (image.size[0] // r) + 2
120 mesh_y = (image.size[1] // r) + 2
121
122 # Set up some random values we'll use over and over...
123 amplitude = random.uniform(6, 10)
124 period = random.uniform(0.65, 0.74)
125 offset = (
126 random.uniform(0, math.pi * 2 / period),
127 random.uniform(0, math.pi * 2 / period),
128 )
129
130 def _sine(x, y, a=amplitude, p=period, o=offset):
131 """Given a single point, warp it."""
132 return (
133 math.sin((y + o[0]) * p) * a + x,
134 math.sin((x + o[1]) * p) * a + y,
135 )
136
137 def _clamp(x, y):
138 """Don't warp things outside the bounds of the image."""
139 return (
140 max(0, min(image.size[0] - 1, x)),
141 max(0, min(image.size[1] - 1, y)),
142 )
143
144 # Build a map of the corners of our r by r tiles, warping each one.
145 warp = [
146 [
147 _clamp(*_sine(i * r, j * r))
148 for j in range(mesh_y)
149 ] for i in range(mesh_x)
150 ]
151
152 def _destination_rectangle(i, j):
153 """Return a happy tile from the original space."""
154 return (i * r, j * r, (i + 1) * r, (j + 1) * r)
155
156 def _source_quadrilateral(i, j):
157 """
158 Return the set of warped corners for a given tile.
159
160 Specified counter-clockwise as a tuple.
161 """
162 return (
163 warp[i][j][0], warp[i][j][1],
164 warp[i][j + 1][0], warp[i][j + 1][1],
165 warp[i + 1][j + 1][0], warp[i + 1][j + 1][1],
166 warp[i + 1][j][0], warp[i + 1][j][1],
167 )
168
169 # Finally, prepare our list of sources->destinations for PIL.
170 mesh = [
171 (
172 _destination_rectangle(i, j),
173 _source_quadrilateral(i, j),
174 )
175 for j in range(mesh_y - 1)
176 for i in range(mesh_x - 1)
177 ]
178 # And do it.
179 return image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)
180
181
182 def validate(request, cipherkey, value):
183 """
184 Return whether the value matches the expected value, based on the cipherkey.
185
186 Args:
187 request (pyramid.util.Request): The current web request.
188 cipherkey (basestring): The encrypted Fernet key.
189 value (basestring): The value to be validated.
190 Returns:
191 bool: True if value matches the expected value based on the cipherkey, False otherwise.
192 False is also returned if the cipherkey is not found to be a valid Fernet token.
193 """
194 settings = request.registry.settings
195
196 plainkey = decrypt(cipherkey, settings)
197
198 _, expected_value = math_generator(plainkey=plainkey, settings=settings)
199 return value == expected_value
200
201
202 def generate_captcha(context, request):
203 """
204 Generate a key and a URL to a captcha image that matches the key.
205
206 Args:
207 context (mako.runtime.Context): Unused.
208 request (pyramid.util.Request): The current web request.
209 Returns:
210 tuple: A 2-tuple of strings. The first is the ciphertext key for a captcha, and the second
211 is a URL to the captcha image that matches that key.
212 """
213 settings = request.registry.settings
214 plainkey, value = math_generator(plainkey=None, settings=settings)
215 cipherkey = encrypt(plainkey, settings)
216 url = request.route_url('captcha_image', cipherkey=cipherkey)
217 request.session['captcha'] = cipherkey # Remember this to stop replay.
218 return cipherkey, url
219
220
221 def encrypt(plaintext, settings):
222 """
223 Calculate and return the ciphertext key from the given plaintext key.
224
225 Args:
226 plaintext (basestring): A key you wish you encrypt.
227 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
228 Returns:
229 str: The ciphertext version of the given captcha key.
230 """
231 secret = settings['captcha.secret']
232 engine = cryptography.fernet.Fernet(secret)
233 ciphertext = engine.encrypt(plaintext.encode('utf-8'))
234 ciphertext = base64.urlsafe_b64encode(ciphertext)
235 return ciphertext
236
237
238 def decrypt(ciphertext, settings):
239 """
240 Calculate and return the plaintext key from the given ciphertext.
241
242 Args:
243 ciphertext (str): The encrypted secret for a captcha image.
244 settings (bodhi.server.config.BodhiConfig): Bodhi's settings.
245 Returns:
246 unicode: The plaintext secret for a captcha image.
247 Raises:
248 pyramid.httpexceptions.HTTPNotFound: If the ciphertext can not be decoded as base64.
249 pyramid.httpexceptions.HTTPGone: If the captcha token has expired.
250 """
251 ttl = settings['captcha.ttl']
252 secret = settings['captcha.secret']
253 engine = cryptography.fernet.Fernet(secret)
254
255 if isinstance(ciphertext, six.text_type):
256 ciphertext = ciphertext.encode('utf-8')
257
258 try:
259 ciphertext = base64.urlsafe_b64decode(ciphertext)
260 except (TypeError, binascii.Error):
261 raise HTTPNotFound("%s is garbage" % ciphertext.decode('utf-8'))
262
263 try:
264 plaintext = engine.decrypt(ciphertext, ttl=ttl)
265 except cryptography.fernet.InvalidToken:
266 raise HTTPGone('captcha token is no longer valid')
267
268 return plaintext.decode('utf-8')
269
270
271 @view_config(route_name='captcha_image', renderer='jpeg')
272 def captcha_image(request):
273 """
274 Generate and return a captcha image.
275
276 Args:
277 request (pyramid.util.Request): The current web request.
278 Returns:
279 PIL.Image.Image: The generated captcha image.
280 """
281 cipherkey = request.matchdict['cipherkey']
282 plainkey = decrypt(cipherkey, request.registry.settings)
283 image = jpeg_generator(plainkey, request.registry.settings)
284 return image
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bodhi/server/captcha.py b/bodhi/server/captcha.py
--- a/bodhi/server/captcha.py
+++ b/bodhi/server/captcha.py
@@ -212,7 +212,7 @@
"""
settings = request.registry.settings
plainkey, value = math_generator(plainkey=None, settings=settings)
- cipherkey = encrypt(plainkey, settings)
+ cipherkey = encrypt(plainkey, settings).decode('utf8')
url = request.route_url('captcha_image', cipherkey=cipherkey)
request.session['captcha'] = cipherkey # Remember this to stop replay.
return cipherkey, url
|
{"golden_diff": "diff --git a/bodhi/server/captcha.py b/bodhi/server/captcha.py\n--- a/bodhi/server/captcha.py\n+++ b/bodhi/server/captcha.py\n@@ -212,7 +212,7 @@\n \"\"\"\n settings = request.registry.settings\n plainkey, value = math_generator(plainkey=None, settings=settings)\n- cipherkey = encrypt(plainkey, settings)\n+ cipherkey = encrypt(plainkey, settings).decode('utf8')\n url = request.route_url('captcha_image', cipherkey=cipherkey)\n request.session['captcha'] = cipherkey # Remember this to stop replay.\n return cipherkey, url\n", "issue": "The captcha does not work under Python 3\n@morxa [reported](https://github.com/fedora-infra/bodhi/issues/2700#issuecomment-441245759) that the captcha was not working in Fedora's Bodhi deployment:\r\n\r\n\r\n\r\n\r\nIt turns out that it does work if Bodhi is run with Python 2, but fails under Python 3. My guess is that there is a ```str```/```bytes``` mismatch somewhere that we need to find and fix.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2014-2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n#\n# Authors: Ralph Bean <[email protected]>\n\"\"\"Define utilities and a view pertaining to captcha images for unauthenticated users.\"\"\"\n\nfrom __future__ import division\nimport base64\nimport math\nimport random\nimport binascii\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pyramid.httpexceptions import HTTPGone, HTTPNotFound\nfrom pyramid.view import view_config\nimport cryptography.fernet\nimport six\n\n\ndef math_generator(plainkey, settings):\n \"\"\"\n Given a plainkey, return its expected value.\n\n Args:\n plainkey (basestring or None): The key to encrypt as ciphertext. If None, a random key is\n generated.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings, unsued.\n Returns:\n tuple: A 2-tuple is returned. 
The first element is the plainkey, and the second is its\n encrypted value.\n Raises:\n ValueError: If the plainkey is not valid.\n \"\"\"\n if not plainkey:\n x = random.randint(1, 100)\n y = random.randint(1, 100)\n plainkey = \"%i + %i =\" % (x, y)\n\n tokens = plainkey.split()\n if not len(tokens) == 4:\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n if tokens[1] != '+' or tokens[3] != '=':\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n x, y = int(tokens[0]), int(tokens[2])\n\n value = six.text_type(x + y)\n return plainkey, value\n\n\ndef jpeg_generator(plainkey, settings):\n \"\"\"\n Generate an image with plainkey written in it.\n\n Args:\n plainkey (basestring): The text to include in the generated image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n PIL.Image.Image: An image containing the given text.\n \"\"\"\n image_size = image_width, image_height = (\n settings.get('captcha.image_width'),\n settings.get('captcha.image_height'),\n )\n font_path = settings.get('captcha.font_path')\n font_size = settings.get('captcha.font_size')\n font_color = settings.get('captcha.font_color')\n background_color = settings.get('captcha.background_color')\n padding = settings.get('captcha.padding')\n\n img = Image.new('RGB', image_size, color=background_color)\n\n font = ImageFont.truetype(font_path, font_size)\n width, height = font.getsize(plainkey)\n\n draw = ImageDraw.Draw(img)\n position = (\n random.randint(padding, (image_width - width - padding)),\n random.randint(padding, (image_height - height - padding)))\n draw.text(position, plainkey, font=font, fill=font_color)\n\n # Make it crazy!\n img = warp_image(img)\n\n return img\n\n\ndef warp_image(image):\n \"\"\"\n Apply some random bending operations to the given image.\n\n This function attempts to make it harder for bots to read the text inside the image, while\n allowing humans to read it.\n\n Args:\n image (PIL.Image.Image): The image to warp.\n Returns:\n PIL.Image.Image: A warped transformation of the given image.\n \"\"\"\n r = 10 # individually warp a bunch of 10x10 tiles.\n mesh_x = (image.size[0] // r) + 2\n mesh_y = (image.size[1] // r) + 2\n\n # Set up some random values we'll use over and over...\n amplitude = random.uniform(6, 10)\n period = random.uniform(0.65, 0.74)\n offset = (\n random.uniform(0, math.pi * 2 / period),\n random.uniform(0, math.pi * 2 / period),\n )\n\n def _sine(x, y, a=amplitude, p=period, o=offset):\n \"\"\"Given a single point, warp it.\"\"\"\n return (\n math.sin((y + o[0]) * p) * a + x,\n math.sin((x + o[1]) * p) * a + y,\n )\n\n def _clamp(x, y):\n \"\"\"Don't warp things outside the bounds of the image.\"\"\"\n return (\n max(0, min(image.size[0] - 1, x)),\n max(0, min(image.size[1] - 1, y)),\n )\n\n # Build a map of the corners of our r by r tiles, warping each one.\n warp = [\n [\n _clamp(*_sine(i * r, j * r))\n for j in range(mesh_y)\n ] for i in range(mesh_x)\n ]\n\n def _destination_rectangle(i, j):\n \"\"\"Return a happy tile from the original space.\"\"\"\n return (i * r, j * r, (i + 1) * r, (j + 1) * r)\n\n def _source_quadrilateral(i, j):\n \"\"\"\n Return the set of warped corners for a given tile.\n\n Specified counter-clockwise as a tuple.\n \"\"\"\n return (\n warp[i][j][0], warp[i][j][1],\n warp[i][j + 1][0], warp[i][j + 1][1],\n warp[i + 1][j + 1][0], warp[i + 1][j + 1][1],\n warp[i + 1][j][0], warp[i + 1][j][1],\n )\n\n # Finally, prepare our list of sources->destinations for PIL.\n mesh = [\n (\n 
_destination_rectangle(i, j),\n _source_quadrilateral(i, j),\n )\n for j in range(mesh_y - 1)\n for i in range(mesh_x - 1)\n ]\n # And do it.\n return image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n\ndef validate(request, cipherkey, value):\n \"\"\"\n Return whether the value matches the expected value, based on the cipherkey.\n\n Args:\n request (pyramid.util.Request): The current web request.\n cipherkey (basestring): The encrypted Fernet key.\n value (basestring): The value to be validated.\n Returns:\n bool: True if value matches the expected value based on the cipherkey, False otherwise.\n False is also returned if the cipherkey is not found to be a valid Fernet token.\n \"\"\"\n settings = request.registry.settings\n\n plainkey = decrypt(cipherkey, settings)\n\n _, expected_value = math_generator(plainkey=plainkey, settings=settings)\n return value == expected_value\n\n\ndef generate_captcha(context, request):\n \"\"\"\n Generate a key and a URL to a captcha image that matches the key.\n\n Args:\n context (mako.runtime.Context): Unused.\n request (pyramid.util.Request): The current web request.\n Returns:\n tuple: A 2-tuple of strings. The first is the ciphertext key for a captcha, and the second\n is a URL to the captcha image that matches that key.\n \"\"\"\n settings = request.registry.settings\n plainkey, value = math_generator(plainkey=None, settings=settings)\n cipherkey = encrypt(plainkey, settings)\n url = request.route_url('captcha_image', cipherkey=cipherkey)\n request.session['captcha'] = cipherkey # Remember this to stop replay.\n return cipherkey, url\n\n\ndef encrypt(plaintext, settings):\n \"\"\"\n Calculate and return the ciphertext key from the given plaintext key.\n\n Args:\n plaintext (basestring): A key you wish you encrypt.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n str: The ciphertext version of the given captcha key.\n \"\"\"\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n ciphertext = engine.encrypt(plaintext.encode('utf-8'))\n ciphertext = base64.urlsafe_b64encode(ciphertext)\n return ciphertext\n\n\ndef decrypt(ciphertext, settings):\n \"\"\"\n Calculate and return the plaintext key from the given ciphertext.\n\n Args:\n ciphertext (str): The encrypted secret for a captcha image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n unicode: The plaintext secret for a captcha image.\n Raises:\n pyramid.httpexceptions.HTTPNotFound: If the ciphertext can not be decoded as base64.\n pyramid.httpexceptions.HTTPGone: If the captcha token has expired.\n \"\"\"\n ttl = settings['captcha.ttl']\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n\n if isinstance(ciphertext, six.text_type):\n ciphertext = ciphertext.encode('utf-8')\n\n try:\n ciphertext = base64.urlsafe_b64decode(ciphertext)\n except (TypeError, binascii.Error):\n raise HTTPNotFound(\"%s is garbage\" % ciphertext.decode('utf-8'))\n\n try:\n plaintext = engine.decrypt(ciphertext, ttl=ttl)\n except cryptography.fernet.InvalidToken:\n raise HTTPGone('captcha token is no longer valid')\n\n return plaintext.decode('utf-8')\n\n\n@view_config(route_name='captcha_image', renderer='jpeg')\ndef captcha_image(request):\n \"\"\"\n Generate and return a captcha image.\n\n Args:\n request (pyramid.util.Request): The current web request.\n Returns:\n PIL.Image.Image: The generated captcha image.\n \"\"\"\n cipherkey = request.matchdict['cipherkey']\n plainkey = 
decrypt(cipherkey, request.registry.settings)\n image = jpeg_generator(plainkey, request.registry.settings)\n return image\n", "path": "bodhi/server/captcha.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2014-2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA.\n#\n# Authors: Ralph Bean <[email protected]>\n\"\"\"Define utilities and a view pertaining to captcha images for unauthenticated users.\"\"\"\n\nfrom __future__ import division\nimport base64\nimport math\nimport random\nimport binascii\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom pyramid.httpexceptions import HTTPGone, HTTPNotFound\nfrom pyramid.view import view_config\nimport cryptography.fernet\nimport six\n\n\ndef math_generator(plainkey, settings):\n \"\"\"\n Given a plainkey, return its expected value.\n\n Args:\n plainkey (basestring or None): The key to encrypt as ciphertext. If None, a random key is\n generated.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings, unsued.\n Returns:\n tuple: A 2-tuple is returned. The first element is the plainkey, and the second is its\n encrypted value.\n Raises:\n ValueError: If the plainkey is not valid.\n \"\"\"\n if not plainkey:\n x = random.randint(1, 100)\n y = random.randint(1, 100)\n plainkey = \"%i + %i =\" % (x, y)\n\n tokens = plainkey.split()\n if not len(tokens) == 4:\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n if tokens[1] != '+' or tokens[3] != '=':\n raise ValueError(\"%s is an invalid plainkey\" % plainkey)\n\n x, y = int(tokens[0]), int(tokens[2])\n\n value = six.text_type(x + y)\n return plainkey, value\n\n\ndef jpeg_generator(plainkey, settings):\n \"\"\"\n Generate an image with plainkey written in it.\n\n Args:\n plainkey (basestring): The text to include in the generated image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n PIL.Image.Image: An image containing the given text.\n \"\"\"\n image_size = image_width, image_height = (\n settings.get('captcha.image_width'),\n settings.get('captcha.image_height'),\n )\n font_path = settings.get('captcha.font_path')\n font_size = settings.get('captcha.font_size')\n font_color = settings.get('captcha.font_color')\n background_color = settings.get('captcha.background_color')\n padding = settings.get('captcha.padding')\n\n img = Image.new('RGB', image_size, color=background_color)\n\n font = ImageFont.truetype(font_path, font_size)\n width, height = font.getsize(plainkey)\n\n draw = ImageDraw.Draw(img)\n position = (\n random.randint(padding, (image_width - width - padding)),\n random.randint(padding, (image_height - height - padding)))\n draw.text(position, plainkey, font=font, fill=font_color)\n\n # Make it crazy!\n img = warp_image(img)\n\n return img\n\n\ndef warp_image(image):\n \"\"\"\n Apply some random 
bending operations to the given image.\n\n This function attempts to make it harder for bots to read the text inside the image, while\n allowing humans to read it.\n\n Args:\n image (PIL.Image.Image): The image to warp.\n Returns:\n PIL.Image.Image: A warped transformation of the given image.\n \"\"\"\n r = 10 # individually warp a bunch of 10x10 tiles.\n mesh_x = (image.size[0] // r) + 2\n mesh_y = (image.size[1] // r) + 2\n\n # Set up some random values we'll use over and over...\n amplitude = random.uniform(6, 10)\n period = random.uniform(0.65, 0.74)\n offset = (\n random.uniform(0, math.pi * 2 / period),\n random.uniform(0, math.pi * 2 / period),\n )\n\n def _sine(x, y, a=amplitude, p=period, o=offset):\n \"\"\"Given a single point, warp it.\"\"\"\n return (\n math.sin((y + o[0]) * p) * a + x,\n math.sin((x + o[1]) * p) * a + y,\n )\n\n def _clamp(x, y):\n \"\"\"Don't warp things outside the bounds of the image.\"\"\"\n return (\n max(0, min(image.size[0] - 1, x)),\n max(0, min(image.size[1] - 1, y)),\n )\n\n # Build a map of the corners of our r by r tiles, warping each one.\n warp = [\n [\n _clamp(*_sine(i * r, j * r))\n for j in range(mesh_y)\n ] for i in range(mesh_x)\n ]\n\n def _destination_rectangle(i, j):\n \"\"\"Return a happy tile from the original space.\"\"\"\n return (i * r, j * r, (i + 1) * r, (j + 1) * r)\n\n def _source_quadrilateral(i, j):\n \"\"\"\n Return the set of warped corners for a given tile.\n\n Specified counter-clockwise as a tuple.\n \"\"\"\n return (\n warp[i][j][0], warp[i][j][1],\n warp[i][j + 1][0], warp[i][j + 1][1],\n warp[i + 1][j + 1][0], warp[i + 1][j + 1][1],\n warp[i + 1][j][0], warp[i + 1][j][1],\n )\n\n # Finally, prepare our list of sources->destinations for PIL.\n mesh = [\n (\n _destination_rectangle(i, j),\n _source_quadrilateral(i, j),\n )\n for j in range(mesh_y - 1)\n for i in range(mesh_x - 1)\n ]\n # And do it.\n return image.transform(image.size, Image.MESH, mesh, Image.BILINEAR)\n\n\ndef validate(request, cipherkey, value):\n \"\"\"\n Return whether the value matches the expected value, based on the cipherkey.\n\n Args:\n request (pyramid.util.Request): The current web request.\n cipherkey (basestring): The encrypted Fernet key.\n value (basestring): The value to be validated.\n Returns:\n bool: True if value matches the expected value based on the cipherkey, False otherwise.\n False is also returned if the cipherkey is not found to be a valid Fernet token.\n \"\"\"\n settings = request.registry.settings\n\n plainkey = decrypt(cipherkey, settings)\n\n _, expected_value = math_generator(plainkey=plainkey, settings=settings)\n return value == expected_value\n\n\ndef generate_captcha(context, request):\n \"\"\"\n Generate a key and a URL to a captcha image that matches the key.\n\n Args:\n context (mako.runtime.Context): Unused.\n request (pyramid.util.Request): The current web request.\n Returns:\n tuple: A 2-tuple of strings. 
The first is the ciphertext key for a captcha, and the second\n is a URL to the captcha image that matches that key.\n \"\"\"\n settings = request.registry.settings\n plainkey, value = math_generator(plainkey=None, settings=settings)\n cipherkey = encrypt(plainkey, settings).decode('utf8')\n url = request.route_url('captcha_image', cipherkey=cipherkey)\n request.session['captcha'] = cipherkey # Remember this to stop replay.\n return cipherkey, url\n\n\ndef encrypt(plaintext, settings):\n \"\"\"\n Calculate and return the ciphertext key from the given plaintext key.\n\n Args:\n plaintext (basestring): A key you wish you encrypt.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n str: The ciphertext version of the given captcha key.\n \"\"\"\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n ciphertext = engine.encrypt(plaintext.encode('utf-8'))\n ciphertext = base64.urlsafe_b64encode(ciphertext)\n return ciphertext\n\n\ndef decrypt(ciphertext, settings):\n \"\"\"\n Calculate and return the plaintext key from the given ciphertext.\n\n Args:\n ciphertext (str): The encrypted secret for a captcha image.\n settings (bodhi.server.config.BodhiConfig): Bodhi's settings.\n Returns:\n unicode: The plaintext secret for a captcha image.\n Raises:\n pyramid.httpexceptions.HTTPNotFound: If the ciphertext can not be decoded as base64.\n pyramid.httpexceptions.HTTPGone: If the captcha token has expired.\n \"\"\"\n ttl = settings['captcha.ttl']\n secret = settings['captcha.secret']\n engine = cryptography.fernet.Fernet(secret)\n\n if isinstance(ciphertext, six.text_type):\n ciphertext = ciphertext.encode('utf-8')\n\n try:\n ciphertext = base64.urlsafe_b64decode(ciphertext)\n except (TypeError, binascii.Error):\n raise HTTPNotFound(\"%s is garbage\" % ciphertext.decode('utf-8'))\n\n try:\n plaintext = engine.decrypt(ciphertext, ttl=ttl)\n except cryptography.fernet.InvalidToken:\n raise HTTPGone('captcha token is no longer valid')\n\n return plaintext.decode('utf-8')\n\n\n@view_config(route_name='captcha_image', renderer='jpeg')\ndef captcha_image(request):\n \"\"\"\n Generate and return a captcha image.\n\n Args:\n request (pyramid.util.Request): The current web request.\n Returns:\n PIL.Image.Image: The generated captcha image.\n \"\"\"\n cipherkey = request.matchdict['cipherkey']\n plainkey = decrypt(cipherkey, request.registry.settings)\n image = jpeg_generator(plainkey, request.registry.settings)\n return image\n", "path": "bodhi/server/captcha.py"}]}
| 3,546 | 148 |
gh_patches_debug_47980
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-564
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary Loop
https://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15
`Line 12` creates an NxN matrix in which every cell already **contains 0**, so there is no need for `Lines 14-15`
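A quick standalone check of that claim: the nested comprehension already fills every cell, including the diagonal, with 0, so the follow-up loop changes nothing:
```
N = 4
Matrix = [[0 for x in range(N)] for x in range(N)]

print(Matrix)  # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
print(all(Matrix[i][i] == 0 for i in range(1, N)))  # True before the extra loop ever runs
```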
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynamic_programming/matrix_chain_order.py`
Content:
```
1 from __future__ import print_function
2
3 import sys
4 '''
5 Dynamic Programming
6 Implementation of Matrix Chain Multiplication
7 Time Complexity: O(n^3)
8 Space Complexity: O(n^2)
9 '''
10 def MatrixChainOrder(array):
11 N=len(array)
12 Matrix=[[0 for x in range(N)] for x in range(N)]
13 Sol=[[0 for x in range(N)] for x in range(N)]
14 for i in range(1,N):
15 Matrix[i][i]=0
16
17 for ChainLength in range(2,N):
18 for a in range(1,N-ChainLength+1):
19 b = a+ChainLength-1
20
21 Matrix[a][b] = sys.maxsize
22 for c in range(a , b):
23 cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]
24 if cost < Matrix[a][b]:
25 Matrix[a][b] = cost
26 Sol[a][b] = c
27 return Matrix , Sol
28 #Print order of matrix with Ai as Matrix
29 def PrintOptimalSolution(OptimalSolution,i,j):
30 if i==j:
31 print("A" + str(i),end = " ")
32 else:
33 print("(",end = " ")
34 PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])
35 PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)
36 print(")",end = " ")
37
38 def main():
39 array=[30,35,15,5,10,20,25]
40 n=len(array)
41 #Size of matrix created from above array will be
42 # 30*35 35*15 15*5 5*10 10*20 20*25
43 Matrix , OptimalSolution = MatrixChainOrder(array)
44
45 print("No. of Operation required: "+str((Matrix[1][n-1])))
46 PrintOptimalSolution(OptimalSolution,1,n-1)
47 if __name__ == '__main__':
48 main()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py
--- a/dynamic_programming/matrix_chain_order.py
+++ b/dynamic_programming/matrix_chain_order.py
@@ -11,8 +11,6 @@
N=len(array)
Matrix=[[0 for x in range(N)] for x in range(N)]
Sol=[[0 for x in range(N)] for x in range(N)]
- for i in range(1,N):
- Matrix[i][i]=0
for ChainLength in range(2,N):
for a in range(1,N-ChainLength+1):
|
{"golden_diff": "diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py\n--- a/dynamic_programming/matrix_chain_order.py\n+++ b/dynamic_programming/matrix_chain_order.py\n@@ -11,8 +11,6 @@\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n- for i in range(1,N):\n- Matrix[i][i]=0\n \n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n", "issue": "Unnecessary Loop \nhttps://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15\r\n\r\n`Line 12` creates a NxN Matrix that **contains 0**. So there is no need for `Line 14-15`\n", "before_files": [{"content": "from __future__ import print_function\n\nimport sys\n'''\nDynamic Programming\nImplementation of Matrix Chain Multiplication\nTime Complexity: O(n^3)\nSpace Complexity: O(n^2)\n'''\ndef MatrixChainOrder(array):\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n for i in range(1,N):\n Matrix[i][i]=0\n\n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n b = a+ChainLength-1\n\n Matrix[a][b] = sys.maxsize\n for c in range(a , b):\n cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]\n if cost < Matrix[a][b]:\n Matrix[a][b] = cost\n Sol[a][b] = c\n return Matrix , Sol\n#Print order of matrix with Ai as Matrix\ndef PrintOptimalSolution(OptimalSolution,i,j):\n if i==j:\n print(\"A\" + str(i),end = \" \")\n else:\n print(\"(\",end = \" \")\n PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])\n PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)\n print(\")\",end = \" \")\n\ndef main():\n array=[30,35,15,5,10,20,25]\n n=len(array)\n #Size of matrix created from above array will be\n # 30*35 35*15 15*5 5*10 10*20 20*25\n Matrix , OptimalSolution = MatrixChainOrder(array)\n\n print(\"No. of Operation required: \"+str((Matrix[1][n-1])))\n PrintOptimalSolution(OptimalSolution,1,n-1)\nif __name__ == '__main__':\n main()\n", "path": "dynamic_programming/matrix_chain_order.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport sys\n'''\nDynamic Programming\nImplementation of Matrix Chain Multiplication\nTime Complexity: O(n^3)\nSpace Complexity: O(n^2)\n'''\ndef MatrixChainOrder(array):\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n\n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n b = a+ChainLength-1\n\n Matrix[a][b] = sys.maxsize\n for c in range(a , b):\n cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]\n if cost < Matrix[a][b]:\n Matrix[a][b] = cost\n Sol[a][b] = c\n return Matrix , Sol\n#Print order of matrix with Ai as Matrix\ndef PrintOptimalSolution(OptimalSolution,i,j):\n if i==j:\n print(\"A\" + str(i),end = \" \")\n else:\n print(\"(\",end = \" \")\n PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])\n PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)\n print(\")\",end = \" \")\n\ndef main():\n array=[30,35,15,5,10,20,25]\n n=len(array)\n #Size of matrix created from above array will be\n # 30*35 35*15 15*5 5*10 10*20 20*25\n Matrix , OptimalSolution = MatrixChainOrder(array)\n\n print(\"No. 
of Operation required: \"+str((Matrix[1][n-1])))\n PrintOptimalSolution(OptimalSolution,1,n-1)\nif __name__ == '__main__':\n main()\n", "path": "dynamic_programming/matrix_chain_order.py"}]}
| 908 | 140 |
gh_patches_debug_19547
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1619
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FastAPI/Starlette: Truncate exception message to prevent "event exceeded the permitted size" error from APM Server
**Describe the bug**:
The capture_exception() method sends the whole exception message to the APM Server, and when the message exceeds the maximum size, the APM Server returns the "event exceeded the permitted size" error.
I'm sorry if it is not a bug, but I believe that the apm-agent should truncate the message before sending it to the APM Server.
**To Reproduce**
Create a FastAPI route that always raises an error with a large exception message (as in the example below), and it will show an "event exceeded the permitted size" error on the next attempt to send events to the APM server
```
@app.get('/apm_test/')
async def apm_test():
try:
raise Exception('test' * 1000000)
except Exception as e:
apm_client.capture_exception()
return PlainTextResponse('', 200)
```
**Environment**
- OS: ubuntu 20.04
- Python version: 3.9.13
- Framework and version: fastapi==0.79.0
- APM Server version: 8.3.0
- Agent version: 6.11.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/events.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 import random
33 import sys
34
35 from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
36 from elasticapm.utils import varmap
37 from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
38 from elasticapm.utils.logging import get_logger
39 from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
40
41 __all__ = ("BaseEvent", "Exception", "Message")
42
43 logger = get_logger("elasticapm.events")
44
45
46 class BaseEvent(object):
47 @staticmethod
48 def to_string(client, data):
49 raise NotImplementedError
50
51 @staticmethod
52 def capture(client, **kwargs):
53 return {}
54
55
56 class Exception(BaseEvent):
57 """
58 Exceptions store the following metadata:
59
60 - value: 'My exception value'
61 - type: 'ClassName'
62 - module '__builtin__' (i.e. __builtin__.TypeError)
63 - frames: a list of serialized frames (see _get_traceback_frames)
64 """
65
66 @staticmethod
67 def to_string(client, data):
68 exc = data["exception"]
69 if exc["value"]:
70 return "%s: %s" % (exc["type"], exc["value"])
71 return exc["type"]
72
73 @staticmethod
74 def get_hash(data):
75 exc = data["exception"]
76 output = [exc["type"]]
77 for frame in data["stacktrace"]["frames"]:
78 output.append(frame["module"])
79 output.append(frame["function"])
80 return output
81
82 @staticmethod
83 def capture(client, exc_info=None, **kwargs):
84 culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None
85 new_exc_info = False
86 if not exc_info or exc_info is True:
87 new_exc_info = True
88 exc_info = sys.exc_info()
89
90 if exc_info == (None, None, None):
91 raise ValueError("No exception found: capture_exception requires an active exception.")
92
93 try:
94 exc_type, exc_value, exc_traceback = exc_info
95
96 frames = get_stack_info(
97 iter_traceback_frames(exc_traceback, config=client.config),
98 with_locals=client.config.collect_local_variables in ("errors", "all"),
99 library_frame_context_lines=client.config.source_lines_error_library_frames,
100 in_app_frame_context_lines=client.config.source_lines_error_app_frames,
101 include_paths_re=client.include_paths_re,
102 exclude_paths_re=client.exclude_paths_re,
103 locals_processor_func=lambda local_var: varmap(
104 lambda k, val: shorten(
105 val,
106 list_length=client.config.local_var_list_max_length,
107 string_length=client.config.local_var_max_length,
108 dict_length=client.config.local_var_dict_max_length,
109 ),
110 local_var,
111 ),
112 )
113
114 culprit = kwargs.get("culprit", None) or get_culprit(
115 frames, client.config.include_paths, client.config.exclude_paths
116 )
117
118 if hasattr(exc_type, "__module__"):
119 exc_module = exc_type.__module__
120 exc_type = exc_type.__name__
121 else:
122 exc_module = None
123 exc_type = exc_type.__name__
124 finally:
125 if new_exc_info:
126 try:
127 del exc_info
128 del exc_traceback
129 except Exception as e:
130 logger.exception(e)
131 if "message" in kwargs:
132 message = kwargs["message"]
133 else:
134 message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
135
136 data = {
137 "id": "%032x" % random.getrandbits(128),
138 "culprit": keyword_field(culprit),
139 "exception": {
140 "message": message,
141 "type": keyword_field(str(exc_type)),
142 "module": keyword_field(str(exc_module)),
143 "stacktrace": frames,
144 },
145 }
146 if hasattr(exc_value, "_elastic_apm_span_id"):
147 data["parent_id"] = exc_value._elastic_apm_span_id
148 del exc_value._elastic_apm_span_id
149 depth = kwargs.get("_exc_chain_depth", 0)
150 if depth > EXCEPTION_CHAIN_MAX_DEPTH:
151 return
152 cause = exc_value.__cause__
153 chained_context = exc_value.__context__
154
155 # we follow the pattern of Python itself here and only capture the chained exception
156 # if cause is not None and __suppress_context__ is False
157 if chained_context and not (exc_value.__suppress_context__ and cause is None):
158 if cause:
159 chained_exc_type = type(cause)
160 chained_exc_value = cause
161 else:
162 chained_exc_type = type(chained_context)
163 chained_exc_value = chained_context
164 chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__
165
166 chained_cause = Exception.capture(
167 client, exc_info=chained_exc_info, culprit="None", _exc_chain_depth=depth + 1
168 )
169 if chained_cause:
170 data["exception"]["cause"] = [chained_cause["exception"]]
171 return data
172
173
174 class Message(BaseEvent):
175 """
176 Messages store the following metadata:
177
178 - message: 'My message from %s about %s'
179 - params: ('foo', 'bar')
180 """
181
182 @staticmethod
183 def to_string(client, data):
184 return data["log"]["message"]
185
186 @staticmethod
187 def get_hash(data):
188 msg = data["param_message"]
189 return [msg["message"]]
190
191 @staticmethod
192 def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):
193 if message:
194 param_message = {"message": message}
195 params = param_message.get("params")
196 message = param_message["message"] % params if params else param_message["message"]
197 data = kwargs.get("data", {})
198 message_data = {
199 "id": "%032x" % random.getrandbits(128),
200 "log": {
201 "level": keyword_field(level or "error"),
202 "logger_name": keyword_field(logger_name or "__root__"),
203 "message": message,
204 "param_message": keyword_field(param_message["message"]),
205 },
206 }
207 if isinstance(data.get("stacktrace"), dict):
208 message_data["log"]["stacktrace"] = data["stacktrace"]["frames"]
209 if kwargs.get("exception"):
210 message_data["culprit"] = kwargs["exception"]["culprit"]
211 message_data["exception"] = kwargs["exception"]["exception"]
212 return message_data
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticapm/events.py b/elasticapm/events.py
--- a/elasticapm/events.py
+++ b/elasticapm/events.py
@@ -33,7 +33,7 @@
import sys
from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH
-from elasticapm.utils import varmap
+from elasticapm.utils import encoding, varmap
from elasticapm.utils.encoding import keyword_field, shorten, to_unicode
from elasticapm.utils.logging import get_logger
from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames
@@ -133,6 +133,8 @@
else:
message = "%s: %s" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)
+ message = encoding.long_field(message)
+
data = {
"id": "%032x" % random.getrandbits(128),
"culprit": keyword_field(culprit),
|
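A short note on the fix above: the patch routes the formatted message through `encoding.long_field` before it is attached to the error event, so oversized strings are cut down on the agent side instead of being rejected by the APM Server. The helper's real implementation is not shown in this record; the sketch below only illustrates that kind of truncation, and the 10,000-character cap is an assumption chosen for the example, not the agent's actual limit.

```python
# Illustrative sketch only -- not elasticapm.utils.encoding.long_field itself.
# Assumes a hypothetical 10,000-character cap; the agent's real limit may differ.
def truncate_long_field(value, max_length=10_000):
    """Return value unchanged unless it is a string longer than max_length."""
    if isinstance(value, str) and len(value) > max_length:
        return value[: max_length - 3] + "..."
    return value

message = "Exception: " + "test" * 1_000_000   # mimics the issue's oversized message
print(len(truncate_long_field(message)))        # 10000, small enough to ship
```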
{"golden_diff": "diff --git a/elasticapm/events.py b/elasticapm/events.py\n--- a/elasticapm/events.py\n+++ b/elasticapm/events.py\n@@ -33,7 +33,7 @@\n import sys\n \n from elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\n-from elasticapm.utils import varmap\n+from elasticapm.utils import encoding, varmap\n from elasticapm.utils.encoding import keyword_field, shorten, to_unicode\n from elasticapm.utils.logging import get_logger\n from elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n@@ -133,6 +133,8 @@\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n \n+ message = encoding.long_field(message)\n+\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n", "issue": "FastAPI/Starlette: Truncate exception message to prevent \"event exceeded the permitted size\" error from APM Server\n**Describe the bug**: \r\nThe capture_exception() method is sending the whole exception message to the APM Server and when the message exceeds the maximum size, the APM Server returns the \"event exceeded the permitted size\" error. \r\n\r\nI'm sorry if it is not a bug, but I believe that the apm-agent should truncate the message before sending it to the APM Server.\r\n\r\n\r\n**To Reproduce**\r\nCreate a FastAPI route that always raise an error with a large exception message (as the example below) and it will show an \"event exceeded the permitted size\" error on the next attempt to send events to APM server\r\n\r\n```\r\[email protected]('/apm_test/')\r\nasync def apm_test():\r\n try:\r\n raise Exception('test' * 1000000)\r\n except Exception as e:\r\n apm_client.capture_exception()\r\n return PlainTextResponse('', 200)\r\n```\r\n\r\n**Environment**\r\n- OS: ubuntu 20.04\r\n- Python version: 3.9.13\r\n- Framework and version: fastapi==0.79.0\r\n- APM Server version: 8.3.0\r\n- Agent version: 6.11.0\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport random\nimport sys\n\nfrom elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\nfrom elasticapm.utils import varmap\nfrom elasticapm.utils.encoding import keyword_field, shorten, to_unicode\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n\n__all__ = (\"BaseEvent\", \"Exception\", \"Message\")\n\nlogger = get_logger(\"elasticapm.events\")\n\n\nclass BaseEvent(object):\n @staticmethod\n def to_string(client, data):\n raise NotImplementedError\n\n @staticmethod\n def capture(client, **kwargs):\n return {}\n\n\nclass Exception(BaseEvent):\n \"\"\"\n Exceptions store the following metadata:\n\n - value: 'My exception value'\n - type: 'ClassName'\n - module '__builtin__' (i.e. __builtin__.TypeError)\n - frames: a list of serialized frames (see _get_traceback_frames)\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n exc = data[\"exception\"]\n if exc[\"value\"]:\n return \"%s: %s\" % (exc[\"type\"], exc[\"value\"])\n return exc[\"type\"]\n\n @staticmethod\n def get_hash(data):\n exc = data[\"exception\"]\n output = [exc[\"type\"]]\n for frame in data[\"stacktrace\"][\"frames\"]:\n output.append(frame[\"module\"])\n output.append(frame[\"function\"])\n return output\n\n @staticmethod\n def capture(client, exc_info=None, **kwargs):\n culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None\n new_exc_info = False\n if not exc_info or exc_info is True:\n new_exc_info = True\n exc_info = sys.exc_info()\n\n if exc_info == (None, None, None):\n raise ValueError(\"No exception found: capture_exception requires an active exception.\")\n\n try:\n exc_type, exc_value, exc_traceback = exc_info\n\n frames = get_stack_info(\n iter_traceback_frames(exc_traceback, config=client.config),\n with_locals=client.config.collect_local_variables in (\"errors\", \"all\"),\n library_frame_context_lines=client.config.source_lines_error_library_frames,\n in_app_frame_context_lines=client.config.source_lines_error_app_frames,\n include_paths_re=client.include_paths_re,\n exclude_paths_re=client.exclude_paths_re,\n locals_processor_func=lambda local_var: varmap(\n lambda k, val: shorten(\n val,\n list_length=client.config.local_var_list_max_length,\n string_length=client.config.local_var_max_length,\n dict_length=client.config.local_var_dict_max_length,\n ),\n local_var,\n ),\n )\n\n culprit = kwargs.get(\"culprit\", None) or get_culprit(\n frames, client.config.include_paths, client.config.exclude_paths\n )\n\n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n exc_type = exc_type.__name__\n else:\n exc_module = None\n exc_type = exc_type.__name__\n finally:\n if new_exc_info:\n try:\n del exc_info\n del exc_traceback\n except Exception as e:\n logger.exception(e)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n else:\n message = \"%s: %s\" % (exc_type, to_unicode(exc_value)) if exc_value else str(exc_type)\n\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": 
keyword_field(culprit),\n \"exception\": {\n \"message\": message,\n \"type\": keyword_field(str(exc_type)),\n \"module\": keyword_field(str(exc_module)),\n \"stacktrace\": frames,\n },\n }\n if hasattr(exc_value, \"_elastic_apm_span_id\"):\n data[\"parent_id\"] = exc_value._elastic_apm_span_id\n del exc_value._elastic_apm_span_id\n depth = kwargs.get(\"_exc_chain_depth\", 0)\n if depth > EXCEPTION_CHAIN_MAX_DEPTH:\n return\n cause = exc_value.__cause__\n chained_context = exc_value.__context__\n\n # we follow the pattern of Python itself here and only capture the chained exception\n # if cause is not None and __suppress_context__ is False\n if chained_context and not (exc_value.__suppress_context__ and cause is None):\n if cause:\n chained_exc_type = type(cause)\n chained_exc_value = cause\n else:\n chained_exc_type = type(chained_context)\n chained_exc_value = chained_context\n chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__\n\n chained_cause = Exception.capture(\n client, exc_info=chained_exc_info, culprit=\"None\", _exc_chain_depth=depth + 1\n )\n if chained_cause:\n data[\"exception\"][\"cause\"] = [chained_cause[\"exception\"]]\n return data\n\n\nclass Message(BaseEvent):\n \"\"\"\n Messages store the following metadata:\n\n - message: 'My message from %s about %s'\n - params: ('foo', 'bar')\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n return data[\"log\"][\"message\"]\n\n @staticmethod\n def get_hash(data):\n msg = data[\"param_message\"]\n return [msg[\"message\"]]\n\n @staticmethod\n def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):\n if message:\n param_message = {\"message\": message}\n params = param_message.get(\"params\")\n message = param_message[\"message\"] % params if params else param_message[\"message\"]\n data = kwargs.get(\"data\", {})\n message_data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"log\": {\n \"level\": keyword_field(level or \"error\"),\n \"logger_name\": keyword_field(logger_name or \"__root__\"),\n \"message\": message,\n \"param_message\": keyword_field(param_message[\"message\"]),\n },\n }\n if isinstance(data.get(\"stacktrace\"), dict):\n message_data[\"log\"][\"stacktrace\"] = data[\"stacktrace\"][\"frames\"]\n if kwargs.get(\"exception\"):\n message_data[\"culprit\"] = kwargs[\"exception\"][\"culprit\"]\n message_data[\"exception\"] = kwargs[\"exception\"][\"exception\"]\n return message_data\n", "path": "elasticapm/events.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, 
BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport random\nimport sys\n\nfrom elasticapm.conf.constants import EXCEPTION_CHAIN_MAX_DEPTH\nfrom elasticapm.utils import encoding, varmap\nfrom elasticapm.utils.encoding import keyword_field, shorten, to_unicode\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.stacks import get_culprit, get_stack_info, iter_traceback_frames\n\n__all__ = (\"BaseEvent\", \"Exception\", \"Message\")\n\nlogger = get_logger(\"elasticapm.events\")\n\n\nclass BaseEvent(object):\n @staticmethod\n def to_string(client, data):\n raise NotImplementedError\n\n @staticmethod\n def capture(client, **kwargs):\n return {}\n\n\nclass Exception(BaseEvent):\n \"\"\"\n Exceptions store the following metadata:\n\n - value: 'My exception value'\n - type: 'ClassName'\n - module '__builtin__' (i.e. __builtin__.TypeError)\n - frames: a list of serialized frames (see _get_traceback_frames)\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n exc = data[\"exception\"]\n if exc[\"value\"]:\n return \"%s: %s\" % (exc[\"type\"], exc[\"value\"])\n return exc[\"type\"]\n\n @staticmethod\n def get_hash(data):\n exc = data[\"exception\"]\n output = [exc[\"type\"]]\n for frame in data[\"stacktrace\"][\"frames\"]:\n output.append(frame[\"module\"])\n output.append(frame[\"function\"])\n return output\n\n @staticmethod\n def capture(client, exc_info=None, **kwargs):\n culprit = exc_value = exc_type = exc_module = frames = exc_traceback = None\n new_exc_info = False\n if not exc_info or exc_info is True:\n new_exc_info = True\n exc_info = sys.exc_info()\n\n if exc_info == (None, None, None):\n raise ValueError(\"No exception found: capture_exception requires an active exception.\")\n\n try:\n exc_type, exc_value, exc_traceback = exc_info\n\n frames = get_stack_info(\n iter_traceback_frames(exc_traceback, config=client.config),\n with_locals=client.config.collect_local_variables in (\"errors\", \"all\"),\n library_frame_context_lines=client.config.source_lines_error_library_frames,\n in_app_frame_context_lines=client.config.source_lines_error_app_frames,\n include_paths_re=client.include_paths_re,\n exclude_paths_re=client.exclude_paths_re,\n locals_processor_func=lambda local_var: varmap(\n lambda k, val: shorten(\n val,\n list_length=client.config.local_var_list_max_length,\n string_length=client.config.local_var_max_length,\n dict_length=client.config.local_var_dict_max_length,\n ),\n local_var,\n ),\n )\n\n culprit = kwargs.get(\"culprit\", None) or get_culprit(\n frames, client.config.include_paths, client.config.exclude_paths\n )\n\n if hasattr(exc_type, \"__module__\"):\n exc_module = exc_type.__module__\n exc_type = exc_type.__name__\n else:\n exc_module = None\n exc_type = exc_type.__name__\n finally:\n if new_exc_info:\n try:\n del exc_info\n del exc_traceback\n except Exception as e:\n logger.exception(e)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n else:\n message = \"%s: %s\" % (exc_type, 
to_unicode(exc_value)) if exc_value else str(exc_type)\n\n message = encoding.long_field(message)\n\n data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"culprit\": keyword_field(culprit),\n \"exception\": {\n \"message\": message,\n \"type\": keyword_field(str(exc_type)),\n \"module\": keyword_field(str(exc_module)),\n \"stacktrace\": frames,\n },\n }\n if hasattr(exc_value, \"_elastic_apm_span_id\"):\n data[\"parent_id\"] = exc_value._elastic_apm_span_id\n del exc_value._elastic_apm_span_id\n depth = kwargs.get(\"_exc_chain_depth\", 0)\n if depth > EXCEPTION_CHAIN_MAX_DEPTH:\n return\n cause = exc_value.__cause__\n chained_context = exc_value.__context__\n\n # we follow the pattern of Python itself here and only capture the chained exception\n # if cause is not None and __suppress_context__ is False\n if chained_context and not (exc_value.__suppress_context__ and cause is None):\n if cause:\n chained_exc_type = type(cause)\n chained_exc_value = cause\n else:\n chained_exc_type = type(chained_context)\n chained_exc_value = chained_context\n chained_exc_info = chained_exc_type, chained_exc_value, chained_context.__traceback__\n\n chained_cause = Exception.capture(\n client, exc_info=chained_exc_info, culprit=\"None\", _exc_chain_depth=depth + 1\n )\n if chained_cause:\n data[\"exception\"][\"cause\"] = [chained_cause[\"exception\"]]\n return data\n\n\nclass Message(BaseEvent):\n \"\"\"\n Messages store the following metadata:\n\n - message: 'My message from %s about %s'\n - params: ('foo', 'bar')\n \"\"\"\n\n @staticmethod\n def to_string(client, data):\n return data[\"log\"][\"message\"]\n\n @staticmethod\n def get_hash(data):\n msg = data[\"param_message\"]\n return [msg[\"message\"]]\n\n @staticmethod\n def capture(client, param_message=None, message=None, level=None, logger_name=None, **kwargs):\n if message:\n param_message = {\"message\": message}\n params = param_message.get(\"params\")\n message = param_message[\"message\"] % params if params else param_message[\"message\"]\n data = kwargs.get(\"data\", {})\n message_data = {\n \"id\": \"%032x\" % random.getrandbits(128),\n \"log\": {\n \"level\": keyword_field(level or \"error\"),\n \"logger_name\": keyword_field(logger_name or \"__root__\"),\n \"message\": message,\n \"param_message\": keyword_field(param_message[\"message\"]),\n },\n }\n if isinstance(data.get(\"stacktrace\"), dict):\n message_data[\"log\"][\"stacktrace\"] = data[\"stacktrace\"][\"frames\"]\n if kwargs.get(\"exception\"):\n message_data[\"culprit\"] = kwargs[\"exception\"][\"culprit\"]\n message_data[\"exception\"] = kwargs[\"exception\"][\"exception\"]\n return message_data\n", "path": "elasticapm/events.py"}]}
| 2,807 | 221 |
gh_patches_debug_3449
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-608
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
README broken on PyPI (must be reStructuredText)
The [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use Markdown.
Solution A: Convert to reST
---------------------
1. Rename the current `README.md` to `README.rst`
1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))
1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))
Solution B: Process before Upload
-------------------
1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))
1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))
------------
Both solutions above will render a nicely formatted, HTML-styled package description on PyPI.
Quality Assurance
--------------
Optionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 from os.path import abspath, dirname, join
5 from setuptools import find_packages, setup
6
7 def read_file(filename):
8 """Read the contents of a file located relative to setup.py"""
9 with open(join(abspath(dirname(__file__)), filename)) as thefile:
10 return thefile.read()
11
12 setup(
13 name='pip-tools',
14 use_scm_version=True,
15 url='https://github.com/jazzband/pip-tools/',
16 license='BSD',
17 author='Vincent Driessen',
18 author_email='[email protected]',
19 description=__doc__,
20 long_description=read_file('README.rst'),
21 packages=find_packages(exclude=['tests']),
22 setup_requires=['setuptools_scm'],
23 install_requires=[
24 'click>=6',
25 'first',
26 'six',
27 'setuptools'
28 ],
29 zip_safe=False,
30 entry_points={
31 'console_scripts': [
32 'pip-compile = piptools.scripts.compile:cli',
33 'pip-sync = piptools.scripts.sync:cli',
34 ],
35 },
36 platforms='any',
37 classifiers=[
38 'Development Status :: 5 - Production/Stable',
39 'Intended Audience :: Developers',
40 'Intended Audience :: System Administrators',
41 'License :: OSI Approved :: BSD License',
42 'Operating System :: OS Independent',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Programming Language :: Python :: 3.5',
49 'Programming Language :: Python :: 3.6',
50 'Topic :: System :: Systems Administration',
51 ]
52 )
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
license='BSD',
author='Vincent Driessen',
author_email='[email protected]',
- description=__doc__,
+ description=__doc__.strip(),
long_description=read_file('README.rst'),
packages=find_packages(exclude=['tests']),
setup_requires=['setuptools_scm'],
|
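Worth noting about the one-line fix above: the short `description` comes from the module docstring, and a docstring whose text starts on the line after the opening quotes begins with a newline. A multi-line description field can presumably mangle the generated package metadata, which in turn affects how the long description renders on PyPI. A standalone illustration (this is not pip-tools' actual setup.py, just a file showing the docstring behaviour):

```python
"""
pip-tools keeps your pinned dependencies fresh.
"""
# Saved as its own module, this shows why .strip() matters: the docstring starts
# on the second line, so __doc__ carries leading and trailing newlines.
print(repr(__doc__))          # '\npip-tools keeps your pinned dependencies fresh.\n'
print(repr(__doc__.strip()))  # 'pip-tools keeps your pinned dependencies fresh.'
```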
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n- description=__doc__,\n+ description=__doc__.strip(),\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n", "issue": "README broken on PyPI (must be reStructuredText)\nThe [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.\r\n\r\nSolution A: Convert to reST\r\n---------------------\r\n\r\n1. Rename the current `README.md` to `README.rst`\r\n1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))\r\n1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))\r\n\r\nSolution B: Process before Upload\r\n-------------------\r\n\r\n1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))\r\n1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))\r\n\r\n------------\r\n\r\nBoth solutions above will render a nicely formatted, HTML-styled package description on PyPI.\r\n\r\nQuality Assurance\r\n--------------\r\n\r\nOptionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\npip-tools keeps 
your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__.strip(),\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]}
| 1,116 | 98 |
gh_patches_debug_21155
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-1182
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ExcelDataSet not working after pandas 1.4.0 release (python 3.8)
## Description
On a fresh new project, after installing the extra dependency `kedro[pandas.ExcelDataSet]`, the user receives a runtime error when trying to load an xlsx file dataset via the data catalog. The cause of the issue seems to be an incompatibility with the new pandas `v1.4.0` and `xlrd~=1.0`, the latter being enforced by kedro `0.17.6`.
## Context
The error occurred while I was following the exact steps of the spaceflights tutorial. In order to load the `shuttles` dataset (stored as an xlsx file), I was instructed to install the extra dependency `kedro[pandas.ExcelDataSet]`. After installing the dependency and trying to load the dataset via the catalog, I received the following error: `Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).`
After some debugging, I discovered that pandas released version `1.4.0` two days ago, and it bumped the minimum version of optional dependency xlrd to `2.0.1` ([source](https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#increased-minimum-versions-for-dependencies), [source2](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L40)). As kedro enforces `xlrd~=1.0` via extra dependency `kedro[pandas.ExcelDataSet]` ([source](https://github.com/kedro-org/kedro/blob/0.17.6/setup.py#L75)), pandas checks that the currently installed version of xlrd is `1.2.0` and raises the error before importing the package ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L133-L152)).
I repeated the same steps using python 3.7, but because pandas 1.4.0 only supports python 3.8+ ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/setup.cfg#L40)), the installed version of pandas was 1.3.5. When I tried to load the dataset, I received no errors (although I got a future warning saying xlrd will not support xlsx files in version >= 2.0)
## Steps to Reproduce
1. Create a virtualenv with python 3.8 and activate it
2. Run `pip install kedro==0.17.6`
3. Run `kedro new`
4. `cd` to project dir
5. Run `kedro install`
6. Replace the line `kedro==0.17.6` with `kedro[pandas.ExcelDataSet]==0.17.6` in src/requirements.in
7. Run `kedro build-reqs && kedro install`
8. Add file `shuttles.xlsx` (from spaceflights tutorial) to folder `data/01_raw/`
9. Add the dataset information to `catalog.yml`:
```yml
shuttles:
type: pandas.ExcelDataSet
filepath: data/01_raw/shuttles.xlsx
```
10. Run `kedro ipython`
11. Run python code: `shuttles = catalog.load("shuttles")`
## Expected Result
The xlsx file should've been loaded into memory as a pandas Dataframe
## Actual Result
I received the following error:
```
DataSetError: Failed while loading data from data set ExcelDataSet(filepath=/.../data/01_raw/shuttles.xlsx, load_args={'engine': xlrd}, protocol=file, save_args={'index': False}, writer_args={'engine': xlsxwriter}).
Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).
```
## Your Environment
* Kedro version used (`pip show kedro` or `kedro -V`): 0.17.6
* Python version used (`python -V`): 3.8.12
* Operating system and version: WSL: Ubuntu 20.04
* Pandas version: 1.4.0
* xlrd version: 1.2.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from codecs import open
3 from glob import glob
4 from itertools import chain
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 name = "kedro"
10 here = path.abspath(path.dirname(__file__))
11
12
13 PANDAS = "pandas>=0.24"
14 SPARK = "pyspark>=2.2, <4.0"
15 HDFS = "hdfs>=2.5.8, <3.0"
16 S3FS = "s3fs>=0.3.0, <0.5"
17
18 # get package version
19 with open(path.join(here, name, "__init__.py"), encoding="utf-8") as f:
20 result = re.search(r'__version__ = ["\']([^"\']+)', f.read())
21
22 if not result:
23 raise ValueError("Can't find the version in kedro/__init__.py")
24
25 version = result.group(1)
26
27 # get the dependencies and installs
28 with open("requirements.txt", encoding="utf-8") as f:
29 requires = [x.strip() for x in f if x.strip()]
30
31 # get test dependencies and installs
32 with open("test_requirements.txt", encoding="utf-8") as f:
33 test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")]
34
35
36 # Get the long description from the README file
37 with open(path.join(here, "README.md"), encoding="utf-8") as f:
38 readme = f.read()
39
40 doc_html_files = [
41 name.replace("kedro/", "", 1)
42 for name in glob("kedro/framework/html/**/*", recursive=True)
43 ]
44
45 template_files = []
46 for pattern in ["**/*", "**/.*", "**/.*/**", "**/.*/.**"]:
47 template_files.extend(
48 [
49 name.replace("kedro/", "", 1)
50 for name in glob("kedro/templates/" + pattern, recursive=True)
51 ]
52 )
53
54
55 def _collect_requirements(requires):
56 return sorted(set(chain.from_iterable(requires.values())))
57
58
59 api_require = {"api.APIDataSet": ["requests~=2.20"]}
60 biosequence_require = {"biosequence.BioSequenceDataSet": ["biopython~=1.73"]}
61 dask_require = {
62 "dask.ParquetDataSet": [
63 "dask>=2021.10.0, <2022.01; python_version > '3.6'",
64 "dask[complete]~=2.6; python_version == '3.6'",
65 ]
66 }
67 geopandas_require = {
68 "geopandas.GeoJSONDataSet": ["geopandas>=0.6.0, <1.0", "pyproj>=2.2.0, <3.0"]
69 }
70 matplotlib_require = {"matplotlib.MatplotlibWriter": ["matplotlib>=3.0.3, <4.0"]}
71 holoviews_require = {"holoviews.HoloviewsWriter": ["holoviews~=1.13.0"]}
72 networkx_require = {"networkx.NetworkXDataSet": ["networkx~=2.4"]}
73 pandas_require = {
74 "pandas.CSVDataSet": [PANDAS],
75 "pandas.ExcelDataSet": [PANDAS, "xlrd~=1.0", "xlsxwriter~=1.0"],
76 "pandas.AppendableExcelDataSet": [PANDAS, "openpyxl>=3.0.3, <4.0"],
77 "pandas.FeatherDataSet": [PANDAS],
78 "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
79 "pandas.GBQQueryDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
80 "pandas.HDFDataSet": [PANDAS, "tables~=3.6.0; platform_system == 'Windows'",
81 "tables~=3.6; platform_system != 'Windows'"],
82 "pandas.JSONDataSet": [PANDAS],
83 "pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"],
84 "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
85 "pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"],
86 "pandas.GenericDataSet": [PANDAS],
87 }
88 pillow_require = {"pillow.ImageDataSet": ["Pillow~=8.0"]}
89 plotly_require = {
90 "plotly.PlotlyDataSet": [PANDAS, "plotly>=4.8.0, <6.0"],
91 "plotly.JSONDataSet": ["plotly>=4.8.0, <6.0"],
92 }
93 spark_require = {
94 "spark.SparkDataSet": [SPARK, HDFS, S3FS],
95 "spark.SparkHiveDataSet": [SPARK, HDFS, S3FS],
96 "spark.SparkJDBCDataSet": [SPARK, HDFS, S3FS],
97 "spark.DeltaTableDataSet": [SPARK, HDFS, S3FS, "delta-spark~=1.0"],
98 }
99 tensorflow_required = {
100 "tensorflow.TensorflowModelDataset": [
101 # currently only TensorFlow V2 supported for saving and loading.
102 # V1 requires HDF5 and serializes differently
103 "tensorflow~=2.0"
104 ]
105 }
106 yaml_require = {"yaml.YAMLDataSet": [PANDAS, "PyYAML>=4.2, <6.0"]}
107
108 extras_require = {
109 "api": _collect_requirements(api_require),
110 "biosequence": _collect_requirements(biosequence_require),
111 "dask": _collect_requirements(dask_require),
112 "docs": [
113 "docutils==0.16",
114 "sphinx~=3.4.3",
115 "sphinx_rtd_theme==0.4.1",
116 "nbsphinx==0.8.1",
117 "nbstripout~=0.4",
118 "recommonmark==0.7.1",
119 "sphinx-autodoc-typehints==1.11.1",
120 "sphinx_copybutton==0.3.1",
121 "ipykernel>=5.3, <7.0",
122 ],
123 "geopandas": _collect_requirements(geopandas_require),
124 "ipython": ["ipython~=7.10"],
125 "matplotlib": _collect_requirements(matplotlib_require),
126 "holoviews": _collect_requirements(holoviews_require),
127 "networkx": _collect_requirements(networkx_require),
128 "notebook_templates": ["nbconvert>=5.3.1, <6.0", "nbformat~=4.4"],
129 "pandas": _collect_requirements(pandas_require),
130 "pillow": _collect_requirements(pillow_require),
131 "plotly": _collect_requirements(plotly_require),
132 "profilers": ["memory_profiler>=0.50.0, <1.0"],
133 "spark": _collect_requirements(spark_require),
134 "tensorflow": _collect_requirements(tensorflow_required),
135 "yaml": _collect_requirements(yaml_require),
136 **api_require,
137 **biosequence_require,
138 **dask_require,
139 **geopandas_require,
140 **matplotlib_require,
141 **holoviews_require,
142 **networkx_require,
143 **pandas_require,
144 **pillow_require,
145 **plotly_require,
146 **spark_require,
147 **tensorflow_required,
148 **yaml_require,
149 }
150
151 extras_require["all"] = _collect_requirements(extras_require)
152
153 setup(
154 name=name,
155 version=version,
156 description="Kedro helps you build production-ready data and analytics pipelines",
157 license="Apache Software License (Apache 2.0)",
158 long_description=readme,
159 long_description_content_type="text/markdown",
160 url="https://github.com/kedro-org/kedro",
161 python_requires=">=3.6, <3.9",
162 packages=find_packages(exclude=["docs*", "tests*", "tools*", "features*"]),
163 include_package_data=True,
164 tests_require=test_requires,
165 install_requires=requires,
166 author="Kedro",
167 entry_points={"console_scripts": ["kedro = kedro.framework.cli:main"]},
168 package_data={
169 name: ["py.typed", "test_requirements.txt"] + template_files + doc_html_files
170 },
171 zip_safe=False,
172 keywords="pipelines, machine learning, data pipelines, data science, data engineering",
173 classifiers=[
174 "Development Status :: 4 - Beta",
175 "Programming Language :: Python :: 3.6",
176 "Programming Language :: Python :: 3.7",
177 "Programming Language :: Python :: 3.8",
178 ],
179 extras_require=extras_require,
180 )
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
here = path.abspath(path.dirname(__file__))
-PANDAS = "pandas>=0.24"
+PANDAS = "pandas>=0.24, <1.4"
SPARK = "pyspark>=2.2, <4.0"
HDFS = "hdfs>=2.5.8, <3.0"
S3FS = "s3fs>=0.3.0, <0.5"
@@ -77,8 +77,11 @@
"pandas.FeatherDataSet": [PANDAS],
"pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
"pandas.GBQQueryDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
- "pandas.HDFDataSet": [PANDAS, "tables~=3.6.0; platform_system == 'Windows'",
- "tables~=3.6; platform_system != 'Windows'"],
+ "pandas.HDFDataSet": [
+ PANDAS,
+ "tables~=3.6.0; platform_system == 'Windows'",
+ "tables~=3.6; platform_system != 'Windows'",
+ ],
"pandas.JSONDataSet": [PANDAS],
"pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"],
"pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
|
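One practical aside on the pin above: an environment can be checked for the broken combination the issue describes — pandas 1.4.x next to xlrd 1.x — with nothing but the standard library. The snippet below is a rough check and not part of kedro; the version-prefix comparison is deliberately simplistic.

```python
# Rough environment check for the pandas/xlrd mismatch described in the issue.
# Python 3.8+: importlib.metadata is in the standard library.
from importlib.metadata import PackageNotFoundError, version

def installed(pkg):
    try:
        return version(pkg)
    except PackageNotFoundError:
        return None

pandas_ver, xlrd_ver = installed("pandas"), installed("xlrd")
print(f"pandas={pandas_ver}, xlrd={xlrd_ver}")
if (pandas_ver or "").startswith("1.4") and (xlrd_ver or "").startswith("1."):
    print("pandas>=1.4 wants xlrd>=2.0.1 -- pandas.ExcelDataSet loads will fail here.")
```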
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n here = path.abspath(path.dirname(__file__))\n \n \n-PANDAS = \"pandas>=0.24\"\n+PANDAS = \"pandas>=0.24, <1.4\"\n SPARK = \"pyspark>=2.2, <4.0\"\n HDFS = \"hdfs>=2.5.8, <3.0\"\n S3FS = \"s3fs>=0.3.0, <0.5\"\n@@ -77,8 +77,11 @@\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.GBQQueryDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n- \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6.0; platform_system == 'Windows'\",\n- \"tables~=3.6; platform_system != 'Windows'\"],\n+ \"pandas.HDFDataSet\": [\n+ PANDAS,\n+ \"tables~=3.6.0; platform_system == 'Windows'\",\n+ \"tables~=3.6; platform_system != 'Windows'\",\n+ ],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n", "issue": "ExcelDataSet not working after pandas 1.4.0 release (python 3.8)\n## Description\r\n\r\nOn a fresh new project, after installing the extra dependency `kedro[pandas.ExcelDataSet]`, the user receives a runtime error when trying to load a xslx file dataset via the data catalog. The cause of the issue seems to be an incompatibility with the new pandas `v1.4.0` and `xlrd~=1.0`, the latter being enforced by kedro `17.6.0`.\r\n\r\n## Context\r\n\r\nThe error occurred while I was following the exact steps of the spaceflights tutorial. In order to load the `shuttles` dataset (stored as a xslx file), I was instructed to install the extra dependency `kedro[pandas.ExcelDataSet]`. After installing the dependency and trying to load the dataset via the catalog, I received the following error: `Pandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).`\r\n\r\nAfter some debugging, I discovered that pandas released version `1.4.0` two days ago, and it bumped the mininum version of optional dependency xlrd to `2.0.1` ([source](https://pandas.pydata.org/docs/whatsnew/v1.4.0.html#increased-minimum-versions-for-dependencies), [source2](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L40)). As kedro enforces `xlrd~=1.0` via extra dependency `kedro[pandas.ExcelDataSet]` ([source](https://github.com/kedro-org/kedro/blob/0.17.6/setup.py#L75)), pandas checks that the currently installed version of xlrd is `1.2.0` and raises the error before importing the package ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/pandas/compat/_optional.py#L133-L152)).\r\n\r\nI repeated the same steps using python 3.7, but because pandas 1.4.0 only supports python 3.8+ ([source](https://github.com/pandas-dev/pandas/blob/v1.4.0/setup.cfg#L40)), the installed version of pandas was 1.3.5. When I tried to load the dataset, I received no errors (although I got a future warning saying xlrd will not support xlsx files in version >= 2.0)\r\n\r\n## Steps to Reproduce\r\n1. Create a virtualenv with python 3.8 and activate it\r\n2. Run `pip install kedro==0.17.6`\r\n3. Run `kedro new`\r\n4. `cd` to project dir\r\n5. Run `kedro install`\r\n6. Replace the line `kedro==0.17.6` with `kedro[pandas.ExcelDataSet]==0.17.6` in src/requirements.in\r\n7. Run `kedro build-reqs && kedro install`\r\n8. Add file `shuttles.xlsx` (from spaceflights tutorial) to folder `data/01_raw/`\r\n9. Add the dataset information to `catalog.yml`:\r\n ```yml\r\n shuttles:\r\n type: pandas.ExcelDataSet\r\n filepath: data/01_raw/shuttles.xlsx\r\n ```\r\n10. 
Run `kedro ipython`\r\n11. Run python code: `shuttles = catalog.load(\"shuttles\")`\r\n\r\n## Expected Result\r\nThe xlsx file should've been loaded into memory as a pandas Dataframe\r\n\r\n## Actual Result\r\nI received the following error:\r\n\r\n```\r\nDataSetError: Failed while loading data from data set ExcelDataSet(filepath=/.../data/01_raw/shuttles.xlsx, load_args={'engine': xlrd}, protocol=file, save_args={'index': False}, writer_args={'engine': xlsxwriter}).\r\nPandas requires version '2.0.1' or newer of 'xlrd' (version '1.2.0' currently installed).\r\n```\r\n\r\n## Your Environment\r\n\r\n* Kedro version used (`pip show kedro` or `kedro -V`): 0.17.6\r\n* Python version used (`python -V`): 3.8.12\r\n* Operating system and version: WSL: Ubuntu 20.04\r\n* Pandas version: 1.4.0\r\n* xlrd version: 1.2.0\r\n\n", "before_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.GBQQueryDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6.0; platform_system == 'Windows'\",\n 
\"tables~=3.6; platform_system != 'Windows'\"],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n \"spark.DeltaTableDataSet\": [SPARK, HDFS, S3FS, \"delta-spark~=1.0\"],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n \"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/kedro-org/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"Kedro\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language 
:: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24, <1.4\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.GBQQueryDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [\n PANDAS,\n \"tables~=3.6.0; platform_system == 'Windows'\",\n \"tables~=3.6; platform_system != 'Windows'\",\n ],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n \"spark.DeltaTableDataSet\": [SPARK, HDFS, S3FS, 
\"delta-spark~=1.0\"],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n \"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/kedro-org/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"Kedro\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
| 3,580 | 379 |
gh_patches_debug_20070
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-5020
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Potential new `datetime` issues
### Describe the issue
See CI failures
* https://github.com/ietf-tools/datatracker/actions/runs/3899998769/jobs/6660186805
* https://github.com/ietf-tools/datatracker/actions/runs/3900011570/jobs/6660211516
There is also this one, which is different but possibly related:
* https://github.com/ietf-tools/datatracker/actions/runs/3900217767/jobs/6660629415
CC @jennifer-richards
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/iesg/agenda.py`
Content:
```
1 # Copyright The IETF Trust 2013-2020, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4
5 # utilities for constructing agendas for IESG telechats
6
7 import io
8 import datetime
9 from collections import OrderedDict
10
11 from django.conf import settings
12 from django.http import Http404
13
14 import debug # pyflakes:ignore
15
16 from ietf.doc.models import Document, LastCallDocEvent, ConsensusDocEvent
17 from ietf.doc.utils_search import fill_in_telechat_date
18 from ietf.iesg.models import TelechatDate, TelechatAgendaItem
19 from ietf.review.utils import review_assignments_to_list_for_docs
20 from ietf.utils.timezone import date_today
21
22 def get_agenda_date(date=None):
23 if not date:
24 try:
25 return TelechatDate.objects.active().order_by('date')[0].date
26 except IndexError:
27 return date_today()
28 else:
29 try:
30 return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, "%Y-%m-%d").date()).date
31 except (ValueError, TelechatDate.DoesNotExist):
32 raise Http404
33
34 def get_doc_section(doc):
35 if doc.type_id == 'draft':
36 if doc.intended_std_level_id in ["bcp", "ds", "ps", "std"]:
37 s = "2"
38 else:
39 s = "3"
40
41 g = doc.group_acronym()
42 if g and str(g) != 'none':
43 s += ".1"
44 elif s == "3" and doc.stream_id in ("ise","irtf"):
45 s += ".3"
46 else:
47 s += ".2"
48 if doc.get_state_slug() != "rfc" and doc.get_state_slug('draft-iesg') not in ("lc", "writeupw", "goaheadw", "iesg-eva", "defer", "approved", "ann", "rfcqueue", "pub"):
49 s += ".3"
50 elif doc.returning_item():
51 s += ".2"
52 else:
53 s += ".1"
54
55 elif doc.type_id == 'charter':
56 s = "4"
57 if doc.group.state_id in ('active', 'dormant'):
58 s += ".2"
59 else:
60 s += ".1"
61 if doc.get_state_slug() in ('extrev', 'iesgrev'):
62 s += '.2'
63 else:
64 s += '.1'
65
66 elif doc.type_id == 'statchg':
67 protocol_action = False
68 for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):
69 if relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):
70 protocol_action = True
71 if protocol_action:
72 s = "2.3"
73 else:
74 s = "3.3"
75 if doc.get_state_slug() not in ("iesgeval", "defer", "appr-pr", "appr-pend", "appr-sent"):
76 s += ".3"
77 elif doc.returning_item():
78 s += ".2"
79 else:
80 s += ".1"
81
82 elif doc.type_id == 'conflrev':
83 if not doc.get_state('conflrev') or doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):
84 s = "3.4.3"
85 elif doc.returning_item():
86 s = "3.4.2"
87 else:
88 s = "3.4.1"
89
90 return s
91
92 def agenda_sections():
93 return OrderedDict([
94 ('1', {'title':"Administrivia"}),
95 ('1.1', {'title':"Roll call"}),
96 ('1.2', {'title':"Bash the agenda"}),
97 ('1.3', {'title':"Approval of the minutes of past telechats"}),
98 ('1.4', {'title':"List of remaining action items from last telechat"}),
99 ('2', {'title':"Protocol actions"}),
100 ('2.1', {'title':"WG submissions"}),
101 ('2.1.1', {'title':"New items", 'docs': []}),
102 ('2.1.2', {'title':"Returning items", 'docs':[]}),
103 ('2.1.3', {'title':"For action", 'docs':[]}),
104 ('2.2', {'title':"Individual submissions"}),
105 ('2.2.1', {'title':"New items", 'docs':[]}),
106 ('2.2.2', {'title':"Returning items", 'docs':[]}),
107 ('2.2.3', {'title':"For action", 'docs':[]}),
108 ('2.3', {'title':"Status changes"}),
109 ('2.3.1', {'title':"New items", 'docs':[]}),
110 ('2.3.2', {'title':"Returning items", 'docs':[]}),
111 ('2.3.3', {'title':"For action", 'docs':[]}),
112 ('3', {'title':"Document actions"}),
113 ('3.1', {'title':"WG submissions"}),
114 ('3.1.1', {'title':"New items", 'docs':[]}),
115 ('3.1.2', {'title':"Returning items", 'docs':[]}),
116 ('3.1.3', {'title':"For action", 'docs':[]}),
117 ('3.2', {'title':"Individual submissions via AD"}),
118 ('3.2.1', {'title':"New items", 'docs':[]}),
119 ('3.2.2', {'title':"Returning items", 'docs':[]}),
120 ('3.2.3', {'title':"For action", 'docs':[]}),
121 ('3.3', {'title':"Status changes"}),
122 ('3.3.1', {'title':"New items", 'docs':[]}),
123 ('3.3.2', {'title':"Returning items", 'docs':[]}),
124 ('3.3.3', {'title':"For action", 'docs':[]}),
125 ('3.4', {'title':"IRTF and Independent Submission stream documents"}),
126 ('3.4.1', {'title':"New items", 'docs':[]}),
127 ('3.4.2', {'title':"Returning items", 'docs':[]}),
128 ('3.4.3', {'title':"For action", 'docs':[]}),
129 ('4', {'title':"Working Group actions"}),
130 ('4.1', {'title':"WG creation"}),
131 ('4.1.1', {'title':"Proposed for IETF review", 'docs':[]}),
132 ('4.1.2', {'title':"Proposed for approval", 'docs':[]}),
133 ('4.2', {'title':"WG rechartering"}),
134 ('4.2.1', {'title':"Under evaluation for IETF review", 'docs':[]}),
135 ('4.2.2', {'title':"Proposed for approval", 'docs':[]}),
136 ('5', {'title':"IAB news we can use"}),
137 ('6', {'title':"Management issues"}),
138 ('7', {'title':"Any Other Business (WG News, New Proposals, etc.)"}),
139 ])
140
141 def fill_in_agenda_administrivia(date, sections):
142 extra_info_files = (
143 ("1.1", "roll_call", settings.IESG_ROLL_CALL_FILE),
144 ("1.3", "minutes", settings.IESG_MINUTES_FILE),
145 ("1.4", "action_items", settings.IESG_TASK_FILE),
146 )
147
148 for s, key, filename in extra_info_files:
149 try:
150 with io.open(filename, 'r', encoding='utf-8', errors='replace') as f:
151 t = f.read().strip()
152 except IOError:
153 t = "(Error reading %s)" % filename
154
155 sections[s]["text"] = t
156
157 def fill_in_agenda_docs(date, sections, docs=None):
158 if not docs:
159 docs = Document.objects.filter(docevent__telechatdocevent__telechat_date=date)
160 docs = docs.select_related("stream", "group").distinct()
161 fill_in_telechat_date(docs)
162
163 review_assignments_for_docs = review_assignments_to_list_for_docs(docs)
164
165 for doc in docs:
166 if doc.telechat_date() != date:
167 continue
168
169 if not hasattr(doc, 'balloting_started'):
170 e = doc.latest_event(type="started_iesg_process")
171 doc.balloting_started = e.time if e else datetime.datetime.min
172
173 if doc.type_id == "draft":
174 s = doc.get_state("draft-iana-review")
175 if s: # and s.slug in ("not-ok", "changed", "need-rev"):
176 doc.iana_review_state = str(s)
177
178 if doc.get_state_slug("draft-iesg") == "lc":
179 e = doc.latest_event(LastCallDocEvent, type="sent_last_call")
180 if e:
181 doc.lastcall_expires = e.expires
182
183 if doc.stream_id in ("ietf", "irtf", "iab"):
184 doc.consensus = "Unknown"
185 e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
186 if e and (e.consensus != None):
187 doc.consensus = "Yes" if e.consensus else "No"
188
189 doc.review_assignments = review_assignments_for_docs.get(doc.name, [])
190 elif doc.type_id == "conflrev":
191 doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
192 elif doc.type_id == "charter":
193 pass
194
195 number = get_doc_section(doc)
196 if number: # and num in sections
197 sections[number]["docs"].append(doc)
198
199 # prune empty "For action" sections
200 empty_for_action = [n for n, section in sections.items()
201 if section["title"] == "For action" and not section["docs"]]
202 for num in empty_for_action:
203 del sections[num]
204
205 # Be careful to keep this the same as what's used in agenda_documents
206 for s in sections.values():
207 if "docs" in s:
208 s["docs"].sort(key=lambda d: d.balloting_started)
209
210 def fill_in_agenda_management_issues(date, sections):
211 s = "6.%s"
212 for i, item in enumerate(TelechatAgendaItem.objects.filter(type=3).order_by('id'), start=1):
213 sections[s % i] = { "title": item.title, "text": item.text }
214
215 def agenda_data(date=None):
216 """Return a dict with the different IESG telechat agenda components."""
217 date = get_agenda_date(date)
218 sections = agenda_sections()
219
220 fill_in_agenda_administrivia(date, sections)
221 fill_in_agenda_docs(date, sections)
222 fill_in_agenda_management_issues(date, sections)
223
224 return { 'date': date.isoformat(), 'sections': sections }
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py
--- a/ietf/iesg/agenda.py
+++ b/ietf/iesg/agenda.py
@@ -17,7 +17,7 @@
from ietf.doc.utils_search import fill_in_telechat_date
from ietf.iesg.models import TelechatDate, TelechatAgendaItem
from ietf.review.utils import review_assignments_to_list_for_docs
-from ietf.utils.timezone import date_today
+from ietf.utils.timezone import date_today, make_aware
def get_agenda_date(date=None):
if not date:
@@ -26,8 +26,9 @@
except IndexError:
return date_today()
else:
+ parsed_date = make_aware(datetime.datetime.strptime(date, "%Y-%m-%d"), settings.TIME_ZONE).date()
try:
- return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, "%Y-%m-%d").date()).date
+ return TelechatDate.objects.active().get(date=parsed_date).date
except (ValueError, TelechatDate.DoesNotExist):
raise Http404
|
{"golden_diff": "diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py\n--- a/ietf/iesg/agenda.py\n+++ b/ietf/iesg/agenda.py\n@@ -17,7 +17,7 @@\n from ietf.doc.utils_search import fill_in_telechat_date\n from ietf.iesg.models import TelechatDate, TelechatAgendaItem\n from ietf.review.utils import review_assignments_to_list_for_docs\n-from ietf.utils.timezone import date_today\n+from ietf.utils.timezone import date_today, make_aware\n \n def get_agenda_date(date=None):\n if not date:\n@@ -26,8 +26,9 @@\n except IndexError:\n return date_today()\n else:\n+ parsed_date = make_aware(datetime.datetime.strptime(date, \"%Y-%m-%d\"), settings.TIME_ZONE).date()\n try:\n- return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, \"%Y-%m-%d\").date()).date\n+ return TelechatDate.objects.active().get(date=parsed_date).date\n except (ValueError, TelechatDate.DoesNotExist):\n raise Http404\n", "issue": "Potential new `datetime` issues\n### Describe the issue\r\n\r\nSee CI failures\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3899998769/jobs/6660186805\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3900011570/jobs/6660211516\r\n\r\nThere is also this one, which is different but possibly related:\r\n* https://github.com/ietf-tools/datatracker/actions/runs/3900217767/jobs/6660629415\r\n\r\nCC @jennifer-richards \r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2013-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\n# utilities for constructing agendas for IESG telechats\n\nimport io\nimport datetime\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.http import Http404\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import Document, LastCallDocEvent, ConsensusDocEvent\nfrom ietf.doc.utils_search import fill_in_telechat_date\nfrom ietf.iesg.models import TelechatDate, TelechatAgendaItem\nfrom ietf.review.utils import review_assignments_to_list_for_docs\nfrom ietf.utils.timezone import date_today\n\ndef get_agenda_date(date=None):\n if not date:\n try:\n return TelechatDate.objects.active().order_by('date')[0].date\n except IndexError:\n return date_today()\n else:\n try:\n return TelechatDate.objects.active().get(date=datetime.datetime.strptime(date, \"%Y-%m-%d\").date()).date\n except (ValueError, TelechatDate.DoesNotExist):\n raise Http404\n\ndef get_doc_section(doc):\n if doc.type_id == 'draft':\n if doc.intended_std_level_id in [\"bcp\", \"ds\", \"ps\", \"std\"]:\n s = \"2\"\n else:\n s = \"3\"\n\n g = doc.group_acronym()\n if g and str(g) != 'none':\n s += \".1\"\n elif s == \"3\" and doc.stream_id in (\"ise\",\"irtf\"):\n s += \".3\"\n else:\n s += \".2\"\n if doc.get_state_slug() != \"rfc\" and doc.get_state_slug('draft-iesg') not in (\"lc\", \"writeupw\", \"goaheadw\", \"iesg-eva\", \"defer\", \"approved\", \"ann\", \"rfcqueue\", \"pub\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'charter':\n s = \"4\"\n if doc.group.state_id in ('active', 'dormant'):\n s += \".2\"\n else:\n s += \".1\"\n if doc.get_state_slug() in ('extrev', 'iesgrev'):\n s += '.2'\n else:\n s += '.1'\n\n elif doc.type_id == 'statchg':\n protocol_action = False\n for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):\n if 
relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):\n protocol_action = True\n if protocol_action:\n s = \"2.3\"\n else:\n s = \"3.3\"\n if doc.get_state_slug() not in (\"iesgeval\", \"defer\", \"appr-pr\", \"appr-pend\", \"appr-sent\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'conflrev':\n if not doc.get_state('conflrev') or doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):\n s = \"3.4.3\"\n elif doc.returning_item():\n s = \"3.4.2\"\n else:\n s = \"3.4.1\"\n\n return s\n\ndef agenda_sections():\n return OrderedDict([\n ('1', {'title':\"Administrivia\"}),\n ('1.1', {'title':\"Roll call\"}),\n ('1.2', {'title':\"Bash the agenda\"}),\n ('1.3', {'title':\"Approval of the minutes of past telechats\"}),\n ('1.4', {'title':\"List of remaining action items from last telechat\"}),\n ('2', {'title':\"Protocol actions\"}),\n ('2.1', {'title':\"WG submissions\"}),\n ('2.1.1', {'title':\"New items\", 'docs': []}),\n ('2.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.1.3', {'title':\"For action\", 'docs':[]}),\n ('2.2', {'title':\"Individual submissions\"}),\n ('2.2.1', {'title':\"New items\", 'docs':[]}),\n ('2.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.2.3', {'title':\"For action\", 'docs':[]}),\n ('2.3', {'title':\"Status changes\"}),\n ('2.3.1', {'title':\"New items\", 'docs':[]}),\n ('2.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.3.3', {'title':\"For action\", 'docs':[]}),\n ('3', {'title':\"Document actions\"}),\n ('3.1', {'title':\"WG submissions\"}),\n ('3.1.1', {'title':\"New items\", 'docs':[]}),\n ('3.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.1.3', {'title':\"For action\", 'docs':[]}),\n ('3.2', {'title':\"Individual submissions via AD\"}),\n ('3.2.1', {'title':\"New items\", 'docs':[]}),\n ('3.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.2.3', {'title':\"For action\", 'docs':[]}),\n ('3.3', {'title':\"Status changes\"}),\n ('3.3.1', {'title':\"New items\", 'docs':[]}),\n ('3.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.3.3', {'title':\"For action\", 'docs':[]}),\n ('3.4', {'title':\"IRTF and Independent Submission stream documents\"}),\n ('3.4.1', {'title':\"New items\", 'docs':[]}),\n ('3.4.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.4.3', {'title':\"For action\", 'docs':[]}),\n ('4', {'title':\"Working Group actions\"}),\n ('4.1', {'title':\"WG creation\"}),\n ('4.1.1', {'title':\"Proposed for IETF review\", 'docs':[]}),\n ('4.1.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('4.2', {'title':\"WG rechartering\"}),\n ('4.2.1', {'title':\"Under evaluation for IETF review\", 'docs':[]}),\n ('4.2.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('5', {'title':\"IAB news we can use\"}),\n ('6', {'title':\"Management issues\"}),\n ('7', {'title':\"Any Other Business (WG News, New Proposals, etc.)\"}),\n ])\n\ndef fill_in_agenda_administrivia(date, sections):\n extra_info_files = (\n (\"1.1\", \"roll_call\", settings.IESG_ROLL_CALL_FILE),\n (\"1.3\", \"minutes\", settings.IESG_MINUTES_FILE),\n (\"1.4\", \"action_items\", settings.IESG_TASK_FILE),\n )\n\n for s, key, filename in extra_info_files:\n try:\n with io.open(filename, 'r', encoding='utf-8', errors='replace') as f:\n t = f.read().strip()\n except IOError:\n t = \"(Error reading %s)\" % filename\n\n sections[s][\"text\"] = t\n\ndef 
fill_in_agenda_docs(date, sections, docs=None):\n if not docs:\n docs = Document.objects.filter(docevent__telechatdocevent__telechat_date=date)\n docs = docs.select_related(\"stream\", \"group\").distinct()\n fill_in_telechat_date(docs)\n\n review_assignments_for_docs = review_assignments_to_list_for_docs(docs)\n\n for doc in docs:\n if doc.telechat_date() != date:\n continue\n\n if not hasattr(doc, 'balloting_started'):\n e = doc.latest_event(type=\"started_iesg_process\")\n doc.balloting_started = e.time if e else datetime.datetime.min\n\n if doc.type_id == \"draft\":\n s = doc.get_state(\"draft-iana-review\")\n if s: # and s.slug in (\"not-ok\", \"changed\", \"need-rev\"):\n doc.iana_review_state = str(s)\n\n if doc.get_state_slug(\"draft-iesg\") == \"lc\":\n e = doc.latest_event(LastCallDocEvent, type=\"sent_last_call\")\n if e:\n doc.lastcall_expires = e.expires\n\n if doc.stream_id in (\"ietf\", \"irtf\", \"iab\"):\n doc.consensus = \"Unknown\"\n e = doc.latest_event(ConsensusDocEvent, type=\"changed_consensus\")\n if e and (e.consensus != None):\n doc.consensus = \"Yes\" if e.consensus else \"No\"\n\n doc.review_assignments = review_assignments_for_docs.get(doc.name, [])\n elif doc.type_id == \"conflrev\":\n doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document\n elif doc.type_id == \"charter\":\n pass\n\n number = get_doc_section(doc)\n if number: # and num in sections\n sections[number][\"docs\"].append(doc)\n\n # prune empty \"For action\" sections\n empty_for_action = [n for n, section in sections.items()\n if section[\"title\"] == \"For action\" and not section[\"docs\"]]\n for num in empty_for_action:\n del sections[num]\n\n # Be careful to keep this the same as what's used in agenda_documents\n for s in sections.values():\n if \"docs\" in s:\n s[\"docs\"].sort(key=lambda d: d.balloting_started)\n\ndef fill_in_agenda_management_issues(date, sections):\n s = \"6.%s\"\n for i, item in enumerate(TelechatAgendaItem.objects.filter(type=3).order_by('id'), start=1):\n sections[s % i] = { \"title\": item.title, \"text\": item.text }\n\ndef agenda_data(date=None):\n \"\"\"Return a dict with the different IESG telechat agenda components.\"\"\"\n date = get_agenda_date(date)\n sections = agenda_sections()\n\n fill_in_agenda_administrivia(date, sections)\n fill_in_agenda_docs(date, sections)\n fill_in_agenda_management_issues(date, sections)\n\n return { 'date': date.isoformat(), 'sections': sections }", "path": "ietf/iesg/agenda.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2013-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\n# utilities for constructing agendas for IESG telechats\n\nimport io\nimport datetime\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.http import Http404\n\nimport debug # pyflakes:ignore\n\nfrom ietf.doc.models import Document, LastCallDocEvent, ConsensusDocEvent\nfrom ietf.doc.utils_search import fill_in_telechat_date\nfrom ietf.iesg.models import TelechatDate, TelechatAgendaItem\nfrom ietf.review.utils import review_assignments_to_list_for_docs\nfrom ietf.utils.timezone import date_today, make_aware\n\ndef get_agenda_date(date=None):\n if not date:\n try:\n return TelechatDate.objects.active().order_by('date')[0].date\n except IndexError:\n return date_today()\n else:\n parsed_date = make_aware(datetime.datetime.strptime(date, \"%Y-%m-%d\"), settings.TIME_ZONE).date()\n try:\n return TelechatDate.objects.active().get(date=parsed_date).date\n except (ValueError, 
TelechatDate.DoesNotExist):\n raise Http404\n\ndef get_doc_section(doc):\n if doc.type_id == 'draft':\n if doc.intended_std_level_id in [\"bcp\", \"ds\", \"ps\", \"std\"]:\n s = \"2\"\n else:\n s = \"3\"\n\n g = doc.group_acronym()\n if g and str(g) != 'none':\n s += \".1\"\n elif s == \"3\" and doc.stream_id in (\"ise\",\"irtf\"):\n s += \".3\"\n else:\n s += \".2\"\n if doc.get_state_slug() != \"rfc\" and doc.get_state_slug('draft-iesg') not in (\"lc\", \"writeupw\", \"goaheadw\", \"iesg-eva\", \"defer\", \"approved\", \"ann\", \"rfcqueue\", \"pub\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'charter':\n s = \"4\"\n if doc.group.state_id in ('active', 'dormant'):\n s += \".2\"\n else:\n s += \".1\"\n if doc.get_state_slug() in ('extrev', 'iesgrev'):\n s += '.2'\n else:\n s += '.1'\n\n elif doc.type_id == 'statchg':\n protocol_action = False\n for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):\n if relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):\n protocol_action = True\n if protocol_action:\n s = \"2.3\"\n else:\n s = \"3.3\"\n if doc.get_state_slug() not in (\"iesgeval\", \"defer\", \"appr-pr\", \"appr-pend\", \"appr-sent\"):\n s += \".3\"\n elif doc.returning_item():\n s += \".2\"\n else:\n s += \".1\"\n\n elif doc.type_id == 'conflrev':\n if not doc.get_state('conflrev') or doc.get_state('conflrev').slug not in ('adrev','iesgeval','appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent','defer'):\n s = \"3.4.3\"\n elif doc.returning_item():\n s = \"3.4.2\"\n else:\n s = \"3.4.1\"\n\n return s\n\ndef agenda_sections():\n return OrderedDict([\n ('1', {'title':\"Administrivia\"}),\n ('1.1', {'title':\"Roll call\"}),\n ('1.2', {'title':\"Bash the agenda\"}),\n ('1.3', {'title':\"Approval of the minutes of past telechats\"}),\n ('1.4', {'title':\"List of remaining action items from last telechat\"}),\n ('2', {'title':\"Protocol actions\"}),\n ('2.1', {'title':\"WG submissions\"}),\n ('2.1.1', {'title':\"New items\", 'docs': []}),\n ('2.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.1.3', {'title':\"For action\", 'docs':[]}),\n ('2.2', {'title':\"Individual submissions\"}),\n ('2.2.1', {'title':\"New items\", 'docs':[]}),\n ('2.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.2.3', {'title':\"For action\", 'docs':[]}),\n ('2.3', {'title':\"Status changes\"}),\n ('2.3.1', {'title':\"New items\", 'docs':[]}),\n ('2.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('2.3.3', {'title':\"For action\", 'docs':[]}),\n ('3', {'title':\"Document actions\"}),\n ('3.1', {'title':\"WG submissions\"}),\n ('3.1.1', {'title':\"New items\", 'docs':[]}),\n ('3.1.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.1.3', {'title':\"For action\", 'docs':[]}),\n ('3.2', {'title':\"Individual submissions via AD\"}),\n ('3.2.1', {'title':\"New items\", 'docs':[]}),\n ('3.2.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.2.3', {'title':\"For action\", 'docs':[]}),\n ('3.3', {'title':\"Status changes\"}),\n ('3.3.1', {'title':\"New items\", 'docs':[]}),\n ('3.3.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.3.3', {'title':\"For action\", 'docs':[]}),\n ('3.4', {'title':\"IRTF and Independent Submission stream documents\"}),\n ('3.4.1', {'title':\"New items\", 'docs':[]}),\n ('3.4.2', {'title':\"Returning items\", 'docs':[]}),\n ('3.4.3', {'title':\"For action\", 'docs':[]}),\n 
('4', {'title':\"Working Group actions\"}),\n ('4.1', {'title':\"WG creation\"}),\n ('4.1.1', {'title':\"Proposed for IETF review\", 'docs':[]}),\n ('4.1.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('4.2', {'title':\"WG rechartering\"}),\n ('4.2.1', {'title':\"Under evaluation for IETF review\", 'docs':[]}),\n ('4.2.2', {'title':\"Proposed for approval\", 'docs':[]}),\n ('5', {'title':\"IAB news we can use\"}),\n ('6', {'title':\"Management issues\"}),\n ('7', {'title':\"Any Other Business (WG News, New Proposals, etc.)\"}),\n ])\n\ndef fill_in_agenda_administrivia(date, sections):\n extra_info_files = (\n (\"1.1\", \"roll_call\", settings.IESG_ROLL_CALL_FILE),\n (\"1.3\", \"minutes\", settings.IESG_MINUTES_FILE),\n (\"1.4\", \"action_items\", settings.IESG_TASK_FILE),\n )\n\n for s, key, filename in extra_info_files:\n try:\n with io.open(filename, 'r', encoding='utf-8', errors='replace') as f:\n t = f.read().strip()\n except IOError:\n t = \"(Error reading %s)\" % filename\n\n sections[s][\"text\"] = t\n\ndef fill_in_agenda_docs(date, sections, docs=None):\n if not docs:\n docs = Document.objects.filter(docevent__telechatdocevent__telechat_date=date)\n docs = docs.select_related(\"stream\", \"group\").distinct()\n fill_in_telechat_date(docs)\n\n review_assignments_for_docs = review_assignments_to_list_for_docs(docs)\n\n for doc in docs:\n if doc.telechat_date() != date:\n continue\n\n if not hasattr(doc, 'balloting_started'):\n e = doc.latest_event(type=\"started_iesg_process\")\n doc.balloting_started = e.time if e else datetime.datetime.min\n\n if doc.type_id == \"draft\":\n s = doc.get_state(\"draft-iana-review\")\n if s: # and s.slug in (\"not-ok\", \"changed\", \"need-rev\"):\n doc.iana_review_state = str(s)\n\n if doc.get_state_slug(\"draft-iesg\") == \"lc\":\n e = doc.latest_event(LastCallDocEvent, type=\"sent_last_call\")\n if e:\n doc.lastcall_expires = e.expires\n\n if doc.stream_id in (\"ietf\", \"irtf\", \"iab\"):\n doc.consensus = \"Unknown\"\n e = doc.latest_event(ConsensusDocEvent, type=\"changed_consensus\")\n if e and (e.consensus != None):\n doc.consensus = \"Yes\" if e.consensus else \"No\"\n\n doc.review_assignments = review_assignments_for_docs.get(doc.name, [])\n elif doc.type_id == \"conflrev\":\n doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document\n elif doc.type_id == \"charter\":\n pass\n\n number = get_doc_section(doc)\n if number: # and num in sections\n sections[number][\"docs\"].append(doc)\n\n # prune empty \"For action\" sections\n empty_for_action = [n for n, section in sections.items()\n if section[\"title\"] == \"For action\" and not section[\"docs\"]]\n for num in empty_for_action:\n del sections[num]\n\n # Be careful to keep this the same as what's used in agenda_documents\n for s in sections.values():\n if \"docs\" in s:\n s[\"docs\"].sort(key=lambda d: d.balloting_started)\n\ndef fill_in_agenda_management_issues(date, sections):\n s = \"6.%s\"\n for i, item in enumerate(TelechatAgendaItem.objects.filter(type=3).order_by('id'), start=1):\n sections[s % i] = { \"title\": item.title, \"text\": item.text }\n\ndef agenda_data(date=None):\n \"\"\"Return a dict with the different IESG telechat agenda components.\"\"\"\n date = get_agenda_date(date)\n sections = agenda_sections()\n\n fill_in_agenda_administrivia(date, sections)\n fill_in_agenda_docs(date, sections)\n fill_in_agenda_management_issues(date, sections)\n\n return { 'date': date.isoformat(), 'sections': sections }", "path": 
"ietf/iesg/agenda.py"}]}
| 3,426 | 257 |
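Editor's note on the datatracker entry above: the golden diff replaces a naive `datetime.strptime(...).date()` parse with a timezone-aware conversion (`make_aware(..., settings.TIME_ZONE).date()`) before the `TelechatDate` lookup, which is consistent with the "potential new datetime issues" the linked CI failures describe. The sketch below is illustrative only — it approximates Django's `make_aware` with the standard-library `zoneinfo`, and the zone name `America/Los_Angeles` is an assumed stand-in for `settings.TIME_ZONE`, not a value taken from the repository.

```python
# Minimal sketch of naive vs. aware date parsing (assumes Python 3.9+ for zoneinfo).
import datetime
from zoneinfo import ZoneInfo

date_str = "2023-01-12"

# Naive parse: no tzinfo attached, which is what the old code did.
naive = datetime.datetime.strptime(date_str, "%Y-%m-%d")
print(naive.tzinfo)  # None -> naive-datetime warnings/failures when USE_TZ=True

# Aware parse: attach the project timezone first, then take the date,
# mirroring make_aware(datetime.strptime(...), settings.TIME_ZONE).date().
aware = naive.replace(tzinfo=ZoneInfo("America/Los_Angeles"))  # assumed zone
print(aware.tzinfo, aware.date())
```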
gh_patches_debug_55973
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4994
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tests folder published in version 2.3.201
The wheel of version 2.3.201 contains the tests folder and pollutes the site-packages even though setup.py excludes it explicitly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import json
3 import logging
4 import os
5 from importlib import util
6 from os import path
7 from pathlib import Path
8
9 from setuptools import setup, find_packages
10 from setuptools.command.build_py import build_py
11
12
13 class PreBuildCommand(build_py):
14 """Pre-build command"""
15
16 def transform_graph_yaml_to_json(self) -> None:
17 """Transforms YAML graph checks to JSON and copies them to build/lib"""
18
19 import yaml # can't be top-level, because it needs to be first installed via 'setup_requires'
20
21 graph_check_paths = (
22 "checkov/*/checks/graph_checks",
23 )
24 build_path = Path(self.build_lib)
25 src_path = Path()
26
27 for graph_check_path in graph_check_paths:
28 for yaml_file in src_path.glob(f"{graph_check_path}/**/*.yaml"):
29 json_file = (build_path / yaml_file).with_suffix(".json")
30 self.mkpath(str(json_file.parent))
31 json_file.write_text(json.dumps(yaml.safe_load(yaml_file.read_text())))
32
33 def run(self) -> None:
34 self.execute(self.transform_graph_yaml_to_json, ())
35 build_py.run(self)
36
37
38 # read the contents of your README file
39 this_directory = path.abspath(path.dirname(__file__))
40 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
41 long_description = f.read()
42
43 logger = logging.getLogger(__name__)
44 spec = util.spec_from_file_location(
45 "checkov.version", os.path.join("checkov", "version.py")
46 )
47 # noinspection PyUnresolvedReferences
48 mod = util.module_from_spec(spec)
49 spec.loader.exec_module(mod) # type: ignore
50 version = mod.version # type: ignore
51
52 setup(
53 cmdclass={
54 'build_py': PreBuildCommand,
55 },
56 setup_requires=[
57 "pyyaml",
58 ],
59 extras_require={
60 "dev": [
61 "pytest==5.3.1",
62 "coverage==5.5",
63 "coverage-badge",
64 "GitPython==3.1.7",
65 "bandit",
66 "jsonschema",
67 ]
68 },
69 install_requires=[
70 "bc-python-hcl2==0.3.51",
71 "bc-detect-secrets==1.4.21",
72 "bc-jsonpath-ng==1.5.9",
73 "deep-merge",
74 "tabulate",
75 "colorama",
76 "termcolor",
77 "junit-xml>=1.9",
78 "dpath==2.1.3",
79 "pyyaml>=5.4.1",
80 "boto3>=1.17",
81 "gitpython",
82 "jmespath",
83 "tqdm",
84 "update-checker",
85 "semantic-version",
86 "packaging",
87 "cloudsplaining>=0.4.3",
88 "networkx<2.7",
89 "igraph",
90 "dockerfile-parse",
91 "docker",
92 "configargparse",
93 "argcomplete",
94 "policyuniverse",
95 "typing-extensions>=4.1.0",
96 "importlib-metadata>=0.12",
97 "cachetools",
98 "cyclonedx-python-lib>=2.4.0,<4.0.0",
99 "packageurl-python",
100 "click>=8.0.0",
101 "aiohttp",
102 "aiodns",
103 "aiomultiprocess",
104 "jsonschema<5.0.0,>=4.6.0",
105 "prettytable>=3.0.0",
106 "pycep-parser==0.3.9",
107 "charset-normalizer",
108 "pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'",
109 "pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'",
110 "schema",
111 "requests>=2.27.0",
112 "yarl",
113 "openai",
114 ],
115 dependency_links=[], # keep it empty, needed for pipenv-setup
116 license="Apache License 2.0",
117 name="checkov",
118 version=version,
119 python_requires=">=3.7",
120 description="Infrastructure as code static analysis",
121 author="bridgecrew",
122 author_email="[email protected]",
123 url="https://github.com/bridgecrewio/checkov",
124 packages=find_packages(exclude=[
125 "dogfood_tests*",
126 "integration_tests*",
127 "performance_tests*"
128 "tests*",
129 ]),
130 include_package_data=True,
131 package_data={
132 "checkov": ["py.typed"],
133 "checkov.common.util.templates": ["*.jinja2"],
134 "checkov.ansible.checks.graph_checks": ["**/*.json"],
135 "checkov.arm.checks.graph_checks": ["**/*.json"],
136 "checkov.bicep.checks.graph_checks": ["**/*.json"],
137 "checkov.cloudformation.checks.graph_checks": ["**/*.json"],
138 "checkov.dockerfile.checks.graph_checks": ["**/*.json"],
139 "checkov.github_actions.checks.graph_checks": ["**/*.json"],
140 "checkov.kubernetes.checks.graph_checks": ["**/*.json"],
141 "checkov.terraform.checks.graph_checks": ["**/*.json"],
142 },
143 scripts=["bin/checkov", "bin/checkov.cmd"],
144 long_description=long_description,
145 long_description_content_type="text/markdown",
146 classifiers=[
147 "Environment :: Console",
148 "Intended Audience :: Developers",
149 "Intended Audience :: System Administrators",
150 "License :: OSI Approved :: Apache Software License",
151 "Programming Language :: Python :: 3 :: Only",
152 "Programming Language :: Python :: 3.7",
153 "Programming Language :: Python :: 3.8",
154 "Programming Language :: Python :: 3.9",
155 "Programming Language :: Python :: 3.10",
156 "Programming Language :: Python :: 3.11",
157 "Topic :: Security",
158 "Topic :: Software Development :: Build Tools",
159 "Typing :: Typed",
160 ],
161 )
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -123,8 +123,9 @@
url="https://github.com/bridgecrewio/checkov",
packages=find_packages(exclude=[
"dogfood_tests*",
+ "flake8_plugins*",
"integration_tests*",
- "performance_tests*"
+ "performance_tests*",
"tests*",
]),
include_package_data=True,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -123,8 +123,9 @@\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=find_packages(exclude=[\n \"dogfood_tests*\",\n+ \"flake8_plugins*\",\n \"integration_tests*\",\n- \"performance_tests*\"\n+ \"performance_tests*\",\n \"tests*\",\n ]),\n include_package_data=True,\n", "issue": "Tests folder published in version 2.3.201\nThe wheel of version 2.3.201 contains the tests folder and pollutes the site-packages even though setup.py excludes it explicitly.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport json\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\n\n\nclass PreBuildCommand(build_py):\n \"\"\"Pre-build command\"\"\"\n\n def transform_graph_yaml_to_json(self) -> None:\n \"\"\"Transforms YAML graph checks to JSON and copies them to build/lib\"\"\"\n\n import yaml # can't be top-level, because it needs to be first installed via 'setup_requires'\n\n graph_check_paths = (\n \"checkov/*/checks/graph_checks\",\n )\n build_path = Path(self.build_lib)\n src_path = Path()\n\n for graph_check_path in graph_check_paths:\n for yaml_file in src_path.glob(f\"{graph_check_path}/**/*.yaml\"):\n json_file = (build_path / yaml_file).with_suffix(\".json\")\n self.mkpath(str(json_file.parent))\n json_file.write_text(json.dumps(yaml.safe_load(yaml_file.read_text())))\n\n def run(self) -> None:\n self.execute(self.transform_graph_yaml_to_json, ())\n build_py.run(self)\n\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n cmdclass={\n 'build_py': PreBuildCommand,\n },\n setup_requires=[\n \"pyyaml\",\n ],\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.51\",\n \"bc-detect-secrets==1.4.21\",\n \"bc-jsonpath-ng==1.5.9\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath==2.1.3\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"igraph\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonschema<5.0.0,>=4.6.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') 
and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"schema\",\n \"requests>=2.27.0\",\n \"yarl\",\n \"openai\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=find_packages(exclude=[\n \"dogfood_tests*\",\n \"integration_tests*\",\n \"performance_tests*\"\n \"tests*\",\n ]),\n include_package_data=True,\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.ansible.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.arm.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.bicep.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.cloudformation.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.github_actions.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.kubernetes.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.terraform.checks.graph_checks\": [\"**/*.json\"],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport json\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\n\n\nclass PreBuildCommand(build_py):\n \"\"\"Pre-build command\"\"\"\n\n def transform_graph_yaml_to_json(self) -> None:\n \"\"\"Transforms YAML graph checks to JSON and copies them to build/lib\"\"\"\n\n import yaml # can't be top-level, because it needs to be first installed via 'setup_requires'\n\n graph_check_paths = (\n \"checkov/*/checks/graph_checks\",\n )\n build_path = Path(self.build_lib)\n src_path = Path()\n\n for graph_check_path in graph_check_paths:\n for yaml_file in src_path.glob(f\"{graph_check_path}/**/*.yaml\"):\n json_file = (build_path / yaml_file).with_suffix(\".json\")\n self.mkpath(str(json_file.parent))\n json_file.write_text(json.dumps(yaml.safe_load(yaml_file.read_text())))\n\n def run(self) -> None:\n self.execute(self.transform_graph_yaml_to_json, ())\n build_py.run(self)\n\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = 
mod.version # type: ignore\n\nsetup(\n cmdclass={\n 'build_py': PreBuildCommand,\n },\n setup_requires=[\n \"pyyaml\",\n ],\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.51\",\n \"bc-detect-secrets==1.4.21\",\n \"bc-jsonpath-ng==1.5.9\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath==2.1.3\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"igraph\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonschema<5.0.0,>=4.6.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64' and implementation_name == 'cpython'\",\n \"schema\",\n \"requests>=2.27.0\",\n \"yarl\",\n \"openai\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=find_packages(exclude=[\n \"dogfood_tests*\",\n \"flake8_plugins*\",\n \"integration_tests*\",\n \"performance_tests*\",\n \"tests*\",\n ]),\n include_package_data=True,\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.ansible.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.arm.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.bicep.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.cloudformation.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.github_actions.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.kubernetes.checks.graph_checks\": [\"**/*.json\"],\n \"checkov.terraform.checks.graph_checks\": [\"**/*.json\"],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]}
| 2,051 | 103 |
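Editor's note on the checkov entry above: the root cause is Python's implicit string-literal concatenation — in the original `find_packages(exclude=[...])` call, the missing comma after `"performance_tests*"` fuses it with the adjacent `"tests*"` literal into the single pattern `"performance_tests*tests*"`, so `tests*` was never excluded and the tests folder landed in the wheel. A small self-contained illustration (plain Python, no setuptools needed):

```python
# The missing comma silently merges two adjacent string literals.
broken_excludes = [
    "dogfood_tests*",
    "integration_tests*",
    "performance_tests*"  # <-- no trailing comma here
    "tests*",
]
fixed_excludes = [
    "dogfood_tests*",
    "integration_tests*",
    "performance_tests*",
    "tests*",
]
print(broken_excludes)  # ['dogfood_tests*', 'integration_tests*', 'performance_tests*tests*']
print(fixed_excludes)   # four separate patterns, so find_packages(exclude=...) can drop tests/
```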
gh_patches_debug_7898
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-1889
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extend the OTEL traces to track bytes billed and bytes processed
Extend OTEL traces to track bytes billed and bytes processed. Allowing developers to see how much data and cost was consumed as part of BigQueryJob trace
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/opentelemetry_tracing.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from contextlib import contextmanager
17 from google.api_core.exceptions import GoogleAPICallError # type: ignore
18
19 logger = logging.getLogger(__name__)
20 try:
21 from opentelemetry import trace # type: ignore
22 from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore
23 from opentelemetry.trace.status import Status # type: ignore
24
25 HAS_OPENTELEMETRY = True
26 _warned_telemetry = True
27
28 except ImportError:
29 HAS_OPENTELEMETRY = False
30 _warned_telemetry = False
31
32 _default_attributes = {
33 "db.system": "BigQuery"
34 } # static, default values assigned to all spans
35
36
37 @contextmanager
38 def create_span(name, attributes=None, client=None, job_ref=None):
39 """Creates a ContextManager for a Span to be exported to the configured exporter.
40 If no configuration exists yields None.
41
42 Args:
43 name (str): Name that will be set for the span being created
44 attributes (Optional[dict]):
45 Additional attributes that pertain to
46 the specific API call (i.e. not a default attribute)
47 client (Optional[google.cloud.bigquery.client.Client]):
48 Pass in a Client object to extract any attributes that may be
49 relevant to it and add them to the created spans.
50 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
51 Pass in a _AsyncJob object to extract any attributes that may be
52 relevant to it and add them to the created spans.
53
54 Yields:
55 opentelemetry.trace.Span: Yields the newly created Span.
56
57 Raises:
58 google.api_core.exceptions.GoogleAPICallError:
59 Raised if a span could not be yielded or issue with call to
60 OpenTelemetry.
61 """
62 global _warned_telemetry
63 final_attributes = _get_final_span_attributes(attributes, client, job_ref)
64 if not HAS_OPENTELEMETRY:
65 if not _warned_telemetry:
66 logger.debug(
67 "This service is instrumented using OpenTelemetry. "
68 "OpenTelemetry or one of its components could not be imported; "
69 "please add compatible versions of opentelemetry-api and "
70 "opentelemetry-instrumentation packages in order to get BigQuery "
71 "Tracing data."
72 )
73 _warned_telemetry = True
74
75 yield None
76 return
77 tracer = trace.get_tracer(__name__)
78
79 # yield new span value
80 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
81 try:
82 yield span
83 except GoogleAPICallError as error:
84 if error.code is not None:
85 span.set_status(Status(http_status_to_status_code(error.code)))
86 raise
87
88
89 def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
90 """Compiles attributes from: client, job_ref, user-provided attributes.
91
92 Attributes from all of these sources are merged together. Note the
93 attributes are added sequentially based on perceived order of precedence:
94 i.e. attributes added last may overwrite attributes added earlier.
95
96 Args:
97 attributes (Optional[dict]):
98 Additional attributes that pertain to
99 the specific API call (i.e. not a default attribute)
100
101 client (Optional[google.cloud.bigquery.client.Client]):
102 Pass in a Client object to extract any attributes that may be
103 relevant to it and add them to the final_attributes
104
105 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
106 Pass in a _AsyncJob object to extract any attributes that may be
107 relevant to it and add them to the final_attributes.
108
109 Returns: dict
110 """
111
112 collected_attributes = _default_attributes.copy()
113
114 if client:
115 collected_attributes.update(_set_client_attributes(client))
116 if job_ref:
117 collected_attributes.update(_set_job_attributes(job_ref))
118 if attributes:
119 collected_attributes.update(attributes)
120
121 final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}
122 return final_attributes
123
124
125 def _set_client_attributes(client):
126 return {"db.name": client.project, "location": client.location}
127
128
129 def _set_job_attributes(job_ref):
130 job_attributes = {
131 "db.name": job_ref.project,
132 "job_id": job_ref.job_id,
133 "state": job_ref.state,
134 }
135
136 job_attributes["hasErrors"] = job_ref.error_result is not None
137
138 if job_ref.created is not None:
139 job_attributes["timeCreated"] = job_ref.created.isoformat()
140
141 if job_ref.started is not None:
142 job_attributes["timeStarted"] = job_ref.started.isoformat()
143
144 if job_ref.ended is not None:
145 job_attributes["timeEnded"] = job_ref.ended.isoformat()
146
147 if job_ref.location is not None:
148 job_attributes["location"] = job_ref.location
149
150 if job_ref.parent_job_id is not None:
151 job_attributes["parent_job_id"] = job_ref.parent_job_id
152
153 if job_ref.num_child_jobs is not None:
154 job_attributes["num_child_jobs"] = job_ref.num_child_jobs
155
156 return job_attributes
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -153,4 +153,12 @@
if job_ref.num_child_jobs is not None:
job_attributes["num_child_jobs"] = job_ref.num_child_jobs
+ total_bytes_billed = getattr(job_ref, "total_bytes_billed", None)
+ if total_bytes_billed is not None:
+ job_attributes["total_bytes_billed"] = total_bytes_billed
+
+ total_bytes_processed = getattr(job_ref, "total_bytes_processed", None)
+ if total_bytes_processed is not None:
+ job_attributes["total_bytes_processed"] = total_bytes_processed
+
return job_attributes
|
{"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -153,4 +153,12 @@\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n \n+ total_bytes_billed = getattr(job_ref, \"total_bytes_billed\", None)\n+ if total_bytes_billed is not None:\n+ job_attributes[\"total_bytes_billed\"] = total_bytes_billed\n+\n+ total_bytes_processed = getattr(job_ref, \"total_bytes_processed\", None)\n+ if total_bytes_processed is not None:\n+ job_attributes[\"total_bytes_processed\"] = total_bytes_processed\n+\n return job_attributes\n", "issue": "Extend the OTEL traces to track bytes billed and bytes processed\nExtend OTEL traces to track bytes billed and bytes processed. Allowing developers to see how much data and cost was consumed as part of BigQueryJob trace \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n \"\"\"Compiles attributes from: client, job_ref, user-provided attributes.\n\n Attributes from all of these sources are merged together. Note the\n attributes are added sequentially based on perceived order of precedence:\n i.e. attributes added last may overwrite attributes added earlier.\n\n Args:\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the final_attributes\n\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the final_attributes.\n\n Returns: dict\n \"\"\"\n\n collected_attributes = _default_attributes.copy()\n\n if client:\n collected_attributes.update(_set_client_attributes(client))\n if job_ref:\n collected_attributes.update(_set_job_attributes(job_ref))\n if attributes:\n collected_attributes.update(attributes)\n\n final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import 
contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. \"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n \"\"\"Compiles attributes from: client, job_ref, user-provided attributes.\n\n Attributes from all of these sources are merged together. Note the\n attributes are added sequentially based on perceived order of precedence:\n i.e. attributes added last may overwrite attributes added earlier.\n\n Args:\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. 
not a default attribute)\n\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the final_attributes\n\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the final_attributes.\n\n Returns: dict\n \"\"\"\n\n collected_attributes = _default_attributes.copy()\n\n if client:\n collected_attributes.update(_set_client_attributes(client))\n if job_ref:\n collected_attributes.update(_set_job_attributes(job_ref))\n if attributes:\n collected_attributes.update(attributes)\n\n final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n total_bytes_billed = getattr(job_ref, \"total_bytes_billed\", None)\n if total_bytes_billed is not None:\n job_attributes[\"total_bytes_billed\"] = total_bytes_billed\n\n total_bytes_processed = getattr(job_ref, \"total_bytes_processed\", None)\n if total_bytes_processed is not None:\n job_attributes[\"total_bytes_processed\"] = total_bytes_processed\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]}
| 1,921 | 187 |
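The patch above simply guards two optional job statistics with `getattr` before attaching them to the span attributes. The following self-contained sketch reproduces that pattern outside BigQuery; `FakeJob` and its field values are illustrative assumptions, not the real `google.cloud.bigquery` job class.

```python
from dataclasses import dataclass
from typing import Any, Dict, Optional


@dataclass
class FakeJob:
    # Illustrative stand-in for a finished query job; the field names mirror
    # the attributes the patch reads, the values are made up.
    project: str = "my-project"
    job_id: str = "job-123"
    state: str = "DONE"
    total_bytes_billed: Optional[int] = 10_485_760
    total_bytes_processed: Optional[int] = 9_437_184


def collect_job_attributes(job: Any) -> Dict[str, Any]:
    attributes = {
        "db.name": job.project,
        "job_id": job.job_id,
        "state": job.state,
    }
    # Same idea as the golden diff: only attach the byte counters when present.
    total_bytes_billed = getattr(job, "total_bytes_billed", None)
    if total_bytes_billed is not None:
        attributes["total_bytes_billed"] = total_bytes_billed

    total_bytes_processed = getattr(job, "total_bytes_processed", None)
    if total_bytes_processed is not None:
        attributes["total_bytes_processed"] = total_bytes_processed

    return attributes


if __name__ == "__main__":
    print(collect_job_attributes(FakeJob()))
```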
gh_patches_debug_2307
|
rasdani/github-patches
|
git_diff
|
huggingface__text-generation-inference-1089
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flash attention (v1) is broken
Defaulting window_size_left to "-1" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).
https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/text_generation_server/utils/flash_attn.py`
Content:
```
1 import os
2 import torch
3
4 from loguru import logger
5
6 if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
7 raise ImportError("`USE_FLASH_ATTENTION` is false.")
8
9 if not torch.cuda.is_available():
10 raise ImportError("CUDA is not available")
11
12 major, minor = torch.cuda.get_device_capability()
13 is_sm75 = major == 7 and minor == 5
14 is_sm8x = major == 8 and minor >= 0
15 is_sm90 = major == 9 and minor == 0
16
17 HAS_FLASH_ATTN = False
18 HAS_FLASH_ATTN_V2 = False
19 try:
20 try:
21 import flash_attn_2_cuda
22 except ImportError:
23 raise ImportError(
24 "Flash Attention V2 is not installed.\n"
25 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
26 "or install flash attention v2 with `cd server && make install install-flash-attention-v2`"
27 )
28 if not (is_sm8x or is_sm90):
29 raise ImportError(
30 f"GPU with CUDA capability {major} {minor} is not supported for "
31 "Flash Attention V2"
32 )
33 HAS_FLASH_ATTN_V2 = True
34 except ImportError as e:
35 try:
36 import flash_attn_cuda
37 except ImportError:
38 raise ImportError(
39 "Flash Attention is not installed.\n"
40 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
41 "or install flash attention with `cd server && make install install-flash-attention`"
42 ) from e
43
44 if not (is_sm75 or is_sm8x or is_sm90):
45 raise ImportError(
46 f"GPU with CUDA capability {major} {minor} is not supported"
47 ) from e
48 logger.warning(f"Unable to use Flash Attention V2: {e}")
49 HAS_FLASH_ATTN = True
50
51
52 def attention(
53 q,
54 k,
55 v,
56 out,
57 cu_seqlens,
58 max_s,
59 softmax_scale,
60 window_size_left=-1,
61 ):
62 if HAS_FLASH_ATTN_V2:
63 return flash_attn_2_cuda.varlen_fwd(
64 q,
65 k,
66 v,
67 out,
68 cu_seqlens,
69 cu_seqlens,
70 max_s,
71 max_s,
72 0.0,
73 softmax_scale,
74 False,
75 True,
76 window_size_left,
77 0,
78 False,
79 None,
80 )
81
82 if HAS_FLASH_ATTN:
83 if window_size_left != 0:
84 raise NotImplementedError(
85 "window_size_left is only available with flash attn v2"
86 )
87
88 # Flash attention v1 requires q, k and v to have the same number of heads
89 if k.shape[1] != q.shape[1]:
90 # MQA expand
91 if k.shape[1] == 1:
92 k = k.expand(-1, q.shape[1], -1)
93 # Grouped attention reshape
94 else:
95 original_shape = k.shape
96 k = (
97 k.unsqueeze(2)
98 .expand(-1, -1, q.shape[1] // k.shape[1], -1)
99 .reshape(original_shape[0], -1, original_shape[2])
100 )
101 if v.shape[1] != q.shape[1]:
102 # MQA expand
103 if v.shape[1] == 1:
104 v = v.expand(-1, q.shape[1], -1)
105 # Grouped attention reshape
106 else:
107 original_shape = v.shape
108 v = (
109 v.unsqueeze(2)
110 .expand(-1, -1, q.shape[1] // v.shape[1], -1)
111 .reshape(original_shape[0], -1, original_shape[2])
112 )
113
114 return flash_attn_cuda.fwd(
115 q,
116 k,
117 v,
118 out,
119 cu_seqlens,
120 cu_seqlens,
121 max_s,
122 max_s,
123 0.0,
124 softmax_scale,
125 False,
126 True,
127 False,
128 0,
129 None,
130 )
131
132 raise NotImplementedError("flash attention is not installed")
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -80,7 +80,7 @@
)
if HAS_FLASH_ATTN:
- if window_size_left != 0:
+ if window_size_left != -1:
raise NotImplementedError(
"window_size_left is only available with flash attn v2"
)
|
{"golden_diff": "diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py\n--- a/server/text_generation_server/utils/flash_attn.py\n+++ b/server/text_generation_server/utils/flash_attn.py\n@@ -80,7 +80,7 @@\n )\n \n if HAS_FLASH_ATTN:\n- if window_size_left != 0:\n+ if window_size_left != -1:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n", "issue": "Flash attention (v1) is broken\nDefaulting window_size_left to \"-1\" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).\r\n\r\nhttps://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7\n", "before_files": [{"content": "import os\nimport torch\n\nfrom loguru import logger\n\nif os.getenv(\"USE_FLASH_ATTENTION\", \"\").lower() == \"false\":\n raise ImportError(\"`USE_FLASH_ATTENTION` is false.\")\n\nif not torch.cuda.is_available():\n raise ImportError(\"CUDA is not available\")\n\nmajor, minor = torch.cuda.get_device_capability()\nis_sm75 = major == 7 and minor == 5\nis_sm8x = major == 8 and minor >= 0\nis_sm90 = major == 9 and minor == 0\n\nHAS_FLASH_ATTN = False\nHAS_FLASH_ATTN_V2 = False\ntry:\n try:\n import flash_attn_2_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention V2 is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention v2 with `cd server && make install install-flash-attention-v2`\"\n )\n if not (is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported for \"\n \"Flash Attention V2\"\n )\n HAS_FLASH_ATTN_V2 = True\nexcept ImportError as e:\n try:\n import flash_attn_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention with `cd server && make install install-flash-attention`\"\n ) from e\n\n if not (is_sm75 or is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported\"\n ) from e\n logger.warning(f\"Unable to use Flash Attention V2: {e}\")\n HAS_FLASH_ATTN = True\n\n\ndef attention(\n q,\n k,\n v,\n out,\n cu_seqlens,\n max_s,\n softmax_scale,\n window_size_left=-1,\n):\n if HAS_FLASH_ATTN_V2:\n return flash_attn_2_cuda.varlen_fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n window_size_left,\n 0,\n False,\n None,\n )\n\n if HAS_FLASH_ATTN:\n if window_size_left != 0:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n\n # Flash attention v1 requires q, k and v to have the same number of heads\n if k.shape[1] != q.shape[1]:\n # MQA expand\n if k.shape[1] == 1:\n k = k.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = k.shape\n k = (\n k.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // k.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n if v.shape[1] != q.shape[1]:\n # MQA expand\n if v.shape[1] == 1:\n v = v.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = v.shape\n v = (\n v.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // v.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n\n return flash_attn_cuda.fwd(\n q,\n k,\n v,\n 
out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n False,\n 0,\n None,\n )\n\n raise NotImplementedError(\"flash attention is not installed\")\n", "path": "server/text_generation_server/utils/flash_attn.py"}], "after_files": [{"content": "import os\nimport torch\n\nfrom loguru import logger\n\nif os.getenv(\"USE_FLASH_ATTENTION\", \"\").lower() == \"false\":\n raise ImportError(\"`USE_FLASH_ATTENTION` is false.\")\n\nif not torch.cuda.is_available():\n raise ImportError(\"CUDA is not available\")\n\nmajor, minor = torch.cuda.get_device_capability()\nis_sm75 = major == 7 and minor == 5\nis_sm8x = major == 8 and minor >= 0\nis_sm90 = major == 9 and minor == 0\n\nHAS_FLASH_ATTN = False\nHAS_FLASH_ATTN_V2 = False\ntry:\n try:\n import flash_attn_2_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention V2 is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention v2 with `cd server && make install install-flash-attention-v2`\"\n )\n if not (is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported for \"\n \"Flash Attention V2\"\n )\n HAS_FLASH_ATTN_V2 = True\nexcept ImportError as e:\n try:\n import flash_attn_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention with `cd server && make install install-flash-attention`\"\n ) from e\n\n if not (is_sm75 or is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported\"\n ) from e\n logger.warning(f\"Unable to use Flash Attention V2: {e}\")\n HAS_FLASH_ATTN = True\n\n\ndef attention(\n q,\n k,\n v,\n out,\n cu_seqlens,\n max_s,\n softmax_scale,\n window_size_left=-1,\n):\n if HAS_FLASH_ATTN_V2:\n return flash_attn_2_cuda.varlen_fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n window_size_left,\n 0,\n False,\n None,\n )\n\n if HAS_FLASH_ATTN:\n if window_size_left != -1:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n\n # Flash attention v1 requires q, k and v to have the same number of heads\n if k.shape[1] != q.shape[1]:\n # MQA expand\n if k.shape[1] == 1:\n k = k.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = k.shape\n k = (\n k.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // k.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n if v.shape[1] != q.shape[1]:\n # MQA expand\n if v.shape[1] == 1:\n v = v.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = v.shape\n v = (\n v.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // v.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n\n return flash_attn_cuda.fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n False,\n 0,\n None,\n )\n\n raise NotImplementedError(\"flash attention is not installed\")\n", "path": "server/text_generation_server/utils/flash_attn.py"}]}
| 1,577 | 114 |
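The one-line fix above changes the rejected sentinel from 0 to -1, since -1 is the default meaning "no sliding window" and is the only case flash-attn v1 can honour. Below is a minimal stand-alone sketch of that guard with made-up names; it is not the text-generation-inference code itself.

```python
def check_window_size(window_size_left: int, has_flash_attn_v2: bool) -> None:
    """Reject sliding-window attention on backends that cannot support it."""
    if has_flash_attn_v2:
        return  # v2 accepts any window size, including -1 (window disabled)
    # v1 path: only the "disabled" sentinel -1 may pass through (the old code
    # wrongly required 0, which broke the default call path).
    if window_size_left != -1:
        raise NotImplementedError(
            "window_size_left is only available with flash attn v2"
        )


if __name__ == "__main__":
    check_window_size(-1, has_flash_attn_v2=False)   # default call on v1: ok
    check_window_size(4096, has_flash_attn_v2=True)  # sliding window on v2: ok
    try:
        check_window_size(4096, has_flash_attn_v2=False)
    except NotImplementedError as exc:
        print(f"rejected as expected: {exc}")
```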
gh_patches_debug_23290
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-6107
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ignored exception on ci in windows related to monitor and socket.
This may be related to the intermittent failures of monitor tests on windows
Observing the following traceback
```
Exception ignored in: <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>
Traceback (most recent call last):
File "C:\hostedtoolcache\windows\Python\3.9.13\x64\lib\threading.py", line 917, in run
self._target(*self._args, **self._kwargs)
ResourceWarning: unclosed <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/qcodes/monitor/monitor.py`
Content:
```
1 #! /usr/bin/env python
2 # vim:fenc=utf-8
3 #
4 # Copyright Β© 2017 unga <[email protected]>
5 #
6 # Distributed under terms of the MIT license.
7 """
8 Monitor a set of parameters in a background thread
9 stream output over websocket
10
11 To start monitor, run this file, or if qcodes is installed as a module:
12
13 ``% python -m qcodes.monitor.monitor``
14
15 Add parameters to monitor in your measurement by creating a new monitor with a
16 list of parameters to monitor:
17
18 ``monitor = qcodes.Monitor(param1, param2, param3, ...)``
19 """
20 from __future__ import annotations
21
22 import asyncio
23 import json
24 import logging
25 import os
26 import socketserver
27 import time
28 import webbrowser
29 from asyncio import CancelledError
30 from collections import defaultdict
31 from contextlib import suppress
32 from importlib.resources import as_file, files
33 from threading import Event, Thread
34 from typing import TYPE_CHECKING, Any, Callable
35
36 import websockets
37 import websockets.exceptions
38 import websockets.server
39
40 from qcodes.parameters import Parameter
41
42 if TYPE_CHECKING:
43 from collections.abc import Awaitable, Sequence
44
45 WEBSOCKET_PORT = 5678
46 SERVER_PORT = 3000
47
48 log = logging.getLogger(__name__)
49
50
51 def _get_metadata(
52 *parameters: Parameter, use_root_instrument: bool = True
53 ) -> dict[str, Any]:
54 """
55 Return a dictionary that contains the parameter metadata grouped by the
56 instrument it belongs to.
57 """
58 metadata_timestamp = time.time()
59 # group metadata by instrument
60 metas: dict[Any, Any] = defaultdict(list)
61 for parameter in parameters:
62 # Get the latest value from the parameter,
63 # respecting the max_val_age parameter
64 meta: dict[str, float | str | None] = {}
65 meta["value"] = str(parameter.get_latest())
66 timestamp = parameter.get_latest.get_timestamp()
67 if timestamp is not None:
68 meta["ts"] = timestamp.timestamp()
69 else:
70 meta["ts"] = None
71 meta["name"] = parameter.label or parameter.name
72 meta["unit"] = parameter.unit
73
74 # find the base instrument that this parameter belongs to
75 if use_root_instrument:
76 baseinst = parameter.root_instrument
77 else:
78 baseinst = parameter.instrument
79 if baseinst is None:
80 metas["Unbound Parameter"].append(meta)
81 else:
82 metas[str(baseinst)].append(meta)
83
84 # Create list of parameters, grouped by instrument
85 parameters_out = []
86 for instrument in metas:
87 temp = {"instrument": instrument, "parameters": metas[instrument]}
88 parameters_out.append(temp)
89
90 state = {"ts": metadata_timestamp, "parameters": parameters_out}
91 return state
92
93
94 def _handler(
95 parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True
96 ) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:
97 """
98 Return the websockets server handler.
99 """
100
101 async def server_func(
102 websocket: websockets.server.WebSocketServerProtocol, _: str
103 ) -> None:
104 """
105 Create a websockets handler that sends parameter values to a listener
106 every "interval" seconds.
107 """
108 while True:
109 try:
110 # Update the parameter values
111 try:
112 meta = _get_metadata(
113 *parameters, use_root_instrument=use_root_instrument
114 )
115 except ValueError:
116 log.exception("Error getting parameters")
117 break
118 log.debug("sending.. to %r", websocket)
119 await websocket.send(json.dumps(meta))
120 # Wait for interval seconds and then send again
121 await asyncio.sleep(interval)
122 except (CancelledError, websockets.exceptions.ConnectionClosed):
123 log.debug("Got CancelledError or ConnectionClosed",
124 exc_info=True)
125 break
126 log.debug("Closing websockets connection")
127
128 return server_func
129
130
131 class Monitor(Thread):
132 """
133 QCodes Monitor - WebSockets server to monitor qcodes parameters.
134 """
135
136 running: Monitor | None = None
137
138 def __init__(
139 self,
140 *parameters: Parameter,
141 interval: float = 1,
142 use_root_instrument: bool = True,
143 ):
144 """
145 Monitor qcodes parameters.
146
147 Args:
148 *parameters: Parameters to monitor.
149 interval: How often one wants to refresh the values.
150 use_root_instrument: Defines if parameters are grouped according to
151 parameter.root_instrument or parameter.instrument
152 """
153 super().__init__(daemon=True)
154
155 # Check that all values are valid parameters
156 for parameter in parameters:
157 if not isinstance(parameter, Parameter):
158 raise TypeError(f"We can only monitor QCodes "
159 f"Parameters, not {type(parameter)}")
160
161 self.loop: asyncio.AbstractEventLoop | None = None
162 self._stop_loop_future: asyncio.Future | None = None
163 self._parameters = parameters
164 self.loop_is_closed = Event()
165 self.server_is_started = Event()
166 self.handler = _handler(
167 parameters, interval=interval, use_root_instrument=use_root_instrument
168 )
169 log.debug("Start monitoring thread")
170 if Monitor.running:
171 # stop the old server
172 log.debug("Stopping and restarting server")
173 Monitor.running.stop()
174 self.start()
175
176 # Wait until the loop is running
177 self.server_is_started.wait(timeout=5)
178 if not self.server_is_started.is_set():
179 raise RuntimeError("Failed to start server")
180 Monitor.running = self
181
182 def run(self) -> None:
183 """
184 Start the event loop and run forever.
185 """
186 log.debug("Running Websocket server")
187
188 async def run_loop() -> None:
189 self.loop = asyncio.get_running_loop()
190 self._stop_loop_future = self.loop.create_future()
191
192 async with websockets.server.serve(
193 self.handler, "127.0.0.1", WEBSOCKET_PORT, close_timeout=1
194 ):
195 self.server_is_started.set()
196 try:
197 await self._stop_loop_future
198 except asyncio.CancelledError:
199 log.debug("Websocket server thread shutting down")
200
201 try:
202 asyncio.run(run_loop())
203 finally:
204 self.loop_is_closed.set()
205
206 def update_all(self) -> None:
207 """
208 Update all parameters in the monitor.
209 """
210 for parameter in self._parameters:
211 # call get if it can be called without arguments
212 with suppress(TypeError):
213 parameter.get()
214
215 def stop(self) -> None:
216 """
217 Shutdown the server, close the event loop and join the thread.
218 Setting active Monitor to ``None``.
219 """
220 self.join()
221 Monitor.running = None
222
223 def join(self, timeout: float | None = None) -> None:
224 """
225 Overwrite ``Thread.join`` to make sure server is stopped before
226 joining avoiding a potential deadlock.
227 """
228 log.debug("Shutting down server")
229 if not self.is_alive():
230 # we run this check before trying to run to prevent a cryptic
231 # error message
232 log.debug("monitor is dead")
233 return
234 try:
235 if self.loop is not None and self._stop_loop_future is not None:
236 log.debug("Instructing server to stop event loop.")
237 self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)
238 else:
239 log.debug("No event loop found. Cannot stop event loop.")
240 except RuntimeError:
241 # the above may throw a runtime error if the loop is already
242 # stopped in which case there is nothing more to do
243 log.exception("Could not close loop")
244 self.loop_is_closed.wait(timeout=5)
245 if not self.loop_is_closed.is_set():
246 raise RuntimeError("Failed to join loop")
247 log.debug("Loop reported closed")
248 super().join(timeout=timeout)
249 log.debug("Monitor Thread has joined")
250
251 @staticmethod
252 def show() -> None:
253 """
254 Overwrite this method to show/raise your monitor GUI
255 F.ex.
256
257 ::
258
259 import webbrowser
260 url = "localhost:3000"
261 # Open URL in new window, raising the window if possible.
262 webbrowser.open_new(url)
263
264 """
265 webbrowser.open(f"http://localhost:{SERVER_PORT}")
266
267
268 def main() -> None:
269 import http.server
270
271 # If this file is run, create a simple webserver that serves a simple
272 # website that can be used to view monitored parameters.
273 # # https://github.com/python/mypy/issues/4182
274 parent_module = ".".join(__loader__.name.split(".")[:-1]) # type: ignore[name-defined]
275
276 static_dir = files(parent_module).joinpath("dist")
277 try:
278 with as_file(static_dir) as extracted_dir:
279 os.chdir(extracted_dir)
280 log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
281 with socketserver.TCPServer(
282 ("", SERVER_PORT), http.server.SimpleHTTPRequestHandler
283 ) as httpd:
284 log.debug("serving directory %s", static_dir)
285 webbrowser.open(f"http://localhost:{SERVER_PORT}")
286 httpd.serve_forever()
287 except KeyboardInterrupt:
288 log.info("Shutting Down HTTP Server")
289
290
291 if __name__ == "__main__":
292 main()
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py
--- a/src/qcodes/monitor/monitor.py
+++ b/src/qcodes/monitor/monitor.py
@@ -193,10 +193,8 @@
self.handler, "127.0.0.1", WEBSOCKET_PORT, close_timeout=1
):
self.server_is_started.set()
- try:
- await self._stop_loop_future
- except asyncio.CancelledError:
- log.debug("Websocket server thread shutting down")
+ await self._stop_loop_future
+ log.debug("Websocket server thread shutting down")
try:
asyncio.run(run_loop())
@@ -234,7 +232,7 @@
try:
if self.loop is not None and self._stop_loop_future is not None:
log.debug("Instructing server to stop event loop.")
- self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)
+ self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)
else:
log.debug("No event loop found. Cannot stop event loop.")
except RuntimeError:
|
{"golden_diff": "diff --git a/src/qcodes/monitor/monitor.py b/src/qcodes/monitor/monitor.py\n--- a/src/qcodes/monitor/monitor.py\n+++ b/src/qcodes/monitor/monitor.py\n@@ -193,10 +193,8 @@\n self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n- try:\n- await self._stop_loop_future\n- except asyncio.CancelledError:\n- log.debug(\"Websocket server thread shutting down\")\n+ await self._stop_loop_future\n+ log.debug(\"Websocket server thread shutting down\")\n \n try:\n asyncio.run(run_loop())\n@@ -234,7 +232,7 @@\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n- self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)\n+ self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)\n else:\n log.debug(\"No event loop found. Cannot stop event loop.\")\n except RuntimeError:\n", "issue": "Ignored exception on ci in windows related to monitor and socket. \nThis may be related to the intermitted failures of monitor tests on windows\r\n\r\nObserving the following traceback\r\n```\r\nException ignored in: <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>\r\nTraceback (most recent call last):\r\n File \"C:\\hostedtoolcache\\windows\\Python\\3.9.13\\x64\\lib\\threading.py\", line 917, in run\r\n self._target(*self._args, **self._kwargs)\r\nResourceWarning: unclosed <socket.socket fd=1116, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=0, laddr=('127.0.0.1', 55090), raddr=('127.0.0.1', 55091)>\r\n```\n", "before_files": [{"content": "#! /usr/bin/env python\n# vim:fenc=utf-8\n#\n# Copyright \u00a9 2017 unga <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\"\"\"\nMonitor a set of parameters in a background thread\nstream output over websocket\n\nTo start monitor, run this file, or if qcodes is installed as a module:\n\n``% python -m qcodes.monitor.monitor``\n\nAdd parameters to monitor in your measurement by creating a new monitor with a\nlist of parameters to monitor:\n\n``monitor = qcodes.Monitor(param1, param2, param3, ...)``\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport os\nimport socketserver\nimport time\nimport webbrowser\nfrom asyncio import CancelledError\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom importlib.resources import as_file, files\nfrom threading import Event, Thread\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport websockets\nimport websockets.exceptions\nimport websockets.server\n\nfrom qcodes.parameters import Parameter\n\nif TYPE_CHECKING:\n from collections.abc import Awaitable, Sequence\n\nWEBSOCKET_PORT = 5678\nSERVER_PORT = 3000\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_metadata(\n *parameters: Parameter, use_root_instrument: bool = True\n) -> dict[str, Any]:\n \"\"\"\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n \"\"\"\n metadata_timestamp = time.time()\n # group metadata by instrument\n metas: dict[Any, Any] = defaultdict(list)\n for parameter in parameters:\n # Get the latest value from the parameter,\n # respecting the max_val_age parameter\n meta: dict[str, float | str | None] = {}\n meta[\"value\"] = str(parameter.get_latest())\n timestamp = parameter.get_latest.get_timestamp()\n if timestamp is not None:\n meta[\"ts\"] = 
timestamp.timestamp()\n else:\n meta[\"ts\"] = None\n meta[\"name\"] = parameter.label or parameter.name\n meta[\"unit\"] = parameter.unit\n\n # find the base instrument that this parameter belongs to\n if use_root_instrument:\n baseinst = parameter.root_instrument\n else:\n baseinst = parameter.instrument\n if baseinst is None:\n metas[\"Unbound Parameter\"].append(meta)\n else:\n metas[str(baseinst)].append(meta)\n\n # Create list of parameters, grouped by instrument\n parameters_out = []\n for instrument in metas:\n temp = {\"instrument\": instrument, \"parameters\": metas[instrument]}\n parameters_out.append(temp)\n\n state = {\"ts\": metadata_timestamp, \"parameters\": parameters_out}\n return state\n\n\ndef _handler(\n parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True\n) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:\n \"\"\"\n Return the websockets server handler.\n \"\"\"\n\n async def server_func(\n websocket: websockets.server.WebSocketServerProtocol, _: str\n ) -> None:\n \"\"\"\n Create a websockets handler that sends parameter values to a listener\n every \"interval\" seconds.\n \"\"\"\n while True:\n try:\n # Update the parameter values\n try:\n meta = _get_metadata(\n *parameters, use_root_instrument=use_root_instrument\n )\n except ValueError:\n log.exception(\"Error getting parameters\")\n break\n log.debug(\"sending.. to %r\", websocket)\n await websocket.send(json.dumps(meta))\n # Wait for interval seconds and then send again\n await asyncio.sleep(interval)\n except (CancelledError, websockets.exceptions.ConnectionClosed):\n log.debug(\"Got CancelledError or ConnectionClosed\",\n exc_info=True)\n break\n log.debug(\"Closing websockets connection\")\n\n return server_func\n\n\nclass Monitor(Thread):\n \"\"\"\n QCodes Monitor - WebSockets server to monitor qcodes parameters.\n \"\"\"\n\n running: Monitor | None = None\n\n def __init__(\n self,\n *parameters: Parameter,\n interval: float = 1,\n use_root_instrument: bool = True,\n ):\n \"\"\"\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n use_root_instrument: Defines if parameters are grouped according to\n parameter.root_instrument or parameter.instrument\n \"\"\"\n super().__init__(daemon=True)\n\n # Check that all values are valid parameters\n for parameter in parameters:\n if not isinstance(parameter, Parameter):\n raise TypeError(f\"We can only monitor QCodes \"\n f\"Parameters, not {type(parameter)}\")\n\n self.loop: asyncio.AbstractEventLoop | None = None\n self._stop_loop_future: asyncio.Future | None = None\n self._parameters = parameters\n self.loop_is_closed = Event()\n self.server_is_started = Event()\n self.handler = _handler(\n parameters, interval=interval, use_root_instrument=use_root_instrument\n )\n log.debug(\"Start monitoring thread\")\n if Monitor.running:\n # stop the old server\n log.debug(\"Stopping and restarting server\")\n Monitor.running.stop()\n self.start()\n\n # Wait until the loop is running\n self.server_is_started.wait(timeout=5)\n if not self.server_is_started.is_set():\n raise RuntimeError(\"Failed to start server\")\n Monitor.running = self\n\n def run(self) -> None:\n \"\"\"\n Start the event loop and run forever.\n \"\"\"\n log.debug(\"Running Websocket server\")\n\n async def run_loop() -> None:\n self.loop = asyncio.get_running_loop()\n self._stop_loop_future = self.loop.create_future()\n\n async with websockets.server.serve(\n 
self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n try:\n await self._stop_loop_future\n except asyncio.CancelledError:\n log.debug(\"Websocket server thread shutting down\")\n\n try:\n asyncio.run(run_loop())\n finally:\n self.loop_is_closed.set()\n\n def update_all(self) -> None:\n \"\"\"\n Update all parameters in the monitor.\n \"\"\"\n for parameter in self._parameters:\n # call get if it can be called without arguments\n with suppress(TypeError):\n parameter.get()\n\n def stop(self) -> None:\n \"\"\"\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n \"\"\"\n self.join()\n Monitor.running = None\n\n def join(self, timeout: float | None = None) -> None:\n \"\"\"\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n \"\"\"\n log.debug(\"Shutting down server\")\n if not self.is_alive():\n # we run this check before trying to run to prevent a cryptic\n # error message\n log.debug(\"monitor is dead\")\n return\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n self.loop.call_soon_threadsafe(self._stop_loop_future.cancel)\n else:\n log.debug(\"No event loop found. Cannot stop event loop.\")\n except RuntimeError:\n # the above may throw a runtime error if the loop is already\n # stopped in which case there is nothing more to do\n log.exception(\"Could not close loop\")\n self.loop_is_closed.wait(timeout=5)\n if not self.loop_is_closed.is_set():\n raise RuntimeError(\"Failed to join loop\")\n log.debug(\"Loop reported closed\")\n super().join(timeout=timeout)\n log.debug(\"Monitor Thread has joined\")\n\n @staticmethod\n def show() -> None:\n \"\"\"\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = \"localhost:3000\"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n \"\"\"\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n\n\ndef main() -> None:\n import http.server\n\n # If this file is run, create a simple webserver that serves a simple\n # website that can be used to view monitored parameters.\n # # https://github.com/python/mypy/issues/4182\n parent_module = \".\".join(__loader__.name.split(\".\")[:-1]) # type: ignore[name-defined]\n\n static_dir = files(parent_module).joinpath(\"dist\")\n try:\n with as_file(static_dir) as extracted_dir:\n os.chdir(extracted_dir)\n log.info(\"Starting HTTP Server at http://localhost:%i\", SERVER_PORT)\n with socketserver.TCPServer(\n (\"\", SERVER_PORT), http.server.SimpleHTTPRequestHandler\n ) as httpd:\n log.debug(\"serving directory %s\", static_dir)\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n log.info(\"Shutting Down HTTP Server\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/qcodes/monitor/monitor.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n# vim:fenc=utf-8\n#\n# Copyright \u00a9 2017 unga <[email protected]>\n#\n# Distributed under terms of the MIT license.\n\"\"\"\nMonitor a set of parameters in a background thread\nstream output over websocket\n\nTo start monitor, run this file, or if qcodes is installed as a module:\n\n``% python -m qcodes.monitor.monitor``\n\nAdd parameters to monitor in your measurement by creating a new monitor with a\nlist of parameters to monitor:\n\n``monitor = qcodes.Monitor(param1, param2, param3, ...)``\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport json\nimport logging\nimport os\nimport socketserver\nimport time\nimport webbrowser\nfrom asyncio import CancelledError\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom importlib.resources import as_file, files\nfrom threading import Event, Thread\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport websockets\nimport websockets.exceptions\nimport websockets.server\n\nfrom qcodes.parameters import Parameter\n\nif TYPE_CHECKING:\n from collections.abc import Awaitable, Sequence\n\nWEBSOCKET_PORT = 5678\nSERVER_PORT = 3000\n\nlog = logging.getLogger(__name__)\n\n\ndef _get_metadata(\n *parameters: Parameter, use_root_instrument: bool = True\n) -> dict[str, Any]:\n \"\"\"\n Return a dictionary that contains the parameter metadata grouped by the\n instrument it belongs to.\n \"\"\"\n metadata_timestamp = time.time()\n # group metadata by instrument\n metas: dict[Any, Any] = defaultdict(list)\n for parameter in parameters:\n # Get the latest value from the parameter,\n # respecting the max_val_age parameter\n meta: dict[str, float | str | None] = {}\n meta[\"value\"] = str(parameter.get_latest())\n timestamp = parameter.get_latest.get_timestamp()\n if timestamp is not None:\n meta[\"ts\"] = timestamp.timestamp()\n else:\n meta[\"ts\"] = None\n meta[\"name\"] = parameter.label or parameter.name\n meta[\"unit\"] = parameter.unit\n\n # find the base instrument that this parameter belongs to\n if use_root_instrument:\n baseinst = parameter.root_instrument\n else:\n baseinst = parameter.instrument\n if baseinst is None:\n metas[\"Unbound Parameter\"].append(meta)\n else:\n metas[str(baseinst)].append(meta)\n\n # Create list of parameters, grouped by instrument\n parameters_out = []\n for instrument in metas:\n temp = {\"instrument\": instrument, \"parameters\": metas[instrument]}\n parameters_out.append(temp)\n\n state = {\"ts\": metadata_timestamp, \"parameters\": parameters_out}\n return state\n\n\ndef _handler(\n parameters: Sequence[Parameter], interval: float, use_root_instrument: bool = True\n) -> Callable[[websockets.server.WebSocketServerProtocol, str], Awaitable[None]]:\n \"\"\"\n Return the websockets server handler.\n \"\"\"\n\n async def server_func(\n websocket: websockets.server.WebSocketServerProtocol, _: str\n ) -> None:\n \"\"\"\n Create a websockets handler that sends parameter values to a listener\n every \"interval\" seconds.\n \"\"\"\n while True:\n try:\n # Update the parameter values\n try:\n meta = _get_metadata(\n *parameters, use_root_instrument=use_root_instrument\n )\n except ValueError:\n log.exception(\"Error getting parameters\")\n break\n log.debug(\"sending.. 
to %r\", websocket)\n await websocket.send(json.dumps(meta))\n # Wait for interval seconds and then send again\n await asyncio.sleep(interval)\n except (CancelledError, websockets.exceptions.ConnectionClosed):\n log.debug(\"Got CancelledError or ConnectionClosed\",\n exc_info=True)\n break\n log.debug(\"Closing websockets connection\")\n\n return server_func\n\n\nclass Monitor(Thread):\n \"\"\"\n QCodes Monitor - WebSockets server to monitor qcodes parameters.\n \"\"\"\n\n running: Monitor | None = None\n\n def __init__(\n self,\n *parameters: Parameter,\n interval: float = 1,\n use_root_instrument: bool = True,\n ):\n \"\"\"\n Monitor qcodes parameters.\n\n Args:\n *parameters: Parameters to monitor.\n interval: How often one wants to refresh the values.\n use_root_instrument: Defines if parameters are grouped according to\n parameter.root_instrument or parameter.instrument\n \"\"\"\n super().__init__(daemon=True)\n\n # Check that all values are valid parameters\n for parameter in parameters:\n if not isinstance(parameter, Parameter):\n raise TypeError(f\"We can only monitor QCodes \"\n f\"Parameters, not {type(parameter)}\")\n\n self.loop: asyncio.AbstractEventLoop | None = None\n self._stop_loop_future: asyncio.Future | None = None\n self._parameters = parameters\n self.loop_is_closed = Event()\n self.server_is_started = Event()\n self.handler = _handler(\n parameters, interval=interval, use_root_instrument=use_root_instrument\n )\n log.debug(\"Start monitoring thread\")\n if Monitor.running:\n # stop the old server\n log.debug(\"Stopping and restarting server\")\n Monitor.running.stop()\n self.start()\n\n # Wait until the loop is running\n self.server_is_started.wait(timeout=5)\n if not self.server_is_started.is_set():\n raise RuntimeError(\"Failed to start server\")\n Monitor.running = self\n\n def run(self) -> None:\n \"\"\"\n Start the event loop and run forever.\n \"\"\"\n log.debug(\"Running Websocket server\")\n\n async def run_loop() -> None:\n self.loop = asyncio.get_running_loop()\n self._stop_loop_future = self.loop.create_future()\n\n async with websockets.server.serve(\n self.handler, \"127.0.0.1\", WEBSOCKET_PORT, close_timeout=1\n ):\n self.server_is_started.set()\n await self._stop_loop_future\n log.debug(\"Websocket server thread shutting down\")\n\n try:\n asyncio.run(run_loop())\n finally:\n self.loop_is_closed.set()\n\n def update_all(self) -> None:\n \"\"\"\n Update all parameters in the monitor.\n \"\"\"\n for parameter in self._parameters:\n # call get if it can be called without arguments\n with suppress(TypeError):\n parameter.get()\n\n def stop(self) -> None:\n \"\"\"\n Shutdown the server, close the event loop and join the thread.\n Setting active Monitor to ``None``.\n \"\"\"\n self.join()\n Monitor.running = None\n\n def join(self, timeout: float | None = None) -> None:\n \"\"\"\n Overwrite ``Thread.join`` to make sure server is stopped before\n joining avoiding a potential deadlock.\n \"\"\"\n log.debug(\"Shutting down server\")\n if not self.is_alive():\n # we run this check before trying to run to prevent a cryptic\n # error message\n log.debug(\"monitor is dead\")\n return\n try:\n if self.loop is not None and self._stop_loop_future is not None:\n log.debug(\"Instructing server to stop event loop.\")\n self.loop.call_soon_threadsafe(self._stop_loop_future.set_result, True)\n else:\n log.debug(\"No event loop found. 
Cannot stop event loop.\")\n except RuntimeError:\n # the above may throw a runtime error if the loop is already\n # stopped in which case there is nothing more to do\n log.exception(\"Could not close loop\")\n self.loop_is_closed.wait(timeout=5)\n if not self.loop_is_closed.is_set():\n raise RuntimeError(\"Failed to join loop\")\n log.debug(\"Loop reported closed\")\n super().join(timeout=timeout)\n log.debug(\"Monitor Thread has joined\")\n\n @staticmethod\n def show() -> None:\n \"\"\"\n Overwrite this method to show/raise your monitor GUI\n F.ex.\n\n ::\n\n import webbrowser\n url = \"localhost:3000\"\n # Open URL in new window, raising the window if possible.\n webbrowser.open_new(url)\n\n \"\"\"\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n\n\ndef main() -> None:\n import http.server\n\n # If this file is run, create a simple webserver that serves a simple\n # website that can be used to view monitored parameters.\n # # https://github.com/python/mypy/issues/4182\n parent_module = \".\".join(__loader__.name.split(\".\")[:-1]) # type: ignore[name-defined]\n\n static_dir = files(parent_module).joinpath(\"dist\")\n try:\n with as_file(static_dir) as extracted_dir:\n os.chdir(extracted_dir)\n log.info(\"Starting HTTP Server at http://localhost:%i\", SERVER_PORT)\n with socketserver.TCPServer(\n (\"\", SERVER_PORT), http.server.SimpleHTTPRequestHandler\n ) as httpd:\n log.debug(\"serving directory %s\", static_dir)\n webbrowser.open(f\"http://localhost:{SERVER_PORT}\")\n httpd.serve_forever()\n except KeyboardInterrupt:\n log.info(\"Shutting Down HTTP Server\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/qcodes/monitor/monitor.py"}]}
| 3,288 | 260 |
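The fix above replaces `future.cancel()` with `future.set_result(True)` scheduled via `call_soon_threadsafe`, so the websocket server coroutine returns normally instead of unwinding through a `CancelledError`, which is likely what produced the unclosed-socket `ResourceWarning` in the issue. A self-contained sketch of that shutdown pattern follows; the `Worker` class is illustrative, not the QCoDeS `Monitor`.

```python
import asyncio
import threading


class Worker(threading.Thread):
    """Background thread that runs an asyncio loop until told to stop."""

    def __init__(self) -> None:
        super().__init__(daemon=True)
        self.loop = None
        self._stop_future = None
        self._started = threading.Event()

    def run(self) -> None:
        async def main() -> None:
            self.loop = asyncio.get_running_loop()
            self._stop_future = self.loop.create_future()
            self._started.set()
            # Returns normally once set_result() is called from another thread;
            # no CancelledError propagates out of the coroutine.
            await self._stop_future
            print("worker shut down cleanly")

        asyncio.run(main())

    def stop(self) -> None:
        self._started.wait(timeout=5)
        # The key pattern from the patch: resolve the future instead of
        # cancelling it, scheduled thread-safely onto the loop.
        self.loop.call_soon_threadsafe(self._stop_future.set_result, True)
        self.join(timeout=5)


if __name__ == "__main__":
    worker = Worker()
    worker.start()
    worker.stop()
```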
gh_patches_debug_36388
|
rasdani/github-patches
|
git_diff
|
apache__airflow-18772
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove duplicate line of code for configuration copy
This is a code improvement PR to remove a duplicate line. The code copies the config in either case of the IF statement, hence the duplicate call.
Moving that call above the IF makes a single call sufficient for the purpose.
I have moved the first call above the IF and removed the ELSE block.
---
**^ Add meaningful description above**
Read the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.
In case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.
In case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).
In case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/task/task_runner/base_task_runner.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """Base task runner"""
19 import os
20 import subprocess
21 import threading
22 from pwd import getpwnam
23 from tempfile import NamedTemporaryFile
24 from typing import Optional, Union
25
26 from airflow.configuration import conf
27 from airflow.exceptions import AirflowConfigException
28 from airflow.models.taskinstance import load_error_file
29 from airflow.utils.configuration import tmp_configuration_copy
30 from airflow.utils.log.logging_mixin import LoggingMixin
31 from airflow.utils.net import get_hostname
32 from airflow.utils.platform import getuser
33
34 PYTHONPATH_VAR = 'PYTHONPATH'
35
36
37 class BaseTaskRunner(LoggingMixin):
38 """
39 Runs Airflow task instances by invoking the `airflow tasks run` command with raw
40 mode enabled in a subprocess.
41
42 :param local_task_job: The local task job associated with running the
43 associated task instance.
44 :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob
45 """
46
47 def __init__(self, local_task_job):
48 # Pass task instance context into log handlers to setup the logger.
49 super().__init__(local_task_job.task_instance)
50 self._task_instance = local_task_job.task_instance
51
52 popen_prepend = []
53 if self._task_instance.run_as_user:
54 self.run_as_user = self._task_instance.run_as_user
55 else:
56 try:
57 self.run_as_user = conf.get('core', 'default_impersonation')
58 except AirflowConfigException:
59 self.run_as_user = None
60
61 # Add sudo commands to change user if we need to. Needed to handle SubDagOperator
62 # case using a SequentialExecutor.
63 self.log.debug("Planning to run as the %s user", self.run_as_user)
64 if self.run_as_user and (self.run_as_user != getuser()):
65 # We want to include any environment variables now, as we won't
66 # want to have to specify them in the sudo call - they would show
67 # up in `ps` that way! And run commands now, as the other user
68 # might not be able to run the cmds to get credentials
69 cfg_path = tmp_configuration_copy(chmod=0o600)
70
71 # Give ownership of file to user; only they can read and write
72 subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
73
74 # propagate PYTHONPATH environment variable
75 pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
76 popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]
77
78 if pythonpath_value:
79 popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')
80
81 else:
82 # Always provide a copy of the configuration file settings. Since
83 # we are running as the same user, and can pass through environment
84 # variables then we don't need to include those in the config copy
85 # - the runner can read/execute those values as it needs
86 cfg_path = tmp_configuration_copy(chmod=0o600)
87
88 self._error_file = NamedTemporaryFile(delete=True)
89 if self.run_as_user:
90 try:
91 os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)
92 except KeyError:
93 # No user `run_as_user` found
94 pass
95
96 self._cfg_path = cfg_path
97 self._command = (
98 popen_prepend
99 + self._task_instance.command_as_list(
100 raw=True,
101 pickle_id=local_task_job.pickle_id,
102 mark_success=local_task_job.mark_success,
103 job_id=local_task_job.id,
104 pool=local_task_job.pool,
105 cfg_path=cfg_path,
106 )
107 + ["--error-file", self._error_file.name]
108 )
109 self.process = None
110
111 def deserialize_run_error(self) -> Optional[Union[str, Exception]]:
112 """Return task runtime error if its written to provided error file."""
113 return load_error_file(self._error_file)
114
115 def _read_task_logs(self, stream):
116 while True:
117 line = stream.readline()
118 if isinstance(line, bytes):
119 line = line.decode('utf-8')
120 if not line:
121 break
122 self.log.info(
123 'Job %s: Subtask %s %s',
124 self._task_instance.job_id,
125 self._task_instance.task_id,
126 line.rstrip('\n'),
127 )
128
129 def run_command(self, run_with=None):
130 """
131 Run the task command.
132
133 :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
134 :type run_with: list
135 :return: the process that was run
136 :rtype: subprocess.Popen
137 """
138 run_with = run_with or []
139 full_cmd = run_with + self._command
140
141 self.log.info("Running on host: %s", get_hostname())
142 self.log.info('Running: %s', full_cmd)
143
144 proc = subprocess.Popen(
145 full_cmd,
146 stdout=subprocess.PIPE,
147 stderr=subprocess.STDOUT,
148 universal_newlines=True,
149 close_fds=True,
150 env=os.environ.copy(),
151 preexec_fn=os.setsid,
152 )
153
154 # Start daemon thread to read subprocess logging output
155 log_reader = threading.Thread(
156 target=self._read_task_logs,
157 args=(proc.stdout,),
158 )
159 log_reader.daemon = True
160 log_reader.start()
161 return proc
162
163 def start(self):
164 """Start running the task instance in a subprocess."""
165 raise NotImplementedError()
166
167 def return_code(self) -> Optional[int]:
168 """
169 :return: The return code associated with running the task instance or
170 None if the task is not yet done.
171 :rtype: int
172 """
173 raise NotImplementedError()
174
175 def terminate(self) -> None:
176 """Force kill the running task instance."""
177 raise NotImplementedError()
178
179 def on_finish(self) -> None:
180 """A callback that should be called when this is done running."""
181 if self._cfg_path and os.path.isfile(self._cfg_path):
182 if self.run_as_user:
183 subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
184 else:
185 os.remove(self._cfg_path)
186 try:
187 self._error_file.close()
188 except FileNotFoundError:
189 # The subprocess has deleted this file before we do
190 # so we ignore
191 pass
192
```
Path: `airflow/utils/configuration.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import json
20 import os
21 from tempfile import mkstemp
22
23 from airflow.configuration import conf
24
25
26 def tmp_configuration_copy(chmod=0o600):
27 """
28 Returns a path for a temporary file including a full copy of the configuration
29 settings.
30 :return: a path to a temporary file
31 """
32 cfg_dict = conf.as_dict(display_sensitive=True, raw=True)
33 temp_fd, cfg_path = mkstemp()
34
35 with os.fdopen(temp_fd, 'w') as temp_file:
36 # Set the permissions before we write anything to it.
37 if chmod is not None:
38 os.fchmod(temp_fd, chmod)
39 json.dump(cfg_dict, temp_file)
40
41 return cfg_path
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/task/task_runner/base_task_runner.py b/airflow/task/task_runner/base_task_runner.py
--- a/airflow/task/task_runner/base_task_runner.py
+++ b/airflow/task/task_runner/base_task_runner.py
@@ -66,7 +66,7 @@
# want to have to specify them in the sudo call - they would show
# up in `ps` that way! And run commands now, as the other user
# might not be able to run the cmds to get credentials
- cfg_path = tmp_configuration_copy(chmod=0o600)
+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)
# Give ownership of file to user; only they can read and write
subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
@@ -83,7 +83,7 @@
# we are running as the same user, and can pass through environment
# variables then we don't need to include those in the config copy
# - the runner can read/execute those values as it needs
- cfg_path = tmp_configuration_copy(chmod=0o600)
+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)
self._error_file = NamedTemporaryFile(delete=True)
if self.run_as_user:
diff --git a/airflow/utils/configuration.py b/airflow/utils/configuration.py
--- a/airflow/utils/configuration.py
+++ b/airflow/utils/configuration.py
@@ -23,13 +23,23 @@
from airflow.configuration import conf
-def tmp_configuration_copy(chmod=0o600):
+def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):
"""
Returns a path for a temporary file including a full copy of the configuration
settings.
+
+ :param include_env: Should the value of configuration from ``AIRFLOW__``
+ environment variables be included or not
+ :type include_env: bool
+ :param include_cmds: Should the result of calling any *_cmd config be
+ set (True, default), or should the _cmd options be left as the
+ command to run (False)
+ :type include_cmds: bool
:return: a path to a temporary file
"""
- cfg_dict = conf.as_dict(display_sensitive=True, raw=True)
+ cfg_dict = conf.as_dict(
+ display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env
+ )
temp_fd, cfg_path = mkstemp()
with os.fdopen(temp_fd, 'w') as temp_file:
|
{"golden_diff": "diff --git a/airflow/task/task_runner/base_task_runner.py b/airflow/task/task_runner/base_task_runner.py\n--- a/airflow/task/task_runner/base_task_runner.py\n+++ b/airflow/task/task_runner/base_task_runner.py\n@@ -66,7 +66,7 @@\n # want to have to specify them in the sudo call - they would show\n # up in `ps` that way! And run commands now, as the other user\n # might not be able to run the cmds to get credentials\n- cfg_path = tmp_configuration_copy(chmod=0o600)\n+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)\n \n # Give ownership of file to user; only they can read and write\n subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)\n@@ -83,7 +83,7 @@\n # we are running as the same user, and can pass through environment\n # variables then we don't need to include those in the config copy\n # - the runner can read/execute those values as it needs\n- cfg_path = tmp_configuration_copy(chmod=0o600)\n+ cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)\n \n self._error_file = NamedTemporaryFile(delete=True)\n if self.run_as_user:\ndiff --git a/airflow/utils/configuration.py b/airflow/utils/configuration.py\n--- a/airflow/utils/configuration.py\n+++ b/airflow/utils/configuration.py\n@@ -23,13 +23,23 @@\n from airflow.configuration import conf\n \n \n-def tmp_configuration_copy(chmod=0o600):\n+def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):\n \"\"\"\n Returns a path for a temporary file including a full copy of the configuration\n settings.\n+\n+ :param include_env: Should the value of configuration from ``AIRFLOW__``\n+ environment variables be included or not\n+ :type include_env: bool\n+ :param include_cmds: Should the result of calling any *_cmd config be\n+ set (True, default), or should the _cmd options be left as the\n+ command to run (False)\n+ :type include_cmds: bool\n :return: a path to a temporary file\n \"\"\"\n- cfg_dict = conf.as_dict(display_sensitive=True, raw=True)\n+ cfg_dict = conf.as_dict(\n+ display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env\n+ )\n temp_fd, cfg_path = mkstemp()\n \n with os.fdopen(temp_fd, 'w') as temp_file:\n", "issue": "Remove duplicate line of code for configuration copy\nThis is code improvement PR to remove a duplicate line. The code copies config in either cases of IF statement hence duplicate call. \r\nMoving that before IF makes it unique and sufficient for the purpose.\r\n\r\nI have moved first call above IF and removed ELSE block\r\n\r\n---\r\n**^ Add meaningful description above**\r\n\r\nRead the **[Pull Request Guidelines](https://github.com/apache/airflow/blob/main/CONTRIBUTING.rst#pull-request-guidelines)** for more information.\r\nIn case of fundamental code change, Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)) is needed.\r\nIn case of a new dependency, check compliance with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).\r\nIn case of backwards incompatible changes please leave a note in [UPDATING.md](https://github.com/apache/airflow/blob/main/UPDATING.md).\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Base task runner\"\"\"\nimport os\nimport subprocess\nimport threading\nfrom pwd import getpwnam\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, Union\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException\nfrom airflow.models.taskinstance import load_error_file\nfrom airflow.utils.configuration import tmp_configuration_copy\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.platform import getuser\n\nPYTHONPATH_VAR = 'PYTHONPATH'\n\n\nclass BaseTaskRunner(LoggingMixin):\n \"\"\"\n Runs Airflow task instances by invoking the `airflow tasks run` command with raw\n mode enabled in a subprocess.\n\n :param local_task_job: The local task job associated with running the\n associated task instance.\n :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob\n \"\"\"\n\n def __init__(self, local_task_job):\n # Pass task instance context into log handlers to setup the logger.\n super().__init__(local_task_job.task_instance)\n self._task_instance = local_task_job.task_instance\n\n popen_prepend = []\n if self._task_instance.run_as_user:\n self.run_as_user = self._task_instance.run_as_user\n else:\n try:\n self.run_as_user = conf.get('core', 'default_impersonation')\n except AirflowConfigException:\n self.run_as_user = None\n\n # Add sudo commands to change user if we need to. Needed to handle SubDagOperator\n # case using a SequentialExecutor.\n self.log.debug(\"Planning to run as the %s user\", self.run_as_user)\n if self.run_as_user and (self.run_as_user != getuser()):\n # We want to include any environment variables now, as we won't\n # want to have to specify them in the sudo call - they would show\n # up in `ps` that way! And run commands now, as the other user\n # might not be able to run the cmds to get credentials\n cfg_path = tmp_configuration_copy(chmod=0o600)\n\n # Give ownership of file to user; only they can read and write\n subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)\n\n # propagate PYTHONPATH environment variable\n pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')\n popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]\n\n if pythonpath_value:\n popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')\n\n else:\n # Always provide a copy of the configuration file settings. 
Since\n # we are running as the same user, and can pass through environment\n # variables then we don't need to include those in the config copy\n # - the runner can read/execute those values as it needs\n cfg_path = tmp_configuration_copy(chmod=0o600)\n\n self._error_file = NamedTemporaryFile(delete=True)\n if self.run_as_user:\n try:\n os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)\n except KeyError:\n # No user `run_as_user` found\n pass\n\n self._cfg_path = cfg_path\n self._command = (\n popen_prepend\n + self._task_instance.command_as_list(\n raw=True,\n pickle_id=local_task_job.pickle_id,\n mark_success=local_task_job.mark_success,\n job_id=local_task_job.id,\n pool=local_task_job.pool,\n cfg_path=cfg_path,\n )\n + [\"--error-file\", self._error_file.name]\n )\n self.process = None\n\n def deserialize_run_error(self) -> Optional[Union[str, Exception]]:\n \"\"\"Return task runtime error if its written to provided error file.\"\"\"\n return load_error_file(self._error_file)\n\n def _read_task_logs(self, stream):\n while True:\n line = stream.readline()\n if isinstance(line, bytes):\n line = line.decode('utf-8')\n if not line:\n break\n self.log.info(\n 'Job %s: Subtask %s %s',\n self._task_instance.job_id,\n self._task_instance.task_id,\n line.rstrip('\\n'),\n )\n\n def run_command(self, run_with=None):\n \"\"\"\n Run the task command.\n\n :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``\n :type run_with: list\n :return: the process that was run\n :rtype: subprocess.Popen\n \"\"\"\n run_with = run_with or []\n full_cmd = run_with + self._command\n\n self.log.info(\"Running on host: %s\", get_hostname())\n self.log.info('Running: %s', full_cmd)\n\n proc = subprocess.Popen(\n full_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n close_fds=True,\n env=os.environ.copy(),\n preexec_fn=os.setsid,\n )\n\n # Start daemon thread to read subprocess logging output\n log_reader = threading.Thread(\n target=self._read_task_logs,\n args=(proc.stdout,),\n )\n log_reader.daemon = True\n log_reader.start()\n return proc\n\n def start(self):\n \"\"\"Start running the task instance in a subprocess.\"\"\"\n raise NotImplementedError()\n\n def return_code(self) -> Optional[int]:\n \"\"\"\n :return: The return code associated with running the task instance or\n None if the task is not yet done.\n :rtype: int\n \"\"\"\n raise NotImplementedError()\n\n def terminate(self) -> None:\n \"\"\"Force kill the running task instance.\"\"\"\n raise NotImplementedError()\n\n def on_finish(self) -> None:\n \"\"\"A callback that should be called when this is done running.\"\"\"\n if self._cfg_path and os.path.isfile(self._cfg_path):\n if self.run_as_user:\n subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)\n else:\n os.remove(self._cfg_path)\n try:\n self._error_file.close()\n except FileNotFoundError:\n # The subprocess has deleted this file before we do\n # so we ignore\n pass\n", "path": "airflow/task/task_runner/base_task_runner.py"}, {"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nfrom tempfile import mkstemp\n\nfrom airflow.configuration import conf\n\n\ndef tmp_configuration_copy(chmod=0o600):\n \"\"\"\n Returns a path for a temporary file including a full copy of the configuration\n settings.\n :return: a path to a temporary file\n \"\"\"\n cfg_dict = conf.as_dict(display_sensitive=True, raw=True)\n temp_fd, cfg_path = mkstemp()\n\n with os.fdopen(temp_fd, 'w') as temp_file:\n # Set the permissions before we write anything to it.\n if chmod is not None:\n os.fchmod(temp_fd, chmod)\n json.dump(cfg_dict, temp_file)\n\n return cfg_path\n", "path": "airflow/utils/configuration.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Base task runner\"\"\"\nimport os\nimport subprocess\nimport threading\nfrom pwd import getpwnam\nfrom tempfile import NamedTemporaryFile\nfrom typing import Optional, Union\n\nfrom airflow.configuration import conf\nfrom airflow.exceptions import AirflowConfigException\nfrom airflow.models.taskinstance import load_error_file\nfrom airflow.utils.configuration import tmp_configuration_copy\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.net import get_hostname\nfrom airflow.utils.platform import getuser\n\nPYTHONPATH_VAR = 'PYTHONPATH'\n\n\nclass BaseTaskRunner(LoggingMixin):\n \"\"\"\n Runs Airflow task instances by invoking the `airflow tasks run` command with raw\n mode enabled in a subprocess.\n\n :param local_task_job: The local task job associated with running the\n associated task instance.\n :type local_task_job: airflow.jobs.local_task_job.LocalTaskJob\n \"\"\"\n\n def __init__(self, local_task_job):\n # Pass task instance context into log handlers to setup the logger.\n super().__init__(local_task_job.task_instance)\n self._task_instance = local_task_job.task_instance\n\n popen_prepend = []\n if self._task_instance.run_as_user:\n self.run_as_user = self._task_instance.run_as_user\n else:\n try:\n self.run_as_user = conf.get('core', 'default_impersonation')\n except AirflowConfigException:\n self.run_as_user = None\n\n # Add sudo commands to change user if we need to. 
Needed to handle SubDagOperator\n # case using a SequentialExecutor.\n self.log.debug(\"Planning to run as the %s user\", self.run_as_user)\n if self.run_as_user and (self.run_as_user != getuser()):\n # We want to include any environment variables now, as we won't\n # want to have to specify them in the sudo call - they would show\n # up in `ps` that way! And run commands now, as the other user\n # might not be able to run the cmds to get credentials\n cfg_path = tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True)\n\n # Give ownership of file to user; only they can read and write\n subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)\n\n # propagate PYTHONPATH environment variable\n pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')\n popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]\n\n if pythonpath_value:\n popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')\n\n else:\n # Always provide a copy of the configuration file settings. Since\n # we are running as the same user, and can pass through environment\n # variables then we don't need to include those in the config copy\n # - the runner can read/execute those values as it needs\n cfg_path = tmp_configuration_copy(chmod=0o600, include_env=False, include_cmds=False)\n\n self._error_file = NamedTemporaryFile(delete=True)\n if self.run_as_user:\n try:\n os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)\n except KeyError:\n # No user `run_as_user` found\n pass\n\n self._cfg_path = cfg_path\n self._command = (\n popen_prepend\n + self._task_instance.command_as_list(\n raw=True,\n pickle_id=local_task_job.pickle_id,\n mark_success=local_task_job.mark_success,\n job_id=local_task_job.id,\n pool=local_task_job.pool,\n cfg_path=cfg_path,\n )\n + [\"--error-file\", self._error_file.name]\n )\n self.process = None\n\n def deserialize_run_error(self) -> Optional[Union[str, Exception]]:\n \"\"\"Return task runtime error if its written to provided error file.\"\"\"\n return load_error_file(self._error_file)\n\n def _read_task_logs(self, stream):\n while True:\n line = stream.readline()\n if isinstance(line, bytes):\n line = line.decode('utf-8')\n if not line:\n break\n self.log.info(\n 'Job %s: Subtask %s %s',\n self._task_instance.job_id,\n self._task_instance.task_id,\n line.rstrip('\\n'),\n )\n\n def run_command(self, run_with=None):\n \"\"\"\n Run the task command.\n\n :param run_with: list of tokens to run the task command with e.g. 
``['bash', '-c']``\n :type run_with: list\n :return: the process that was run\n :rtype: subprocess.Popen\n \"\"\"\n run_with = run_with or []\n full_cmd = run_with + self._command\n\n self.log.info(\"Running on host: %s\", get_hostname())\n self.log.info('Running: %s', full_cmd)\n\n proc = subprocess.Popen(\n full_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n close_fds=True,\n env=os.environ.copy(),\n preexec_fn=os.setsid,\n )\n\n # Start daemon thread to read subprocess logging output\n log_reader = threading.Thread(\n target=self._read_task_logs,\n args=(proc.stdout,),\n )\n log_reader.daemon = True\n log_reader.start()\n return proc\n\n def start(self):\n \"\"\"Start running the task instance in a subprocess.\"\"\"\n raise NotImplementedError()\n\n def return_code(self) -> Optional[int]:\n \"\"\"\n :return: The return code associated with running the task instance or\n None if the task is not yet done.\n :rtype: int\n \"\"\"\n raise NotImplementedError()\n\n def terminate(self) -> None:\n \"\"\"Force kill the running task instance.\"\"\"\n raise NotImplementedError()\n\n def on_finish(self) -> None:\n \"\"\"A callback that should be called when this is done running.\"\"\"\n if self._cfg_path and os.path.isfile(self._cfg_path):\n if self.run_as_user:\n subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)\n else:\n os.remove(self._cfg_path)\n try:\n self._error_file.close()\n except FileNotFoundError:\n # The subprocess has deleted this file before we do\n # so we ignore\n pass\n", "path": "airflow/task/task_runner/base_task_runner.py"}, {"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport os\nfrom tempfile import mkstemp\n\nfrom airflow.configuration import conf\n\n\ndef tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):\n \"\"\"\n Returns a path for a temporary file including a full copy of the configuration\n settings.\n\n :param include_env: Should the value of configuration from ``AIRFLOW__``\n environment variables be included or not\n :type include_env: bool\n :param include_cmds: Should the result of calling any *_cmd config be\n set (True, default), or should the _cmd options be left as the\n command to run (False)\n :type include_cmds: bool\n :return: a path to a temporary file\n \"\"\"\n cfg_dict = conf.as_dict(\n display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env\n )\n temp_fd, cfg_path = mkstemp()\n\n with os.fdopen(temp_fd, 'w') as temp_file:\n # Set the permissions before we write anything to it.\n if chmod is not None:\n os.fchmod(temp_fd, chmod)\n json.dump(cfg_dict, temp_file)\n\n return cfg_path\n", "path": "airflow/utils/configuration.py"}]}
| 2,904 | 609 |
gh_patches_debug_61186
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-1071
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include request URL in error message
It would be really helpful if the logged error message were "Error handling request http://host/path/etc" instead of just "Error handling request".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/workers/base.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import os
8 import signal
9 import sys
10 import time
11 import traceback
12 from random import randint
13
14
15 from gunicorn import util
16 from gunicorn.workers.workertmp import WorkerTmp
17 from gunicorn.reloader import Reloader
18 from gunicorn.http.errors import (
19 InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,
20 InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,
21 )
22 from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
23 from gunicorn.http.wsgi import default_environ, Response
24 from gunicorn.six import MAXSIZE
25
26
27 class Worker(object):
28
29 SIGNALS = [getattr(signal, "SIG%s" % x)
30 for x in "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()]
31
32 PIPE = []
33
34 def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
35 """\
36 This is called pre-fork so it shouldn't do anything to the
37 current process. If there's a need to make process wide
38 changes you'll want to do that in ``self.init_process()``.
39 """
40 self.age = age
41 self.ppid = ppid
42 self.sockets = sockets
43 self.app = app
44 self.timeout = timeout
45 self.cfg = cfg
46 self.booted = False
47 self.aborted = False
48 self.reloader = None
49
50 self.nr = 0
51 jitter = randint(0, cfg.max_requests_jitter)
52 self.max_requests = cfg.max_requests + jitter or MAXSIZE
53 self.alive = True
54 self.log = log
55 self.tmp = WorkerTmp(cfg)
56
57 def __str__(self):
58 return "<Worker %s>" % self.pid
59
60 @property
61 def pid(self):
62 return os.getpid()
63
64 def notify(self):
65 """\
66 Your worker subclass must arrange to have this method called
67 once every ``self.timeout`` seconds. If you fail in accomplishing
68 this task, the master process will murder your workers.
69 """
70 self.tmp.notify()
71
72 def run(self):
73 """\
74 This is the mainloop of a worker process. You should override
75 this method in a subclass to provide the intended behaviour
76 for your particular evil schemes.
77 """
78 raise NotImplementedError()
79
80 def init_process(self):
81 """\
82 If you override this method in a subclass, the last statement
83 in the function should be to call this method with
84 super(MyWorkerClass, self).init_process() so that the ``run()``
85 loop is initiated.
86 """
87
88 # start the reloader
89 if self.cfg.reload:
90 def changed(fname):
91 self.log.info("Worker reloading: %s modified", fname)
92 os.kill(self.pid, signal.SIGQUIT)
93 self.reloader = Reloader(callback=changed)
94 self.reloader.start()
95
96 # set environment' variables
97 if self.cfg.env:
98 for k, v in self.cfg.env.items():
99 os.environ[k] = v
100
101 util.set_owner_process(self.cfg.uid, self.cfg.gid)
102
103 # Reseed the random number generator
104 util.seed()
105
106 # For waking ourselves up
107 self.PIPE = os.pipe()
108 for p in self.PIPE:
109 util.set_non_blocking(p)
110 util.close_on_exec(p)
111
112 # Prevent fd inheritance
113 [util.close_on_exec(s) for s in self.sockets]
114 util.close_on_exec(self.tmp.fileno())
115
116 self.log.close_on_exec()
117
118 self.init_signals()
119
120 self.cfg.post_worker_init(self)
121
122 self.load_wsgi()
123
124 # Enter main run loop
125 self.booted = True
126 self.run()
127
128 def load_wsgi(self):
129 try:
130 self.wsgi = self.app.wsgi()
131 except SyntaxError as e:
132 if not self.cfg.reload:
133 raise
134
135 self.log.exception(e)
136
137 exc_type, exc_val, exc_tb = sys.exc_info()
138 self.reloader.add_extra_file(exc_val.filename)
139
140 tb_string = traceback.format_exc(exc_tb)
141 self.wsgi = util.make_fail_app(tb_string)
142
143 def init_signals(self):
144 # reset signaling
145 [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]
146 # init new signaling
147 signal.signal(signal.SIGQUIT, self.handle_quit)
148 signal.signal(signal.SIGTERM, self.handle_exit)
149 signal.signal(signal.SIGINT, self.handle_quit)
150 signal.signal(signal.SIGWINCH, self.handle_winch)
151 signal.signal(signal.SIGUSR1, self.handle_usr1)
152 signal.signal(signal.SIGABRT, self.handle_abort)
153
154 # Don't let SIGTERM and SIGUSR1 disturb active requests
155 # by interrupting system calls
156 if hasattr(signal, 'siginterrupt'): # python >= 2.6
157 signal.siginterrupt(signal.SIGTERM, False)
158 signal.siginterrupt(signal.SIGUSR1, False)
159
160 def handle_usr1(self, sig, frame):
161 self.log.reopen_files()
162
163 def handle_exit(self, sig, frame):
164 self.alive = False
165
166 def handle_quit(self, sig, frame):
167 self.alive = False
168 # worker_int callback
169 self.cfg.worker_int(self)
170 time.sleep(0.1)
171 sys.exit(0)
172
173 def handle_abort(self, sig, frame):
174 self.alive = False
175 self.cfg.worker_abort(self)
176 sys.exit(1)
177
178 def handle_error(self, req, client, addr, exc):
179 request_start = datetime.now()
180 addr = addr or ('', -1) # unix socket case
181 if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,
182 InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
183 LimitRequestLine, LimitRequestHeaders,
184 InvalidProxyLine, ForbiddenProxyRequest)):
185
186 status_int = 400
187 reason = "Bad Request"
188
189 if isinstance(exc, InvalidRequestLine):
190 mesg = "Invalid Request Line '%s'" % str(exc)
191 elif isinstance(exc, InvalidRequestMethod):
192 mesg = "Invalid Method '%s'" % str(exc)
193 elif isinstance(exc, InvalidHTTPVersion):
194 mesg = "Invalid HTTP Version '%s'" % str(exc)
195 elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
196 mesg = "%s" % str(exc)
197 if not req and hasattr(exc, "req"):
198 req = exc.req # for access log
199 elif isinstance(exc, LimitRequestLine):
200 mesg = "%s" % str(exc)
201 elif isinstance(exc, LimitRequestHeaders):
202 mesg = "Error parsing headers: '%s'" % str(exc)
203 elif isinstance(exc, InvalidProxyLine):
204 mesg = "'%s'" % str(exc)
205 elif isinstance(exc, ForbiddenProxyRequest):
206 reason = "Forbidden"
207 mesg = "Request forbidden"
208 status_int = 403
209
210 msg = "Invalid request from ip={ip}: {error}"
211 self.log.debug(msg.format(ip=addr[0], error=str(exc)))
212 else:
213 self.log.exception("Error handling request")
214
215 status_int = 500
216 reason = "Internal Server Error"
217 mesg = ""
218
219 if req is not None:
220 request_time = datetime.now() - request_start
221 environ = default_environ(req, client, self.cfg)
222 environ['REMOTE_ADDR'] = addr[0]
223 environ['REMOTE_PORT'] = str(addr[1])
224 resp = Response(req, client, self.cfg)
225 resp.status = "%s %s" % (status_int, reason)
226 resp.response_length = len(mesg)
227 self.log.access(resp, req, environ, request_time)
228
229 try:
230 util.write_error(client, status_int, reason, mesg)
231 except:
232 self.log.debug("Failed to send error message.")
233
234 def handle_winch(self, sig, fname):
235 # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
236 return
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -210,7 +210,7 @@
msg = "Invalid request from ip={ip}: {error}"
self.log.debug(msg.format(ip=addr[0], error=str(exc)))
else:
- self.log.exception("Error handling request")
+ self.log.exception("Error handling request %s", req.uri)
status_int = 500
reason = "Internal Server Error"
|
{"golden_diff": "diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py\n--- a/gunicorn/workers/base.py\n+++ b/gunicorn/workers/base.py\n@@ -210,7 +210,7 @@\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n- self.log.exception(\"Error handling request\")\n+ self.log.exception(\"Error handling request %s\", req.uri)\n \n status_int = 500\n reason = \"Internal Server Error\"\n", "issue": "Include request URL in error message\nIt would be really helpful if the logged error message were \"Error handling request http://host/path/etc\" instead of just \"Error handling request\".\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request %s\", req.uri)\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n", "path": "gunicorn/workers/base.py"}]}
| 2,667 | 126 |
gh_patches_debug_35072
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-1974
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
give better error when DATABASE_URL is empty
@jamieforrest hit this and I just did as well. Here's the gnarly Traceback we currently have:
```
pid-39707 thread-140735224484224 (MainThread) Oh no! Aspen crashed!
pid-39707 thread-140735224484224 (MainThread) Traceback (most recent call last):
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py", line 125, in main
pid-39707 thread-140735224484224 (MainThread) _main(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py", line 51, in _main
pid-39707 thread-140735224484224 (MainThread) website = Website(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/website.py", line 31, in __init__
pid-39707 thread-140735224484224 (MainThread) self.configure(argv)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py", line 343, in configure
pid-39707 thread-140735224484224 (MainThread) self.run_config_scripts()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py", line 386, in run_config_scripts
pid-39707 thread-140735224484224 (MainThread) execfile(filepath, {'website': self})
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/configure-aspen.py", line 7, in <module>
pid-39707 thread-140735224484224 (MainThread) import gittip.authentication
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/authentication.py", line 10, in <module>
pid-39707 thread-140735224484224 (MainThread) from gittip.orm import db
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 73, in <module>
pid-39707 thread-140735224484224 (MainThread) db = SQLAlchemy()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 31, in __init__
pid-39707 thread-140735224484224 (MainThread) self.session = self.create_session()
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 46, in create_session
pid-39707 thread-140735224484224 (MainThread) session.configure(bind=self.engine)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py", line 38, in engine
pid-39707 thread-140735224484224 (MainThread) return create_engine(dburl, pool_size=maxconn, max_overflow=0)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py", line 331, in create_engine
pid-39707 thread-140735224484224 (MainThread) return strategy.create(*args, **kwargs)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/strategies.py", line 48, in create
pid-39707 thread-140735224484224 (MainThread) u = url.make_url(name_or_url)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py", line 154, in make_url
pid-39707 thread-140735224484224 (MainThread) return _parse_rfc1738_args(name_or_url)
pid-39707 thread-140735224484224 (MainThread) File "/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py", line 196, in _parse_rfc1738_args
pid-39707 thread-140735224484224 (MainThread) "Could not parse rfc1738 URL from string '%s'" % name)
pid-39707 thread-140735224484224 (MainThread) ArgumentError: Could not parse rfc1738 URL from string ''
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/utils/swaddle.py`
Content:
```
1 """Run a command with an environment specified in a file.
2 """
3 import os
4 import sys
5 if sys.platform.startswith('win'):
6 import ctypes
7
8
9 def configure(envdef):
10 """Given a filepath or -, return a tuple of bytestrings.
11 """
12 if envdef == '-': # Heroku-style
13
14 # Heroku used to use =>, which was weird. Now they use :, and we may as
15 # well too, but I had already started using =, and now changing would
16 # break people's local.env.
17
18 m = "[SWADDLE] reading environment from stdin."
19 print >> sys.stderr, m
20 fp = sys.stdin
21 splitter = ": "
22 elif not os.path.isfile(envdef):
23 m = "[SWADDLE] %s is not a file; environment unchanged." % envdef
24 print >> sys.stderr, m
25 envdef = ""
26 else: # Gittip-style
27 fp = open(envdef)
28 splitter = "="
29
30 args = sys.argv[2:]
31 if not args:
32 m ="[SWADDLE] No command specified; exiting."
33 raise SystemExit(m)
34 if sys.platform == 'win32' and not os.path.isfile(args[0]):
35 # Try with an '.exe' extension on Windows if the command doesn't
36 # already have an extension.
37 if os.path.splitext(args[0])[-1] == '':
38 args[0] += '.exe'
39 if not os.path.isfile(args[0]):
40 m ="[SWADDLE] Command %s does not exist; exiting." % args[0]
41 raise SystemExit(m)
42
43 if envdef:
44 for line in fp:
45 line = line.split('#')[0].strip()
46 if splitter not in line:
47 m = "[SWADDLE] Skipping line: %s." % line
48 print >> sys.stderr, m
49 continue
50 key, val = line.split(splitter, 1)
51 if sys.platform.startswith('win'):
52 ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())
53 else:
54 os.environ[key.strip()] = val.strip()
55
56 return args
57
58
59 def main():
60 if len(sys.argv) < 1:
61 m ="[SWADDLE] Usage: %s {definition.env} {command} {args}" % sys.argv[0]
62 raise SystemExit(m)
63 envdef = sys.argv[1]
64 args = configure(envdef)
65 os.execv(args[0], args)
66
```
Path: `setup.py`
Content:
```
1 import os
2 from setuptools import setup, find_packages
3
4
5 def get_version():
6 try:
7 return open(os.path.join('www', 'version.txt')).read().strip()
8 except OSError:
9 return 'n/a'
10
11
12 setup( name='gittip'
13 , version=get_version()
14 , packages=find_packages()
15 , entry_points = { 'console_scripts'
16 : [ 'payday=gittip.cli:payday'
17 , 'swaddle=gittip.utils.swaddle:main'
18 , 'fake_data=gittip.utils.fake_data:main'
19 ]
20 }
21 )
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gittip/utils/swaddle.py b/gittip/utils/swaddle.py
deleted file mode 100644
--- a/gittip/utils/swaddle.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""Run a command with an environment specified in a file.
-"""
-import os
-import sys
-if sys.platform.startswith('win'):
- import ctypes
-
-
-def configure(envdef):
- """Given a filepath or -, return a tuple of bytestrings.
- """
- if envdef == '-': # Heroku-style
-
- # Heroku used to use =>, which was weird. Now they use :, and we may as
- # well too, but I had already started using =, and now changing would
- # break people's local.env.
-
- m = "[SWADDLE] reading environment from stdin."
- print >> sys.stderr, m
- fp = sys.stdin
- splitter = ": "
- elif not os.path.isfile(envdef):
- m = "[SWADDLE] %s is not a file; environment unchanged." % envdef
- print >> sys.stderr, m
- envdef = ""
- else: # Gittip-style
- fp = open(envdef)
- splitter = "="
-
- args = sys.argv[2:]
- if not args:
- m ="[SWADDLE] No command specified; exiting."
- raise SystemExit(m)
- if sys.platform == 'win32' and not os.path.isfile(args[0]):
- # Try with an '.exe' extension on Windows if the command doesn't
- # already have an extension.
- if os.path.splitext(args[0])[-1] == '':
- args[0] += '.exe'
- if not os.path.isfile(args[0]):
- m ="[SWADDLE] Command %s does not exist; exiting." % args[0]
- raise SystemExit(m)
-
- if envdef:
- for line in fp:
- line = line.split('#')[0].strip()
- if splitter not in line:
- m = "[SWADDLE] Skipping line: %s." % line
- print >> sys.stderr, m
- continue
- key, val = line.split(splitter, 1)
- if sys.platform.startswith('win'):
- ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())
- else:
- os.environ[key.strip()] = val.strip()
-
- return args
-
-
-def main():
- if len(sys.argv) < 1:
- m ="[SWADDLE] Usage: %s {definition.env} {command} {args}" % sys.argv[0]
- raise SystemExit(m)
- envdef = sys.argv[1]
- args = configure(envdef)
- os.execv(args[0], args)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,6 @@
, packages=find_packages()
, entry_points = { 'console_scripts'
: [ 'payday=gittip.cli:payday'
- , 'swaddle=gittip.utils.swaddle:main'
, 'fake_data=gittip.utils.fake_data:main'
]
}
|
{"golden_diff": "diff --git a/gittip/utils/swaddle.py b/gittip/utils/swaddle.py\ndeleted file mode 100644\n--- a/gittip/utils/swaddle.py\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-\"\"\"Run a command with an environment specified in a file.\n-\"\"\"\n-import os\n-import sys\n-if sys.platform.startswith('win'):\n- import ctypes\n-\n-\n-def configure(envdef):\n- \"\"\"Given a filepath or -, return a tuple of bytestrings.\n- \"\"\"\n- if envdef == '-': # Heroku-style\n-\n- # Heroku used to use =>, which was weird. Now they use :, and we may as\n- # well too, but I had already started using =, and now changing would\n- # break people's local.env.\n-\n- m = \"[SWADDLE] reading environment from stdin.\"\n- print >> sys.stderr, m\n- fp = sys.stdin\n- splitter = \": \"\n- elif not os.path.isfile(envdef):\n- m = \"[SWADDLE] %s is not a file; environment unchanged.\" % envdef\n- print >> sys.stderr, m\n- envdef = \"\"\n- else: # Gittip-style\n- fp = open(envdef)\n- splitter = \"=\"\n-\n- args = sys.argv[2:]\n- if not args:\n- m =\"[SWADDLE] No command specified; exiting.\"\n- raise SystemExit(m)\n- if sys.platform == 'win32' and not os.path.isfile(args[0]):\n- # Try with an '.exe' extension on Windows if the command doesn't\n- # already have an extension.\n- if os.path.splitext(args[0])[-1] == '':\n- args[0] += '.exe'\n- if not os.path.isfile(args[0]):\n- m =\"[SWADDLE] Command %s does not exist; exiting.\" % args[0]\n- raise SystemExit(m)\n-\n- if envdef:\n- for line in fp:\n- line = line.split('#')[0].strip()\n- if splitter not in line:\n- m = \"[SWADDLE] Skipping line: %s.\" % line\n- print >> sys.stderr, m\n- continue\n- key, val = line.split(splitter, 1)\n- if sys.platform.startswith('win'):\n- ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())\n- else:\n- os.environ[key.strip()] = val.strip()\n-\n- return args\n-\n-\n-def main():\n- if len(sys.argv) < 1:\n- m =\"[SWADDLE] Usage: %s {definition.env} {command} {args}\" % sys.argv[0]\n- raise SystemExit(m)\n- envdef = sys.argv[1]\n- args = configure(envdef)\n- os.execv(args[0], args)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,6 @@\n , packages=find_packages()\n , entry_points = { 'console_scripts'\n : [ 'payday=gittip.cli:payday'\n- , 'swaddle=gittip.utils.swaddle:main'\n , 'fake_data=gittip.utils.fake_data:main'\n ]\n }\n", "issue": "give better error when DATABASE_URL is empty\n@jamieforrest hit this and I just did as well. Here's the gnarly Traceback we currently have:\n\n```\npid-39707 thread-140735224484224 (MainThread) Oh no! 
Aspen crashed!\npid-39707 thread-140735224484224 (MainThread) Traceback (most recent call last):\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py\", line 125, in main\npid-39707 thread-140735224484224 (MainThread) _main(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/server.py\", line 51, in _main\npid-39707 thread-140735224484224 (MainThread) website = Website(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/website.py\", line 31, in __init__\npid-39707 thread-140735224484224 (MainThread) self.configure(argv)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py\", line 343, in configure\npid-39707 thread-140735224484224 (MainThread) self.run_config_scripts()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/aspen/configuration/__init__.py\", line 386, in run_config_scripts\npid-39707 thread-140735224484224 (MainThread) execfile(filepath, {'website': self})\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/configure-aspen.py\", line 7, in <module>\npid-39707 thread-140735224484224 (MainThread) import gittip.authentication\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/authentication.py\", line 10, in <module>\npid-39707 thread-140735224484224 (MainThread) from gittip.orm import db\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 73, in <module>\npid-39707 thread-140735224484224 (MainThread) db = SQLAlchemy()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 31, in __init__\npid-39707 thread-140735224484224 (MainThread) self.session = self.create_session()\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 46, in create_session\npid-39707 thread-140735224484224 (MainThread) session.configure(bind=self.engine)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/gittip/orm/__init__.py\", line 38, in engine\npid-39707 thread-140735224484224 (MainThread) return create_engine(dburl, pool_size=maxconn, max_overflow=0)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py\", line 331, in create_engine\npid-39707 thread-140735224484224 (MainThread) return strategy.create(*args, **kwargs)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/strategies.py\", line 48, in create\npid-39707 thread-140735224484224 (MainThread) u = url.make_url(name_or_url)\npid-39707 thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py\", line 154, in make_url\npid-39707 thread-140735224484224 (MainThread) return _parse_rfc1738_args(name_or_url)\npid-39707 
thread-140735224484224 (MainThread) File \"/Users/whit537/personal/gittip/www.gittip.com/env/lib/python2.7/site-packages/sqlalchemy/engine/url.py\", line 196, in _parse_rfc1738_args\npid-39707 thread-140735224484224 (MainThread) \"Could not parse rfc1738 URL from string '%s'\" % name)\npid-39707 thread-140735224484224 (MainThread) ArgumentError: Could not parse rfc1738 URL from string ''\n```\n\n", "before_files": [{"content": "\"\"\"Run a command with an environment specified in a file.\n\"\"\"\nimport os\nimport sys\nif sys.platform.startswith('win'):\n import ctypes\n\n\ndef configure(envdef):\n \"\"\"Given a filepath or -, return a tuple of bytestrings.\n \"\"\"\n if envdef == '-': # Heroku-style\n\n # Heroku used to use =>, which was weird. Now they use :, and we may as\n # well too, but I had already started using =, and now changing would\n # break people's local.env.\n\n m = \"[SWADDLE] reading environment from stdin.\"\n print >> sys.stderr, m\n fp = sys.stdin\n splitter = \": \"\n elif not os.path.isfile(envdef):\n m = \"[SWADDLE] %s is not a file; environment unchanged.\" % envdef\n print >> sys.stderr, m\n envdef = \"\"\n else: # Gittip-style\n fp = open(envdef)\n splitter = \"=\"\n\n args = sys.argv[2:]\n if not args:\n m =\"[SWADDLE] No command specified; exiting.\"\n raise SystemExit(m)\n if sys.platform == 'win32' and not os.path.isfile(args[0]):\n # Try with an '.exe' extension on Windows if the command doesn't\n # already have an extension.\n if os.path.splitext(args[0])[-1] == '':\n args[0] += '.exe'\n if not os.path.isfile(args[0]):\n m =\"[SWADDLE] Command %s does not exist; exiting.\" % args[0]\n raise SystemExit(m)\n\n if envdef:\n for line in fp:\n line = line.split('#')[0].strip()\n if splitter not in line:\n m = \"[SWADDLE] Skipping line: %s.\" % line\n print >> sys.stderr, m\n continue\n key, val = line.split(splitter, 1)\n if sys.platform.startswith('win'):\n ctypes.windll.kernel32.SetEnvironmentVariableA(key.strip(), val.strip())\n else:\n os.environ[key.strip()] = val.strip()\n\n return args\n\n\ndef main():\n if len(sys.argv) < 1:\n m =\"[SWADDLE] Usage: %s {definition.env} {command} {args}\" % sys.argv[0]\n raise SystemExit(m)\n envdef = sys.argv[1]\n args = configure(envdef)\n os.execv(args[0], args)\n", "path": "gittip/utils/swaddle.py"}, {"content": "import os\nfrom setuptools import setup, find_packages\n\n\ndef get_version():\n try:\n return open(os.path.join('www', 'version.txt')).read().strip()\n except OSError:\n return 'n/a'\n\n\nsetup( name='gittip'\n , version=get_version()\n , packages=find_packages()\n , entry_points = { 'console_scripts'\n : [ 'payday=gittip.cli:payday'\n , 'swaddle=gittip.utils.swaddle:main'\n , 'fake_data=gittip.utils.fake_data:main'\n ]\n }\n )\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "gittip/utils/swaddle.py"}, {"content": "import os\nfrom setuptools import setup, find_packages\n\n\ndef get_version():\n try:\n return open(os.path.join('www', 'version.txt')).read().strip()\n except OSError:\n return 'n/a'\n\n\nsetup( name='gittip'\n , version=get_version()\n , packages=find_packages()\n , entry_points = { 'console_scripts'\n : [ 'payday=gittip.cli:payday'\n , 'fake_data=gittip.utils.fake_data:main'\n ]\n }\n )\n", "path": "setup.py"}]}
| 2,829 | 746 |
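
The golden diff above resolves the report by dropping the swaddle helper and its console script. The complaint itself — SQLAlchemy's opaque `ArgumentError: Could not parse rfc1738 URL from string ''` when `DATABASE_URL` is unset — is the kind of failure usually caught with an explicit guard before `create_engine` runs. The sketch below is illustrative only and not part of the gittip codebase; the environment-variable name is the one assumption carried over from the traceback.

```python
import os
import sys


def require_database_url():
    # Fail fast with a readable message instead of letting SQLAlchemy
    # raise "Could not parse rfc1738 URL from string ''".
    dburl = os.environ.get("DATABASE_URL", "").strip()
    if not dburl:
        sys.exit(
            "DATABASE_URL is empty or unset.\n"
            "Export it (for example postgresql://user:pass@localhost/gittip)\n"
            "or load your local.env before starting the site."
        )
    return dburl
```
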
gh_patches_debug_15818
|
rasdani/github-patches
|
git_diff
|
intel__dffml-177
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
model: scikit: setup.py needs correct entrypoints
```diff
diff --git a/model/scikit/setup.py b/model/scikit/setup.py
index 1bd6284..efbff80 100644
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -57,5 +57,9 @@ setup(
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={"dffml.model": [
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ ]},
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `model/scikit/setup.py`
Content:
```
1 import os
2 import ast
3 from io import open
4 from setuptools import find_packages, setup
5
6 ORG = "intel"
7 NAME = "dffml-model-scikit"
8 DESCRIPTION = "DFFML model scikit"
9 AUTHOR_NAME = "Yash Lamba"
10 AUTHOR_EMAIL = "[email protected]"
11 INSTALL_REQUIRES = ["scikit-learn>=0.21.2", "joblib>=0.13.2", "pandas>=0.25.0"]
12
13 IMPORT_NAME = (
14 NAME
15 if "replace_package_name".upper() != NAME
16 else "replace_import_package_name".upper()
17 ).replace("-", "_")
18
19 SELF_PATH = os.path.dirname(os.path.realpath(__file__))
20
21 with open(os.path.join(SELF_PATH, IMPORT_NAME, "version.py"), "r") as f:
22 for line in f:
23 if line.startswith("VERSION"):
24 version = ast.literal_eval(line.strip().split("=")[-1].strip())
25 break
26
27 with open(os.path.join(SELF_PATH, "README.md"), "r", encoding="utf-8") as f:
28 readme = f.read()
29
30 setup(
31 name="dffml-model-scikit",
32 version=version,
33 description="",
34 long_description=readme,
35 long_description_content_type="text/markdown",
36 author="Yash Lamba",
37 author_email="[email protected]",
38 maintainer="John Andersen",
39 maintainer_email="[email protected]",
40 url="https://github.com/intel/dffml/blob/master/model/scikit/README.md",
41 license="MIT",
42 keywords=["dffml"],
43 classifiers=[
44 "Development Status :: 3 - Alpha",
45 "Intended Audience :: Developers",
46 "License :: OSI Approved :: MIT License",
47 "Natural Language :: English",
48 "Operating System :: OS Independent",
49 "Programming Language :: Python :: 3 :: Only",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: Implementation :: CPython",
52 "Programming Language :: Python :: Implementation :: PyPy",
53 ],
54 install_requires=INSTALL_REQUIRES,
55 packages=find_packages(),
56 entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/model/scikit/setup.py b/model/scikit/setup.py
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -53,5 +53,18 @@
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={
+ "dffml.model": [
+ f"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier",
+ f"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier",
+ f"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier",
+ f"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier",
+ f"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier",
+ f"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB",
+ f"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis",
+ f"scikitsvc = {IMPORT_NAME}.scikit_models:SVC",
+ f"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression",
+ ]
+ },
)
|
{"golden_diff": "diff --git a/model/scikit/setup.py b/model/scikit/setup.py\n--- a/model/scikit/setup.py\n+++ b/model/scikit/setup.py\n@@ -53,5 +53,18 @@\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n+ entry_points={\n+ \"dffml.model\": [\n+ f\"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier\",\n+ f\"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier\",\n+ f\"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier\",\n+ f\"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier\",\n+ f\"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier\",\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier\",\n+ f\"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB\",\n+ f\"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis\",\n+ f\"scikitsvc = {IMPORT_NAME}.scikit_models:SVC\",\n+ f\"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression\",\n+ ]\n+ },\n )\n", "issue": "model: scikit: setup.py needs correct entrypoints\n```diff\r\ndiff --git a/model/scikit/setup.py b/model/scikit/setup.py\r\nindex 1bd6284..efbff80 100644\r\n--- a/model/scikit/setup.py\r\n+++ b/model/scikit/setup.py\r\n@@ -57,5 +57,9 @@ setup(\r\n ],\r\n install_requires=INSTALL_REQUIRES,\r\n packages=find_packages(),\r\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\r\n+ entry_points={\"dffml.model\": [\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ ]},\r\n )\r\n```\n", "before_files": [{"content": "import os\nimport ast\nfrom io import open\nfrom setuptools import find_packages, setup\n\nORG = \"intel\"\nNAME = \"dffml-model-scikit\"\nDESCRIPTION = \"DFFML model scikit\"\nAUTHOR_NAME = \"Yash Lamba\"\nAUTHOR_EMAIL = \"[email protected]\"\nINSTALL_REQUIRES = [\"scikit-learn>=0.21.2\", \"joblib>=0.13.2\", \"pandas>=0.25.0\"]\n\nIMPORT_NAME = (\n NAME\n if \"replace_package_name\".upper() != NAME\n else \"replace_import_package_name\".upper()\n).replace(\"-\", \"_\")\n\nSELF_PATH = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(SELF_PATH, IMPORT_NAME, \"version.py\"), \"r\") as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(SELF_PATH, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scikit\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"[email protected]\",\n maintainer=\"John Andersen\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/intel/dffml/blob/master/model/scikit/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n 
entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n)\n", "path": "model/scikit/setup.py"}], "after_files": [{"content": "import os\nimport ast\nfrom io import open\nfrom setuptools import find_packages, setup\n\nORG = \"intel\"\nNAME = \"dffml-model-scikit\"\nDESCRIPTION = \"DFFML model scikit\"\nAUTHOR_NAME = \"Yash Lamba\"\nAUTHOR_EMAIL = \"[email protected]\"\nINSTALL_REQUIRES = [\"scikit-learn>=0.21.2\", \"joblib>=0.13.2\", \"pandas>=0.25.0\"]\n\nIMPORT_NAME = (\n NAME\n if \"replace_package_name\".upper() != NAME\n else \"replace_import_package_name\".upper()\n).replace(\"-\", \"_\")\n\nSELF_PATH = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(SELF_PATH, IMPORT_NAME, \"version.py\"), \"r\") as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(SELF_PATH, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scikit\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"[email protected]\",\n maintainer=\"John Andersen\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/intel/dffml/blob/master/model/scikit/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n entry_points={\n \"dffml.model\": [\n f\"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier\",\n f\"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier\",\n f\"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier\",\n f\"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier\",\n f\"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier\",\n f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier\",\n f\"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB\",\n f\"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis\",\n f\"scikitsvc = {IMPORT_NAME}.scikit_models:SVC\",\n f\"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression\",\n ]\n },\n)\n", "path": "model/scikit/setup.py"}]}
| 1,062 | 317 |
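
One detail worth noting about the fix above: setuptools entry points are keyed by name, so the duplicated `scikitmlp` keys in the issue's first attempt would shadow one another, which is why the golden diff gives every model its own name. The snippet below is a hypothetical check, assuming Python 3.10+ `importlib.metadata` and the `dffml.model` group named in the record; it simply prints what actually got registered after installation.

```python
from importlib.metadata import entry_points


def list_dffml_models():
    # Every model plugin registered under the dffml.model group,
    # one line per entry point name and its "module:attr" target.
    for ep in sorted(entry_points(group="dffml.model"), key=lambda e: e.name):
        print(f"{ep.name} -> {ep.value}")


if __name__ == "__main__":
    list_dffml_models()
```
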
gh_patches_debug_18895
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1186
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyhf json2xml requires pyhf[contrib]
# Description
```
$ pip install pyhf[xmlio]
$ pyhf json2xml -h
ERROR:pyhf.contrib.utils:No module named 'requests'
Installation of the contrib extra is required to use pyhf.contrib.utils.download
Please install with: python -m pip install pyhf[contrib]
Usage: pyhf json2xml [OPTIONS] [WORKSPACE]
Convert pyhf JSON back to XML + ROOT files.
Options:
--output-dir PATH
--specroot TEXT
--dataroot TEXT
--resultprefix TEXT
-p, --patch TEXT
-h, --help Show this message and exit.
```
# Expected Behavior
Shown the help without any reference to pyhf.contrib as this does not depend on contrib.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/contrib/cli.py`
Content:
```
1 """CLI for functionality that will get migrated out eventually."""
2 import logging
3 import click
4 from pathlib import Path
5
6 from . import utils
7
8 logging.basicConfig()
9 log = logging.getLogger(__name__)
10
11
12 @click.group(name="contrib")
13 def cli():
14 """
15 Contrib experimental operations.
16
17 .. note::
18
19 Requires installation of the ``contrib`` extra.
20
21 .. code-block:: shell
22
23 $ python -m pip install pyhf[contrib]
24 """
25
26
27 @cli.command()
28 @click.argument("archive-url")
29 @click.argument("output-directory")
30 @click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
31 @click.option(
32 "-f", "--force", is_flag=True, help="Force download from non-approved host"
33 )
34 @click.option(
35 "-c",
36 "--compress",
37 is_flag=True,
38 help="Keep the archive in a compressed tar.gz form",
39 )
40 def download(archive_url, output_directory, verbose, force, compress):
41 """
42 Download the patchset archive from the remote URL and extract it in a
43 directory at the path given.
44
45 Example:
46
47 .. code-block:: shell
48
49 $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods
50
51 \b
52 1Lbb-likelihoods/patchset.json
53 1Lbb-likelihoods/README.md
54 1Lbb-likelihoods/BkgOnly.json
55
56 Raises:
57 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
58 """
59 try:
60 utils.download(archive_url, output_directory, force, compress)
61
62 if verbose:
63 file_list = [str(file) for file in list(Path(output_directory).glob("*"))]
64 print("\n".join(file_list))
65 except AttributeError as excep:
66 exception_info = (
67 str(excep)
68 + "\nInstallation of the contrib extra is required to use the contrib CLI API"
69 + "\nPlease install with: python -m pip install pyhf[contrib]\n"
70 )
71 log.error(exception_info)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py
--- a/src/pyhf/contrib/cli.py
+++ b/src/pyhf/contrib/cli.py
@@ -3,8 +3,6 @@
import click
from pathlib import Path
-from . import utils
-
logging.basicConfig()
log = logging.getLogger(__name__)
@@ -22,6 +20,10 @@
$ python -m pip install pyhf[contrib]
"""
+ from . import utils # Guard CLI from missing extra
+
+ # TODO: https://github.com/scikit-hep/pyhf/issues/863
+ _ = utils # Placate pyflakes
@cli.command()
@@ -57,6 +59,8 @@
:class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
"""
try:
+ from . import utils
+
utils.download(archive_url, output_directory, force, compress)
if verbose:
|
{"golden_diff": "diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py\n--- a/src/pyhf/contrib/cli.py\n+++ b/src/pyhf/contrib/cli.py\n@@ -3,8 +3,6 @@\n import click\n from pathlib import Path\n \n-from . import utils\n-\n logging.basicConfig()\n log = logging.getLogger(__name__)\n \n@@ -22,6 +20,10 @@\n \n $ python -m pip install pyhf[contrib]\n \"\"\"\n+ from . import utils # Guard CLI from missing extra\n+\n+ # TODO: https://github.com/scikit-hep/pyhf/issues/863\n+ _ = utils # Placate pyflakes\n \n \n @cli.command()\n@@ -57,6 +59,8 @@\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n+ from . import utils\n+\n utils.download(archive_url, output_directory, force, compress)\n \n if verbose:\n", "issue": "pyhf json2xml requires pyhf[contrib]\n# Description\r\n\r\n```\r\n$ pip install pyhf[xmlio]\r\n$ pyhf json2xml -h\r\nERROR:pyhf.contrib.utils:No module named 'requests'\r\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\r\nPlease install with: python -m pip install pyhf[contrib]\r\n\r\nUsage: pyhf json2xml [OPTIONS] [WORKSPACE]\r\n\r\n Convert pyhf JSON back to XML + ROOT files.\r\n\r\nOptions:\r\n --output-dir PATH\r\n --specroot TEXT\r\n --dataroot TEXT\r\n --resultprefix TEXT\r\n -p, --patch TEXT\r\n -h, --help Show this message and exit.\r\n```\r\n\r\n# Expected Behavior\r\n\r\nShown the help without any reference to pyhf.contrib as this does not depend on contrib.\n", "before_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom . import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. 
code-block:: shell\n\n $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/contrib/cli.py"}], "after_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n from . import utils # Guard CLI from missing extra\n\n # TODO: https://github.com/scikit-hep/pyhf/issues/863\n _ = utils # Placate pyflakes\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n from . import utils\n\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/contrib/cli.py"}]}
| 1,044 | 226 |
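
The pattern in the golden diff — importing the optional module inside the command body instead of at module import time — is a common way to keep `--help` working when an extra is missing. A generic sketch of that pattern follows; the package, module, and extra names are placeholders, not pyhf APIs.

```python
import importlib


def load_optional(module_name, extra):
    # Import an optional dependency lazily and turn ImportError into an
    # actionable message, instead of failing while the CLI is merely
    # building its help text.
    try:
        return importlib.import_module(module_name)
    except ImportError as err:
        raise SystemExit(
            f"{err}\n"
            f"This command requires the '{extra}' extra.\n"
            f"Install it with: python -m pip install 'mypackage[{extra}]'"
        )


def download_command(archive_url, output_directory):
    utils = load_optional("mypackage.contrib.utils", "contrib")
    utils.download(archive_url, output_directory)
```
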
gh_patches_debug_25711
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-8506
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
added %matplotlib --list so backends are easily visible
linked to #8444; adds a --list argument to show the available backends
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/core/magics/pylab.py`
Content:
```
1 """Implementation of magic functions for matplotlib/pylab support.
2 """
3 from __future__ import print_function
4 #-----------------------------------------------------------------------------
5 # Copyright (c) 2012 The IPython Development Team.
6 #
7 # Distributed under the terms of the Modified BSD License.
8 #
9 # The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 # Our own packages
17 from traitlets.config.application import Application
18 from IPython.core import magic_arguments
19 from IPython.core.magic import Magics, magics_class, line_magic
20 from IPython.testing.skipdoctest import skip_doctest
21 from IPython.utils.warn import warn
22 from IPython.core.pylabtools import backends
23
24 #-----------------------------------------------------------------------------
25 # Magic implementation classes
26 #-----------------------------------------------------------------------------
27
28 magic_gui_arg = magic_arguments.argument(
29 'gui', nargs='?',
30 help="""Name of the matplotlib backend to use %s.
31 If given, the corresponding matplotlib backend is used,
32 otherwise it will be matplotlib's default
33 (which you can set in your matplotlib config file).
34 """ % str(tuple(sorted(backends.keys())))
35 )
36
37
38 @magics_class
39 class PylabMagics(Magics):
40 """Magics related to matplotlib's pylab support"""
41
42 @skip_doctest
43 @line_magic
44 @magic_arguments.magic_arguments()
45 @magic_gui_arg
46 def matplotlib(self, line=''):
47 """Set up matplotlib to work interactively.
48
49 This function lets you activate matplotlib interactive support
50 at any point during an IPython session. It does not import anything
51 into the interactive namespace.
52
53 If you are using the inline matplotlib backend in the IPython Notebook
54 you can set which figure formats are enabled using the following::
55
56 In [1]: from IPython.display import set_matplotlib_formats
57
58 In [2]: set_matplotlib_formats('pdf', 'svg')
59
60 The default for inline figures sets `bbox_inches` to 'tight'. This can
61 cause discrepancies between the displayed image and the identical
62 image created using `savefig`. This behavior can be disabled using the
63 `%config` magic::
64
65 In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
66
67 In addition, see the docstring of
68 `IPython.display.set_matplotlib_formats` and
69 `IPython.display.set_matplotlib_close` for more information on
70 changing additional behaviors of the inline backend.
71
72 Examples
73 --------
74 To enable the inline backend for usage with the IPython Notebook::
75
76 In [1]: %matplotlib inline
77
78 In this case, where the matplotlib default is TkAgg::
79
80 In [2]: %matplotlib
81 Using matplotlib backend: TkAgg
82
83 But you can explicitly request a different GUI backend::
84
85 In [3]: %matplotlib qt
86 """
87 args = magic_arguments.parse_argstring(self.matplotlib, line)
88 gui, backend = self.shell.enable_matplotlib(args.gui)
89 self._show_matplotlib_backend(args.gui, backend)
90
91 @skip_doctest
92 @line_magic
93 @magic_arguments.magic_arguments()
94 @magic_arguments.argument(
95 '--no-import-all', action='store_true', default=None,
96 help="""Prevent IPython from performing ``import *`` into the interactive namespace.
97
98 You can govern the default behavior of this flag with the
99 InteractiveShellApp.pylab_import_all configurable.
100 """
101 )
102 @magic_gui_arg
103 def pylab(self, line=''):
104 """Load numpy and matplotlib to work interactively.
105
106 This function lets you activate pylab (matplotlib, numpy and
107 interactive support) at any point during an IPython session.
108
109 %pylab makes the following imports::
110
111 import numpy
112 import matplotlib
113 from matplotlib import pylab, mlab, pyplot
114 np = numpy
115 plt = pyplot
116
117 from IPython.display import display
118 from IPython.core.pylabtools import figsize, getfigs
119
120 from pylab import *
121 from numpy import *
122
123 If you pass `--no-import-all`, the last two `*` imports will be excluded.
124
125 See the %matplotlib magic for more details about activating matplotlib
126 without affecting the interactive namespace.
127 """
128 args = magic_arguments.parse_argstring(self.pylab, line)
129 if args.no_import_all is None:
130 # get default from Application
131 if Application.initialized():
132 app = Application.instance()
133 try:
134 import_all = app.pylab_import_all
135 except AttributeError:
136 import_all = True
137 else:
138 # nothing specified, no app - default True
139 import_all = True
140 else:
141 # invert no-import flag
142 import_all = not args.no_import_all
143
144 gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
145 self._show_matplotlib_backend(args.gui, backend)
146 print ("Populating the interactive namespace from numpy and matplotlib")
147 if clobbered:
148 warn("pylab import has clobbered these variables: %s" % clobbered +
149 "\n`%matplotlib` prevents importing * from pylab and numpy"
150 )
151
152 def _show_matplotlib_backend(self, gui, backend):
153 """show matplotlib message backend message"""
154 if not gui or gui == 'auto':
155 print("Using matplotlib backend: %s" % backend)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py
--- a/IPython/core/magics/pylab.py
+++ b/IPython/core/magics/pylab.py
@@ -42,6 +42,8 @@
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
@@ -83,10 +85,20 @@
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
- gui, backend = self.shell.enable_matplotlib(args.gui)
- self._show_matplotlib_backend(args.gui, backend)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
|
{"golden_diff": "diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py\n--- a/IPython/core/magics/pylab.py\n+++ b/IPython/core/magics/pylab.py\n@@ -42,6 +42,8 @@\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n+ @magic_arguments.argument('-l', '--list', action='store_true',\n+ help='Show available matplotlib backends')\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n@@ -83,10 +85,20 @@\n But you can explicitly request a different GUI backend::\n \n In [3]: %matplotlib qt\n+\n+ You can list the available backends using the -l/--list option\n+\n+ In [4]: %matplotlib --list\n+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',\n+ 'gtk', 'tk', 'inline']\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n- gui, backend = self.shell.enable_matplotlib(args.gui)\n- self._show_matplotlib_backend(args.gui, backend)\n+ if args.list:\n+ backends_list = list(backends.keys())\n+ print(\"Available matplotlib backends: %s\" % backends_list)\n+ else:\n+ gui, backend = self.shell.enable_matplotlib(args.gui)\n+ self._show_matplotlib_backend(args.gui, backend)\n \n @skip_doctest\n @line_magic\n", "issue": "added %matplotlib --list so backends are easily visible\nlinked to #8444 adding --list argument to show the available backends\n\n", "before_files": [{"content": "\"\"\"Implementation of magic functions for matplotlib/pylab support.\n\"\"\"\nfrom __future__ import print_function\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 The IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Our own packages\nfrom traitlets.config.application import Application\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils.warn import warn\nfrom IPython.core.pylabtools import backends\n\n#-----------------------------------------------------------------------------\n# Magic implementation classes\n#-----------------------------------------------------------------------------\n\nmagic_gui_arg = magic_arguments.argument(\n 'gui', nargs='?',\n help=\"\"\"Name of the matplotlib backend to use %s.\n If given, the corresponding matplotlib backend is used,\n otherwise it will be matplotlib's default\n (which you can set in your matplotlib config file).\n \"\"\" % str(tuple(sorted(backends.keys())))\n)\n\n\n@magics_class\nclass PylabMagics(Magics):\n \"\"\"Magics related to matplotlib's pylab support\"\"\"\n \n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n \n This function lets you activate matplotlib interactive support\n at any point during an IPython session. 
It does not import anything\n into the interactive namespace.\n \n If you are using the inline matplotlib backend in the IPython Notebook\n you can set which figure formats are enabled using the following::\n \n In [1]: from IPython.display import set_matplotlib_formats\n \n In [2]: set_matplotlib_formats('pdf', 'svg')\n\n The default for inline figures sets `bbox_inches` to 'tight'. This can\n cause discrepancies between the displayed image and the identical\n image created using `savefig`. This behavior can be disabled using the\n `%config` magic::\n \n In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}\n\n In addition, see the docstring of\n `IPython.display.set_matplotlib_formats` and\n `IPython.display.set_matplotlib_close` for more information on\n changing additional behaviors of the inline backend.\n\n Examples\n --------\n To enable the inline backend for usage with the IPython Notebook::\n \n In [1]: %matplotlib inline\n\n In this case, where the matplotlib default is TkAgg::\n\n In [2]: %matplotlib\n Using matplotlib backend: TkAgg\n\n But you can explicitly request a different GUI backend::\n\n In [3]: %matplotlib qt\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n gui, backend = self.shell.enable_matplotlib(args.gui)\n self._show_matplotlib_backend(args.gui, backend)\n\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '--no-import-all', action='store_true', default=None,\n help=\"\"\"Prevent IPython from performing ``import *`` into the interactive namespace.\n \n You can govern the default behavior of this flag with the\n InteractiveShellApp.pylab_import_all configurable.\n \"\"\"\n )\n @magic_gui_arg\n def pylab(self, line=''):\n \"\"\"Load numpy and matplotlib to work interactively.\n\n This function lets you activate pylab (matplotlib, numpy and\n interactive support) at any point during an IPython session.\n \n %pylab makes the following imports::\n \n import numpy\n import matplotlib\n from matplotlib import pylab, mlab, pyplot\n np = numpy\n plt = pyplot\n \n from IPython.display import display\n from IPython.core.pylabtools import figsize, getfigs\n \n from pylab import *\n from numpy import *\n\n If you pass `--no-import-all`, the last two `*` imports will be excluded.\n \n See the %matplotlib magic for more details about activating matplotlib\n without affecting the interactive namespace.\n \"\"\"\n args = magic_arguments.parse_argstring(self.pylab, line)\n if args.no_import_all is None:\n # get default from Application\n if Application.initialized():\n app = Application.instance()\n try:\n import_all = app.pylab_import_all\n except AttributeError:\n import_all = True\n else:\n # nothing specified, no app - default True\n import_all = True\n else:\n # invert no-import flag\n import_all = not args.no_import_all\n\n gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)\n self._show_matplotlib_backend(args.gui, backend)\n print (\"Populating the interactive namespace from numpy and matplotlib\")\n if clobbered:\n warn(\"pylab import has clobbered these variables: %s\" % clobbered +\n \"\\n`%matplotlib` prevents importing * from pylab and numpy\"\n )\n \n def _show_matplotlib_backend(self, gui, backend):\n \"\"\"show matplotlib message backend message\"\"\"\n if not gui or gui == 'auto':\n print(\"Using matplotlib backend: %s\" % backend)\n", "path": "IPython/core/magics/pylab.py"}], "after_files": [{"content": "\"\"\"Implementation of magic 
functions for matplotlib/pylab support.\n\"\"\"\nfrom __future__ import print_function\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 The IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Our own packages\nfrom traitlets.config.application import Application\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils.warn import warn\nfrom IPython.core.pylabtools import backends\n\n#-----------------------------------------------------------------------------\n# Magic implementation classes\n#-----------------------------------------------------------------------------\n\nmagic_gui_arg = magic_arguments.argument(\n 'gui', nargs='?',\n help=\"\"\"Name of the matplotlib backend to use %s.\n If given, the corresponding matplotlib backend is used,\n otherwise it will be matplotlib's default\n (which you can set in your matplotlib config file).\n \"\"\" % str(tuple(sorted(backends.keys())))\n)\n\n\n@magics_class\nclass PylabMagics(Magics):\n \"\"\"Magics related to matplotlib's pylab support\"\"\"\n \n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-l', '--list', action='store_true',\n help='Show available matplotlib backends')\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n \n This function lets you activate matplotlib interactive support\n at any point during an IPython session. It does not import anything\n into the interactive namespace.\n \n If you are using the inline matplotlib backend in the IPython Notebook\n you can set which figure formats are enabled using the following::\n \n In [1]: from IPython.display import set_matplotlib_formats\n \n In [2]: set_matplotlib_formats('pdf', 'svg')\n\n The default for inline figures sets `bbox_inches` to 'tight'. This can\n cause discrepancies between the displayed image and the identical\n image created using `savefig`. 
This behavior can be disabled using the\n `%config` magic::\n \n In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}\n\n In addition, see the docstring of\n `IPython.display.set_matplotlib_formats` and\n `IPython.display.set_matplotlib_close` for more information on\n changing additional behaviors of the inline backend.\n\n Examples\n --------\n To enable the inline backend for usage with the IPython Notebook::\n \n In [1]: %matplotlib inline\n\n In this case, where the matplotlib default is TkAgg::\n\n In [2]: %matplotlib\n Using matplotlib backend: TkAgg\n\n But you can explicitly request a different GUI backend::\n\n In [3]: %matplotlib qt\n\n You can list the available backends using the -l/--list option\n\n In [4]: %matplotlib --list\n Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',\n 'gtk', 'tk', 'inline']\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n if args.list:\n backends_list = list(backends.keys())\n print(\"Available matplotlib backends: %s\" % backends_list)\n else:\n gui, backend = self.shell.enable_matplotlib(args.gui)\n self._show_matplotlib_backend(args.gui, backend)\n\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '--no-import-all', action='store_true', default=None,\n help=\"\"\"Prevent IPython from performing ``import *`` into the interactive namespace.\n \n You can govern the default behavior of this flag with the\n InteractiveShellApp.pylab_import_all configurable.\n \"\"\"\n )\n @magic_gui_arg\n def pylab(self, line=''):\n \"\"\"Load numpy and matplotlib to work interactively.\n\n This function lets you activate pylab (matplotlib, numpy and\n interactive support) at any point during an IPython session.\n \n %pylab makes the following imports::\n \n import numpy\n import matplotlib\n from matplotlib import pylab, mlab, pyplot\n np = numpy\n plt = pyplot\n \n from IPython.display import display\n from IPython.core.pylabtools import figsize, getfigs\n \n from pylab import *\n from numpy import *\n\n If you pass `--no-import-all`, the last two `*` imports will be excluded.\n \n See the %matplotlib magic for more details about activating matplotlib\n without affecting the interactive namespace.\n \"\"\"\n args = magic_arguments.parse_argstring(self.pylab, line)\n if args.no_import_all is None:\n # get default from Application\n if Application.initialized():\n app = Application.instance()\n try:\n import_all = app.pylab_import_all\n except AttributeError:\n import_all = True\n else:\n # nothing specified, no app - default True\n import_all = True\n else:\n # invert no-import flag\n import_all = not args.no_import_all\n\n gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)\n self._show_matplotlib_backend(args.gui, backend)\n print (\"Populating the interactive namespace from numpy and matplotlib\")\n if clobbered:\n warn(\"pylab import has clobbered these variables: %s\" % clobbered +\n \"\\n`%matplotlib` prevents importing * from pylab and numpy\"\n )\n \n def _show_matplotlib_backend(self, gui, backend):\n \"\"\"show matplotlib message backend message\"\"\"\n if not gui or gui == 'auto':\n print(\"Using matplotlib backend: %s\" % backend)\n", "path": "IPython/core/magics/pylab.py"}]}
| 1,806 | 361 |
gh_patches_debug_16253
|
rasdani/github-patches
|
git_diff
|
mozilla__pontoon-3030
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken heroku deploy button
When I try to deploy Pontoon on Heroku, I get an infinite loading screen.

I just clicked the button from the README file and logged in to the Heroku platform.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/base/management/commands/heroku_deploy_setup.py`
Content:
```
1 import os
2
3 from urllib.parse import urlparse, urljoin
4
5 from django.core.management.base import BaseCommand
6 from django.contrib.sites.models import Site
7
8 from pontoon.base.models import Project, User
9
10
11 class Command(BaseCommand):
12 help = "Setup an instance of Pontoon deployed via Heroku Deploy."
13
14 def handle(self, *args, **options):
15 site_url = os.environ.get("SITE_URL")
16 app_host = urlparse(site_url).netloc
17 admin_email = os.environ.get("ADMIN_EMAIL")
18 admin_password = os.environ.get("ADMIN_PASSWORD")
19
20 User.objects.create_superuser(admin_email, admin_email, admin_password)
21 Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
22
23 Project.objects.filter(slug="pontoon-intro").update(
24 url=urljoin(site_url, "intro/")
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py
--- a/pontoon/base/management/commands/heroku_deploy_setup.py
+++ b/pontoon/base/management/commands/heroku_deploy_setup.py
@@ -1,11 +1,11 @@
import os
-from urllib.parse import urlparse, urljoin
+from urllib.parse import urlparse
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
-from pontoon.base.models import Project, User
+from pontoon.base.models import User
class Command(BaseCommand):
@@ -19,7 +19,3 @@
User.objects.create_superuser(admin_email, admin_email, admin_password)
Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
-
- Project.objects.filter(slug="pontoon-intro").update(
- url=urljoin(site_url, "intro/")
- )
|
{"golden_diff": "diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py\n--- a/pontoon/base/management/commands/heroku_deploy_setup.py\n+++ b/pontoon/base/management/commands/heroku_deploy_setup.py\n@@ -1,11 +1,11 @@\n import os\n \n-from urllib.parse import urlparse, urljoin\n+from urllib.parse import urlparse\n \n from django.core.management.base import BaseCommand\n from django.contrib.sites.models import Site\n \n-from pontoon.base.models import Project, User\n+from pontoon.base.models import User\n \n \n class Command(BaseCommand):\n@@ -19,7 +19,3 @@\n \n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n-\n- Project.objects.filter(slug=\"pontoon-intro\").update(\n- url=urljoin(site_url, \"intro/\")\n- )\n", "issue": "Broken heroku deploy button\nWhen i try to deploy pontoon on heroku, i got an infinite charge..\r\n\r\n\r\nI just clicked on button from README file and logged in to heroku platform\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse, urljoin\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.sites.models import Site\n\nfrom pontoon.base.models import Project, User\n\n\nclass Command(BaseCommand):\n help = \"Setup an instance of Pontoon deployed via Heroku Deploy.\"\n\n def handle(self, *args, **options):\n site_url = os.environ.get(\"SITE_URL\")\n app_host = urlparse(site_url).netloc\n admin_email = os.environ.get(\"ADMIN_EMAIL\")\n admin_password = os.environ.get(\"ADMIN_PASSWORD\")\n\n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n\n Project.objects.filter(slug=\"pontoon-intro\").update(\n url=urljoin(site_url, \"intro/\")\n )\n", "path": "pontoon/base/management/commands/heroku_deploy_setup.py"}], "after_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.sites.models import Site\n\nfrom pontoon.base.models import User\n\n\nclass Command(BaseCommand):\n help = \"Setup an instance of Pontoon deployed via Heroku Deploy.\"\n\n def handle(self, *args, **options):\n site_url = os.environ.get(\"SITE_URL\")\n app_host = urlparse(site_url).netloc\n admin_email = os.environ.get(\"ADMIN_EMAIL\")\n admin_password = os.environ.get(\"ADMIN_PASSWORD\")\n\n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n", "path": "pontoon/base/management/commands/heroku_deploy_setup.py"}]}
| 600 | 214 |
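A note on the record above: the golden diff simply drops the `pontoon-intro` update from the Heroku setup command (presumably because that project no longer ships with Pontoon), leaving only the superuser and Site configuration. A minimal smoke test of what remains might look like the sketch below; the site URL and credentials are invented placeholders, not values from the record.

```python
# Hedged sketch only: exercises the patched "heroku_deploy_setup" management command.
# SITE_URL / ADMIN_* values are assumptions for illustration.
import os

from django.contrib.sites.models import Site
from django.core.management import call_command


def heroku_setup_smoke_test():
    os.environ["SITE_URL"] = "https://my-pontoon.herokuapp.com"
    os.environ["ADMIN_EMAIL"] = "admin@example.com"
    os.environ["ADMIN_PASSWORD"] = "change-me"

    call_command("heroku_deploy_setup")

    # The command should point the default Site at the Heroku app host.
    site = Site.objects.get(pk=1)
    assert site.name == site.domain == "my-pontoon.herokuapp.com"
```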
gh_patches_debug_6556
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-8679
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Lambda execution doesn't work with Golang binary lambdas
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Running `awslocal lambda invoke --function-name "localStage-mediaStack-backfillGetFunction91FAA-b70d6394" "manual_ test_output.json"` even a single time causes localstack to continuously try and fail to run the lambda; the terminal hangs whilst the docker compose logs output:
```
localstack_main | 2023-06-07T16:47:15.160 INFO --- [ asgi_gw_4] localstack.request.aws : AWS sts.AssumeRole => 200
localstack_main | 2023-06-07T16:47:16.000 INFO --- [ asgi_gw_0] localstack.request.http : POST /_localstack_lambda/f1a13ad3d39e7c5f771f6cf85f78456d/status/f1a13ad3d39e7c5f771f6cf85f78456d/error => 202
localstack_main | 2023-06-07T16:47:16.378 WARN --- [ asgi_gw_2] l.s.a.i.executor_endpoint : Execution environment startup failed: {"errorMessage":"Error: fork/exec /var/task/bootstrap: no such file or directory","errorType":"Runtime.InvalidEntrypoint"}
```
The logs won't stop until I pull the container down and restart it (hence I know it's continuously retrying to no avail)
### Expected Behavior
The Lambda should execute just as it does on AWS, returning a sample string like "Test" (the lambda is very simple).
Even if it does fail (I'm not sure why it would fail here but not on AWS), it should retry a small, finite number of times and then stop.
### How are you starting LocalStack?
With a docker-compose file (shown below)
### Steps To Reproduce
I don't think my code itself is the problem since it does work fine on AWS, but for a bit of context:
I use the cdk to compile the binaries and infrastructure which is deployed to LocalStack via:
cdklocal deploy -a "cdk.out/assembly-localStage/" --all --require-approval "never"
My infra:
https://github.com/KamWithK/exSTATic-backend/tree/master/infrastructure
I can potentially create a small isolated example which illustrates the problem if needed
### Environment
```markdown
version: "3.8"
services:
localstack:
container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}"
image: localstack/localstack:latest
ports:
- "127.0.0.1:4566:4566" # LocalStack Gateway
- "127.0.0.1:4510-4559:4510-4559" # external services port range
environment:
- DEBUG=${DEBUG-}
- DOCKER_HOST=unix:///var/run/docker.sock
- PERSISTENCE=/tmp/localstack/data
- AWS_DEFAULT_REGION=ap-southeast-2
volumes:
- "${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack"
- "/var/run/docker.sock:/var/run/docker.sock"
healthcheck:
test: curl http://localhost:4566/_localstack/health
interval: 1s
timeout: 1s
retries: 10
```
### Anything else?
Here's some function info:
```
{
"FunctionName": "localStage-mediaStack-backfillGetFunction91FAA-b70d6394",
"FunctionArn": "arn:aws:lambda:us-east-1:000000000000:function:localStage-mediaStack-backfillGetFunction91FAA-b70d6394",
"Runtime": "provided.al2",
"Role": "arn:aws:iam::000000000000:role/localStage-mediaStack-backfillGetFunctionServi-f0162982",
"Handler": "bootstrap",
"CodeSize": 14604,
"Description": "",
"Timeout": 3,
"MemorySize": 128,
"LastModified": "2023-06-07T15:58:33.766076+0000",
"CodeSha256": "nqvy9NYt9j59ura5fEUE4QaeXOTuTiEsDNgEvsSQGdk=",
"Version": "$LATEST",
"TracingConfig": {
"Mode": "PassThrough"
},
"RevisionId": "377a6465-e32b-4ed3-a9f1-396af7ee0d86",
"PackageType": "Zip",
"Architectures": [
"x86_64"
],
"EphemeralStorage": {
"Size": 512
},
"SnapStart": {
"ApplyOn": "None",
"OptimizationStatus": "Off"
}
}
```
Test lambda:
```golang
package main
import (
"github.com/aws/aws-lambda-go/lambda"
)
func HandleRequest() (string, error) {
return "Test", nil
}
func main() {
lambda.Start(HandleRequest)
}
```
I did find this issue which sounded similar at first, but in my case the code does run on AWS (so I think it's a different problem, put here for reference though):
https://github.com/localstack/localstack/issues/4216
Any help would be greatly appreciated!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/awslambda/packages.py`
Content:
```
1 import os
2 import platform
3 import stat
4 from typing import List
5
6 from localstack import config
7 from localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller
8 from localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException
9 from localstack.utils.platform import get_arch
10
11 LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
12
13 LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.18-pre"
14 LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION
15
16 # GO Lambda runtime
17 GO_RUNTIME_VERSION = "0.4.0"
18 GO_RUNTIME_DOWNLOAD_URL_TEMPLATE = "https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz"
19
20
21 class AWSLambdaRuntimePackage(Package):
22 def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION):
23 super().__init__(name="AwsLambda", default_version=default_version)
24
25 def get_versions(self) -> List[str]:
26 return [LAMBDA_RUNTIME_VERSION]
27
28 def _get_installer(self, version: str) -> PackageInstaller:
29 return AWSLambdaRuntimePackageInstaller(name="awslambda-runtime", version=version)
30
31
32 class AWSLambdaRuntimePackageInstaller(DownloadInstaller):
33 def _get_arch(self):
34 arch = get_arch()
35 return "x86_64" if arch == "amd64" else arch
36
37 def _get_download_url(self) -> str:
38 arch = self._get_arch()
39 return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)
40
41 def _get_install_dir(self, target: InstallTarget) -> str:
42 install_dir = super()._get_install_dir(target)
43 arch = self._get_arch()
44 return os.path.join(install_dir, arch)
45
46 def _get_install_marker_path(self, install_dir: str) -> str:
47 return os.path.join(install_dir, "var", "rapid", "init")
48
49 def _install(self, target: InstallTarget) -> None:
50 super()._install(target)
51 install_location = self.get_executable_path()
52 st = os.stat(install_location)
53 os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
54
55
56 class AWSLambdaGoRuntimePackage(Package):
57 def __init__(self, default_version: str = GO_RUNTIME_VERSION):
58 super().__init__(name="AwsLambdaGo", default_version=default_version)
59
60 def get_versions(self) -> List[str]:
61 return [GO_RUNTIME_VERSION]
62
63 def _get_installer(self, version: str) -> PackageInstaller:
64 return AWSLambdaGoRuntimePackageInstaller(name="awslamba-go-runtime", version=version)
65
66
67 class AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):
68 def _get_download_url(self) -> str:
69 system = platform.system().lower()
70 arch = get_arch()
71
72 if system not in ["linux"]:
73 raise SystemNotSupportedException(f"Unsupported os {system} for awslambda-go-runtime")
74 if arch not in ["amd64", "arm64"]:
75 raise SystemNotSupportedException(f"Unsupported arch {arch} for awslambda-go-runtime")
76
77 return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(
78 version=GO_RUNTIME_VERSION,
79 os=system,
80 arch=arch,
81 )
82
83 def _get_install_marker_path(self, install_dir: str) -> str:
84 return os.path.join(install_dir, "aws-lambda-mock")
85
86 def _install(self, target: InstallTarget) -> None:
87 super()._install(target)
88
89 install_dir = self._get_install_dir(target)
90 install_location = self._get_install_marker_path(install_dir)
91 st = os.stat(install_location)
92 os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
93
94 go_lambda_mockserver = os.path.join(install_dir, "mockserver")
95 st = os.stat(go_lambda_mockserver)
96 os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
97
98
99 # version of the Maven dependency with Java utility code
100 LOCALSTACK_MAVEN_VERSION = "0.2.21"
101 MAVEN_REPO_URL = "https://repo1.maven.org/maven2"
102 URL_LOCALSTACK_FAT_JAR = (
103 "{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar"
104 )
105
106
107 class AWSLambdaJavaPackage(Package):
108 def __init__(self):
109 super().__init__("LambdaJavaLibs", "0.2.22")
110
111 def get_versions(self) -> List[str]:
112 return ["0.2.22", "0.2.21"]
113
114 def _get_installer(self, version: str) -> PackageInstaller:
115 return AWSLambdaJavaPackageInstaller("lambda-java-libs", version)
116
117
118 class AWSLambdaJavaPackageInstaller(DownloadInstaller):
119 def _get_download_url(self) -> str:
120 return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)
121
122
123 awslambda_runtime_package = AWSLambdaRuntimePackage()
124 awslambda_go_runtime_package = AWSLambdaGoRuntimePackage()
125 lambda_java_libs_package = AWSLambdaJavaPackage()
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py
--- a/localstack/services/awslambda/packages.py
+++ b/localstack/services/awslambda/packages.py
@@ -10,7 +10,7 @@
LAMBDA_RUNTIME_INIT_URL = "https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}"
-LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.18-pre"
+LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.19-pre"
LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION
# GO Lambda runtime
|
{"golden_diff": "diff --git a/localstack/services/awslambda/packages.py b/localstack/services/awslambda/packages.py\n--- a/localstack/services/awslambda/packages.py\n+++ b/localstack/services/awslambda/packages.py\n@@ -10,7 +10,7 @@\n \n LAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n \n-LAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.18-pre\"\n+LAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.19-pre\"\n LAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION\n \n # GO Lambda runtime\n", "issue": "bug: Lambda execution doesn't work with Golang binary lambdas: <title>\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Current Behavior\r\n\r\nRunning `awslocal lambda invoke --function-name \"localStage-mediaStack-backfillGetFunction91FAA-b70d6394\" \"manual_ test_output.json\"` even a single time causes localstack to continuously try and fail to run the lambda, the terminal hands whilst docker compose logs output:\r\n```\r\nlocalstack_main | 2023-06-07T16:47:15.160 INFO --- [ asgi_gw_4] localstack.request.aws : AWS sts.AssumeRole => 200\r\nlocalstack_main | 2023-06-07T16:47:16.000 INFO --- [ asgi_gw_0] localstack.request.http : POST /_localstack_lambda/f1a13ad3d39e7c5f771f6cf85f78456d/status/f1a13ad3d39e7c5f771f6cf85f78456d/error => 202\r\nlocalstack_main | 2023-06-07T16:47:16.378 WARN --- [ asgi_gw_2] l.s.a.i.executor_endpoint : Execution environment startup failed: {\"errorMessage\":\"Error: fork/exec /var/task/bootstrap: no such file or directory\",\"errorType\":\"Runtime.InvalidEntrypoint\"}\r\n```\r\n\r\nThe logs won't stop until I pull the container down and restart it (hence I know it's continuously retrying to no avail)\r\n\r\n### Expected Behavior\r\n\r\nLambda should be executed just like it does on AWS, returning a sample return string like \"Test\" (the lambda is very simple)\r\nEven if it does fail (I'm not sure why it'd fail here but not on AWS) then it should try a small finite number of times and then stop\r\n\r\n### How are you starting LocalStack?\r\n\r\nWith a docker-compose file (shown below)\r\n\r\n### Steps To Reproduce\r\n\r\nI don't think my code itself is the problem since it does work fine on AWS, but for a bit of context:\r\nI use the cdk to compile the binaries and infrastructure which is deployed to LocalStack via:\r\n cdklocal deploy -a \"cdk.out/assembly-localStage/\" --all --require-approval \"never\"\r\n\r\nMy infra:\r\nhttps://github.com/KamWithK/exSTATic-backend/tree/master/infrastructure\r\n\r\nI can potentially create a small isolated example which illustrates the problem if needed\r\n\r\n### Environment\r\n\r\n```markdown\r\nversion: \"3.8\"\r\n\r\nservices:\r\n localstack:\r\n container_name: \"${LOCALSTACK_DOCKER_NAME-localstack_main}\"\r\n image: localstack/localstack:latest\r\n ports:\r\n - \"127.0.0.1:4566:4566\" # LocalStack Gateway\r\n - \"127.0.0.1:4510-4559:4510-4559\" # external services port range\r\n environment:\r\n - DEBUG=${DEBUG-}\r\n - DOCKER_HOST=unix:///var/run/docker.sock\r\n - PERSISTENCE=/tmp/localstack/data\r\n - AWS_DEFAULT_REGION=ap-southeast-2\r\n volumes:\r\n - \"${LOCALSTACK_VOLUME_DIR:-./volume}:/var/lib/localstack\"\r\n - \"/var/run/docker.sock:/var/run/docker.sock\"\r\n healthcheck:\r\n test: curl http://localhost:4566/_localstack/health\r\n interval: 1s\r\n timeout: 1s\r\n retries: 10\r\n```\r\n\r\n\r\n### Anything else?\r\n\r\nHere's some function info:\r\n```\r\n{\r\n 
\"FunctionName\": \"localStage-mediaStack-backfillGetFunction91FAA-b70d6394\",\r\n \"FunctionArn\": \"arn:aws:lambda:us-east-1:000000000000:function:localStage-mediaStack-backfillGetFunction91FAA-b70d6394\",\r\n \"Runtime\": \"provided.al2\",\r\n \"Role\": \"arn:aws:iam::000000000000:role/localStage-mediaStack-backfillGetFunctionServi-f0162982\",\r\n \"Handler\": \"bootstrap\",\r\n \"CodeSize\": 14604,\r\n \"Description\": \"\",\r\n \"Timeout\": 3,\r\n \"MemorySize\": 128,\r\n \"LastModified\": \"2023-06-07T15:58:33.766076+0000\",\r\n \"CodeSha256\": \"nqvy9NYt9j59ura5fEUE4QaeXOTuTiEsDNgEvsSQGdk=\",\r\n \"Version\": \"$LATEST\",\r\n \"TracingConfig\": {\r\n \"Mode\": \"PassThrough\"\r\n },\r\n \"RevisionId\": \"377a6465-e32b-4ed3-a9f1-396af7ee0d86\",\r\n \"PackageType\": \"Zip\",\r\n \"Architectures\": [\r\n \"x86_64\"\r\n ],\r\n \"EphemeralStorage\": {\r\n \"Size\": 512\r\n },\r\n \"SnapStart\": {\r\n \"ApplyOn\": \"None\",\r\n \"OptimizationStatus\": \"Off\"\r\n }\r\n }\r\n```\r\n\r\nTest lambda:\r\n```golang\r\npackage main\r\n\r\nimport (\r\n\t\"github.com/aws/aws-lambda-go/lambda\"\r\n)\r\n\r\nfunc HandleRequest() (string, error) {\r\n\treturn \"Test\", nil\r\n}\r\n\r\nfunc main() {\r\n\tlambda.Start(HandleRequest)\r\n}\r\n```\r\n\r\n\r\nI did find this issue which sounded similar at first, but in my case the code does run on AWS (so I think it's a different problem, put here for reference though):\r\nhttps://github.com/localstack/localstack/issues/4216\r\n\r\nAny help would be greatly appreciated!\n", "before_files": [{"content": "import os\nimport platform\nimport stat\nfrom typing import List\n\nfrom localstack import config\nfrom localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller\nfrom localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException\nfrom localstack.utils.platform import get_arch\n\nLAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n\nLAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.18-pre\"\nLAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION\n\n# GO Lambda runtime\nGO_RUNTIME_VERSION = \"0.4.0\"\nGO_RUNTIME_DOWNLOAD_URL_TEMPLATE = \"https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz\"\n\n\nclass AWSLambdaRuntimePackage(Package):\n def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambda\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [LAMBDA_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaRuntimePackageInstaller(name=\"awslambda-runtime\", version=version)\n\n\nclass AWSLambdaRuntimePackageInstaller(DownloadInstaller):\n def _get_arch(self):\n arch = get_arch()\n return \"x86_64\" if arch == \"amd64\" else arch\n\n def _get_download_url(self) -> str:\n arch = self._get_arch()\n return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)\n\n def _get_install_dir(self, target: InstallTarget) -> str:\n install_dir = super()._get_install_dir(target)\n arch = self._get_arch()\n return os.path.join(install_dir, arch)\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"var\", \"rapid\", \"init\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n install_location = 
self.get_executable_path()\n st = os.stat(install_location)\n os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\nclass AWSLambdaGoRuntimePackage(Package):\n def __init__(self, default_version: str = GO_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambdaGo\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [GO_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaGoRuntimePackageInstaller(name=\"awslamba-go-runtime\", version=version)\n\n\nclass AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):\n def _get_download_url(self) -> str:\n system = platform.system().lower()\n arch = get_arch()\n\n if system not in [\"linux\"]:\n raise SystemNotSupportedException(f\"Unsupported os {system} for awslambda-go-runtime\")\n if arch not in [\"amd64\", \"arm64\"]:\n raise SystemNotSupportedException(f\"Unsupported arch {arch} for awslambda-go-runtime\")\n\n return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(\n version=GO_RUNTIME_VERSION,\n os=system,\n arch=arch,\n )\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"aws-lambda-mock\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n\n install_dir = self._get_install_dir(target)\n install_location = self._get_install_marker_path(install_dir)\n st = os.stat(install_location)\n os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n go_lambda_mockserver = os.path.join(install_dir, \"mockserver\")\n st = os.stat(go_lambda_mockserver)\n os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = \"0.2.21\"\nMAVEN_REPO_URL = \"https://repo1.maven.org/maven2\"\nURL_LOCALSTACK_FAT_JAR = (\n \"{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar\"\n)\n\n\nclass AWSLambdaJavaPackage(Package):\n def __init__(self):\n super().__init__(\"LambdaJavaLibs\", \"0.2.22\")\n\n def get_versions(self) -> List[str]:\n return [\"0.2.22\", \"0.2.21\"]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n\n\nclass AWSLambdaJavaPackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)\n\n\nawslambda_runtime_package = AWSLambdaRuntimePackage()\nawslambda_go_runtime_package = AWSLambdaGoRuntimePackage()\nlambda_java_libs_package = AWSLambdaJavaPackage()\n", "path": "localstack/services/awslambda/packages.py"}], "after_files": [{"content": "import os\nimport platform\nimport stat\nfrom typing import List\n\nfrom localstack import config\nfrom localstack.packages import DownloadInstaller, InstallTarget, Package, PackageInstaller\nfrom localstack.packages.core import ArchiveDownloadAndExtractInstaller, SystemNotSupportedException\nfrom localstack.utils.platform import get_arch\n\nLAMBDA_RUNTIME_INIT_URL = \"https://github.com/localstack/lambda-runtime-init/releases/download/{version}/aws-lambda-rie-{arch}\"\n\nLAMBDA_RUNTIME_DEFAULT_VERSION = \"v0.1.19-pre\"\nLAMBDA_RUNTIME_VERSION = config.LAMBDA_INIT_RELEASE_VERSION or LAMBDA_RUNTIME_DEFAULT_VERSION\n\n# GO Lambda runtime\nGO_RUNTIME_VERSION = \"0.4.0\"\nGO_RUNTIME_DOWNLOAD_URL_TEMPLATE = 
\"https://github.com/localstack/awslamba-go-runtime/releases/download/v{version}/awslamba-go-runtime-{version}-{os}-{arch}.tar.gz\"\n\n\nclass AWSLambdaRuntimePackage(Package):\n def __init__(self, default_version: str = LAMBDA_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambda\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [LAMBDA_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaRuntimePackageInstaller(name=\"awslambda-runtime\", version=version)\n\n\nclass AWSLambdaRuntimePackageInstaller(DownloadInstaller):\n def _get_arch(self):\n arch = get_arch()\n return \"x86_64\" if arch == \"amd64\" else arch\n\n def _get_download_url(self) -> str:\n arch = self._get_arch()\n return LAMBDA_RUNTIME_INIT_URL.format(version=self.version, arch=arch)\n\n def _get_install_dir(self, target: InstallTarget) -> str:\n install_dir = super()._get_install_dir(target)\n arch = self._get_arch()\n return os.path.join(install_dir, arch)\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"var\", \"rapid\", \"init\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n install_location = self.get_executable_path()\n st = os.stat(install_location)\n os.chmod(install_location, mode=st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\nclass AWSLambdaGoRuntimePackage(Package):\n def __init__(self, default_version: str = GO_RUNTIME_VERSION):\n super().__init__(name=\"AwsLambdaGo\", default_version=default_version)\n\n def get_versions(self) -> List[str]:\n return [GO_RUNTIME_VERSION]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return AWSLambdaGoRuntimePackageInstaller(name=\"awslamba-go-runtime\", version=version)\n\n\nclass AWSLambdaGoRuntimePackageInstaller(ArchiveDownloadAndExtractInstaller):\n def _get_download_url(self) -> str:\n system = platform.system().lower()\n arch = get_arch()\n\n if system not in [\"linux\"]:\n raise SystemNotSupportedException(f\"Unsupported os {system} for awslambda-go-runtime\")\n if arch not in [\"amd64\", \"arm64\"]:\n raise SystemNotSupportedException(f\"Unsupported arch {arch} for awslambda-go-runtime\")\n\n return GO_RUNTIME_DOWNLOAD_URL_TEMPLATE.format(\n version=GO_RUNTIME_VERSION,\n os=system,\n arch=arch,\n )\n\n def _get_install_marker_path(self, install_dir: str) -> str:\n return os.path.join(install_dir, \"aws-lambda-mock\")\n\n def _install(self, target: InstallTarget) -> None:\n super()._install(target)\n\n install_dir = self._get_install_dir(target)\n install_location = self._get_install_marker_path(install_dir)\n st = os.stat(install_location)\n os.chmod(install_location, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n go_lambda_mockserver = os.path.join(install_dir, \"mockserver\")\n st = os.stat(go_lambda_mockserver)\n os.chmod(go_lambda_mockserver, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = \"0.2.21\"\nMAVEN_REPO_URL = \"https://repo1.maven.org/maven2\"\nURL_LOCALSTACK_FAT_JAR = (\n \"{mvn_repo}/cloud/localstack/localstack-utils/{ver}/localstack-utils-{ver}-fat.jar\"\n)\n\n\nclass AWSLambdaJavaPackage(Package):\n def __init__(self):\n super().__init__(\"LambdaJavaLibs\", \"0.2.22\")\n\n def get_versions(self) -> List[str]:\n return [\"0.2.22\", \"0.2.21\"]\n\n def _get_installer(self, version: str) -> PackageInstaller:\n return 
AWSLambdaJavaPackageInstaller(\"lambda-java-libs\", version)\n\n\nclass AWSLambdaJavaPackageInstaller(DownloadInstaller):\n def _get_download_url(self) -> str:\n return URL_LOCALSTACK_FAT_JAR.format(ver=self.version, mvn_repo=MAVEN_REPO_URL)\n\n\nawslambda_runtime_package = AWSLambdaRuntimePackage()\nawslambda_go_runtime_package = AWSLambdaGoRuntimePackage()\nlambda_java_libs_package = AWSLambdaJavaPackage()\n", "path": "localstack/services/awslambda/packages.py"}]}
| 3,072 | 154 |
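A note on the record above: the one-line fix bumps the pinned `lambda-runtime-init` release that LocalStack downloads for its Lambda execution environments, which is where the `Runtime.InvalidEntrypoint` failure for the `provided.al2` Go function was surfacing. For illustration, the snippet below shows how that constant feeds the download URL, using the values from the patched `packages.py`; the architecture is hard-coded as an assumption.

```python
# Minimal sketch using the constants from the patched file; "x86_64" is an
# assumed architecture for illustration.
LAMBDA_RUNTIME_INIT_URL = (
    "https://github.com/localstack/lambda-runtime-init/releases/download/"
    "{version}/aws-lambda-rie-{arch}"
)
LAMBDA_RUNTIME_DEFAULT_VERSION = "v0.1.19-pre"

url = LAMBDA_RUNTIME_INIT_URL.format(
    version=LAMBDA_RUNTIME_DEFAULT_VERSION, arch="x86_64"
)
print(url)
# https://github.com/localstack/lambda-runtime-init/releases/download/v0.1.19-pre/aws-lambda-rie-x86_64
```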
gh_patches_debug_13676
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-2408
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migration 0040_set_admin_is_staff.py is missing a dependency on the user model
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/migrations/0040_set_admin_is_staff.py`
Content:
```
1 # Generated by Django 2.2.13 on 2020-07-01 21:29
2
3 from django.contrib.auth import get_user_model
4 from django.db import migrations
5
6
7 def allow_admin_as_staff(apps, schema_editor):
8 user_model = get_user_model()
9 try:
10 admin_user = user_model.objects.get(username='admin')
11 except user_model.DoesNotExist:
12 pass
13 else:
14 admin_user.is_staff = True
15 admin_user.save()
16
17
18 class Migration(migrations.Migration):
19
20 dependencies = [
21 ('core', '0039_change_download_concurrency'),
22 ]
23
24 operations = [
25 migrations.RunPython(allow_admin_as_staff),
26 ]
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
|
{"golden_diff": "diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py\n+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n@@ -1,5 +1,6 @@\n # Generated by Django 2.2.13 on 2020-07-01 21:29\n \n+from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db import migrations\n \n@@ -19,8 +20,9 @@\n \n dependencies = [\n ('core', '0039_change_download_concurrency'),\n+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n \n operations = [\n- migrations.RunPython(allow_admin_as_staff),\n+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),\n ]\n", "issue": "Migration 0040_set_admin_is_staff.py is missing dependency on user model\n\n", "before_files": [{"content": "# Generated by Django 2.2.13 on 2020-07-01 21:29\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef allow_admin_as_staff(apps, schema_editor):\n user_model = get_user_model()\n try:\n admin_user = user_model.objects.get(username='admin')\n except user_model.DoesNotExist:\n pass\n else:\n admin_user.is_staff = True\n admin_user.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0039_change_download_concurrency'),\n ]\n\n operations = [\n migrations.RunPython(allow_admin_as_staff),\n ]\n", "path": "pulpcore/app/migrations/0040_set_admin_is_staff.py"}], "after_files": [{"content": "# Generated by Django 2.2.13 on 2020-07-01 21:29\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef allow_admin_as_staff(apps, schema_editor):\n user_model = get_user_model()\n try:\n admin_user = user_model.objects.get(username='admin')\n except user_model.DoesNotExist:\n pass\n else:\n admin_user.is_staff = True\n admin_user.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0039_change_download_concurrency'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),\n ]\n", "path": "pulpcore/app/migrations/0040_set_admin_is_staff.py"}]}
| 487 | 224 |
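A note on the record above: the fix applies the standard pattern for data migrations that touch the user model, declaring a swappable dependency so Django orders the migration after the configured user table exists. In isolation the pattern looks like the sketch below; the app label and previous migration name are illustrative, not taken from the record.

```python
# Hedged sketch of the swappable-dependency pattern used in the golden diff.
from django.conf import settings
from django.db import migrations


def forwards(apps, schema_editor):
    # Data manipulation that needs the user table would go here.
    pass


class Migration(migrations.Migration):

    dependencies = [
        ("myapp", "0001_initial"),  # illustrative previous migration
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.RunPython(forwards, reverse_code=migrations.RunPython.noop),
    ]
```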
gh_patches_debug_3692
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-566
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing
<!--
NOTE: This template is for use by maintainers only. Please do not submit
an issue using this template unless you have been specifically asked to
do so.
-->
### Proposed Changes
The docstring should read something like this:
> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.
<!-- Provide justification for the proposed change(s). -->
### Justification
The current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/api/serializers.py`
Content:
```
1 import uuid
2
3 from django.core.exceptions import (
4 FieldError,
5 MultipleObjectsReturned,
6 ObjectDoesNotExist,
7 )
8 from django.db.models import AutoField, ManyToManyField
9 from drf_yasg.utils import swagger_serializer_method
10 from rest_framework import serializers
11 from rest_framework.exceptions import ValidationError
12
13 from nautobot.utilities.utils import dict_to_filter_params
14
15
16 class BaseModelSerializer(serializers.ModelSerializer):
17 """
18 This base serializer implements common fields and logic for all ModelSerializers.
19 Namely it defines the `display` field which exposes a human friendly value for the given object.
20 """
21
22 display = serializers.SerializerMethodField(read_only=True, help_text="Human friendly display value")
23
24 @swagger_serializer_method(serializer_or_field=serializers.CharField)
25 def get_display(self, instance):
26 """
27 Return either the `display` property of the instance or `str(instance)`
28 """
29 return getattr(instance, "display", str(instance))
30
31 def get_field_names(self, declared_fields, info):
32 """
33 Override get_field_names() to append the `display` field so it is always included in the
34 serializer's `Meta.fields`.
35
36 DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields
37 on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared
38 fields from the super. This means either we intercept and append the display field at this level, or
39 enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`
40 which would surely lead to errors of omission; therefore we have chosen the former approach.
41 """
42 fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple
43 fields.append("display")
44
45 return fields
46
47
48 class ValidatedModelSerializer(BaseModelSerializer):
49 """
50 Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during
51 validation. (DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)
52 """
53
54 def validate(self, data):
55
56 # Remove custom fields data and tags (if any) prior to model validation
57 attrs = data.copy()
58 attrs.pop("custom_fields", None)
59 attrs.pop("tags", None)
60
61 # Skip ManyToManyFields
62 for field in self.Meta.model._meta.get_fields():
63 if isinstance(field, ManyToManyField):
64 attrs.pop(field.name, None)
65
66 # Run clean() on an instance of the model
67 if self.instance is None:
68 instance = self.Meta.model(**attrs)
69 else:
70 instance = self.instance
71 for k, v in attrs.items():
72 setattr(instance, k, v)
73 instance.full_clean()
74
75 return data
76
77
78 class WritableNestedSerializer(BaseModelSerializer):
79 """
80 Returns a nested representation of an object on read, but accepts only a primary key on write.
81 """
82
83 def to_internal_value(self, data):
84
85 if data is None:
86 return None
87
88 # Dictionary of related object attributes
89 if isinstance(data, dict):
90 params = dict_to_filter_params(data)
91 queryset = self.Meta.model.objects
92 try:
93 return queryset.get(**params)
94 except ObjectDoesNotExist:
95 raise ValidationError("Related object not found using the provided attributes: {}".format(params))
96 except MultipleObjectsReturned:
97 raise ValidationError("Multiple objects match the provided attributes: {}".format(params))
98 except FieldError as e:
99 raise ValidationError(e)
100
101 queryset = self.Meta.model.objects
102 pk = None
103
104 if isinstance(self.Meta.model._meta.pk, AutoField):
105 # PK is an int for this model. This is usually the User model
106 try:
107 pk = int(data)
108 except (TypeError, ValueError):
109 raise ValidationError(
110 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
111 "unrecognized value: {}".format(data)
112 )
113
114 else:
115 # We assume a type of UUIDField for all other models
116
117 # PK of related object
118 try:
119 # Ensure the pk is a valid UUID
120 pk = uuid.UUID(str(data))
121 except (TypeError, ValueError):
122 raise ValidationError(
123 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
124 "unrecognized value: {}".format(data)
125 )
126
127 try:
128 return queryset.get(pk=pk)
129 except ObjectDoesNotExist:
130 raise ValidationError("Related object not found using the provided ID: {}".format(pk))
131
132
133 class BulkOperationSerializer(serializers.Serializer):
134 id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model
135
136
137 #
138 # GraphQL, used by the openapi doc, not by the view
139 #
140
141
142 class GraphQLAPISerializer(serializers.Serializer):
143 query = serializers.CharField(required=True, help_text="GraphQL query")
144 variables = serializers.JSONField(required=False, help_text="Variables in JSON Format")
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py
--- a/nautobot/core/api/serializers.py
+++ b/nautobot/core/api/serializers.py
@@ -77,7 +77,8 @@
class WritableNestedSerializer(BaseModelSerializer):
"""
- Returns a nested representation of an object on read, but accepts only a primary key on write.
+ Returns a nested representation of an object on read, but accepts either the nested representation or the
+ primary key value on write operations.
"""
def to_internal_value(self, data):
|
{"golden_diff": "diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py\n--- a/nautobot/core/api/serializers.py\n+++ b/nautobot/core/api/serializers.py\n@@ -77,7 +77,8 @@\n \n class WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n- Returns a nested representation of an object on read, but accepts only a primary key on write.\n+ Returns a nested representation of an object on read, but accepts either the nested representation or the\n+ primary key value on write operations.\n \"\"\"\n \n def to_internal_value(self, data):\n", "issue": "`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing\n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\nThe docstring should read something like this:\r\n> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\nThe current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.\n", "before_files": [{"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n \"\"\"\n This base serializer implements common fields and logic for all ModelSerializers.\n Namely it defines the `display` field which exposes a human friendly value for the given object.\n \"\"\"\n\n display = serializers.SerializerMethodField(read_only=True, help_text=\"Human friendly display value\")\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_display(self, instance):\n \"\"\"\n Return either the `display` property of the instance or `str(instance)`\n \"\"\"\n return getattr(instance, \"display\", str(instance))\n\n def get_field_names(self, declared_fields, info):\n \"\"\"\n Override get_field_names() to append the `display` field so it is always included in the\n serializer's `Meta.fields`.\n\n DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields\n on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared\n fields from the super. This means either we intercept and append the display field at this level, or\n enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`\n which would surely lead to errors of omission; therefore we have chosen the former approach.\n \"\"\"\n fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple\n fields.append(\"display\")\n\n return fields\n\n\nclass ValidatedModelSerializer(BaseModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts only a primary key on write.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. 
Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}], "after_files": [{"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n \"\"\"\n This base serializer implements common fields and logic for all ModelSerializers.\n Namely it defines the `display` field which exposes a human friendly value for the given object.\n \"\"\"\n\n display = serializers.SerializerMethodField(read_only=True, help_text=\"Human friendly display value\")\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_display(self, instance):\n \"\"\"\n Return either the `display` property of the instance or `str(instance)`\n \"\"\"\n return getattr(instance, \"display\", str(instance))\n\n def get_field_names(self, declared_fields, info):\n \"\"\"\n Override get_field_names() to append the `display` field so it is always included in the\n serializer's `Meta.fields`.\n\n DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields\n on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared\n fields from the super. This means either we intercept and append the display field at this level, or\n enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`\n which would surely lead to errors of omission; therefore we have chosen the former approach.\n \"\"\"\n fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple\n fields.append(\"display\")\n\n return fields\n\n\nclass ValidatedModelSerializer(BaseModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts either the nested representation or the\n primary key value on write operations.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}]}
| 1,798 | 135 |
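A note on the record above: the change is documentation-only, bringing the docstring in line with what `to_internal_value` already does, namely resolving a related object either from a dictionary of attributes or from a primary-key value. Both write payloads sketched below would therefore be accepted; the field name and values are made up for illustration.

```python
# Illustrative request bodies only; "site" and the values are assumptions.
payload_by_pk = {"site": "6f7cbc4e-6f8e-4d54-9f43-8a9a6a8f1c11"}
payload_by_attributes = {"site": {"name": "ams01"}}
```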
gh_patches_debug_4
|
rasdani/github-patches
|
git_diff
|
horovod__horovod-1139
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace .step(synchronize=False) with optimizer.skip_synchronize()
NVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.
This PR switches API to use context manager:
```python
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/__init__.py`
Content:
```
1 __version__ = '0.16.3'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/horovod/__init__.py b/horovod/__init__.py
--- a/horovod/__init__.py
+++ b/horovod/__init__.py
@@ -1 +1 @@
-__version__ = '0.16.3'
+__version__ = '0.16.4'
|
{"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1 +1 @@\n-__version__ = '0.16.3'\n+__version__ = '0.16.4'\n", "issue": "Replace .step(synchronize=False) with optimizer.skip_synchronize()\nNVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.\r\n\r\nThis PR switches API to use context manager:\r\n```python\r\noptimizer.synchronize()\r\nwith optimizer.skip_synchronize():\r\n optimizer.step()\r\n```\n", "before_files": [{"content": "__version__ = '0.16.3'\n", "path": "horovod/__init__.py"}], "after_files": [{"content": "__version__ = '0.16.4'\n", "path": "horovod/__init__.py"}]}
| 341 | 76 |
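A note on the record above: the golden diff is only the release-version bump that shipped the change, but the issue text describes the API that replaces `step(synchronize=False)`. With NVIDIA Apex AMP the intended usage reads roughly as in the sketch below; the Apex import and the surrounding Horovod setup are assumptions based on the issue's motivation, not part of the record.

```python
# Hedged sketch of the skip_synchronize() pattern described in the issue.
# Assumes `optimizer` is already wrapped by hvd.DistributedOptimizer and Apex
# AMP has been initialized elsewhere.
from apex import amp


def amp_safe_step(loss, optimizer):
    """Backward + step without passing extra kwargs through optimizer.step()."""
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.synchronize()
    with optimizer.skip_synchronize():
        optimizer.step()
```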
gh_patches_debug_30797
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-1567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update our SOCKS documentation
- `socks4a://` is useless; all DNS queries are resolved client-side for SOCKS4.
- `socks5://` should have a warning about DNS queries being resolved client-side and that `socks5h://` is recommended for secure use (this should have been the default on our end).
- PySocks supports IPv6 connections to the SOCKS proxy.
- Need to check all our other assumptions in those docs.
--- END ISSUE ---
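As a usage sketch of the distinction the issue draws (the proxy address below is a placeholder), the URL scheme passed to `SOCKSProxyManager` decides where DNS resolution happens: `socks5h://` maps to remote resolution (`rdns=True`), while `socks5://` resolves hostnames on the client:
```python
from urllib3.contrib.socks import SOCKSProxyManager

# Remote DNS: the hostname is sent to the proxy and resolved there.
remote_dns = SOCKSProxyManager("socks5h://localhost:1080/")

# Local DNS: the client resolves the hostname before contacting the proxy.
local_dns = SOCKSProxyManager("socks5://localhost:1080/")

resp = remote_dns.request("GET", "http://example.com/")
print(resp.status)
```
The `rdns` mapping itself is visible in the scheme handling of `SOCKSProxyManager.__init__` in the file below.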
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/contrib/socks.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 This module contains provisional support for SOCKS proxies from within
4 urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
5 SOCKS5. To enable its functionality, either install PySocks or install this
6 module with the ``socks`` extra.
7
8 The SOCKS implementation supports the full range of urllib3 features. It also
9 supports the following SOCKS features:
10
11 - SOCKS4
12 - SOCKS4a
13 - SOCKS5
14 - Usernames and passwords for the SOCKS proxy
15
16 Known Limitations:
17
18 - Currently PySocks does not support contacting remote websites via literal
19 IPv6 addresses. Any such connection attempt will fail. You must use a domain
20 name.
21 - Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
22 such connection attempt will fail.
23 """
24 from __future__ import absolute_import
25
26 try:
27 import socks
28 except ImportError:
29 import warnings
30 from ..exceptions import DependencyWarning
31
32 warnings.warn((
33 'SOCKS support in urllib3 requires the installation of optional '
34 'dependencies: specifically, PySocks. For more information, see '
35 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
36 ),
37 DependencyWarning
38 )
39 raise
40
41 from socket import error as SocketError, timeout as SocketTimeout
42
43 from ..connection import (
44 HTTPConnection, HTTPSConnection
45 )
46 from ..connectionpool import (
47 HTTPConnectionPool, HTTPSConnectionPool
48 )
49 from ..exceptions import ConnectTimeoutError, NewConnectionError
50 from ..poolmanager import PoolManager
51 from ..util.url import parse_url
52
53 try:
54 import ssl
55 except ImportError:
56 ssl = None
57
58
59 class SOCKSConnection(HTTPConnection):
60 """
61 A plain-text HTTP connection that connects via a SOCKS proxy.
62 """
63 def __init__(self, *args, **kwargs):
64 self._socks_options = kwargs.pop('_socks_options')
65 super(SOCKSConnection, self).__init__(*args, **kwargs)
66
67 def _new_conn(self):
68 """
69 Establish a new connection via the SOCKS proxy.
70 """
71 extra_kw = {}
72 if self.source_address:
73 extra_kw['source_address'] = self.source_address
74
75 if self.socket_options:
76 extra_kw['socket_options'] = self.socket_options
77
78 try:
79 conn = socks.create_connection(
80 (self.host, self.port),
81 proxy_type=self._socks_options['socks_version'],
82 proxy_addr=self._socks_options['proxy_host'],
83 proxy_port=self._socks_options['proxy_port'],
84 proxy_username=self._socks_options['username'],
85 proxy_password=self._socks_options['password'],
86 proxy_rdns=self._socks_options['rdns'],
87 timeout=self.timeout,
88 **extra_kw
89 )
90
91 except SocketTimeout:
92 raise ConnectTimeoutError(
93 self, "Connection to %s timed out. (connect timeout=%s)" %
94 (self.host, self.timeout))
95
96 except socks.ProxyError as e:
97 # This is fragile as hell, but it seems to be the only way to raise
98 # useful errors here.
99 if e.socket_err:
100 error = e.socket_err
101 if isinstance(error, SocketTimeout):
102 raise ConnectTimeoutError(
103 self,
104 "Connection to %s timed out. (connect timeout=%s)" %
105 (self.host, self.timeout)
106 )
107 else:
108 raise NewConnectionError(
109 self,
110 "Failed to establish a new connection: %s" % error
111 )
112 else:
113 raise NewConnectionError(
114 self,
115 "Failed to establish a new connection: %s" % e
116 )
117
118 except SocketError as e: # Defensive: PySocks should catch all these.
119 raise NewConnectionError(
120 self, "Failed to establish a new connection: %s" % e)
121
122 return conn
123
124
125 # We don't need to duplicate the Verified/Unverified distinction from
126 # urllib3/connection.py here because the HTTPSConnection will already have been
127 # correctly set to either the Verified or Unverified form by that module. This
128 # means the SOCKSHTTPSConnection will automatically be the correct type.
129 class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
130 pass
131
132
133 class SOCKSHTTPConnectionPool(HTTPConnectionPool):
134 ConnectionCls = SOCKSConnection
135
136
137 class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
138 ConnectionCls = SOCKSHTTPSConnection
139
140
141 class SOCKSProxyManager(PoolManager):
142 """
143 A version of the urllib3 ProxyManager that routes connections via the
144 defined SOCKS proxy.
145 """
146 pool_classes_by_scheme = {
147 'http': SOCKSHTTPConnectionPool,
148 'https': SOCKSHTTPSConnectionPool,
149 }
150
151 def __init__(self, proxy_url, username=None, password=None,
152 num_pools=10, headers=None, **connection_pool_kw):
153 parsed = parse_url(proxy_url)
154
155 if username is None and password is None and parsed.auth is not None:
156 split = parsed.auth.split(':')
157 if len(split) == 2:
158 username, password = split
159 if parsed.scheme == 'socks5':
160 socks_version = socks.PROXY_TYPE_SOCKS5
161 rdns = False
162 elif parsed.scheme == 'socks5h':
163 socks_version = socks.PROXY_TYPE_SOCKS5
164 rdns = True
165 elif parsed.scheme == 'socks4':
166 socks_version = socks.PROXY_TYPE_SOCKS4
167 rdns = False
168 elif parsed.scheme == 'socks4a':
169 socks_version = socks.PROXY_TYPE_SOCKS4
170 rdns = True
171 else:
172 raise ValueError(
173 "Unable to determine SOCKS version from %s" % proxy_url
174 )
175
176 self.proxy_url = proxy_url
177
178 socks_options = {
179 'socks_version': socks_version,
180 'proxy_host': parsed.host,
181 'proxy_port': parsed.port,
182 'username': username,
183 'password': password,
184 'rdns': rdns
185 }
186 connection_pool_kw['_socks_options'] = socks_options
187
188 super(SOCKSProxyManager, self).__init__(
189 num_pools, headers, **connection_pool_kw
190 )
191
192 self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/urllib3/contrib/socks.py b/src/urllib3/contrib/socks.py
--- a/src/urllib3/contrib/socks.py
+++ b/src/urllib3/contrib/socks.py
@@ -1,25 +1,38 @@
# -*- coding: utf-8 -*-
"""
This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
SOCKS5. To enable its functionality, either install PySocks or install this
module with the ``socks`` extra.
The SOCKS implementation supports the full range of urllib3 features. It also
supports the following SOCKS features:
-- SOCKS4
-- SOCKS4a
-- SOCKS5
+- SOCKS4A (``proxy_url='socks4a://...``)
+- SOCKS4 (``proxy_url='socks4://...``)
+- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)
+- SOCKS5 with local DNS (``proxy_url='socks5://...``)
- Usernames and passwords for the SOCKS proxy
-Known Limitations:
+ .. note::
+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
+ your ``proxy_url`` to ensure that DNS resolution is done from the remote
+ server instead of client-side when connecting to a domain name.
+
+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
+supports IPv4, IPv6, and domain names.
+
+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
+will be sent as the ``userid`` section of the SOCKS request::
+
+ proxy_url="socks4a://<userid>@proxy-host"
+
+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
+of the ``proxy_url`` will be sent as the username/password to authenticate
+with the proxy::
+
+ proxy_url="socks5h://<username>:<password>@proxy-host"
-- Currently PySocks does not support contacting remote websites via literal
- IPv6 addresses. Any such connection attempt will fail. You must use a domain
- name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
- such connection attempt will fail.
"""
from __future__ import absolute_import
|
{"golden_diff": "diff --git a/src/urllib3/contrib/socks.py b/src/urllib3/contrib/socks.py\n--- a/src/urllib3/contrib/socks.py\n+++ b/src/urllib3/contrib/socks.py\n@@ -1,25 +1,38 @@\n # -*- coding: utf-8 -*-\n \"\"\"\n This module contains provisional support for SOCKS proxies from within\n-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and\n+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and\n SOCKS5. To enable its functionality, either install PySocks or install this\n module with the ``socks`` extra.\n \n The SOCKS implementation supports the full range of urllib3 features. It also\n supports the following SOCKS features:\n \n-- SOCKS4\n-- SOCKS4a\n-- SOCKS5\n+- SOCKS4A (``proxy_url='socks4a://...``)\n+- SOCKS4 (``proxy_url='socks4://...``)\n+- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)\n+- SOCKS5 with local DNS (``proxy_url='socks5://...``)\n - Usernames and passwords for the SOCKS proxy\n \n-Known Limitations:\n+ .. note::\n+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in\n+ your ``proxy_url`` to ensure that DNS resolution is done from the remote\n+ server instead of client-side when connecting to a domain name.\n+\n+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5\n+supports IPv4, IPv6, and domain names.\n+\n+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``\n+will be sent as the ``userid`` section of the SOCKS request::\n+\n+ proxy_url=\"socks4a://<userid>@proxy-host\"\n+\n+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion\n+of the ``proxy_url`` will be sent as the username/password to authenticate\n+with the proxy::\n+\n+ proxy_url=\"socks5h://<username>:<password>@proxy-host\"\n \n-- Currently PySocks does not support contacting remote websites via literal\n- IPv6 addresses. Any such connection attempt will fail. You must use a domain\n- name.\n-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any\n- such connection attempt will fail.\n \"\"\"\n from __future__ import absolute_import\n", "issue": "Update our SOCKS documentation\n- `socks4a://` is useless, all DNS queries are resolved client-side for SOCKS4.\r\n- `socks5://` should have a warning about DNS queries being resolved client-side and that `socks5h://` is recommended for secure use (Should have been the default on our end).\r\n- PySocks supports IPv6 connections to the SOCKS proxy.\r\n- Need to check all our other assumptions in those docs.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains provisional support for SOCKS proxies from within\nurllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and\nSOCKS5. To enable its functionality, either install PySocks or install this\nmodule with the ``socks`` extra.\n\nThe SOCKS implementation supports the full range of urllib3 features. It also\nsupports the following SOCKS features:\n\n- SOCKS4\n- SOCKS4a\n- SOCKS5\n- Usernames and passwords for the SOCKS proxy\n\nKnown Limitations:\n\n- Currently PySocks does not support contacting remote websites via literal\n IPv6 addresses. Any such connection attempt will fail. You must use a domain\n name.\n- Currently PySocks does not support IPv6 connections to the SOCKS proxy. 
Any\n such connection attempt will fail.\n\"\"\"\nfrom __future__ import absolute_import\n\ntry:\n import socks\nexcept ImportError:\n import warnings\n from ..exceptions import DependencyWarning\n\n warnings.warn((\n 'SOCKS support in urllib3 requires the installation of optional '\n 'dependencies: specifically, PySocks. For more information, see '\n 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'\n ),\n DependencyWarning\n )\n raise\n\nfrom socket import error as SocketError, timeout as SocketTimeout\n\nfrom ..connection import (\n HTTPConnection, HTTPSConnection\n)\nfrom ..connectionpool import (\n HTTPConnectionPool, HTTPSConnectionPool\n)\nfrom ..exceptions import ConnectTimeoutError, NewConnectionError\nfrom ..poolmanager import PoolManager\nfrom ..util.url import parse_url\n\ntry:\n import ssl\nexcept ImportError:\n ssl = None\n\n\nclass SOCKSConnection(HTTPConnection):\n \"\"\"\n A plain-text HTTP connection that connects via a SOCKS proxy.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._socks_options = kwargs.pop('_socks_options')\n super(SOCKSConnection, self).__init__(*args, **kwargs)\n\n def _new_conn(self):\n \"\"\"\n Establish a new connection via the SOCKS proxy.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw['source_address'] = self.source_address\n\n if self.socket_options:\n extra_kw['socket_options'] = self.socket_options\n\n try:\n conn = socks.create_connection(\n (self.host, self.port),\n proxy_type=self._socks_options['socks_version'],\n proxy_addr=self._socks_options['proxy_host'],\n proxy_port=self._socks_options['proxy_port'],\n proxy_username=self._socks_options['username'],\n proxy_password=self._socks_options['password'],\n proxy_rdns=self._socks_options['rdns'],\n timeout=self.timeout,\n **extra_kw\n )\n\n except SocketTimeout:\n raise ConnectTimeoutError(\n self, \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout))\n\n except socks.ProxyError as e:\n # This is fragile as hell, but it seems to be the only way to raise\n # useful errors here.\n if e.socket_err:\n error = e.socket_err\n if isinstance(error, SocketTimeout):\n raise ConnectTimeoutError(\n self,\n \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout)\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % error\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % e\n )\n\n except SocketError as e: # Defensive: PySocks should catch all these.\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e)\n\n return conn\n\n\n# We don't need to duplicate the Verified/Unverified distinction from\n# urllib3/connection.py here because the HTTPSConnection will already have been\n# correctly set to either the Verified or Unverified form by that module. 
This\n# means the SOCKSHTTPSConnection will automatically be the correct type.\nclass SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):\n pass\n\n\nclass SOCKSHTTPConnectionPool(HTTPConnectionPool):\n ConnectionCls = SOCKSConnection\n\n\nclass SOCKSHTTPSConnectionPool(HTTPSConnectionPool):\n ConnectionCls = SOCKSHTTPSConnection\n\n\nclass SOCKSProxyManager(PoolManager):\n \"\"\"\n A version of the urllib3 ProxyManager that routes connections via the\n defined SOCKS proxy.\n \"\"\"\n pool_classes_by_scheme = {\n 'http': SOCKSHTTPConnectionPool,\n 'https': SOCKSHTTPSConnectionPool,\n }\n\n def __init__(self, proxy_url, username=None, password=None,\n num_pools=10, headers=None, **connection_pool_kw):\n parsed = parse_url(proxy_url)\n\n if username is None and password is None and parsed.auth is not None:\n split = parsed.auth.split(':')\n if len(split) == 2:\n username, password = split\n if parsed.scheme == 'socks5':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = False\n elif parsed.scheme == 'socks5h':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = True\n elif parsed.scheme == 'socks4':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = False\n elif parsed.scheme == 'socks4a':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = True\n else:\n raise ValueError(\n \"Unable to determine SOCKS version from %s\" % proxy_url\n )\n\n self.proxy_url = proxy_url\n\n socks_options = {\n 'socks_version': socks_version,\n 'proxy_host': parsed.host,\n 'proxy_port': parsed.port,\n 'username': username,\n 'password': password,\n 'rdns': rdns\n }\n connection_pool_kw['_socks_options'] = socks_options\n\n super(SOCKSProxyManager, self).__init__(\n num_pools, headers, **connection_pool_kw\n )\n\n self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme\n", "path": "src/urllib3/contrib/socks.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains provisional support for SOCKS proxies from within\nurllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and\nSOCKS5. To enable its functionality, either install PySocks or install this\nmodule with the ``socks`` extra.\n\nThe SOCKS implementation supports the full range of urllib3 features. It also\nsupports the following SOCKS features:\n\n- SOCKS4A (``proxy_url='socks4a://...``)\n- SOCKS4 (``proxy_url='socks4://...``)\n- SOCKS5 with remote DNS (``proxy_url='socks5h://...``)\n- SOCKS5 with local DNS (``proxy_url='socks5://...``)\n- Usernames and passwords for the SOCKS proxy\n\n .. note::\n It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in\n your ``proxy_url`` to ensure that DNS resolution is done from the remote\n server instead of client-side when connecting to a domain name.\n\nSOCKS4 supports IPv4 and domain names with the SOCKS4A extension. 
SOCKS5\nsupports IPv4, IPv6, and domain names.\n\nWhen connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``\nwill be sent as the ``userid`` section of the SOCKS request::\n\n proxy_url=\"socks4a://<userid>@proxy-host\"\n\nWhen connecting to a SOCKS5 proxy the ``username`` and ``password`` portion\nof the ``proxy_url`` will be sent as the username/password to authenticate\nwith the proxy::\n\n proxy_url=\"socks5h://<username>:<password>@proxy-host\"\n\n\"\"\"\nfrom __future__ import absolute_import\n\ntry:\n import socks\nexcept ImportError:\n import warnings\n from ..exceptions import DependencyWarning\n\n warnings.warn((\n 'SOCKS support in urllib3 requires the installation of optional '\n 'dependencies: specifically, PySocks. For more information, see '\n 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'\n ),\n DependencyWarning\n )\n raise\n\nfrom socket import error as SocketError, timeout as SocketTimeout\n\nfrom ..connection import (\n HTTPConnection, HTTPSConnection\n)\nfrom ..connectionpool import (\n HTTPConnectionPool, HTTPSConnectionPool\n)\nfrom ..exceptions import ConnectTimeoutError, NewConnectionError\nfrom ..poolmanager import PoolManager\nfrom ..util.url import parse_url\n\ntry:\n import ssl\nexcept ImportError:\n ssl = None\n\n\nclass SOCKSConnection(HTTPConnection):\n \"\"\"\n A plain-text HTTP connection that connects via a SOCKS proxy.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self._socks_options = kwargs.pop('_socks_options')\n super(SOCKSConnection, self).__init__(*args, **kwargs)\n\n def _new_conn(self):\n \"\"\"\n Establish a new connection via the SOCKS proxy.\n \"\"\"\n extra_kw = {}\n if self.source_address:\n extra_kw['source_address'] = self.source_address\n\n if self.socket_options:\n extra_kw['socket_options'] = self.socket_options\n\n try:\n conn = socks.create_connection(\n (self.host, self.port),\n proxy_type=self._socks_options['socks_version'],\n proxy_addr=self._socks_options['proxy_host'],\n proxy_port=self._socks_options['proxy_port'],\n proxy_username=self._socks_options['username'],\n proxy_password=self._socks_options['password'],\n proxy_rdns=self._socks_options['rdns'],\n timeout=self.timeout,\n **extra_kw\n )\n\n except SocketTimeout:\n raise ConnectTimeoutError(\n self, \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout))\n\n except socks.ProxyError as e:\n # This is fragile as hell, but it seems to be the only way to raise\n # useful errors here.\n if e.socket_err:\n error = e.socket_err\n if isinstance(error, SocketTimeout):\n raise ConnectTimeoutError(\n self,\n \"Connection to %s timed out. (connect timeout=%s)\" %\n (self.host, self.timeout)\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % error\n )\n else:\n raise NewConnectionError(\n self,\n \"Failed to establish a new connection: %s\" % e\n )\n\n except SocketError as e: # Defensive: PySocks should catch all these.\n raise NewConnectionError(\n self, \"Failed to establish a new connection: %s\" % e)\n\n return conn\n\n\n# We don't need to duplicate the Verified/Unverified distinction from\n# urllib3/connection.py here because the HTTPSConnection will already have been\n# correctly set to either the Verified or Unverified form by that module. 
This\n# means the SOCKSHTTPSConnection will automatically be the correct type.\nclass SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):\n pass\n\n\nclass SOCKSHTTPConnectionPool(HTTPConnectionPool):\n ConnectionCls = SOCKSConnection\n\n\nclass SOCKSHTTPSConnectionPool(HTTPSConnectionPool):\n ConnectionCls = SOCKSHTTPSConnection\n\n\nclass SOCKSProxyManager(PoolManager):\n \"\"\"\n A version of the urllib3 ProxyManager that routes connections via the\n defined SOCKS proxy.\n \"\"\"\n pool_classes_by_scheme = {\n 'http': SOCKSHTTPConnectionPool,\n 'https': SOCKSHTTPSConnectionPool,\n }\n\n def __init__(self, proxy_url, username=None, password=None,\n num_pools=10, headers=None, **connection_pool_kw):\n parsed = parse_url(proxy_url)\n\n if username is None and password is None and parsed.auth is not None:\n split = parsed.auth.split(':')\n if len(split) == 2:\n username, password = split\n if parsed.scheme == 'socks5':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = False\n elif parsed.scheme == 'socks5h':\n socks_version = socks.PROXY_TYPE_SOCKS5\n rdns = True\n elif parsed.scheme == 'socks4':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = False\n elif parsed.scheme == 'socks4a':\n socks_version = socks.PROXY_TYPE_SOCKS4\n rdns = True\n else:\n raise ValueError(\n \"Unable to determine SOCKS version from %s\" % proxy_url\n )\n\n self.proxy_url = proxy_url\n\n socks_options = {\n 'socks_version': socks_version,\n 'proxy_host': parsed.host,\n 'proxy_port': parsed.port,\n 'username': username,\n 'password': password,\n 'rdns': rdns\n }\n connection_pool_kw['_socks_options'] = socks_options\n\n super(SOCKSProxyManager, self).__init__(\n num_pools, headers, **connection_pool_kw\n )\n\n self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme\n", "path": "src/urllib3/contrib/socks.py"}]}
| 2,211 | 582 |
gh_patches_debug_19971
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-476
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding more documentation
Currently, we only have the API reference. There's no other documentation at the moment.
Here are a few references we could take inspiration from/copy.
- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)
- Recipe from the IPython Cookbook (link coming soon)
- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """
6
7 =====
8 Vispy
9 =====
10
11 Vispy is a collaborative project that has the goal to allow more sharing
12 of code between visualization projects based on OpenGL. It does this
13 by providing powerful interfaces to OpenGL, at different levels of
14 abstraction and generality.
15
16 Vispy consists of the following modules:
17 * vispy.app: for creating windows, timers and mainloops for various backends
18 * vispy.gloo: Object oriented GL API
19 * vispy.gloo.gl: Low level OpenGL API
20 * vispy.util: various utilities
21 * vispy.scene: Higher level visualization objects (work in progress)
22 * vispy.mpl_plot: matplotlib interface (work in progress)
23 * ... more to come
24
25 Vispy comes with a powerful event system and a simple application
26 framework that works on multiple backends. This allows easy creation
27 of figures, and enables integrating visualizations in a GUI application.
28
29 For more information see http://vispy.org.
30 """
31
32 from __future__ import division
33
34 __all__ = ['use', 'sys_info', 'set_log_level', 'test']
35
36 # Definition of the version number
37 __version__ = '0.3'
38
39
40 from .util import (_parse_command_line_arguments, config, # noqa
41 set_log_level, keys, sys_info, test) # noqa
42 from .util.wrappers import use # noqa
43
44 _parse_command_line_arguments()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vispy/__init__.py b/vispy/__init__.py
--- a/vispy/__init__.py
+++ b/vispy/__init__.py
@@ -8,25 +8,13 @@
Vispy
=====
-Vispy is a collaborative project that has the goal to allow more sharing
-of code between visualization projects based on OpenGL. It does this
-by providing powerful interfaces to OpenGL, at different levels of
-abstraction and generality.
-
-Vispy consists of the following modules:
- * vispy.app: for creating windows, timers and mainloops for various backends
- * vispy.gloo: Object oriented GL API
- * vispy.gloo.gl: Low level OpenGL API
- * vispy.util: various utilities
- * vispy.scene: Higher level visualization objects (work in progress)
- * vispy.mpl_plot: matplotlib interface (work in progress)
- * ... more to come
-
-Vispy comes with a powerful event system and a simple application
-framework that works on multiple backends. This allows easy creation
-of figures, and enables integrating visualizations in a GUI application.
-
-For more information see http://vispy.org.
+Vispy is a **high-performance interactive 2D/3D data visualization
+library**. Vispy leverages the computational power of modern **Graphics
+Processing Units (GPUs)** through the **OpenGL** library to display very
+large datasets.
+
+For more information, see http://vispy.org.
+
"""
from __future__ import division
|
{"golden_diff": "diff --git a/vispy/__init__.py b/vispy/__init__.py\n--- a/vispy/__init__.py\n+++ b/vispy/__init__.py\n@@ -8,25 +8,13 @@\n Vispy\n =====\n \n-Vispy is a collaborative project that has the goal to allow more sharing\n-of code between visualization projects based on OpenGL. It does this\n-by providing powerful interfaces to OpenGL, at different levels of\n-abstraction and generality.\n-\n-Vispy consists of the following modules:\n- * vispy.app: for creating windows, timers and mainloops for various backends\n- * vispy.gloo: Object oriented GL API\n- * vispy.gloo.gl: Low level OpenGL API\n- * vispy.util: various utilities\n- * vispy.scene: Higher level visualization objects (work in progress)\n- * vispy.mpl_plot: matplotlib interface (work in progress)\n- * ... more to come\n-\n-Vispy comes with a powerful event system and a simple application\n-framework that works on multiple backends. This allows easy creation\n-of figures, and enables integrating visualizations in a GUI application.\n-\n-For more information see http://vispy.org.\n+Vispy is a **high-performance interactive 2D/3D data visualization\n+library**. Vispy leverages the computational power of modern **Graphics\n+Processing Units (GPUs)** through the **OpenGL** library to display very\n+large datasets.\n+\n+For more information, see http://vispy.org.\n+\n \"\"\"\n \n from __future__ import division\n", "issue": "Adding more documentation\nCurrently, we only have the API reference. There's no other documentation at the moment.\n\nHere are a few references we could take inspiration from/copy.\n- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)\n- Recipe from the IPython Cookbook (link coming soon)\n- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a collaborative project that has the goal to allow more sharing\nof code between visualization projects based on OpenGL. It does this\nby providing powerful interfaces to OpenGL, at different levels of\nabstraction and generality.\n\nVispy consists of the following modules:\n * vispy.app: for creating windows, timers and mainloops for various backends\n * vispy.gloo: Object oriented GL API\n * vispy.gloo.gl: Low level OpenGL API\n * vispy.util: various utilities\n * vispy.scene: Higher level visualization objects (work in progress)\n * vispy.mpl_plot: matplotlib interface (work in progress)\n * ... more to come\n\nVispy comes with a powerful event system and a simple application\nframework that works on multiple backends. This allows easy creation\nof figures, and enables integrating visualizations in a GUI application.\n\nFor more information see http://vispy.org.\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\n__version__ = '0.3'\n\n\nfrom .util import (_parse_command_line_arguments, config, # noqa\n set_log_level, keys, sys_info, test) # noqa\nfrom .util.wrappers import use # noqa\n\n_parse_command_line_arguments()\n", "path": "vispy/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a **high-performance interactive 2D/3D data visualization\nlibrary**. Vispy leverages the computational power of modern **Graphics\nProcessing Units (GPUs)** through the **OpenGL** library to display very\nlarge datasets.\n\nFor more information, see http://vispy.org.\n\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\n__version__ = '0.3'\n\n\nfrom .util import (_parse_command_line_arguments, config, # noqa\n set_log_level, keys, sys_info, test) # noqa\nfrom .util.wrappers import use # noqa\n\n_parse_command_line_arguments()\n", "path": "vispy/__init__.py"}]}
| 800 | 343 |
gh_patches_debug_35191
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-1397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove date from NLCD glob/regex
### Description
We should remove the date from the NLCD glob/regex. NLCD products are regenerated every time a new release comes out, meaning the name (and unfortunately the hash) will change.
### Steps to reproduce
This came to me in a dream.
### Version
0.5.0dev
--- END ISSUE ---
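One detail any fix needs to handle: once the publication date is wildcarded, the filename glob contains two `*` characters, so substituting the year into it must be limited to the first occurrence. A plain-Python illustration of the difference:
```python
filename_glob = "nlcd_*_land_cover_l48_*.img"

# Replacing every "*" would also overwrite the publication-date wildcard:
print(filename_glob.replace("*", "2019"))     # nlcd_2019_land_cover_l48_2019.img

# Limiting the substitution to the first "*" keeps the date wildcarded:
print(filename_glob.replace("*", "2019", 1))  # nlcd_2019_land_cover_l48_*.img
```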
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/datasets/nlcd.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """NLCD dataset."""
5
6 import glob
7 import os
8 from typing import Any, Callable, Optional
9
10 import matplotlib.pyplot as plt
11 import torch
12 from rasterio.crs import CRS
13
14 from .geo import RasterDataset
15 from .utils import BoundingBox, download_url, extract_archive
16
17
18 class NLCD(RasterDataset):
19 """National Land Cover Database (NLCD) dataset.
20
21 The `NLCD dataset
22 <https://www.usgs.gov/centers/eros/science/national-land-cover-database>`_
23 is a land cover product that covers the United States and Puerto Rico. The current
24 implementation supports maps for the continental United States only. The product is
25 a joint effort between the United States Geological Survey
26 (`USGS <https://www.usgs.gov/>`_) and the Multi-Resolution Land Characteristics
27 Consortium (`MRLC <https://www.mrlc.gov/>`_) which released the first product
28 in 2001 with new updates every five years since then.
29
30 The dataset contains the following 17 classes:
31
32 0. Background
33 #. Open Water
34 #. Perennial Ice/Snow
35 #. Developed, Open Space
36 #. Developed, Low Intensity
37 #. Developed, Medium Intensity
38 #. Developed, High Intensity
39 #. Barren Land (Rock/Sand/Clay)
40 #. Deciduous Forest
41 #. Evergreen Forest
42 #. Mixed Forest
43 #. Shrub/Scrub
44 #. Grassland/Herbaceous
45 #. Pasture/Hay
46 #. Cultivated Crops
47 #. Woody Wetlands
48 #. Emergent Herbaceous Wetlands
49
50 Detailed descriptions of the classes can be found
51 `here <https://www.mrlc.gov/data/legends/national-land-cover-database-class-legend-and-description>`__.
52
53 Dataset format:
54
55 * single channel .img file with integer class labels
56
57 If you use this dataset in your research, please use the corresponding citation:
58
59 * 2001: https://doi.org/10.5066/P9MZGHLF
60 * 2006: https://doi.org/10.5066/P9HBR9V3
61 * 2011: https://doi.org/10.5066/P97S2IID
62 * 2016: https://doi.org/10.5066/P96HHBIE
63 * 2019: https://doi.org/10.5066/P9KZCM54
64
65 .. versionadded:: 0.5
66 """ # noqa: E501
67
68 filename_glob = "nlcd_*_land_cover_l48_20210604.img"
69 filename_regex = (
70 r"nlcd_(?P<date>\d{4})_land_cover_l48_(?P<publication_date>\d{8})\.img"
71 )
72 zipfile_glob = "nlcd_*_land_cover_l48_20210604.zip"
73 date_format = "%Y"
74 is_image = False
75
76 url = "https://s3-us-west-2.amazonaws.com/mrlc/nlcd_{}_land_cover_l48_20210604.zip"
77
78 md5s = {
79 2001: "538166a4d783204764e3df3b221fc4cd",
80 2006: "67454e7874a00294adb9442374d0c309",
81 2011: "ea524c835d173658eeb6fa3c8e6b917b",
82 2016: "452726f6e3bd3f70d8ca2476723d238a",
83 2019: "82851c3f8105763b01c83b4a9e6f3961",
84 }
85
86 cmap = {
87 0: (0, 0, 0, 0),
88 11: (70, 107, 159, 255),
89 12: (209, 222, 248, 255),
90 21: (222, 197, 197, 255),
91 22: (217, 146, 130, 255),
92 23: (235, 0, 0, 255),
93 24: (171, 0, 0, 255),
94 31: (179, 172, 159, 255),
95 41: (104, 171, 95, 255),
96 42: (28, 95, 44, 255),
97 43: (181, 197, 143, 255),
98 52: (204, 184, 121, 255),
99 71: (223, 223, 194, 255),
100 81: (220, 217, 57, 255),
101 82: (171, 108, 40, 255),
102 90: (184, 217, 235, 255),
103 95: (108, 159, 184, 255),
104 }
105
106 def __init__(
107 self,
108 root: str = "data",
109 crs: Optional[CRS] = None,
110 res: Optional[float] = None,
111 years: list[int] = [2019],
112 classes: list[int] = list(cmap.keys()),
113 transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,
114 cache: bool = True,
115 download: bool = False,
116 checksum: bool = False,
117 ) -> None:
118 """Initialize a new Dataset instance.
119
120 Args:
121 root: root directory where dataset can be found
122 crs: :term:`coordinate reference system (CRS)` to warp to
123 (defaults to the CRS of the first file found)
124 res: resolution of the dataset in units of CRS
125 (defaults to the resolution of the first file found)
126 years: list of years for which to use nlcd layer
127 classes: list of classes to include, the rest will be mapped to 0
128 (defaults to all classes)
129 transforms: a function/transform that takes an input sample
130 and returns a transformed version
131 cache: if True, cache file handle to speed up repeated sampling
132 download: if True, download dataset and store it in the root directory
133 checksum: if True, check the MD5 after downloading files (may be slow)
134
135 Raises:
136 AssertionError: if ``years`` or ``classes`` are invalid
137 FileNotFoundError: if no files are found in ``root``
138 RuntimeError: if ``download=False`` but dataset is missing or checksum fails
139 """
140 assert set(years) <= self.md5s.keys(), (
141 "NLCD data product only exists for the following years: "
142 f"{list(self.md5s.keys())}."
143 )
144 assert (
145 set(classes) <= self.cmap.keys()
146 ), f"Only the following classes are valid: {list(self.cmap.keys())}."
147 assert 0 in classes, "Classes must include the background class: 0"
148
149 self.root = root
150 self.years = years
151 self.classes = classes
152 self.download = download
153 self.checksum = checksum
154 self.ordinal_map = torch.zeros(max(self.cmap.keys()) + 1, dtype=self.dtype)
155 self.ordinal_cmap = torch.zeros((len(self.classes), 4), dtype=torch.uint8)
156
157 self._verify()
158
159 super().__init__(root, crs, res, transforms=transforms, cache=cache)
160
161 # Map chosen classes to ordinal numbers, all others mapped to background class
162 for v, k in enumerate(self.classes):
163 self.ordinal_map[k] = v
164 self.ordinal_cmap[v] = torch.tensor(self.cmap[k])
165
166 def __getitem__(self, query: BoundingBox) -> dict[str, Any]:
167 """Retrieve mask and metadata indexed by query.
168
169 Args:
170 query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index
171
172 Returns:
173 sample of mask and metadata at that index
174
175 Raises:
176 IndexError: if query is not found in the index
177 """
178 sample = super().__getitem__(query)
179 sample["mask"] = self.ordinal_map[sample["mask"]]
180 return sample
181
182 def _verify(self) -> None:
183 """Verify the integrity of the dataset.
184
185 Raises:
186 RuntimeError: if ``download=False`` but dataset is missing or checksum fails
187 """
188 # Check if the extracted files already exist
189 exists = []
190 for year in self.years:
191 filename_year = self.filename_glob.replace("*", str(year))
192 pathname = os.path.join(self.root, "**", filename_year)
193 if glob.glob(pathname, recursive=True):
194 exists.append(True)
195 else:
196 exists.append(False)
197
198 if all(exists):
199 return
200
201 # Check if the zip files have already been downloaded
202 exists = []
203 for year in self.years:
204 zipfile_year = self.zipfile_glob.replace("*", str(year))
205 pathname = os.path.join(self.root, "**", zipfile_year)
206 if glob.glob(pathname, recursive=True):
207 exists.append(True)
208 self._extract()
209 else:
210 exists.append(False)
211
212 if all(exists):
213 return
214
215 # Check if the user requested to download the dataset
216 if not self.download:
217 raise RuntimeError(
218 f"Dataset not found in `root={self.root}` and `download=False`, "
219 "either specify a different `root` directory or use `download=True` "
220 "to automatically download the dataset."
221 )
222
223 # Download the dataset
224 self._download()
225 self._extract()
226
227 def _download(self) -> None:
228 """Download the dataset."""
229 for year in self.years:
230 download_url(
231 self.url.format(year),
232 self.root,
233 md5=self.md5s[year] if self.checksum else None,
234 )
235
236 def _extract(self) -> None:
237 """Extract the dataset."""
238 for year in self.years:
239 zipfile_name = self.zipfile_glob.replace("*", str(year))
240 pathname = os.path.join(self.root, "**", zipfile_name)
241 extract_archive(glob.glob(pathname, recursive=True)[0], self.root)
242
243 def plot(
244 self,
245 sample: dict[str, Any],
246 show_titles: bool = True,
247 suptitle: Optional[str] = None,
248 ) -> plt.Figure:
249 """Plot a sample from the dataset.
250
251 Args:
252 sample: a sample returned by :meth:`RasterDataset.__getitem__`
253 show_titles: flag indicating whether to show titles above each panel
254 suptitle: optional string to use as a suptitle
255
256 Returns:
257 a matplotlib Figure with the rendered sample
258 """
259 mask = sample["mask"].squeeze()
260 ncols = 1
261
262 showing_predictions = "prediction" in sample
263 if showing_predictions:
264 pred = sample["prediction"].squeeze()
265 ncols = 2
266
267 fig, axs = plt.subplots(
268 nrows=1, ncols=ncols, figsize=(ncols * 4, 4), squeeze=False
269 )
270
271 axs[0, 0].imshow(self.ordinal_cmap[mask], interpolation="none")
272 axs[0, 0].axis("off")
273
274 if show_titles:
275 axs[0, 0].set_title("Mask")
276
277 if showing_predictions:
278 axs[0, 1].imshow(self.ordinal_cmap[pred], interpolation="none")
279 axs[0, 1].axis("off")
280 if show_titles:
281 axs[0, 1].set_title("Prediction")
282
283 if suptitle is not None:
284 plt.suptitle(suptitle)
285
286 return fig
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchgeo/datasets/nlcd.py b/torchgeo/datasets/nlcd.py
--- a/torchgeo/datasets/nlcd.py
+++ b/torchgeo/datasets/nlcd.py
@@ -65,11 +65,11 @@
.. versionadded:: 0.5
""" # noqa: E501
- filename_glob = "nlcd_*_land_cover_l48_20210604.img"
+ filename_glob = "nlcd_*_land_cover_l48_*.img"
filename_regex = (
r"nlcd_(?P<date>\d{4})_land_cover_l48_(?P<publication_date>\d{8})\.img"
)
- zipfile_glob = "nlcd_*_land_cover_l48_20210604.zip"
+ zipfile_glob = "nlcd_*_land_cover_l48_*.zip"
date_format = "%Y"
is_image = False
@@ -188,7 +188,7 @@
# Check if the extracted files already exist
exists = []
for year in self.years:
- filename_year = self.filename_glob.replace("*", str(year))
+ filename_year = self.filename_glob.replace("*", str(year), 1)
pathname = os.path.join(self.root, "**", filename_year)
if glob.glob(pathname, recursive=True):
exists.append(True)
@@ -201,7 +201,7 @@
# Check if the zip files have already been downloaded
exists = []
for year in self.years:
- zipfile_year = self.zipfile_glob.replace("*", str(year))
+ zipfile_year = self.zipfile_glob.replace("*", str(year), 1)
pathname = os.path.join(self.root, "**", zipfile_year)
if glob.glob(pathname, recursive=True):
exists.append(True)
@@ -236,7 +236,7 @@
def _extract(self) -> None:
"""Extract the dataset."""
for year in self.years:
- zipfile_name = self.zipfile_glob.replace("*", str(year))
+ zipfile_name = self.zipfile_glob.replace("*", str(year), 1)
pathname = os.path.join(self.root, "**", zipfile_name)
extract_archive(glob.glob(pathname, recursive=True)[0], self.root)
|
{"golden_diff": "diff --git a/torchgeo/datasets/nlcd.py b/torchgeo/datasets/nlcd.py\n--- a/torchgeo/datasets/nlcd.py\n+++ b/torchgeo/datasets/nlcd.py\n@@ -65,11 +65,11 @@\n .. versionadded:: 0.5\n \"\"\" # noqa: E501\n \n- filename_glob = \"nlcd_*_land_cover_l48_20210604.img\"\n+ filename_glob = \"nlcd_*_land_cover_l48_*.img\"\n filename_regex = (\n r\"nlcd_(?P<date>\\d{4})_land_cover_l48_(?P<publication_date>\\d{8})\\.img\"\n )\n- zipfile_glob = \"nlcd_*_land_cover_l48_20210604.zip\"\n+ zipfile_glob = \"nlcd_*_land_cover_l48_*.zip\"\n date_format = \"%Y\"\n is_image = False\n \n@@ -188,7 +188,7 @@\n # Check if the extracted files already exist\n exists = []\n for year in self.years:\n- filename_year = self.filename_glob.replace(\"*\", str(year))\n+ filename_year = self.filename_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", filename_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n@@ -201,7 +201,7 @@\n # Check if the zip files have already been downloaded\n exists = []\n for year in self.years:\n- zipfile_year = self.zipfile_glob.replace(\"*\", str(year))\n+ zipfile_year = self.zipfile_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", zipfile_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n@@ -236,7 +236,7 @@\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n for year in self.years:\n- zipfile_name = self.zipfile_glob.replace(\"*\", str(year))\n+ zipfile_name = self.zipfile_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", zipfile_name)\n extract_archive(glob.glob(pathname, recursive=True)[0], self.root)\n", "issue": "Remove date from NLCD glob/regex\n### Description\n\nWe should remove the date from the NLCD glob/regex. NLCD products are regenerated every time a new release comes out, meaning the name (and unfortunately the hash) will change.\n\n### Steps to reproduce\n\nThis came to me in a dream. \n\n### Version\n\n0.5.0dev\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"NLCD dataset.\"\"\"\n\nimport glob\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom rasterio.crs import CRS\n\nfrom .geo import RasterDataset\nfrom .utils import BoundingBox, download_url, extract_archive\n\n\nclass NLCD(RasterDataset):\n \"\"\"National Land Cover Database (NLCD) dataset.\n\n The `NLCD dataset\n <https://www.usgs.gov/centers/eros/science/national-land-cover-database>`_\n is a land cover product that covers the United States and Puerto Rico. The current\n implementation supports maps for the continental United States only. The product is\n a joint effort between the United States Geological Survey\n (`USGS <https://www.usgs.gov/>`_) and the Multi-Resolution Land Characteristics\n Consortium (`MRLC <https://www.mrlc.gov/>`_) which released the first product\n in 2001 with new updates every five years since then.\n\n The dataset contains the following 17 classes:\n\n 0. Background\n #. Open Water\n #. Perennial Ice/Snow\n #. Developed, Open Space\n #. Developed, Low Intensity\n #. Developed, Medium Intensity\n #. Developed, High Intensity\n #. Barren Land (Rock/Sand/Clay)\n #. Deciduous Forest\n #. Evergreen Forest\n #. Mixed Forest\n #. Shrub/Scrub\n #. Grassland/Herbaceous\n #. Pasture/Hay\n #. Cultivated Crops\n #. Woody Wetlands\n #. 
Emergent Herbaceous Wetlands\n\n Detailed descriptions of the classes can be found\n `here <https://www.mrlc.gov/data/legends/national-land-cover-database-class-legend-and-description>`__.\n\n Dataset format:\n\n * single channel .img file with integer class labels\n\n If you use this dataset in your research, please use the corresponding citation:\n\n * 2001: https://doi.org/10.5066/P9MZGHLF\n * 2006: https://doi.org/10.5066/P9HBR9V3\n * 2011: https://doi.org/10.5066/P97S2IID\n * 2016: https://doi.org/10.5066/P96HHBIE\n * 2019: https://doi.org/10.5066/P9KZCM54\n\n .. versionadded:: 0.5\n \"\"\" # noqa: E501\n\n filename_glob = \"nlcd_*_land_cover_l48_20210604.img\"\n filename_regex = (\n r\"nlcd_(?P<date>\\d{4})_land_cover_l48_(?P<publication_date>\\d{8})\\.img\"\n )\n zipfile_glob = \"nlcd_*_land_cover_l48_20210604.zip\"\n date_format = \"%Y\"\n is_image = False\n\n url = \"https://s3-us-west-2.amazonaws.com/mrlc/nlcd_{}_land_cover_l48_20210604.zip\"\n\n md5s = {\n 2001: \"538166a4d783204764e3df3b221fc4cd\",\n 2006: \"67454e7874a00294adb9442374d0c309\",\n 2011: \"ea524c835d173658eeb6fa3c8e6b917b\",\n 2016: \"452726f6e3bd3f70d8ca2476723d238a\",\n 2019: \"82851c3f8105763b01c83b4a9e6f3961\",\n }\n\n cmap = {\n 0: (0, 0, 0, 0),\n 11: (70, 107, 159, 255),\n 12: (209, 222, 248, 255),\n 21: (222, 197, 197, 255),\n 22: (217, 146, 130, 255),\n 23: (235, 0, 0, 255),\n 24: (171, 0, 0, 255),\n 31: (179, 172, 159, 255),\n 41: (104, 171, 95, 255),\n 42: (28, 95, 44, 255),\n 43: (181, 197, 143, 255),\n 52: (204, 184, 121, 255),\n 71: (223, 223, 194, 255),\n 81: (220, 217, 57, 255),\n 82: (171, 108, 40, 255),\n 90: (184, 217, 235, 255),\n 95: (108, 159, 184, 255),\n }\n\n def __init__(\n self,\n root: str = \"data\",\n crs: Optional[CRS] = None,\n res: Optional[float] = None,\n years: list[int] = [2019],\n classes: list[int] = list(cmap.keys()),\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n cache: bool = True,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n crs: :term:`coordinate reference system (CRS)` to warp to\n (defaults to the CRS of the first file found)\n res: resolution of the dataset in units of CRS\n (defaults to the resolution of the first file found)\n years: list of years for which to use nlcd layer\n classes: list of classes to include, the rest will be mapped to 0\n (defaults to all classes)\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n cache: if True, cache file handle to speed up repeated sampling\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``years`` or ``classes`` are invalid\n FileNotFoundError: if no files are found in ``root``\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n assert set(years) <= self.md5s.keys(), (\n \"NLCD data product only exists for the following years: \"\n f\"{list(self.md5s.keys())}.\"\n )\n assert (\n set(classes) <= self.cmap.keys()\n ), f\"Only the following classes are valid: {list(self.cmap.keys())}.\"\n assert 0 in classes, \"Classes must include the background class: 0\"\n\n self.root = root\n self.years = years\n self.classes = classes\n self.download = download\n self.checksum = checksum\n self.ordinal_map = torch.zeros(max(self.cmap.keys()) + 1, dtype=self.dtype)\n 
self.ordinal_cmap = torch.zeros((len(self.classes), 4), dtype=torch.uint8)\n\n self._verify()\n\n super().__init__(root, crs, res, transforms=transforms, cache=cache)\n\n # Map chosen classes to ordinal numbers, all others mapped to background class\n for v, k in enumerate(self.classes):\n self.ordinal_map[k] = v\n self.ordinal_cmap[v] = torch.tensor(self.cmap[k])\n\n def __getitem__(self, query: BoundingBox) -> dict[str, Any]:\n \"\"\"Retrieve mask and metadata indexed by query.\n\n Args:\n query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index\n\n Returns:\n sample of mask and metadata at that index\n\n Raises:\n IndexError: if query is not found in the index\n \"\"\"\n sample = super().__getitem__(query)\n sample[\"mask\"] = self.ordinal_map[sample[\"mask\"]]\n return sample\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n exists = []\n for year in self.years:\n filename_year = self.filename_glob.replace(\"*\", str(year))\n pathname = os.path.join(self.root, \"**\", filename_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the zip files have already been downloaded\n exists = []\n for year in self.years:\n zipfile_year = self.zipfile_glob.replace(\"*\", str(year))\n pathname = os.path.join(self.root, \"**\", zipfile_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n self._extract()\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n for year in self.years:\n download_url(\n self.url.format(year),\n self.root,\n md5=self.md5s[year] if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n for year in self.years:\n zipfile_name = self.zipfile_glob.replace(\"*\", str(year))\n pathname = os.path.join(self.root, \"**\", zipfile_name)\n extract_archive(glob.glob(pathname, recursive=True)[0], self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> plt.Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`RasterDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n mask = sample[\"mask\"].squeeze()\n ncols = 1\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n pred = sample[\"prediction\"].squeeze()\n ncols = 2\n\n fig, axs = plt.subplots(\n nrows=1, ncols=ncols, figsize=(ncols * 4, 4), squeeze=False\n )\n\n axs[0, 0].imshow(self.ordinal_cmap[mask], interpolation=\"none\")\n axs[0, 0].axis(\"off\")\n\n if show_titles:\n axs[0, 0].set_title(\"Mask\")\n\n if showing_predictions:\n axs[0, 1].imshow(self.ordinal_cmap[pred], interpolation=\"none\")\n axs[0, 1].axis(\"off\")\n if show_titles:\n axs[0, 1].set_title(\"Prediction\")\n\n if 
suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/nlcd.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"NLCD dataset.\"\"\"\n\nimport glob\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport torch\nfrom rasterio.crs import CRS\n\nfrom .geo import RasterDataset\nfrom .utils import BoundingBox, download_url, extract_archive\n\n\nclass NLCD(RasterDataset):\n \"\"\"National Land Cover Database (NLCD) dataset.\n\n The `NLCD dataset\n <https://www.usgs.gov/centers/eros/science/national-land-cover-database>`_\n is a land cover product that covers the United States and Puerto Rico. The current\n implementation supports maps for the continental United States only. The product is\n a joint effort between the United States Geological Survey\n (`USGS <https://www.usgs.gov/>`_) and the Multi-Resolution Land Characteristics\n Consortium (`MRLC <https://www.mrlc.gov/>`_) which released the first product\n in 2001 with new updates every five years since then.\n\n The dataset contains the following 17 classes:\n\n 0. Background\n #. Open Water\n #. Perennial Ice/Snow\n #. Developed, Open Space\n #. Developed, Low Intensity\n #. Developed, Medium Intensity\n #. Developed, High Intensity\n #. Barren Land (Rock/Sand/Clay)\n #. Deciduous Forest\n #. Evergreen Forest\n #. Mixed Forest\n #. Shrub/Scrub\n #. Grassland/Herbaceous\n #. Pasture/Hay\n #. Cultivated Crops\n #. Woody Wetlands\n #. Emergent Herbaceous Wetlands\n\n Detailed descriptions of the classes can be found\n `here <https://www.mrlc.gov/data/legends/national-land-cover-database-class-legend-and-description>`__.\n\n Dataset format:\n\n * single channel .img file with integer class labels\n\n If you use this dataset in your research, please use the corresponding citation:\n\n * 2001: https://doi.org/10.5066/P9MZGHLF\n * 2006: https://doi.org/10.5066/P9HBR9V3\n * 2011: https://doi.org/10.5066/P97S2IID\n * 2016: https://doi.org/10.5066/P96HHBIE\n * 2019: https://doi.org/10.5066/P9KZCM54\n\n .. 
versionadded:: 0.5\n \"\"\" # noqa: E501\n\n filename_glob = \"nlcd_*_land_cover_l48_*.img\"\n filename_regex = (\n r\"nlcd_(?P<date>\\d{4})_land_cover_l48_(?P<publication_date>\\d{8})\\.img\"\n )\n zipfile_glob = \"nlcd_*_land_cover_l48_*.zip\"\n date_format = \"%Y\"\n is_image = False\n\n url = \"https://s3-us-west-2.amazonaws.com/mrlc/nlcd_{}_land_cover_l48_20210604.zip\"\n\n md5s = {\n 2001: \"538166a4d783204764e3df3b221fc4cd\",\n 2006: \"67454e7874a00294adb9442374d0c309\",\n 2011: \"ea524c835d173658eeb6fa3c8e6b917b\",\n 2016: \"452726f6e3bd3f70d8ca2476723d238a\",\n 2019: \"82851c3f8105763b01c83b4a9e6f3961\",\n }\n\n cmap = {\n 0: (0, 0, 0, 0),\n 11: (70, 107, 159, 255),\n 12: (209, 222, 248, 255),\n 21: (222, 197, 197, 255),\n 22: (217, 146, 130, 255),\n 23: (235, 0, 0, 255),\n 24: (171, 0, 0, 255),\n 31: (179, 172, 159, 255),\n 41: (104, 171, 95, 255),\n 42: (28, 95, 44, 255),\n 43: (181, 197, 143, 255),\n 52: (204, 184, 121, 255),\n 71: (223, 223, 194, 255),\n 81: (220, 217, 57, 255),\n 82: (171, 108, 40, 255),\n 90: (184, 217, 235, 255),\n 95: (108, 159, 184, 255),\n }\n\n def __init__(\n self,\n root: str = \"data\",\n crs: Optional[CRS] = None,\n res: Optional[float] = None,\n years: list[int] = [2019],\n classes: list[int] = list(cmap.keys()),\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n cache: bool = True,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n crs: :term:`coordinate reference system (CRS)` to warp to\n (defaults to the CRS of the first file found)\n res: resolution of the dataset in units of CRS\n (defaults to the resolution of the first file found)\n years: list of years for which to use nlcd layer\n classes: list of classes to include, the rest will be mapped to 0\n (defaults to all classes)\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n cache: if True, cache file handle to speed up repeated sampling\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``years`` or ``classes`` are invalid\n FileNotFoundError: if no files are found in ``root``\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n assert set(years) <= self.md5s.keys(), (\n \"NLCD data product only exists for the following years: \"\n f\"{list(self.md5s.keys())}.\"\n )\n assert (\n set(classes) <= self.cmap.keys()\n ), f\"Only the following classes are valid: {list(self.cmap.keys())}.\"\n assert 0 in classes, \"Classes must include the background class: 0\"\n\n self.root = root\n self.years = years\n self.classes = classes\n self.download = download\n self.checksum = checksum\n self.ordinal_map = torch.zeros(max(self.cmap.keys()) + 1, dtype=self.dtype)\n self.ordinal_cmap = torch.zeros((len(self.classes), 4), dtype=torch.uint8)\n\n self._verify()\n\n super().__init__(root, crs, res, transforms=transforms, cache=cache)\n\n # Map chosen classes to ordinal numbers, all others mapped to background class\n for v, k in enumerate(self.classes):\n self.ordinal_map[k] = v\n self.ordinal_cmap[v] = torch.tensor(self.cmap[k])\n\n def __getitem__(self, query: BoundingBox) -> dict[str, Any]:\n \"\"\"Retrieve mask and metadata indexed by query.\n\n Args:\n query: (minx, maxx, miny, maxy, mint, maxt) coordinates to index\n\n Returns:\n sample of mask and 
metadata at that index\n\n Raises:\n IndexError: if query is not found in the index\n \"\"\"\n sample = super().__getitem__(query)\n sample[\"mask\"] = self.ordinal_map[sample[\"mask\"]]\n return sample\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n exists = []\n for year in self.years:\n filename_year = self.filename_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", filename_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the zip files have already been downloaded\n exists = []\n for year in self.years:\n zipfile_year = self.zipfile_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", zipfile_year)\n if glob.glob(pathname, recursive=True):\n exists.append(True)\n self._extract()\n else:\n exists.append(False)\n\n if all(exists):\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n for year in self.years:\n download_url(\n self.url.format(year),\n self.root,\n md5=self.md5s[year] if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n for year in self.years:\n zipfile_name = self.zipfile_glob.replace(\"*\", str(year), 1)\n pathname = os.path.join(self.root, \"**\", zipfile_name)\n extract_archive(glob.glob(pathname, recursive=True)[0], self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> plt.Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`RasterDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n mask = sample[\"mask\"].squeeze()\n ncols = 1\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n pred = sample[\"prediction\"].squeeze()\n ncols = 2\n\n fig, axs = plt.subplots(\n nrows=1, ncols=ncols, figsize=(ncols * 4, 4), squeeze=False\n )\n\n axs[0, 0].imshow(self.ordinal_cmap[mask], interpolation=\"none\")\n axs[0, 0].axis(\"off\")\n\n if show_titles:\n axs[0, 0].set_title(\"Mask\")\n\n if showing_predictions:\n axs[0, 1].imshow(self.ordinal_cmap[pred], interpolation=\"none\")\n axs[0, 1].axis(\"off\")\n if show_titles:\n axs[0, 1].set_title(\"Prediction\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/nlcd.py"}]}
| 3,932 | 521 |
gh_patches_debug_11506 | rasdani/github-patches | git_diff | beetbox__beets-3169 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Errors when installed munkres is > 1.0.x
### Problem
When invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.
Running this command in verbose (`-vv`) mode:
```python-traceback
> beet -vv import .
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 583, in _build_master
ws.require(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 791, in resolve
raise VersionConflict(dist, req).with_context(dependent_req)
pkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/beet", line 6, in <module>
from pkg_resources import load_entry_point
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3191, in <module>
@_call_aside
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3175, in _call_aside
f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3204, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 585, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 598, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets
```
### Setup
* OS: Linux (Arch Linux)
* Python version: 3.7.2
* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)
* Turning off plugins made problem go away (yes/no): Didn't try.
My configuration (output of `beet config`) is:
Not relevant.
Errors when installed munkres is > 1.0.x
### Problem
When invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.
Running this command in verbose (`-vv`) mode:
```python-traceback
> beet -vv import .
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 583, in _build_master
ws.require(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 900, in require
needed = self.resolve(parse_requirements(requirements))
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 791, in resolve
raise VersionConflict(dist, req).with_context(dependent_req)
pkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/beet", line 6, in <module>
from pkg_resources import load_entry_point
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3191, in <module>
@_call_aside
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3175, in _call_aside
f(*args, **kwargs)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 3204, in _initialize_master_working_set
working_set = WorkingSet._build_master()
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 585, in _build_master
return cls._build_from_requirements(__requires__)
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 598, in _build_from_requirements
dists = ws.resolve(reqs, Environment())
File "/usr/lib/python3.7/site-packages/pkg_resources/__init__.py", line 786, in resolve
raise DistributionNotFound(req, requirers)
pkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets
```
### Setup
* OS: Linux (Arch Linux)
* Python version: 3.7.2
* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)
* Turning off plugins made problem go away (yes/no): Didn't try.
My configuration (output of `beet config`) is:
Not relevant.
--- END ISSUE ---
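The conflict reported above follows from the compatible-release operator: `munkres~=1.0.0` expands to `>=1.0.0, ==1.0.*`, so an installed munkres 1.1.2 can never satisfy it. A minimal sketch of that check, assuming the third-party `packaging` library is available (beets itself does not depend on it; it is used here only for illustration):
```python
from packaging.specifiers import SpecifierSet

# "~=1.0.0" expands to ">=1.0.0, ==1.0.*", so any 1.1.x release is rejected.
for spec in ("~=1.0.0", "!=1.1.0,!=1.1.1", ">=1.0.0"):
    print(spec, "allows 1.1.2:", "1.1.2" in SpecifierSet(spec))
# ~=1.0.0 allows 1.1.2: False
# !=1.1.0,!=1.1.1 allows 1.1.2: True
# >=1.0.0 allows 1.1.2: True
```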
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # This file is part of beets.
5 # Copyright 2016, Adrian Sampson.
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining
8 # a copy of this software and associated documentation files (the
9 # "Software"), to deal in the Software without restriction, including
10 # without limitation the rights to use, copy, modify, merge, publish,
11 # distribute, sublicense, and/or sell copies of the Software, and to
12 # permit persons to whom the Software is furnished to do so, subject to
13 # the following conditions:
14 #
15 # The above copyright notice and this permission notice shall be
16 # included in all copies or substantial portions of the Software.
17
18 from __future__ import division, absolute_import, print_function
19
20 import os
21 import sys
22 import subprocess
23 import shutil
24 from setuptools import setup
25
26
27 def _read(fn):
28 path = os.path.join(os.path.dirname(__file__), fn)
29 return open(path).read()
30
31
32 def build_manpages():
33 # Go into the docs directory and build the manpage.
34 docdir = os.path.join(os.path.dirname(__file__), 'docs')
35 curdir = os.getcwd()
36 os.chdir(docdir)
37 try:
38 subprocess.check_call(['make', 'man'])
39 except OSError:
40 print("Could not build manpages (make man failed)!", file=sys.stderr)
41 return
42 finally:
43 os.chdir(curdir)
44
45 # Copy resulting manpages.
46 mandir = os.path.join(os.path.dirname(__file__), 'man')
47 if os.path.exists(mandir):
48 shutil.rmtree(mandir)
49 shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)
50
51
52 # Build manpages if we're making a source distribution tarball.
53 if 'sdist' in sys.argv:
54 build_manpages()
55
56
57 setup(
58 name='beets',
59 version='1.4.8',
60 description='music tagger and library organizer',
61 author='Adrian Sampson',
62 author_email='[email protected]',
63 url='http://beets.io/',
64 license='MIT',
65 platforms='ALL',
66 long_description=_read('README.rst'),
67 test_suite='test.testall.suite',
68 include_package_data=True, # Install plugin resources.
69
70 packages=[
71 'beets',
72 'beets.ui',
73 'beets.autotag',
74 'beets.util',
75 'beets.dbcore',
76 'beetsplug',
77 'beetsplug.bpd',
78 'beetsplug.web',
79 'beetsplug.lastgenre',
80 'beetsplug.metasync',
81 ],
82 entry_points={
83 'console_scripts': [
84 'beet = beets.ui:main',
85 ],
86 },
87
88 install_requires=[
89 'six>=1.9',
90 'mutagen>=1.33',
91 'munkres~=1.0.0',
92 'unidecode',
93 'musicbrainzngs>=0.4',
94 'pyyaml',
95 ] + (
96 # Use the backport of Python 3.4's `enum` module.
97 ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []
98 ) + (
99 # Pin a Python 2-compatible version of Jellyfish.
100 ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']
101 ) + (
102 # Support for ANSI console colors on Windows.
103 ['colorama'] if (sys.platform == 'win32') else []
104 ),
105
106 tests_require=[
107 'beautifulsoup4',
108 'flask',
109 'mock',
110 'pylast',
111 'rarfile',
112 'responses',
113 'pyxdg',
114 'pathlib',
115 'python-mpd2',
116 'discogs-client'
117 ],
118
119 # Plugin (optional) dependencies:
120 extras_require={
121 'absubmit': ['requests'],
122 'fetchart': ['requests'],
123 'chroma': ['pyacoustid'],
124 'discogs': ['discogs-client>=2.2.1'],
125 'beatport': ['requests-oauthlib>=0.6.1'],
126 'lastgenre': ['pylast'],
127 'mpdstats': ['python-mpd2>=0.4.2'],
128 'web': ['flask', 'flask-cors'],
129 'import': ['rarfile'],
130 'thumbnails': ['pyxdg'] +
131 (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),
132 'metasync': ['dbus-python'],
133 },
134 # Non-Python/non-PyPI plugin dependencies:
135 # convert: ffmpeg
136 # bpd: python-gi and GStreamer
137 # absubmit: extractor binary from http://acousticbrainz.org/download
138
139 classifiers=[
140 'Topic :: Multimedia :: Sound/Audio',
141 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
142 'License :: OSI Approved :: MIT License',
143 'Environment :: Console',
144 'Environment :: Web Environment',
145 'Programming Language :: Python',
146 'Programming Language :: Python :: 2',
147 'Programming Language :: Python :: 2.7',
148 'Programming Language :: Python :: 3',
149 'Programming Language :: Python :: 3.4',
150 'Programming Language :: Python :: 3.5',
151 'Programming Language :: Python :: 3.6',
152 'Programming Language :: Python :: 3.7',
153 'Programming Language :: Python :: Implementation :: CPython',
154 ],
155 )
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -88,10 +88,14 @@
install_requires=[
'six>=1.9',
'mutagen>=1.33',
- 'munkres~=1.0.0',
'unidecode',
'musicbrainzngs>=0.4',
'pyyaml',
+ ] + [
+ # Avoid a version of munkres incompatible with Python 3.
+ 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else
+ 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else
+ 'munkres>=1.0.0',
] + (
# Use the backport of Python 3.4's `enum` module.
['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []
|
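The patch keeps a single flat `install_requires` list but selects exactly one munkres specifier per interpreter through a chained conditional. A small sketch of how that expression resolves, assuming the same version cut-offs as the diff:
```python
import sys

# Chained conditional as in the patched setup.py: one requirement string
# is chosen based on the running interpreter.
munkres_req = (
    'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else
    'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else
    'munkres>=1.0.0'
)
print(munkres_req)  # e.g. 'munkres>=1.0.0' on Python 3.6+
```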
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -88,10 +88,14 @@\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n- 'munkres~=1.0.0',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n+ ] + [\n+ # Avoid a version of munkres incompatible with Python 3.\n+ 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n+ 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n+ 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n", "issue": "Errors when installed munkres is > 1.0.x\n### Problem\r\n\r\nWhen invoking beets, beets errors out on munkres versions newer than 1.0.x. This was introduced by https://github.com/beetbox/beets/pull/3146.\r\n\r\nRunning this command in verbose (`-vv`) mode:\r\n\r\n```python-traceback\r\n> beet -vv import .\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 583, in _build_master\r\n ws.require(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 900, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 791, in resolve\r\n raise VersionConflict(dist, req).with_context(dependent_req)\r\npkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/beet\", line 6, in <module>\r\n from pkg_resources import load_entry_point\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3191, in <module>\r\n @_call_aside\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3175, in _call_aside\r\n f(*args, **kwargs)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3204, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 585, in _build_master\r\n return cls._build_from_requirements(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 598, in _build_from_requirements\r\n dists = ws.resolve(reqs, Environment())\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 786, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets\r\n```\r\n\r\n### Setup\r\n\r\n* OS: Linux (Arch Linux)\r\n* Python version: 3.7.2\r\n* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)\r\n* Turning off plugins made problem go away (yes/no): Didn't try.\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\nNot relevant.\nErrors when installed munkres is > 1.0.x\n### Problem\r\n\r\nWhen invoking beets, beets errors out on munkres versions newer than 1.0.x. 
This was introduced by https://github.com/beetbox/beets/pull/3146.\r\n\r\nRunning this command in verbose (`-vv`) mode:\r\n\r\n```python-traceback\r\n> beet -vv import .\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 583, in _build_master\r\n ws.require(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 900, in require\r\n needed = self.resolve(parse_requirements(requirements))\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 791, in resolve\r\n raise VersionConflict(dist, req).with_context(dependent_req)\r\npkg_resources.ContextualVersionConflict: (munkres 1.1.2 (/usr/lib/python3.7/site-packages), Requirement.parse('munkres~=1.0.0'), {'beets'})\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/beet\", line 6, in <module>\r\n from pkg_resources import load_entry_point\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3191, in <module>\r\n @_call_aside\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3175, in _call_aside\r\n f(*args, **kwargs)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 3204, in _initialize_master_working_set\r\n working_set = WorkingSet._build_master()\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 585, in _build_master\r\n return cls._build_from_requirements(__requires__)\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 598, in _build_from_requirements\r\n dists = ws.resolve(reqs, Environment())\r\n File \"/usr/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 786, in resolve\r\n raise DistributionNotFound(req, requirers)\r\npkg_resources.DistributionNotFound: The 'munkres~=1.0.0' distribution was not found and is required by beets\r\n```\r\n\r\n### Setup\r\n\r\n* OS: Linux (Arch Linux)\r\n* Python version: 3.7.2\r\n* beets version: 1.4.7.r359.g80f4f0a0-1 (from `pacman -Qi` output, `beet --version` also break due to munkres dependency)\r\n* Turning off plugins made problem go away (yes/no): Didn't try.\r\n\r\nMy configuration (output of `beet config`) is:\r\n\r\nNot relevant.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n 
except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.4.8',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n author_email='[email protected]',\n url='http://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n 'munkres~=1.0.0',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'pathlib',\n 'python-mpd2',\n 'discogs-client'\n ],\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests'],\n 'chroma': ['pyacoustid'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'lastgenre': ['pylast'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # convert: ffmpeg\n # bpd: python-gi and GStreamer\n # absubmit: extractor binary from http://acousticbrainz.org/download\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit 
persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport subprocess\nimport shutil\nfrom setuptools import setup\n\n\ndef _read(fn):\n path = os.path.join(os.path.dirname(__file__), fn)\n return open(path).read()\n\n\ndef build_manpages():\n # Go into the docs directory and build the manpage.\n docdir = os.path.join(os.path.dirname(__file__), 'docs')\n curdir = os.getcwd()\n os.chdir(docdir)\n try:\n subprocess.check_call(['make', 'man'])\n except OSError:\n print(\"Could not build manpages (make man failed)!\", file=sys.stderr)\n return\n finally:\n os.chdir(curdir)\n\n # Copy resulting manpages.\n mandir = os.path.join(os.path.dirname(__file__), 'man')\n if os.path.exists(mandir):\n shutil.rmtree(mandir)\n shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)\n\n\n# Build manpages if we're making a source distribution tarball.\nif 'sdist' in sys.argv:\n build_manpages()\n\n\nsetup(\n name='beets',\n version='1.4.8',\n description='music tagger and library organizer',\n author='Adrian Sampson',\n author_email='[email protected]',\n url='http://beets.io/',\n license='MIT',\n platforms='ALL',\n long_description=_read('README.rst'),\n test_suite='test.testall.suite',\n include_package_data=True, # Install plugin resources.\n\n packages=[\n 'beets',\n 'beets.ui',\n 'beets.autotag',\n 'beets.util',\n 'beets.dbcore',\n 'beetsplug',\n 'beetsplug.bpd',\n 'beetsplug.web',\n 'beetsplug.lastgenre',\n 'beetsplug.metasync',\n ],\n entry_points={\n 'console_scripts': [\n 'beet = beets.ui:main',\n ],\n },\n\n install_requires=[\n 'six>=1.9',\n 'mutagen>=1.33',\n 'unidecode',\n 'musicbrainzngs>=0.4',\n 'pyyaml',\n ] + [\n # Avoid a version of munkres incompatible with Python 3.\n 'munkres~=1.0.0' if sys.version_info < (3, 5, 0) else\n 'munkres!=1.1.0,!=1.1.1' if sys.version_info < (3, 6, 0) else\n 'munkres>=1.0.0',\n ] + (\n # Use the backport of Python 3.4's `enum` module.\n ['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []\n ) + (\n # Pin a Python 2-compatible version of Jellyfish.\n ['jellyfish==0.6.0'] if sys.version_info < (3, 4, 0) else ['jellyfish']\n ) + (\n # Support for ANSI console colors on Windows.\n ['colorama'] if (sys.platform == 'win32') else []\n ),\n\n tests_require=[\n 'beautifulsoup4',\n 'flask',\n 'mock',\n 'pylast',\n 'rarfile',\n 'responses',\n 'pyxdg',\n 'pathlib',\n 'python-mpd2',\n 'discogs-client'\n ],\n\n # Plugin (optional) dependencies:\n extras_require={\n 'absubmit': ['requests'],\n 'fetchart': ['requests'],\n 'chroma': ['pyacoustid'],\n 'discogs': ['discogs-client>=2.2.1'],\n 'beatport': ['requests-oauthlib>=0.6.1'],\n 'lastgenre': ['pylast'],\n 'mpdstats': ['python-mpd2>=0.4.2'],\n 'web': ['flask', 'flask-cors'],\n 'import': ['rarfile'],\n 'thumbnails': ['pyxdg'] +\n (['pathlib'] if (sys.version_info < (3, 4, 0)) else []),\n 'metasync': ['dbus-python'],\n },\n # Non-Python/non-PyPI plugin dependencies:\n # convert: ffmpeg\n # bpd: python-gi and GStreamer\n # absubmit: extractor binary from http://acousticbrainz.org/download\n\n classifiers=[\n 'Topic :: Multimedia :: Sound/Audio',\n 'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',\n 'License :: OSI Approved :: MIT License',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 
'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n", "path": "setup.py"}]}
| 3,163 | 241 |
gh_patches_debug_25073 | rasdani/github-patches | git_diff | modoboa__modoboa-722 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
In auto-reply message form, some issue around AM/PM datetime picker
In auto-reply message form, some issue around AM/PM datetime picker

I can't select AM or PM.
Suggest : enable 24h format
--- END ISSUE ---
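One way to read the suggestion: the picker's format string uses moment.js-style tokens (as the `YYYY-MM-DD` pattern suggests), where `hh` is a 12-hour clock — which is what brings in the AM/PM meridiem column — and `HH` is a 24-hour clock. A sketch of the 24-hour variant, written as the Python string the extension uses to emit its JavaScript (the language code here is a placeholder):
```python
def autoreply_datepicker_js(lang="en"):
    # 'HH' selects a 24-hour clock, so no AM/PM selector is rendered.
    return """function autoreply_cb() {
    $('.datefield').datetimepicker({
        format: 'YYYY-MM-DD HH:mm:ss',
        language: '%(lang)s'
    });
}
""" % {"lang": lang}
```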
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/extensions/postfix_autoreply/forms.py`
Content:
```
1 # coding: utf-8
2 from django import forms
3 from django.utils.translation import ugettext as _, ugettext_lazy
4 from django.utils import timezone
5 from .models import ARmessage
6
7
8 class ARmessageForm(forms.ModelForm):
9 fromdate = forms.DateTimeField(
10 label=ugettext_lazy('From'),
11 required=False,
12 help_text=ugettext_lazy("Activate your auto reply from this date"),
13 widget=forms.TextInput(
14 attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
15 )
16 )
17 untildate = forms.DateTimeField(
18 label=ugettext_lazy('Until'),
19 required=False,
20 help_text=ugettext_lazy("Activate your auto reply until this date"),
21 widget=forms.TextInput(
22 attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
23 )
24 )
25 subject = forms.CharField(
26 widget=forms.TextInput(
27 attrs={'class': 'form-control'}
28 )
29 )
30 content = forms.CharField(
31 widget=forms.Textarea(
32 attrs={'class': 'form-control'}
33 )
34 )
35
36 class Meta:
37 model = ARmessage
38 fields = ('subject', 'content', 'enabled')
39
40 def __init__(self, *args, **kwargs):
41 super(ARmessageForm, self).__init__(*args, **kwargs)
42 self.fields.keyOrder = [
43 'subject', 'content', 'fromdate', 'untildate', 'enabled'
44 ]
45 if 'instance' in kwargs and kwargs['instance'] is not None:
46 self.fields['fromdate'].initial = \
47 kwargs['instance'].fromdate.replace(second=0, microsecond=0)
48 self.fields['untildate'].initial = kwargs['instance'].untildate
49
50 def clean(self):
51 """Custom fields validaton.
52
53 We want to be sure that fromdate < untildate and that they are
54 both in the future ONLY IF the autoreply is beeing activated.
55
56 """
57 super(ARmessageForm, self).clean()
58 if self._errors:
59 raise forms.ValidationError(self._errors)
60 if not self.cleaned_data["enabled"]:
61 return self.cleaned_data
62 if self.cleaned_data["fromdate"] is not None:
63 if self.cleaned_data["fromdate"] < timezone.now():
64 self._errors["fromdate"] = self.error_class(
65 [_("This date is over")])
66 del self.cleaned_data['fromdate']
67 else:
68 self.cleaned_data['fromdate'] = timezone.now()
69 if self.cleaned_data["untildate"] is not None:
70 if self.cleaned_data["untildate"] < timezone.now():
71 self._errors["untildate"] = self.error_class(
72 [_("This date is over")])
73 del self.cleaned_data['untildate']
74 elif 'fromdate' in self.cleaned_data and \
75 self.cleaned_data['untildate'] < self.cleaned_data['fromdate']:
76 self._errors["untildate"] = \
77 self.error_class([_("Must be greater than start date")])
78 del self.cleaned_data['untildate']
79 return self.cleaned_data
80
```
Path: `modoboa/extensions/postfix_autoreply/general_callbacks.py`
Content:
```
1 from django.utils import timezone
2 from django.utils.translation import ugettext_lazy
3 from modoboa.lib import events, parameters
4 from modoboa.extensions.postfix_autoreply.models import Transport, Alias
5 from .models import ARmessage
6
7
8 @events.observe("ExtraUprefsJS")
9 def extra_js(user):
10 return ["""function autoreply_cb() {
11 $('.datefield').datetimepicker({
12 format: 'YYYY-MM-DD hh:mm:ss',
13 language: '%(lang)s'
14 });
15 }
16 """ % {'lang': parameters.get_user(user, "LANG", app="core")}
17 ]
18
19
20 @events.observe("UserMenuDisplay")
21 def menu(target, user):
22 if target != "uprefs_menu":
23 return []
24 if not user.mailbox_set.count():
25 return []
26 return [
27 {"name": "autoreply",
28 "class": "ajaxnav",
29 "url": "autoreply/",
30 "label": ugettext_lazy("Auto-reply message")}
31 ]
32
33
34 @events.observe("DomainCreated")
35 def onDomainCreated(user, domain):
36 transport = Transport()
37 transport.domain = "autoreply.%s" % domain.name
38 transport.method = "autoreply:"
39 transport.save()
40
41
42 @events.observe("DomainModified")
43 def onDomainModified(domain):
44 if domain.oldname == domain.name:
45 return
46 Transport.objects.filter(domain="autoreply.%s" % domain.oldname) \
47 .update(domain="autoreply.%s" % domain.name)
48 for al in Alias.objects.filter(full_address__contains="@%s" % domain.oldname):
49 new_address = al.full_address.replace("@%s" % domain.oldname, "@%s" % domain.name)
50 al.full_address = new_address
51 al.autoreply_address = "%s@autoreply.%s" % (new_address, domain.name)
52 al.save()
53
54
55 @events.observe("DomainDeleted")
56 def onDomainDeleted(domain):
57 Transport.objects.filter(domain="autoreply.%s" % domain.name).delete()
58
59
60 @events.observe("MailboxCreated")
61 def onMailboxCreated(user, mailbox):
62 alias = Alias()
63 alias.full_address = mailbox.full_address
64 alias.autoreply_address = \
65 "%s@autoreply.%s" % (mailbox.full_address, mailbox.domain.name)
66 alias.save()
67
68
69 @events.observe("MailboxDeleted")
70 def onMailboxDeleted(mailboxes):
71 from modoboa.extensions.admin.models import Mailbox
72
73 if isinstance(mailboxes, Mailbox):
74 mailboxes = [mailboxes]
75 for mailbox in mailboxes:
76 try:
77 alias = Alias.objects.get(full_address=mailbox.full_address)
78 except Alias.DoesNotExist:
79 pass
80 else:
81 alias.delete()
82
83
84 @events.observe("MailboxModified")
85 def onMailboxModified(mailbox):
86 if not hasattr(mailbox, 'old_full_address'):
87 return
88 if mailbox.full_address == mailbox.old_full_address:
89 return
90 alias = Alias.objects.get(full_address=mailbox.old_full_address)
91 alias.full_address = mailbox.full_address
92 alias.autoreply_address = \
93 "%s@autoreply.%s" % (mailbox.full_address, mailbox.domain.name)
94 alias.save()
95
96
97 @events.observe("ExtraFormFields")
98 def extra_mailform_fields(form_name, mailbox=None):
99 """Define extra fields to include in mail forms.
100
101 For now, only the auto-reply state can be modified.
102
103 :param str form_name: form name (must be 'mailform')
104 :param Mailbox mailbox: mailbox
105 """
106 from modoboa.lib.formutils import YesNoField
107
108 if form_name != "mailform":
109 return []
110 status = False
111 if mailbox is not None and mailbox.armessage_set.count():
112 status = mailbox.armessage_set.all()[0].enabled
113 return [
114 ('autoreply', YesNoField(
115 label=ugettext_lazy("Enable auto-reply"),
116 initial="yes" if status else "no",
117 help_text=ugettext_lazy("Enable or disable Postfix auto-reply")
118 ))
119 ]
120
121
122 @events.observe("SaveExtraFormFields")
123 def save_extra_mailform_fields(form_name, mailbox, values):
124 """Set the auto-reply status for a mailbox.
125
126 If a corresponding auto-reply message exists, we update its
127 status. Otherwise, we create a message using default values.
128
129 :param str form_name: form name (must be 'mailform')
130 :param Mailbox mailbox: mailbox
131 :param dict values: form values
132 """
133 if form_name != 'mailform':
134 return
135 if mailbox.armessage_set.count():
136 arm = mailbox.armessage_set.all()[0]
137 else:
138 arm = ARmessage(mbox=mailbox)
139 arm.subject = parameters.get_admin("DEFAULT_SUBJECT")
140 arm.content = parameters.get_admin("DEFAULT_CONTENT") \
141 % {'name': mailbox.user.fullname}
142 arm.fromdate = timezone.now()
143 arm.enabled = True if values['autoreply'] == 'yes' else False
144 arm.save()
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modoboa/extensions/postfix_autoreply/forms.py b/modoboa/extensions/postfix_autoreply/forms.py
--- a/modoboa/extensions/postfix_autoreply/forms.py
+++ b/modoboa/extensions/postfix_autoreply/forms.py
@@ -11,7 +11,7 @@
required=False,
help_text=ugettext_lazy("Activate your auto reply from this date"),
widget=forms.TextInput(
- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
+ attrs={'class': 'datefield form-control'}
)
)
untildate = forms.DateTimeField(
@@ -19,7 +19,7 @@
required=False,
help_text=ugettext_lazy("Activate your auto reply until this date"),
widget=forms.TextInput(
- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}
+ attrs={'class': 'datefield form-control'}
)
)
subject = forms.CharField(
diff --git a/modoboa/extensions/postfix_autoreply/general_callbacks.py b/modoboa/extensions/postfix_autoreply/general_callbacks.py
--- a/modoboa/extensions/postfix_autoreply/general_callbacks.py
+++ b/modoboa/extensions/postfix_autoreply/general_callbacks.py
@@ -9,7 +9,7 @@
def extra_js(user):
return ["""function autoreply_cb() {
$('.datefield').datetimepicker({
- format: 'YYYY-MM-DD hh:mm:ss',
+ format: 'YYYY-MM-DD HH:mm:ss',
language: '%(lang)s'
});
}
|
{"golden_diff": "diff --git a/modoboa/extensions/postfix_autoreply/forms.py b/modoboa/extensions/postfix_autoreply/forms.py\n--- a/modoboa/extensions/postfix_autoreply/forms.py\n+++ b/modoboa/extensions/postfix_autoreply/forms.py\n@@ -11,7 +11,7 @@\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply from this date\"),\n widget=forms.TextInput(\n- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n+ attrs={'class': 'datefield form-control'}\n )\n )\n untildate = forms.DateTimeField(\n@@ -19,7 +19,7 @@\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply until this date\"),\n widget=forms.TextInput(\n- attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n+ attrs={'class': 'datefield form-control'}\n )\n )\n subject = forms.CharField(\ndiff --git a/modoboa/extensions/postfix_autoreply/general_callbacks.py b/modoboa/extensions/postfix_autoreply/general_callbacks.py\n--- a/modoboa/extensions/postfix_autoreply/general_callbacks.py\n+++ b/modoboa/extensions/postfix_autoreply/general_callbacks.py\n@@ -9,7 +9,7 @@\n def extra_js(user):\n return [\"\"\"function autoreply_cb() {\n $('.datefield').datetimepicker({\n- format: 'YYYY-MM-DD hh:mm:ss',\n+ format: 'YYYY-MM-DD HH:mm:ss',\n language: '%(lang)s'\n });\n }\n", "issue": "In auto-reply message form, some issue around AM/PM datetime picker\nIn auto-reply message form, some issue around AM/PM datetime picker\n\n\nI can't select AM or PM.\n\nSuggest : enable 24h format\n\n", "before_files": [{"content": "# coding: utf-8\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\nfrom django.utils import timezone\nfrom .models import ARmessage\n\n\nclass ARmessageForm(forms.ModelForm):\n fromdate = forms.DateTimeField(\n label=ugettext_lazy('From'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply from this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n )\n )\n untildate = forms.DateTimeField(\n label=ugettext_lazy('Until'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply until this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control', 'readonly': 'readonly'}\n )\n )\n subject = forms.CharField(\n widget=forms.TextInput(\n attrs={'class': 'form-control'}\n )\n )\n content = forms.CharField(\n widget=forms.Textarea(\n attrs={'class': 'form-control'}\n )\n )\n\n class Meta:\n model = ARmessage\n fields = ('subject', 'content', 'enabled')\n\n def __init__(self, *args, **kwargs):\n super(ARmessageForm, self).__init__(*args, **kwargs)\n self.fields.keyOrder = [\n 'subject', 'content', 'fromdate', 'untildate', 'enabled'\n ]\n if 'instance' in kwargs and kwargs['instance'] is not None:\n self.fields['fromdate'].initial = \\\n kwargs['instance'].fromdate.replace(second=0, microsecond=0)\n self.fields['untildate'].initial = kwargs['instance'].untildate\n\n def clean(self):\n \"\"\"Custom fields validaton.\n\n We want to be sure that fromdate < untildate and that they are\n both in the future ONLY IF the autoreply is beeing activated.\n\n \"\"\"\n super(ARmessageForm, self).clean()\n if self._errors:\n raise forms.ValidationError(self._errors)\n if not self.cleaned_data[\"enabled\"]:\n return self.cleaned_data\n if self.cleaned_data[\"fromdate\"] is not None:\n if self.cleaned_data[\"fromdate\"] < timezone.now():\n self._errors[\"fromdate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['fromdate']\n else:\n 
self.cleaned_data['fromdate'] = timezone.now()\n if self.cleaned_data[\"untildate\"] is not None:\n if self.cleaned_data[\"untildate\"] < timezone.now():\n self._errors[\"untildate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['untildate']\n elif 'fromdate' in self.cleaned_data and \\\n self.cleaned_data['untildate'] < self.cleaned_data['fromdate']:\n self._errors[\"untildate\"] = \\\n self.error_class([_(\"Must be greater than start date\")])\n del self.cleaned_data['untildate']\n return self.cleaned_data\n", "path": "modoboa/extensions/postfix_autoreply/forms.py"}, {"content": "from django.utils import timezone\nfrom django.utils.translation import ugettext_lazy\nfrom modoboa.lib import events, parameters\nfrom modoboa.extensions.postfix_autoreply.models import Transport, Alias\nfrom .models import ARmessage\n\n\[email protected](\"ExtraUprefsJS\")\ndef extra_js(user):\n return [\"\"\"function autoreply_cb() {\n $('.datefield').datetimepicker({\n format: 'YYYY-MM-DD hh:mm:ss',\n language: '%(lang)s'\n });\n}\n\"\"\" % {'lang': parameters.get_user(user, \"LANG\", app=\"core\")}\n ]\n\n\[email protected](\"UserMenuDisplay\")\ndef menu(target, user):\n if target != \"uprefs_menu\":\n return []\n if not user.mailbox_set.count():\n return []\n return [\n {\"name\": \"autoreply\",\n \"class\": \"ajaxnav\",\n \"url\": \"autoreply/\",\n \"label\": ugettext_lazy(\"Auto-reply message\")}\n ]\n\n\[email protected](\"DomainCreated\")\ndef onDomainCreated(user, domain):\n transport = Transport()\n transport.domain = \"autoreply.%s\" % domain.name\n transport.method = \"autoreply:\"\n transport.save()\n\n\[email protected](\"DomainModified\")\ndef onDomainModified(domain):\n if domain.oldname == domain.name:\n return\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.oldname) \\\n .update(domain=\"autoreply.%s\" % domain.name)\n for al in Alias.objects.filter(full_address__contains=\"@%s\" % domain.oldname):\n new_address = al.full_address.replace(\"@%s\" % domain.oldname, \"@%s\" % domain.name)\n al.full_address = new_address\n al.autoreply_address = \"%s@autoreply.%s\" % (new_address, domain.name)\n al.save()\n\n\[email protected](\"DomainDeleted\")\ndef onDomainDeleted(domain):\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.name).delete()\n\n\[email protected](\"MailboxCreated\")\ndef onMailboxCreated(user, mailbox):\n alias = Alias()\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"MailboxDeleted\")\ndef onMailboxDeleted(mailboxes):\n from modoboa.extensions.admin.models import Mailbox\n\n if isinstance(mailboxes, Mailbox):\n mailboxes = [mailboxes]\n for mailbox in mailboxes:\n try:\n alias = Alias.objects.get(full_address=mailbox.full_address)\n except Alias.DoesNotExist:\n pass\n else:\n alias.delete()\n\n\[email protected](\"MailboxModified\")\ndef onMailboxModified(mailbox):\n if not hasattr(mailbox, 'old_full_address'):\n return\n if mailbox.full_address == mailbox.old_full_address:\n return\n alias = Alias.objects.get(full_address=mailbox.old_full_address)\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"ExtraFormFields\")\ndef extra_mailform_fields(form_name, mailbox=None):\n \"\"\"Define extra fields to include in mail forms.\n\n For now, only the auto-reply state can be 
modified.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n \"\"\"\n from modoboa.lib.formutils import YesNoField\n\n if form_name != \"mailform\":\n return []\n status = False\n if mailbox is not None and mailbox.armessage_set.count():\n status = mailbox.armessage_set.all()[0].enabled\n return [\n ('autoreply', YesNoField(\n label=ugettext_lazy(\"Enable auto-reply\"),\n initial=\"yes\" if status else \"no\",\n help_text=ugettext_lazy(\"Enable or disable Postfix auto-reply\")\n ))\n ]\n\n\[email protected](\"SaveExtraFormFields\")\ndef save_extra_mailform_fields(form_name, mailbox, values):\n \"\"\"Set the auto-reply status for a mailbox.\n\n If a corresponding auto-reply message exists, we update its\n status. Otherwise, we create a message using default values.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n :param dict values: form values\n \"\"\"\n if form_name != 'mailform':\n return\n if mailbox.armessage_set.count():\n arm = mailbox.armessage_set.all()[0]\n else:\n arm = ARmessage(mbox=mailbox)\n arm.subject = parameters.get_admin(\"DEFAULT_SUBJECT\")\n arm.content = parameters.get_admin(\"DEFAULT_CONTENT\") \\\n % {'name': mailbox.user.fullname}\n arm.fromdate = timezone.now()\n arm.enabled = True if values['autoreply'] == 'yes' else False\n arm.save()\n", "path": "modoboa/extensions/postfix_autoreply/general_callbacks.py"}], "after_files": [{"content": "# coding: utf-8\nfrom django import forms\nfrom django.utils.translation import ugettext as _, ugettext_lazy\nfrom django.utils import timezone\nfrom .models import ARmessage\n\n\nclass ARmessageForm(forms.ModelForm):\n fromdate = forms.DateTimeField(\n label=ugettext_lazy('From'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply from this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control'}\n )\n )\n untildate = forms.DateTimeField(\n label=ugettext_lazy('Until'),\n required=False,\n help_text=ugettext_lazy(\"Activate your auto reply until this date\"),\n widget=forms.TextInput(\n attrs={'class': 'datefield form-control'}\n )\n )\n subject = forms.CharField(\n widget=forms.TextInput(\n attrs={'class': 'form-control'}\n )\n )\n content = forms.CharField(\n widget=forms.Textarea(\n attrs={'class': 'form-control'}\n )\n )\n\n class Meta:\n model = ARmessage\n fields = ('subject', 'content', 'enabled')\n\n def __init__(self, *args, **kwargs):\n super(ARmessageForm, self).__init__(*args, **kwargs)\n self.fields.keyOrder = [\n 'subject', 'content', 'fromdate', 'untildate', 'enabled'\n ]\n if 'instance' in kwargs and kwargs['instance'] is not None:\n self.fields['fromdate'].initial = \\\n kwargs['instance'].fromdate.replace(second=0, microsecond=0)\n self.fields['untildate'].initial = kwargs['instance'].untildate\n\n def clean(self):\n \"\"\"Custom fields validaton.\n\n We want to be sure that fromdate < untildate and that they are\n both in the future ONLY IF the autoreply is beeing activated.\n\n \"\"\"\n super(ARmessageForm, self).clean()\n if self._errors:\n raise forms.ValidationError(self._errors)\n if not self.cleaned_data[\"enabled\"]:\n return self.cleaned_data\n if self.cleaned_data[\"fromdate\"] is not None:\n if self.cleaned_data[\"fromdate\"] < timezone.now():\n self._errors[\"fromdate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['fromdate']\n else:\n self.cleaned_data['fromdate'] = timezone.now()\n if self.cleaned_data[\"untildate\"] is not None:\n if 
self.cleaned_data[\"untildate\"] < timezone.now():\n self._errors[\"untildate\"] = self.error_class(\n [_(\"This date is over\")])\n del self.cleaned_data['untildate']\n elif 'fromdate' in self.cleaned_data and \\\n self.cleaned_data['untildate'] < self.cleaned_data['fromdate']:\n self._errors[\"untildate\"] = \\\n self.error_class([_(\"Must be greater than start date\")])\n del self.cleaned_data['untildate']\n return self.cleaned_data\n", "path": "modoboa/extensions/postfix_autoreply/forms.py"}, {"content": "from django.utils import timezone\nfrom django.utils.translation import ugettext_lazy\nfrom modoboa.lib import events, parameters\nfrom modoboa.extensions.postfix_autoreply.models import Transport, Alias\nfrom .models import ARmessage\n\n\[email protected](\"ExtraUprefsJS\")\ndef extra_js(user):\n return [\"\"\"function autoreply_cb() {\n $('.datefield').datetimepicker({\n format: 'YYYY-MM-DD HH:mm:ss',\n language: '%(lang)s'\n });\n}\n\"\"\" % {'lang': parameters.get_user(user, \"LANG\", app=\"core\")}\n ]\n\n\[email protected](\"UserMenuDisplay\")\ndef menu(target, user):\n if target != \"uprefs_menu\":\n return []\n if not user.mailbox_set.count():\n return []\n return [\n {\"name\": \"autoreply\",\n \"class\": \"ajaxnav\",\n \"url\": \"autoreply/\",\n \"label\": ugettext_lazy(\"Auto-reply message\")}\n ]\n\n\[email protected](\"DomainCreated\")\ndef onDomainCreated(user, domain):\n transport = Transport()\n transport.domain = \"autoreply.%s\" % domain.name\n transport.method = \"autoreply:\"\n transport.save()\n\n\[email protected](\"DomainModified\")\ndef onDomainModified(domain):\n if domain.oldname == domain.name:\n return\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.oldname) \\\n .update(domain=\"autoreply.%s\" % domain.name)\n for al in Alias.objects.filter(full_address__contains=\"@%s\" % domain.oldname):\n new_address = al.full_address.replace(\"@%s\" % domain.oldname, \"@%s\" % domain.name)\n al.full_address = new_address\n al.autoreply_address = \"%s@autoreply.%s\" % (new_address, domain.name)\n al.save()\n\n\[email protected](\"DomainDeleted\")\ndef onDomainDeleted(domain):\n Transport.objects.filter(domain=\"autoreply.%s\" % domain.name).delete()\n\n\[email protected](\"MailboxCreated\")\ndef onMailboxCreated(user, mailbox):\n alias = Alias()\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"MailboxDeleted\")\ndef onMailboxDeleted(mailboxes):\n from modoboa.extensions.admin.models import Mailbox\n\n if isinstance(mailboxes, Mailbox):\n mailboxes = [mailboxes]\n for mailbox in mailboxes:\n try:\n alias = Alias.objects.get(full_address=mailbox.full_address)\n except Alias.DoesNotExist:\n pass\n else:\n alias.delete()\n\n\[email protected](\"MailboxModified\")\ndef onMailboxModified(mailbox):\n if not hasattr(mailbox, 'old_full_address'):\n return\n if mailbox.full_address == mailbox.old_full_address:\n return\n alias = Alias.objects.get(full_address=mailbox.old_full_address)\n alias.full_address = mailbox.full_address\n alias.autoreply_address = \\\n \"%s@autoreply.%s\" % (mailbox.full_address, mailbox.domain.name)\n alias.save()\n\n\[email protected](\"ExtraFormFields\")\ndef extra_mailform_fields(form_name, mailbox=None):\n \"\"\"Define extra fields to include in mail forms.\n\n For now, only the auto-reply state can be modified.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n 
\"\"\"\n from modoboa.lib.formutils import YesNoField\n\n if form_name != \"mailform\":\n return []\n status = False\n if mailbox is not None and mailbox.armessage_set.count():\n status = mailbox.armessage_set.all()[0].enabled\n return [\n ('autoreply', YesNoField(\n label=ugettext_lazy(\"Enable auto-reply\"),\n initial=\"yes\" if status else \"no\",\n help_text=ugettext_lazy(\"Enable or disable Postfix auto-reply\")\n ))\n ]\n\n\[email protected](\"SaveExtraFormFields\")\ndef save_extra_mailform_fields(form_name, mailbox, values):\n \"\"\"Set the auto-reply status for a mailbox.\n\n If a corresponding auto-reply message exists, we update its\n status. Otherwise, we create a message using default values.\n\n :param str form_name: form name (must be 'mailform')\n :param Mailbox mailbox: mailbox\n :param dict values: form values\n \"\"\"\n if form_name != 'mailform':\n return\n if mailbox.armessage_set.count():\n arm = mailbox.armessage_set.all()[0]\n else:\n arm = ARmessage(mbox=mailbox)\n arm.subject = parameters.get_admin(\"DEFAULT_SUBJECT\")\n arm.content = parameters.get_admin(\"DEFAULT_CONTENT\") \\\n % {'name': mailbox.user.fullname}\n arm.fromdate = timezone.now()\n arm.enabled = True if values['autoreply'] == 'yes' else False\n arm.save()\n", "path": "modoboa/extensions/postfix_autoreply/general_callbacks.py"}]}
| 2,624 | 339 |
gh_patches_debug_11619 | rasdani/github-patches | git_diff | cloudtools__troposphere-440 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Function Role property doesn't work with string value
The following results in a TypeError for the Role property type. I don't get a TypeError if I use GetAtt for Role's type, but I need to use a string. Looks like an issue in which the expected type is a list of possible types, but the validation on types then thinks the value should be a list instead of being one of the types in the list.
```
code = Code(
S3Bucket=self.s3Bucket,
S3Key=self.s3Key
)
func = Function(
"title",
Code=code,
Handler="index.handler",
Role="role-arn-here",
Runtime="nodejs",
)
```
```
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 66, in __init__
self.__setattr__(k, v)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 110, in __setattr__
self._raise_type(name, value, expected_type)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 145, in _raise_type
expected_type))
TypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]
```
Suspected issue spot in `__init__.py:`
```
# If we're expecting a list, then make sure it is a list
if not isinstance(value, list):
self._raise_type(name, value, expected_type)
```
And the definition of Role in `awslambda.py:`
```
class Function(AWSObject):
resource_type = "AWS::Lambda::Function"
props = {
'Code': (Code, True),
'Description': (str, False),
'Handler': (str, True),
'MemorySize': (positive_integer, False),
'Role': ([str, GetAtt], True),
'Runtime': (str, True),
'Timeout': (positive_integer, False),
}
```
--- END ISSUE ---
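A minimal sketch (not troposphere's actual code) of the behaviour the traceback points at: when the expected type declared in `props` is itself a list, the value is treated as a list-valued property, so a bare string is rejected before any per-element type check runs.
```python
def check_prop(name, value, expected_type):
    # Mirrors the suspected branch: a list-typed expectation means
    # "the value must be a list", not "the value may be any of these types".
    if isinstance(expected_type, list):
        if not isinstance(value, list):
            raise TypeError("%s is %s, expected %s" % (name, type(value), expected_type))
        return all(isinstance(v, tuple(expected_type)) for v in value)
    return isinstance(value, expected_type)

print(check_prop("Runtime", "nodejs", str))        # True: single expected type
try:
    check_prop("Role", "role-arn-here", [str])     # fails exactly like the report
except TypeError as exc:
    print(exc)  # Role is <class 'str'>, expected [<class 'str'>]
```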
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/awslambda.py`
Content:
```
1 from . import AWSObject, AWSProperty, GetAtt
2 from .validators import positive_integer
3
4
5 class Code(AWSProperty):
6 props = {
7 'S3Bucket': (basestring, False),
8 'S3Key': (basestring, False),
9 'S3ObjectVersion': (basestring, False),
10 'ZipFile': (basestring, False)
11 }
12
13 def validate(self):
14 zip_file = self.properties.get('ZipFile')
15 s3_bucket = self.properties.get('S3Bucket')
16 s3_key = self.properties.get('S3Key')
17 s3_object_version = self.properties.get('SS3ObjectVersion')
18
19 if zip_file and s3_bucket:
20 raise ValueError("You can't specify both 'S3Bucket' and 'ZipFile'")
21 if zip_file and s3_key:
22 raise ValueError("You can't specify both 'S3Key' and 'ZipFile'")
23 if zip_file and s3_object_version:
24 raise ValueError(
25 "You can't specify both 'S3ObjectVersion' and 'ZipFile'"
26 )
27 if not zip_file and not (s3_bucket and s3_key):
28 raise ValueError(
29 "You must specify a bucket location (both the 'S3Bucket' and "
30 "'S3Key' properties) or the 'ZipFile' property"
31 )
32
33
34 class EventSourceMapping(AWSObject):
35 resource_type = "AWS::Lambda::EventSourceMapping"
36
37 props = {
38 'BatchSize': (positive_integer, False),
39 'Enabled': (bool, False),
40 'EventSourceArn': (basestring, True),
41 'FunctionName': (basestring, True),
42 'StartingPosition': (basestring, True),
43 }
44
45
46 class Function(AWSObject):
47 resource_type = "AWS::Lambda::Function"
48
49 props = {
50 'Code': (Code, True),
51 'Description': (basestring, False),
52 'Handler': (basestring, True),
53 'MemorySize': (positive_integer, False),
54 'Role': ([basestring, GetAtt], True),
55 'Runtime': (basestring, True),
56 'Timeout': (positive_integer, False),
57 }
58
59
60 class Permission(AWSObject):
61 resource_type = "AWS::Lambda::Permission"
62
63 props = {
64 'Action': (basestring, True),
65 'FunctionName': (basestring, True),
66 'Principal': (basestring, True),
67 'SourceAccount': (basestring, False),
68 'SourceArn': (basestring, False),
69 }
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py
--- a/troposphere/awslambda.py
+++ b/troposphere/awslambda.py
@@ -1,4 +1,4 @@
-from . import AWSObject, AWSProperty, GetAtt
+from . import AWSObject, AWSProperty
from .validators import positive_integer
@@ -51,7 +51,7 @@
'Description': (basestring, False),
'Handler': (basestring, True),
'MemorySize': (positive_integer, False),
- 'Role': ([basestring, GetAtt], True),
+ 'Role': (basestring, True),
'Runtime': (basestring, True),
'Timeout': (positive_integer, False),
}
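A short usage sketch under the patched definition, assuming troposphere is installed; the bucket, key, and ARN values below are placeholders, not taken from the issue:
```python
# Hypothetical usage once Role is typed as a plain string.
from troposphere.awslambda import Code, Function

func = Function(
    "MyFunction",
    Code=Code(S3Bucket="example-bucket", S3Key="example.zip"),
    Handler="index.handler",
    Role="arn:aws:iam::123456789012:role/lambda-exec",  # plain ARN string now validates
    Runtime="nodejs",
)
```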
|
{"golden_diff": "diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py\n--- a/troposphere/awslambda.py\n+++ b/troposphere/awslambda.py\n@@ -1,4 +1,4 @@\n-from . import AWSObject, AWSProperty, GetAtt\n+from . import AWSObject, AWSProperty\n from .validators import positive_integer\n \n \n@@ -51,7 +51,7 @@\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n- 'Role': ([basestring, GetAtt], True),\n+ 'Role': (basestring, True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n", "issue": "Function Role property doesn't work with string value\nThe following results in a TypeError for the Role property type. I don't get a TypeError if I use GetAtt for Role's type, but I need to use a string. Looks like an issue in which the expected type is a list of possible types, but the validation on types then thinks the value should be a list instead of being one of the types in the list.\n\n```\ncode = Code(\n S3Bucket=self.s3Bucket,\n S3Key=self.s3Key\n )\n\nfunc = Function(\n \"title\",\n Code=code,\n Handler=\"index.handler\",\n Role=\"role-arn-here\",\n Runtime=\"nodejs\",\n )\n```\n\n```\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 66, in __init__\n self.__setattr__(k, v)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 110, in __setattr__\n self._raise_type(name, value, expected_type)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 145, in _raise_type\n expected_type))\nTypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]\n```\n\nSuspected issue spot in `__init__.py:`\n\n```\n# If we're expecting a list, then make sure it is a list\nif not isinstance(value, list):\n self._raise_type(name, value, expected_type)\n```\n\nAnd the definition of Role in `awslambda.py:`\n\n```\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (str, False),\n 'Handler': (str, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([str, GetAtt], True),\n 'Runtime': (str, True),\n 'Timeout': (positive_integer, False),\n }\n```\n\n", "before_files": [{"content": "from . 
import AWSObject, AWSProperty, GetAtt\nfrom .validators import positive_integer\n\n\nclass Code(AWSProperty):\n props = {\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'ZipFile': (basestring, False)\n }\n\n def validate(self):\n zip_file = self.properties.get('ZipFile')\n s3_bucket = self.properties.get('S3Bucket')\n s3_key = self.properties.get('S3Key')\n s3_object_version = self.properties.get('SS3ObjectVersion')\n\n if zip_file and s3_bucket:\n raise ValueError(\"You can't specify both 'S3Bucket' and 'ZipFile'\")\n if zip_file and s3_key:\n raise ValueError(\"You can't specify both 'S3Key' and 'ZipFile'\")\n if zip_file and s3_object_version:\n raise ValueError(\n \"You can't specify both 'S3ObjectVersion' and 'ZipFile'\"\n )\n if not zip_file and not (s3_bucket and s3_key):\n raise ValueError(\n \"You must specify a bucket location (both the 'S3Bucket' and \"\n \"'S3Key' properties) or the 'ZipFile' property\"\n )\n\n\nclass EventSourceMapping(AWSObject):\n resource_type = \"AWS::Lambda::EventSourceMapping\"\n\n props = {\n 'BatchSize': (positive_integer, False),\n 'Enabled': (bool, False),\n 'EventSourceArn': (basestring, True),\n 'FunctionName': (basestring, True),\n 'StartingPosition': (basestring, True),\n }\n\n\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([basestring, GetAtt], True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n\n\nclass Permission(AWSObject):\n resource_type = \"AWS::Lambda::Permission\"\n\n props = {\n 'Action': (basestring, True),\n 'FunctionName': (basestring, True),\n 'Principal': (basestring, True),\n 'SourceAccount': (basestring, False),\n 'SourceArn': (basestring, False),\n }\n", "path": "troposphere/awslambda.py"}], "after_files": [{"content": "from . 
import AWSObject, AWSProperty\nfrom .validators import positive_integer\n\n\nclass Code(AWSProperty):\n props = {\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'ZipFile': (basestring, False)\n }\n\n def validate(self):\n zip_file = self.properties.get('ZipFile')\n s3_bucket = self.properties.get('S3Bucket')\n s3_key = self.properties.get('S3Key')\n s3_object_version = self.properties.get('SS3ObjectVersion')\n\n if zip_file and s3_bucket:\n raise ValueError(\"You can't specify both 'S3Bucket' and 'ZipFile'\")\n if zip_file and s3_key:\n raise ValueError(\"You can't specify both 'S3Key' and 'ZipFile'\")\n if zip_file and s3_object_version:\n raise ValueError(\n \"You can't specify both 'S3ObjectVersion' and 'ZipFile'\"\n )\n if not zip_file and not (s3_bucket and s3_key):\n raise ValueError(\n \"You must specify a bucket location (both the 'S3Bucket' and \"\n \"'S3Key' properties) or the 'ZipFile' property\"\n )\n\n\nclass EventSourceMapping(AWSObject):\n resource_type = \"AWS::Lambda::EventSourceMapping\"\n\n props = {\n 'BatchSize': (positive_integer, False),\n 'Enabled': (bool, False),\n 'EventSourceArn': (basestring, True),\n 'FunctionName': (basestring, True),\n 'StartingPosition': (basestring, True),\n }\n\n\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n 'Role': (basestring, True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n\n\nclass Permission(AWSObject):\n resource_type = \"AWS::Lambda::Permission\"\n\n props = {\n 'Action': (basestring, True),\n 'FunctionName': (basestring, True),\n 'Principal': (basestring, True),\n 'SourceAccount': (basestring, False),\n 'SourceArn': (basestring, False),\n }\n", "path": "troposphere/awslambda.py"}]}
| 1,461 | 174 |
gh_patches_debug_20882
|
rasdani/github-patches
|
git_diff
|
OpenEnergyPlatform__oeplatform-699
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tutorial form: Markdown for code does not work
See example tutorial
https://openenergy-platform.org/tutorials/12/
This would be important for our beginners tutorial to work:
https://openenergy-platform.org/tutorials/11/
> See also #405
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tutorials/views.py`
Content:
```
1 from django.shortcuts import render, redirect
2 from django.views import View
3 from django.views.generic.edit import CreateView, UpdateView, DeleteView
4 from django.urls import exceptions, reverse_lazy
5 from django.contrib.auth.mixins import LoginRequiredMixin
6
7 import os
8 import json
9
10 from copy import deepcopy
11
12 from django.conf import settings
13
14 from markdown2 import Markdown
15
16 from .forms import TutorialForm
17 from .models import Tutorial
18
19 import re
20 # Create your views here.
21
22 youtubeUrlRegex = re.compile('^.*youtube\.com\/watch\?v=(?P<id>[A-z0-9]+)$')
23
24 def _resolveStaticTutorial(tutorial):
25 try:
26 with open(os.path.join(settings.BASE_DIR, "examples", "build", tutorial["fileName"]), 'r') as buildFile:
27 buildFileContent = buildFile.read()
28
29 return {
30 "html": buildFileContent
31 }
32
33 except:
34 return {"html": "Tutorial is missing"}
35
36
37 def _resolveStaticTutorials():
38 resolvedTutorials = []
39
40 # Load list of static tutorials
41
42 try:
43 with open(os.path.join(settings.BASE_DIR, "examples", "build", 'meta.json'), 'r') as metaFile:
44 metaContent = json.load(metaFile)
45
46 for tutorial in metaContent:
47 rTut = _resolveStaticTutorial(tutorial)
48 resolvedTutorials.append({
49 'id': tutorial['id'],
50 'fileName': tutorial['fileName'],
51 'title': tutorial['title'] or tutorial['fileName'],
52 'html': rTut['html'],
53 })
54
55 return sorted(resolvedTutorials, key=lambda x: x["title"])
56 except Exception as e:
57 print('Static tutorials could not be loaded, error=%s' % e)
58 # If we do not have a generated meta.json or we cannot read them, we just do not return any static
59 # tutorials. This is completly fine and dynamic tutorials can be used like normal.
60 return []
61
62
63 def _resolveDynamicTutorial(evaluatedQs):
64 """
65
66
67 :param evaluatedQs: Evaluated queryset object
68 :return:
69 """
70
71 # Initialize dict that stores a tutorial
72 currentTutorial = {'id': '', 'title': '', 'html': '', 'markdown': '', 'category': '', 'media_src': '', 'level': ''}
73
74 # populate dict
75 currentTutorial.update(id=str(evaluatedQs.id),
76 title=evaluatedQs.title,
77 html=evaluatedQs.html,
78 markdown=evaluatedQs.markdown,
79 category= evaluatedQs.category,
80 media_src= evaluatedQs.media_src,
81 level=evaluatedQs.level)
82
83 return currentTutorial
84
85
86 def _resolveDynamicTutorials(tutorials_qs):
87 """
88 Evaluates a QuerySet and passes each evaluated object to the next function which returns a python
89 dictionary that contains all parameters from the object as dict. The dict is added to a list to
90 later merge the static and dynamic tutorials together.
91
92 :param tutorials_qs:
93 :return:
94 """
95 resolvedTutorials = []
96
97 for tutorial in tutorials_qs:
98 paramsToAdd = _resolveDynamicTutorial(tutorial)
99
100 resolvedTutorials.append(paramsToAdd)
101
102 return resolvedTutorials
103
104
105 def _gatherTutorials(id=None):
106 """
107 Collects all existing tutorials (static/dynamic) and returns them as a list. If an id is
108 specified as parameter a specific tutorial is returned filtered by id.
109
110 :param id:
111 :return:
112 """
113
114 # Retrieve allTutorials objects from db and cache
115 dynamicTutorialsQs = Tutorial.objects.all()
116
117 tutorials = _resolveStaticTutorials()
118 tutorials.extend(_resolveDynamicTutorials(dynamicTutorialsQs))
119
120 if id:
121 filteredElement = list(filter(lambda tutorial: tutorial["id"] == id, tutorials))[0]
122 return filteredElement
123
124 return tutorials
125
126 def _processFormInput(form):
127 tutorial = form.save(commit=False)
128 # Add more information to the dataset like date, time, contributor ...
129
130 if tutorial.media_src:
131 matchResult = youtubeUrlRegex.match(tutorial.media_src)
132 videoId = matchResult.group(1) if matchResult else None
133 if videoId:
134 tutorial.media_src = "https://www.youtube.com/embed/" + videoId
135
136 return tutorial
137
138 def formattedMarkdown(markdown):
139 """
140 A parameter is used to enter a text formatted as markdown that is formatted
141 to html and returned. This functionality is implemented using Markdown2.
142
143 :param markdown:
144 :return:
145 """
146
147 # escapes html but also escapes html code blocks lke "exampel code:
148 # (1 tab) code"
149 # checkbox also not rendered as expected "- [ ]"
150 markdowner = Markdown(safe_mode=True)
151 markdowner.html_removed_text = ""
152
153 return markdowner.convert(markdown)
154
155
156 class ListTutorials(View):
157 def get(self, request):
158 """
159 Load and list the available tutorials.
160
161 :param request: A HTTP-request object sent by the Django framework.
162 :return: Tutorials renderer
163 """
164
165 # Gathering all tutorials
166
167 tutorials = _gatherTutorials()
168
169 return render(
170 request, 'list.html', {"tutorials": tutorials}
171 )
172
173
174 class TutorialDetail(View):
175 def get(self, request, tutorial_id):
176 """
177 Detail view for specific tutorial.
178
179 :param request: A HTTP-request object sent by the Django framework.
180 :return: Tutorials renderer
181 """
182
183 # Gathering all tutorials
184
185 tutorial = _gatherTutorials(tutorial_id)
186
187 return render(
188 request, 'detail.html', {"tutorial": tutorial}
189 )
190
191
192 class CreateNewTutorial(LoginRequiredMixin, CreateView):
193 template_name = 'add.html'
194 redirect_url = 'detail_tutorial'
195 form_class = TutorialForm
196 login_url = '/user/login/'
197 redirect_field_name = 'redirect_to'
198
199 def form_valid(self, form):
200 """
201 validates a form and stores the values in the database and inserts a
202 value for the tutorials field html.
203
204 :param form:
205 :return:
206 """
207
208 tutorial = _processFormInput(form)
209 tutorial.save()
210
211 # Convert markdown to HTML and save to db
212 _html = formattedMarkdown(tutorial.markdown)
213 addHtml = Tutorial.objects.get(pk=tutorial.id)
214 addHtml.html = _html
215 addHtml.save()
216
217 return redirect(self.redirect_url, tutorial_id=tutorial.id)
218
219 def addTutorialFromMarkdownFile(self):
220 pass
221
222
223 class EditTutorials(LoginRequiredMixin, UpdateView):
224 template_name = 'add.html'
225 redirect_url = 'detail_tutorial'
226 model = Tutorial
227 form_class = TutorialForm
228 pk_url_kwarg = 'tutorial_id'
229 login_url = '/user/login/'
230 redirect_field_name = 'redirect_to'
231
232 def form_valid(self, form):
233 """
234 validates a form and stores the values in the database and inserts a
235 value for the tutorials field html.
236
237 :param form:
238 :return:
239 """
240 tutorial = _processFormInput(form)
241 tutorial.save()
242
243 _html = formattedMarkdown(tutorial.markdown)
244 addHtml = Tutorial.objects.get(pk=tutorial.id)
245 addHtml.html = _html
246 addHtml.save()
247
248 return redirect(self.redirect_url, tutorial_id=tutorial.id)
249
250
251 class DeleteTutorial(LoginRequiredMixin, DeleteView):
252 template_name = 'tutorial_confirm_delete.html'
253 model = Tutorial
254 pk_url_kwarg = 'tutorial_id'
255 success_url = reverse_lazy('list_tutorials')
256 login_url = '/user/login/'
257 redirect_field_name = 'redirect_to'
258
259
260
261
262
263
264
265
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tutorials/views.py b/tutorials/views.py
--- a/tutorials/views.py
+++ b/tutorials/views.py
@@ -147,7 +147,8 @@
# escapes html but also escapes html code blocks lke "exampel code:
# (1 tab) code"
# checkbox also not rendered as expected "- [ ]"
- markdowner = Markdown(safe_mode=True)
+ # TODO: Add syntax highliting, add css files -> https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks
+ markdowner = Markdown( extras=["break-on-newline", "fenced-code-blocks"], safe_mode=True)
markdowner.html_removed_text = ""
return markdowner.convert(markdown)
@@ -210,6 +211,7 @@
# Convert markdown to HTML and save to db
_html = formattedMarkdown(tutorial.markdown)
+ print(_html)
addHtml = Tutorial.objects.get(pk=tutorial.id)
addHtml.html = _html
addHtml.save()
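A quick check of the new behaviour, assuming `markdown2` is installed — the `fenced-code-blocks` extra is what lets triple-backtick blocks render as code instead of being stripped by `safe_mode`:
```python
from markdown2 import Markdown

# Same construction as the patched formattedMarkdown() helper.
md = Markdown(extras=["break-on-newline", "fenced-code-blocks"], safe_mode=True)
sample = "Example:\n\n```\nprint('hello tutorials')\n```\n"
# Fenced block is converted to an HTML code block rather than escaped/removed text.
print(md.convert(sample))
```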
|
{"golden_diff": "diff --git a/tutorials/views.py b/tutorials/views.py\n--- a/tutorials/views.py\n+++ b/tutorials/views.py\n@@ -147,7 +147,8 @@\n # escapes html but also escapes html code blocks lke \"exampel code:\n # (1 tab) code\"\n # checkbox also not rendered as expected \"- [ ]\"\n- markdowner = Markdown(safe_mode=True)\n+ # TODO: Add syntax highliting, add css files -> https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks \n+ markdowner = Markdown( extras=[\"break-on-newline\", \"fenced-code-blocks\"], safe_mode=True)\n markdowner.html_removed_text = \"\"\n \n return markdowner.convert(markdown)\n@@ -210,6 +211,7 @@\n \n # Convert markdown to HTML and save to db\n _html = formattedMarkdown(tutorial.markdown)\n+ print(_html)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n", "issue": "Tutorial form: Markdown for code does not work\nSee example tutorial\r\nhttps://openenergy-platform.org/tutorials/12/\r\n\r\nThis would be important for our beginners tutorial to work:\r\nhttps://openenergy-platform.org/tutorials/11/\r\n \r\n\r\n> See also #405 \r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import exceptions, reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nimport os\nimport json\n\nfrom copy import deepcopy\n\nfrom django.conf import settings\n\nfrom markdown2 import Markdown\n\nfrom .forms import TutorialForm\nfrom .models import Tutorial\n\nimport re\n# Create your views here.\n\nyoutubeUrlRegex = re.compile('^.*youtube\\.com\\/watch\\?v=(?P<id>[A-z0-9]+)$')\n\ndef _resolveStaticTutorial(tutorial):\n try:\n with open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", tutorial[\"fileName\"]), 'r') as buildFile:\n buildFileContent = buildFile.read()\n\n return {\n \"html\": buildFileContent\n }\n\n except:\n return {\"html\": \"Tutorial is missing\"}\n\n\ndef _resolveStaticTutorials():\n resolvedTutorials = []\n\n # Load list of static tutorials\n\n try:\n with open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", 'meta.json'), 'r') as metaFile:\n metaContent = json.load(metaFile)\n\n for tutorial in metaContent:\n rTut = _resolveStaticTutorial(tutorial)\n resolvedTutorials.append({\n 'id': tutorial['id'],\n 'fileName': tutorial['fileName'],\n 'title': tutorial['title'] or tutorial['fileName'],\n 'html': rTut['html'],\n })\n\n return sorted(resolvedTutorials, key=lambda x: x[\"title\"])\n except Exception as e:\n print('Static tutorials could not be loaded, error=%s' % e)\n # If we do not have a generated meta.json or we cannot read them, we just do not return any static\n # tutorials. 
This is completly fine and dynamic tutorials can be used like normal.\n return []\n\n\ndef _resolveDynamicTutorial(evaluatedQs):\n \"\"\"\n\n\n :param evaluatedQs: Evaluated queryset object\n :return:\n \"\"\"\n\n # Initialize dict that stores a tutorial\n currentTutorial = {'id': '', 'title': '', 'html': '', 'markdown': '', 'category': '', 'media_src': '', 'level': ''}\n\n # populate dict\n currentTutorial.update(id=str(evaluatedQs.id),\n title=evaluatedQs.title,\n html=evaluatedQs.html,\n markdown=evaluatedQs.markdown,\n category= evaluatedQs.category,\n media_src= evaluatedQs.media_src,\n level=evaluatedQs.level)\n\n return currentTutorial\n\n\ndef _resolveDynamicTutorials(tutorials_qs):\n \"\"\"\n Evaluates a QuerySet and passes each evaluated object to the next function which returns a python\n dictionary that contains all parameters from the object as dict. The dict is added to a list to\n later merge the static and dynamic tutorials together.\n\n :param tutorials_qs:\n :return:\n \"\"\"\n resolvedTutorials = []\n\n for tutorial in tutorials_qs:\n paramsToAdd = _resolveDynamicTutorial(tutorial)\n\n resolvedTutorials.append(paramsToAdd)\n\n return resolvedTutorials\n\n\ndef _gatherTutorials(id=None):\n \"\"\"\n Collects all existing tutorials (static/dynamic) and returns them as a list. If an id is\n specified as parameter a specific tutorial is returned filtered by id.\n\n :param id:\n :return:\n \"\"\"\n\n # Retrieve allTutorials objects from db and cache\n dynamicTutorialsQs = Tutorial.objects.all()\n\n tutorials = _resolveStaticTutorials()\n tutorials.extend(_resolveDynamicTutorials(dynamicTutorialsQs))\n\n if id:\n filteredElement = list(filter(lambda tutorial: tutorial[\"id\"] == id, tutorials))[0]\n return filteredElement\n\n return tutorials\n\ndef _processFormInput(form):\n tutorial = form.save(commit=False)\n # Add more information to the dataset like date, time, contributor ...\n\n if tutorial.media_src:\n matchResult = youtubeUrlRegex.match(tutorial.media_src)\n videoId = matchResult.group(1) if matchResult else None\n if videoId:\n tutorial.media_src = \"https://www.youtube.com/embed/\" + videoId\n\n return tutorial\n\ndef formattedMarkdown(markdown):\n \"\"\"\n A parameter is used to enter a text formatted as markdown that is formatted\n to html and returned. 
This functionality is implemented using Markdown2.\n\n :param markdown:\n :return:\n \"\"\"\n\n # escapes html but also escapes html code blocks lke \"exampel code:\n # (1 tab) code\"\n # checkbox also not rendered as expected \"- [ ]\"\n markdowner = Markdown(safe_mode=True)\n markdowner.html_removed_text = \"\"\n\n return markdowner.convert(markdown)\n\n\nclass ListTutorials(View):\n def get(self, request):\n \"\"\"\n Load and list the available tutorials.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorials = _gatherTutorials()\n\n return render(\n request, 'list.html', {\"tutorials\": tutorials}\n )\n\n\nclass TutorialDetail(View):\n def get(self, request, tutorial_id):\n \"\"\"\n Detail view for specific tutorial.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorial = _gatherTutorials(tutorial_id)\n\n return render(\n request, 'detail.html', {\"tutorial\": tutorial}\n )\n\n\nclass CreateNewTutorial(LoginRequiredMixin, CreateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n form_class = TutorialForm\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n\n tutorial = _processFormInput(form)\n tutorial.save()\n\n # Convert markdown to HTML and save to db\n _html = formattedMarkdown(tutorial.markdown)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n def addTutorialFromMarkdownFile(self):\n pass\n\n\nclass EditTutorials(LoginRequiredMixin, UpdateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n model = Tutorial\n form_class = TutorialForm\n pk_url_kwarg = 'tutorial_id'\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n tutorial = _processFormInput(form)\n tutorial.save()\n\n _html = formattedMarkdown(tutorial.markdown)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n\nclass DeleteTutorial(LoginRequiredMixin, DeleteView):\n template_name = 'tutorial_confirm_delete.html'\n model = Tutorial\n pk_url_kwarg = 'tutorial_id'\n success_url = reverse_lazy('list_tutorials')\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n\n\n\n\n\n\n\n", "path": "tutorials/views.py"}], "after_files": [{"content": "from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.urls import exceptions, reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nimport os\nimport json\n\nfrom copy import deepcopy\n\nfrom django.conf import settings\n\nfrom markdown2 import Markdown\n\nfrom .forms import TutorialForm\nfrom .models import Tutorial\n\nimport re\n# Create your views here.\n\nyoutubeUrlRegex = re.compile('^.*youtube\\.com\\/watch\\?v=(?P<id>[A-z0-9]+)$')\n\ndef _resolveStaticTutorial(tutorial):\n try:\n with 
open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", tutorial[\"fileName\"]), 'r') as buildFile:\n buildFileContent = buildFile.read()\n\n return {\n \"html\": buildFileContent\n }\n\n except:\n return {\"html\": \"Tutorial is missing\"}\n\n\ndef _resolveStaticTutorials():\n resolvedTutorials = []\n\n # Load list of static tutorials\n\n try:\n with open(os.path.join(settings.BASE_DIR, \"examples\", \"build\", 'meta.json'), 'r') as metaFile:\n metaContent = json.load(metaFile)\n\n for tutorial in metaContent:\n rTut = _resolveStaticTutorial(tutorial)\n resolvedTutorials.append({\n 'id': tutorial['id'],\n 'fileName': tutorial['fileName'],\n 'title': tutorial['title'] or tutorial['fileName'],\n 'html': rTut['html'],\n })\n\n return sorted(resolvedTutorials, key=lambda x: x[\"title\"])\n except Exception as e:\n print('Static tutorials could not be loaded, error=%s' % e)\n # If we do not have a generated meta.json or we cannot read them, we just do not return any static\n # tutorials. This is completly fine and dynamic tutorials can be used like normal.\n return []\n\n\ndef _resolveDynamicTutorial(evaluatedQs):\n \"\"\"\n\n\n :param evaluatedQs: Evaluated queryset object\n :return:\n \"\"\"\n\n # Initialize dict that stores a tutorial\n currentTutorial = {'id': '', 'title': '', 'html': '', 'markdown': '', 'category': '', 'media_src': '', 'level': ''}\n\n # populate dict\n currentTutorial.update(id=str(evaluatedQs.id),\n title=evaluatedQs.title,\n html=evaluatedQs.html,\n markdown=evaluatedQs.markdown,\n category= evaluatedQs.category,\n media_src= evaluatedQs.media_src,\n level=evaluatedQs.level)\n\n return currentTutorial\n\n\ndef _resolveDynamicTutorials(tutorials_qs):\n \"\"\"\n Evaluates a QuerySet and passes each evaluated object to the next function which returns a python\n dictionary that contains all parameters from the object as dict. The dict is added to a list to\n later merge the static and dynamic tutorials together.\n\n :param tutorials_qs:\n :return:\n \"\"\"\n resolvedTutorials = []\n\n for tutorial in tutorials_qs:\n paramsToAdd = _resolveDynamicTutorial(tutorial)\n\n resolvedTutorials.append(paramsToAdd)\n\n return resolvedTutorials\n\n\ndef _gatherTutorials(id=None):\n \"\"\"\n Collects all existing tutorials (static/dynamic) and returns them as a list. If an id is\n specified as parameter a specific tutorial is returned filtered by id.\n\n :param id:\n :return:\n \"\"\"\n\n # Retrieve allTutorials objects from db and cache\n dynamicTutorialsQs = Tutorial.objects.all()\n\n tutorials = _resolveStaticTutorials()\n tutorials.extend(_resolveDynamicTutorials(dynamicTutorialsQs))\n\n if id:\n filteredElement = list(filter(lambda tutorial: tutorial[\"id\"] == id, tutorials))[0]\n return filteredElement\n\n return tutorials\n\ndef _processFormInput(form):\n tutorial = form.save(commit=False)\n # Add more information to the dataset like date, time, contributor ...\n\n if tutorial.media_src:\n matchResult = youtubeUrlRegex.match(tutorial.media_src)\n videoId = matchResult.group(1) if matchResult else None\n if videoId:\n tutorial.media_src = \"https://www.youtube.com/embed/\" + videoId\n\n return tutorial\n\ndef formattedMarkdown(markdown):\n \"\"\"\n A parameter is used to enter a text formatted as markdown that is formatted\n to html and returned. 
This functionality is implemented using Markdown2.\n\n :param markdown:\n :return:\n \"\"\"\n\n # escapes html but also escapes html code blocks lke \"exampel code:\n # (1 tab) code\"\n # checkbox also not rendered as expected \"- [ ]\"\n # TODO: Add syntax highliting, add css files -> https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks \n markdowner = Markdown( extras=[\"break-on-newline\", \"fenced-code-blocks\"], safe_mode=True)\n markdowner.html_removed_text = \"\"\n\n return markdowner.convert(markdown)\n\n\nclass ListTutorials(View):\n def get(self, request):\n \"\"\"\n Load and list the available tutorials.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorials = _gatherTutorials()\n\n return render(\n request, 'list.html', {\"tutorials\": tutorials}\n )\n\n\nclass TutorialDetail(View):\n def get(self, request, tutorial_id):\n \"\"\"\n Detail view for specific tutorial.\n\n :param request: A HTTP-request object sent by the Django framework.\n :return: Tutorials renderer\n \"\"\"\n\n # Gathering all tutorials\n\n tutorial = _gatherTutorials(tutorial_id)\n\n return render(\n request, 'detail.html', {\"tutorial\": tutorial}\n )\n\n\nclass CreateNewTutorial(LoginRequiredMixin, CreateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n form_class = TutorialForm\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n\n tutorial = _processFormInput(form)\n tutorial.save()\n\n # Convert markdown to HTML and save to db\n _html = formattedMarkdown(tutorial.markdown)\n print(_html)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n def addTutorialFromMarkdownFile(self):\n pass\n\n\nclass EditTutorials(LoginRequiredMixin, UpdateView):\n template_name = 'add.html'\n redirect_url = 'detail_tutorial'\n model = Tutorial\n form_class = TutorialForm\n pk_url_kwarg = 'tutorial_id'\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n def form_valid(self, form):\n \"\"\"\n validates a form and stores the values in the database and inserts a\n value for the tutorials field html.\n\n :param form:\n :return:\n \"\"\"\n tutorial = _processFormInput(form)\n tutorial.save()\n\n _html = formattedMarkdown(tutorial.markdown)\n addHtml = Tutorial.objects.get(pk=tutorial.id)\n addHtml.html = _html\n addHtml.save()\n\n return redirect(self.redirect_url, tutorial_id=tutorial.id)\n\n\nclass DeleteTutorial(LoginRequiredMixin, DeleteView):\n template_name = 'tutorial_confirm_delete.html'\n model = Tutorial\n pk_url_kwarg = 'tutorial_id'\n success_url = reverse_lazy('list_tutorials')\n login_url = '/user/login/'\n redirect_field_name = 'redirect_to'\n\n\n\n\n\n\n\n\n", "path": "tutorials/views.py"}]}
| 2,678 | 236 |
gh_patches_debug_9363
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1530
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"not in UPPER_CASE_WITH_UNDERSCORES" should not apply to public/external constants
### Describe the issue:
`public`/`external` fields of a contract are exported as functions, so they are part of a public API, and Slither should not report the warning `not in UPPER_CASE_WITH_UNDERSCORES` if these fields are in lowercase.
### Code example to reproduce the issue:
```
contract A { uint256 public myVal; }
```
### Version:
0.8.3
### Relevant log output:
```shell
Constant A.myVal (contracts/A.sol#1) is not in UPPER_CASE_WITH_UNDERSCORES
```
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/naming_convention/naming_convention.py`
Content:
```
1 import re
2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
3 from slither.formatters.naming_convention.naming_convention import custom_format
4
5
6 class NamingConvention(AbstractDetector):
7 """
8 Check if naming conventions are followed
9 https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions
10
11 Exceptions:
12 - Allow constant variables name/symbol/decimals to be lowercase (ERC20)
13 - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters
14 - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'
15 """
16
17 ARGUMENT = "naming-convention"
18 HELP = "Conformity to Solidity naming conventions"
19 IMPACT = DetectorClassification.INFORMATIONAL
20 CONFIDENCE = DetectorClassification.HIGH
21
22 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions"
23
24 WIKI_TITLE = "Conformance to Solidity naming conventions"
25
26 # region wiki_description
27 WIKI_DESCRIPTION = """
28 Solidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.
29 #### Rule exceptions
30 - Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).
31 - Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters."""
32 # endregion wiki_description
33
34 WIKI_RECOMMENDATION = "Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions)."
35
36 STANDARD_JSON = False
37
38 @staticmethod
39 def is_cap_words(name):
40 return re.search("^[A-Z]([A-Za-z0-9]+)?_?$", name) is not None
41
42 @staticmethod
43 def is_mixed_case(name):
44 return re.search("^[a-z]([A-Za-z0-9]+)?_?$", name) is not None
45
46 @staticmethod
47 def is_mixed_case_with_underscore(name):
48 # Allow _ at the beginning to represent private variable
49 # or unused parameters
50 return re.search("^[_]?[a-z]([A-Za-z0-9]+)?_?$", name) is not None
51
52 @staticmethod
53 def is_upper_case_with_underscores(name):
54 return re.search("^[A-Z0-9_]+_?$", name) is not None
55
56 @staticmethod
57 def should_avoid_name(name):
58 return re.search("^[lOI]$", name) is not None
59
60 def _detect(self): # pylint: disable=too-many-branches,too-many-statements
61
62 results = []
63 for contract in self.contracts:
64
65 if not self.is_cap_words(contract.name):
66 info = ["Contract ", contract, " is not in CapWords\n"]
67
68 res = self.generate_result(info)
69 res.add(contract, {"target": "contract", "convention": "CapWords"})
70 results.append(res)
71
72 for struct in contract.structures_declared:
73 if not self.is_cap_words(struct.name):
74 info = ["Struct ", struct, " is not in CapWords\n"]
75
76 res = self.generate_result(info)
77 res.add(struct, {"target": "structure", "convention": "CapWords"})
78 results.append(res)
79
80 for event in contract.events_declared:
81 if not self.is_cap_words(event.name):
82 info = ["Event ", event, " is not in CapWords\n"]
83
84 res = self.generate_result(info)
85 res.add(event, {"target": "event", "convention": "CapWords"})
86 results.append(res)
87
88 for func in contract.functions_declared:
89 if func.is_constructor:
90 continue
91 if not self.is_mixed_case(func.name):
92 if func.visibility in [
93 "internal",
94 "private",
95 ] and self.is_mixed_case_with_underscore(func.name):
96 continue
97 if func.name.startswith(("echidna_", "crytic_")):
98 continue
99 info = ["Function ", func, " is not in mixedCase\n"]
100
101 res = self.generate_result(info)
102 res.add(func, {"target": "function", "convention": "mixedCase"})
103 results.append(res)
104
105 for argument in func.parameters:
106 # Ignore parameter names that are not specified i.e. empty strings
107 if argument.name == "":
108 continue
109 if argument in func.variables_read_or_written:
110 correct_naming = self.is_mixed_case(argument.name)
111 else:
112 correct_naming = self.is_mixed_case_with_underscore(argument.name)
113 if not correct_naming:
114 info = ["Parameter ", argument, " is not in mixedCase\n"]
115
116 res = self.generate_result(info)
117 res.add(argument, {"target": "parameter", "convention": "mixedCase"})
118 results.append(res)
119
120 for var in contract.state_variables_declared:
121 if self.should_avoid_name(var.name):
122 info = [
123 "Variable ",
124 var,
125 " is single letter l, O, or I, which should not be used\n",
126 ]
127
128 res = self.generate_result(info)
129 res.add(
130 var,
131 {
132 "target": "variable",
133 "convention": "l_O_I_should_not_be_used",
134 },
135 )
136 results.append(res)
137
138 if var.is_constant is True:
139 # For ERC20 compatibility
140 if var.name in ["symbol", "name", "decimals"]:
141 continue
142
143 if not self.is_upper_case_with_underscores(var.name):
144 info = [
145 "Constant ",
146 var,
147 " is not in UPPER_CASE_WITH_UNDERSCORES\n",
148 ]
149
150 res = self.generate_result(info)
151 res.add(
152 var,
153 {
154 "target": "variable_constant",
155 "convention": "UPPER_CASE_WITH_UNDERSCORES",
156 },
157 )
158 results.append(res)
159
160 else:
161 if var.visibility == "private":
162 correct_naming = self.is_mixed_case_with_underscore(var.name)
163 else:
164 correct_naming = self.is_mixed_case(var.name)
165 if not correct_naming:
166 info = ["Variable ", var, " is not in mixedCase\n"]
167
168 res = self.generate_result(info)
169 res.add(var, {"target": "variable", "convention": "mixedCase"})
170 results.append(res)
171
172 for enum in contract.enums_declared:
173 if not self.is_cap_words(enum.name):
174 info = ["Enum ", enum, " is not in CapWords\n"]
175
176 res = self.generate_result(info)
177 res.add(enum, {"target": "enum", "convention": "CapWords"})
178 results.append(res)
179
180 for modifier in contract.modifiers_declared:
181 if not self.is_mixed_case(modifier.name):
182 info = ["Modifier ", modifier, " is not in mixedCase\n"]
183
184 res = self.generate_result(info)
185 res.add(modifier, {"target": "modifier", "convention": "mixedCase"})
186 results.append(res)
187
188 return results
189
190 @staticmethod
191 def _format(slither, result):
192 custom_format(slither, result)
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py
--- a/slither/detectors/naming_convention/naming_convention.py
+++ b/slither/detectors/naming_convention/naming_convention.py
@@ -139,7 +139,8 @@
# For ERC20 compatibility
if var.name in ["symbol", "name", "decimals"]:
continue
-
+ if var.visibility == "public":
+ continue
if not self.is_upper_case_with_underscores(var.name):
info = [
"Constant ",
|
{"golden_diff": "diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py\n--- a/slither/detectors/naming_convention/naming_convention.py\n+++ b/slither/detectors/naming_convention/naming_convention.py\n@@ -139,7 +139,8 @@\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n-\n+ if var.visibility == \"public\":\n+ continue\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n", "issue": "\"not in UPPER_CASE_WITH_UNDERSCORES\" should not apply to public/external constants\n### Describe the issue:\r\n\r\n`public`/`external` fields of a contract are exported as functions, so they are part of a public API, and Slither should not report the warning `not in UPPER_CASE_WITH_UNDERSCORES` if these fields are in lowercase.\r\n\r\n### Code example to reproduce the issue:\r\n\r\n```\r\ncontract A { uint256 public myVal; }\r\n```\r\n\r\n### Version:\r\n\r\n0.8.3\r\n\r\n### Relevant log output:\r\n\r\n```shell\r\nConstant A.myVal (contracts/A.sol#1) is not in UPPER_CASE_WITH_UNDERSCORES\r\n```\r\n```\r\n\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.naming_convention.naming_convention import custom_format\n\n\nclass NamingConvention(AbstractDetector):\n \"\"\"\n Check if naming conventions are followed\n https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions\n\n Exceptions:\n - Allow constant variables name/symbol/decimals to be lowercase (ERC20)\n - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters\n - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'\n \"\"\"\n\n ARGUMENT = \"naming-convention\"\n HELP = \"Conformity to Solidity naming conventions\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions\"\n\n WIKI_TITLE = \"Conformance to Solidity naming conventions\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"\nSolidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.\n#### Rule exceptions\n- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).\n- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.\"\"\"\n # endregion wiki_description\n\n WIKI_RECOMMENDATION = \"Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions).\"\n\n STANDARD_JSON = False\n\n @staticmethod\n def is_cap_words(name):\n return re.search(\"^[A-Z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case(name):\n return re.search(\"^[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case_with_underscore(name):\n # Allow _ at the beginning to represent private variable\n # or unused parameters\n return re.search(\"^[_]?[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_upper_case_with_underscores(name):\n return re.search(\"^[A-Z0-9_]+_?$\", name) is not None\n\n @staticmethod\n def should_avoid_name(name):\n return re.search(\"^[lOI]$\", name) is not None\n\n def _detect(self): # 
pylint: disable=too-many-branches,too-many-statements\n\n results = []\n for contract in self.contracts:\n\n if not self.is_cap_words(contract.name):\n info = [\"Contract \", contract, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(contract, {\"target\": \"contract\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for struct in contract.structures_declared:\n if not self.is_cap_words(struct.name):\n info = [\"Struct \", struct, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(struct, {\"target\": \"structure\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for event in contract.events_declared:\n if not self.is_cap_words(event.name):\n info = [\"Event \", event, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(event, {\"target\": \"event\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for func in contract.functions_declared:\n if func.is_constructor:\n continue\n if not self.is_mixed_case(func.name):\n if func.visibility in [\n \"internal\",\n \"private\",\n ] and self.is_mixed_case_with_underscore(func.name):\n continue\n if func.name.startswith((\"echidna_\", \"crytic_\")):\n continue\n info = [\"Function \", func, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(func, {\"target\": \"function\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for argument in func.parameters:\n # Ignore parameter names that are not specified i.e. empty strings\n if argument.name == \"\":\n continue\n if argument in func.variables_read_or_written:\n correct_naming = self.is_mixed_case(argument.name)\n else:\n correct_naming = self.is_mixed_case_with_underscore(argument.name)\n if not correct_naming:\n info = [\"Parameter \", argument, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(argument, {\"target\": \"parameter\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n info = [\n \"Variable \",\n var,\n \" is single letter l, O, or I, which should not be used\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable\",\n \"convention\": \"l_O_I_should_not_be_used\",\n },\n )\n results.append(res)\n\n if var.is_constant is True:\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n var,\n \" is not in UPPER_CASE_WITH_UNDERSCORES\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable_constant\",\n \"convention\": \"UPPER_CASE_WITH_UNDERSCORES\",\n },\n )\n results.append(res)\n\n else:\n if var.visibility == \"private\":\n correct_naming = self.is_mixed_case_with_underscore(var.name)\n else:\n correct_naming = self.is_mixed_case(var.name)\n if not correct_naming:\n info = [\"Variable \", var, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(var, {\"target\": \"variable\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for enum in contract.enums_declared:\n if not self.is_cap_words(enum.name):\n info = [\"Enum \", enum, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(enum, {\"target\": \"enum\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for modifier in contract.modifiers_declared:\n if not self.is_mixed_case(modifier.name):\n info = [\"Modifier \", modifier, \" is 
not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(modifier, {\"target\": \"modifier\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n custom_format(slither, result)\n", "path": "slither/detectors/naming_convention/naming_convention.py"}], "after_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.naming_convention.naming_convention import custom_format\n\n\nclass NamingConvention(AbstractDetector):\n \"\"\"\n Check if naming conventions are followed\n https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions\n\n Exceptions:\n - Allow constant variables name/symbol/decimals to be lowercase (ERC20)\n - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters\n - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'\n \"\"\"\n\n ARGUMENT = \"naming-convention\"\n HELP = \"Conformity to Solidity naming conventions\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions\"\n\n WIKI_TITLE = \"Conformance to Solidity naming conventions\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"\nSolidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.\n#### Rule exceptions\n- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).\n- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.\"\"\"\n # endregion wiki_description\n\n WIKI_RECOMMENDATION = \"Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions).\"\n\n STANDARD_JSON = False\n\n @staticmethod\n def is_cap_words(name):\n return re.search(\"^[A-Z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case(name):\n return re.search(\"^[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case_with_underscore(name):\n # Allow _ at the beginning to represent private variable\n # or unused parameters\n return re.search(\"^[_]?[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_upper_case_with_underscores(name):\n return re.search(\"^[A-Z0-9_]+_?$\", name) is not None\n\n @staticmethod\n def should_avoid_name(name):\n return re.search(\"^[lOI]$\", name) is not None\n\n def _detect(self): # pylint: disable=too-many-branches,too-many-statements\n\n results = []\n for contract in self.contracts:\n\n if not self.is_cap_words(contract.name):\n info = [\"Contract \", contract, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(contract, {\"target\": \"contract\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for struct in contract.structures_declared:\n if not self.is_cap_words(struct.name):\n info = [\"Struct \", struct, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(struct, {\"target\": \"structure\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for event in contract.events_declared:\n if not self.is_cap_words(event.name):\n info = [\"Event \", event, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(event, 
{\"target\": \"event\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for func in contract.functions_declared:\n if func.is_constructor:\n continue\n if not self.is_mixed_case(func.name):\n if func.visibility in [\n \"internal\",\n \"private\",\n ] and self.is_mixed_case_with_underscore(func.name):\n continue\n if func.name.startswith((\"echidna_\", \"crytic_\")):\n continue\n info = [\"Function \", func, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(func, {\"target\": \"function\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for argument in func.parameters:\n # Ignore parameter names that are not specified i.e. empty strings\n if argument.name == \"\":\n continue\n if argument in func.variables_read_or_written:\n correct_naming = self.is_mixed_case(argument.name)\n else:\n correct_naming = self.is_mixed_case_with_underscore(argument.name)\n if not correct_naming:\n info = [\"Parameter \", argument, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(argument, {\"target\": \"parameter\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n info = [\n \"Variable \",\n var,\n \" is single letter l, O, or I, which should not be used\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable\",\n \"convention\": \"l_O_I_should_not_be_used\",\n },\n )\n results.append(res)\n\n if var.is_constant is True:\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n if var.visibility == \"public\":\n continue\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n var,\n \" is not in UPPER_CASE_WITH_UNDERSCORES\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable_constant\",\n \"convention\": \"UPPER_CASE_WITH_UNDERSCORES\",\n },\n )\n results.append(res)\n\n else:\n if var.visibility == \"private\":\n correct_naming = self.is_mixed_case_with_underscore(var.name)\n else:\n correct_naming = self.is_mixed_case(var.name)\n if not correct_naming:\n info = [\"Variable \", var, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(var, {\"target\": \"variable\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for enum in contract.enums_declared:\n if not self.is_cap_words(enum.name):\n info = [\"Enum \", enum, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(enum, {\"target\": \"enum\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for modifier in contract.modifiers_declared:\n if not self.is_mixed_case(modifier.name):\n info = [\"Modifier \", modifier, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(modifier, {\"target\": \"modifier\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n custom_format(slither, result)\n", "path": "slither/detectors/naming_convention/naming_convention.py"}]}
| 2,487 | 144 |
gh_patches_debug_27866
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1247
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Segfault when exiting Command Line Usage Example
<!-- In the following, please describe your issue in detail! -->
<!-- If some of the sections do not apply, just remove them. -->
### Short description
When running the command line example and closing the window, I get a segmentation fault
### Code to reproduce
<!-- Please provide a minimal working example that reproduces the issue in the code block below.
Ideally, this should be a full example someone else could run without additional setup. -->
```bash
python -m pyqtgraph.examples
```
Run the command line usage example, and close out the window.
### Expected behavior
Window closes
### Real behavior
Window closes, generates a segfault
```
/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour
warn(RuntimeWarning(msg))
qt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family "FreeMono" with one that exists to avoid this cost.
Using PyQt5 (default graphics system)
Fatal Python error: Segmentation fault
Current thread 0x000000010840bdc0 (most recent call first):
File "/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py", line 26 in <module>
```
### Tested environment(s)
* PyQtGraph version: 0.11
* Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0
* Python version: 3.7.7
* NumPy version: 1.18.4
* Operating system: macOS Catalina
* Installation method: pip
### Additional context
This is not an issue with 0.11.rc0
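For reference, a stripped-down sketch along the lines of the CLI example (illustrative only; the real code lives in `pyqtgraph/examples/CLIexample.py`), which exercises the `PlotWindow` teardown path involved in the crash:
```python
# Minimal CLI-style usage that exercises PlotWindow teardown on exit.
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui

data = np.random.normal(size=1000)
win = pg.plot(data, title="CLI example")   # returns a (deprecated) PlotWindow
QtGui.QApplication.instance().exec_()      # close the window, then watch for the crash on exit
```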
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsWindows.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 DEPRECATED: The classes below are convenience classes that create a new window
4 containting a single, specific widget. These classes are now unnecessary because
5 it is possible to place any widget into its own window by simply calling its
6 show() method.
7 """
8
9 from .Qt import QtCore, QtGui, mkQApp
10 from .widgets.PlotWidget import *
11 from .imageview import *
12 from .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
13 from .widgets.GraphicsView import GraphicsView
14
15
16 class GraphicsWindow(GraphicsLayoutWidget):
17 """
18 (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)
19
20 Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class
21 is intended for use from the interactive python prompt.
22 """
23 def __init__(self, title=None, size=(800,600), **kargs):
24 mkQApp()
25 GraphicsLayoutWidget.__init__(self, **kargs)
26 self.resize(*size)
27 if title is not None:
28 self.setWindowTitle(title)
29 self.show()
30
31
32 class TabWindow(QtGui.QMainWindow):
33 """
34 (deprecated)
35 """
36 def __init__(self, title=None, size=(800,600)):
37 mkQApp()
38 QtGui.QMainWindow.__init__(self)
39 self.resize(*size)
40 self.cw = QtGui.QTabWidget()
41 self.setCentralWidget(self.cw)
42 if title is not None:
43 self.setWindowTitle(title)
44 self.show()
45
46 def __getattr__(self, attr):
47 return getattr(self.cw, attr)
48
49
50 class PlotWindow(PlotWidget):
51 sigClosed = QtCore.Signal(object)
52
53 """
54 (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)
55 """
56 def __init__(self, title=None, **kargs):
57 mkQApp()
58 PlotWidget.__init__(self, **kargs)
59 if title is not None:
60 self.setWindowTitle(title)
61 self.show()
62
63 def closeEvent(self, event):
64 PlotWidget.closeEvent(self, event)
65 self.sigClosed.emit(self)
66
67
68 class ImageWindow(ImageView):
69 sigClosed = QtCore.Signal(object)
70
71 """
72 (deprecated; use :class:`~pyqtgraph.ImageView` instead)
73 """
74 def __init__(self, *args, **kargs):
75 mkQApp()
76 ImageView.__init__(self)
77 if 'title' in kargs:
78 self.setWindowTitle(kargs['title'])
79 del kargs['title']
80 if len(args) > 0 or len(kargs) > 0:
81 self.setImage(*args, **kargs)
82 self.show()
83
84 def closeEvent(self, event):
85 ImageView.closeEvent(self, event)
86 self.sigClosed.emit(self)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py
--- a/pyqtgraph/graphicsWindows.py
+++ b/pyqtgraph/graphicsWindows.py
@@ -55,10 +55,14 @@
"""
def __init__(self, title=None, **kargs):
mkQApp()
+ self.win = QtGui.QMainWindow()
PlotWidget.__init__(self, **kargs)
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
if title is not None:
- self.setWindowTitle(title)
- self.show()
+ self.win.setWindowTitle(title)
+ self.win.show()
def closeEvent(self, event):
PlotWidget.closeEvent(self, event)
@@ -73,14 +77,20 @@
"""
def __init__(self, *args, **kargs):
mkQApp()
- ImageView.__init__(self)
+ self.win = QtGui.QMainWindow()
+ self.win.resize(800,600)
if 'title' in kargs:
- self.setWindowTitle(kargs['title'])
+ self.win.setWindowTitle(kargs['title'])
del kargs['title']
+ ImageView.__init__(self, self.win)
if len(args) > 0 or len(kargs) > 0:
self.setImage(*args, **kargs)
- self.show()
-
+
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
+ self.win.show()
+
def closeEvent(self, event):
ImageView.closeEvent(self, event)
self.sigClosed.emit(self)
|
{"golden_diff": "diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py\n--- a/pyqtgraph/graphicsWindows.py\n+++ b/pyqtgraph/graphicsWindows.py\n@@ -55,10 +55,14 @@\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n+ self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n if title is not None:\n- self.setWindowTitle(title)\n- self.show()\n+ self.win.setWindowTitle(title)\n+ self.win.show()\n \n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n@@ -73,14 +77,20 @@\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n- ImageView.__init__(self)\n+ self.win = QtGui.QMainWindow()\n+ self.win.resize(800,600)\n if 'title' in kargs:\n- self.setWindowTitle(kargs['title'])\n+ self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n+ ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n- self.show()\n-\n+ \n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n+ self.win.show()\n+ \n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "issue": "Segfault when exiting Command Line Usage Example\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some of the sections do not apply, just remove them. -->\r\n\r\n### Short description\r\nWhen Running the command line example, and closing the window, I get a segmentation fault\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n```bash\r\npython -m pyqtgraph.examples\r\n```\r\n\r\nRun the command line usage example, and close out the window.\r\n\r\n\r\n### Expected behavior\r\nWindow closes\r\n\r\n### Real behavior\r\nWindow closes, generates a segfault\r\n\r\n```\r\n/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\nqt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family \"FreeMono\" with one that exists to avoid this cost.\r\nUsing PyQt5 (default graphics system)\r\nFatal Python error: Segmentation fault\r\n\r\nCurrent thread 0x000000010840bdc0 (most recent call first):\r\n File \"/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py\", line 26 in <module>\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11\r\n * Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0\r\n * Python version: 3.7.7\r\n * NumPy version: 1.18.4\r\n * Operating system: macOS Catalina\r\n * Installation method: pip\r\n\r\n### Additional context\r\n\r\nThis is not an issue with 0.11.rc0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. 
These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)\n \n Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class\n is intended for use from the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n PlotWidget.__init__(self, **kargs)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n\n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n self.sigClosed.emit(self)\n\n\nclass ImageWindow(ImageView):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.ImageView` instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n ImageView.__init__(self)\n if 'title' in kargs:\n self.setWindowTitle(kargs['title'])\n del kargs['title']\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n self.show()\n\n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "path": "pyqtgraph/graphicsWindows.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)\n \n Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. 
This class\n is intended for use from the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n if title is not None:\n self.win.setWindowTitle(title)\n self.win.show()\n\n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n self.sigClosed.emit(self)\n\n\nclass ImageWindow(ImageView):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.ImageView` instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n self.win.resize(800,600)\n if 'title' in kargs:\n self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n \n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n self.win.show()\n \n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "path": "pyqtgraph/graphicsWindows.py"}]}
| 1,476 | 383 |
gh_patches_debug_7757
|
rasdani/github-patches
|
git_diff
|
conda__conda-6847
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conda(py2) Installation to a path (ascii characters only) with non-ascii characters in username
Create a user with non-ASCII characters in the name, and try to install Miniconda2 to a path with only ASCII characters:
```
C:\Users\AndrΓ©sGarcΓa\Downloads>start /wait "" Miniconda2-4.3.30.2-Windows-x86_64.exe
/InstallationType=JustMe /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=C:\AG\mc2
```
This will throw a menuinst traceback, ignore for now. See https://github.com/ContinuumIO/menuinst/issues/54
Then try to activate the base environment:
```
C:\Users\AndrΓ©sGarcΓa\Downloads>cd /d C:\AG\mc2
C:\AG\mc2>Scripts\activate
Traceback (most recent call last):
File "C:\AG\mc2\Scripts\conda-script.py", line 10, in <module>
sys.exit(main())
File "C:\AG\mc2\lib\site-packages\conda\cli\main.py", line 178, in main
init_loggers()
File "C:\AG\mc2\lib\site-packages\conda\cli\main.py", line 84, in init_loggers
from ..console import setup_verbose_handlers
File "C:\AG\mc2\lib\site-packages\conda\console.py", line 9, in <module>
from .base.context import context
File "C:\AG\mc2\lib\site-packages\conda\base\context.py", line 771, in <module>
context = Context(SEARCH_PATH, APP_NAME, None)
File "C:\AG\mc2\lib\site-packages\conda\common\configuration.py", line 727, in __init__
self._set_search_path(search_path)
File "C:\AG\mc2\lib\site-packages\conda\common\configuration.py", line 745, in _set_search_path
self._set_raw_data(load_file_configs(search_path))
File "C:\AG\mc2\lib\site-packages\conda\common\configuration.py", line 364, in load_file_configs
expanded_paths = tuple(expand(path) for path in search_path)
File "C:\AG\mc2\lib\site-packages\conda\common\configuration.py", line 364, in <genexpr>
expanded_paths = tuple(expand(path) for path in search_path)
File "C:\AG\mc2\lib\site-packages\conda\common\path.py", line 247, in expand
return abspath(expanduser(expandvars(path)))
File "C:\AG\mc2\lib\ntpath.py", line 311, in expanduser
return userhome + path[i:]
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe9 in position 13: ordinal not in range(128)
C:\AG\mc2>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/common/path.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from functools import reduce
5 from logging import getLogger
6 import os
7 from os.path import abspath, basename, expanduser, expandvars, join, normpath, split, splitext
8 import re
9 import subprocess
10
11 from .compat import on_win, string_types
12 from .. import CondaError
13 from .._vendor.auxlib.decorators import memoize
14
15 try:
16 # Python 3
17 from urllib.parse import unquote, urlsplit
18 from urllib.request import url2pathname
19 except ImportError: # pragma: no cover
20 # Python 2
21 from urllib import unquote, url2pathname # NOQA
22 from urlparse import urlsplit # NOQA
23
24 try:
25 from cytoolz.itertoolz import accumulate, concat, take
26 except ImportError: # pragma: no cover
27 from .._vendor.toolz.itertoolz import accumulate, concat, take
28
29 log = getLogger(__name__)
30
31 PATH_MATCH_REGEX = (
32 r"\./" # ./
33 r"|\.\." # ..
34 r"|~" # ~
35 r"|/" # /
36 r"|[a-zA-Z]:[/\\]" # drive letter, colon, forward or backslash
37 r"|\\\\" # windows UNC path
38 r"|//" # windows UNC path
39 )
40
41
42 def is_path(value):
43 if '://' in value:
44 return False
45 return re.match(PATH_MATCH_REGEX, value)
46
47
48 def expand(path):
49 return abspath(expanduser(expandvars(path)))
50
51
52 def paths_equal(path1, path2):
53 """
54 Examples:
55 >>> paths_equal('/a/b/c', '/a/b/c/d/..')
56 True
57
58 """
59 return normpath(abspath(path1)) == normpath(abspath(path2))
60
61
62 @memoize
63 def url_to_path(url):
64 """Convert a file:// URL to a path.
65
66 Relative file URLs (i.e. `file:relative/path`) are not supported.
67 """
68 if is_path(url):
69 return url
70 if not url.startswith("file://"): # pragma: no cover
71 raise CondaError("You can only turn absolute file: urls into paths (not %s)" % url)
72 _, netloc, path, _, _ = urlsplit(url)
73 path = unquote(path)
74 if netloc not in ('', 'localhost', '127.0.0.1', '::1'):
75 if not netloc.startswith('\\\\'):
76 # The only net location potentially accessible is a Windows UNC path
77 netloc = '//' + netloc
78 else:
79 netloc = ''
80 # Handle Windows drive letters if present
81 if re.match('^/([a-z])[:|]', path, re.I):
82 path = path[1] + ':' + path[3:]
83 return netloc + path
84
85
86 def tokenized_startswith(test_iterable, startswith_iterable):
87 return all(t == sw for t, sw in zip(test_iterable, startswith_iterable))
88
89
90 def get_all_directories(files):
91 directories = sorted(set(tuple(f.split('/')[:-1]) for f in files))
92 return directories or ()
93
94
95 def get_leaf_directories(files):
96 # type: (List[str]) -> List[str]
97 # give this function a list of files, and it will hand back a list of leaf directories to
98 # pass to os.makedirs()
99 directories = get_all_directories(files)
100 if not directories:
101 return ()
102
103 leaves = []
104
105 def _process(x, y):
106 if not tokenized_startswith(y, x):
107 leaves.append(x)
108 return y
109 last = reduce(_process, directories)
110
111 if not leaves:
112 leaves.append(directories[-1])
113 elif not tokenized_startswith(last, leaves[-1]):
114 leaves.append(last)
115
116 return tuple('/'.join(leaf) for leaf in leaves)
117
118
119 def explode_directories(child_directories, already_split=False):
120 # get all directories including parents
121 # use already_split=True for the result of get_all_directories()
122 maybe_split = lambda x: x if already_split else x.split('/')
123 return set(concat(accumulate(join, maybe_split(directory)) for directory in child_directories))
124
125
126 def pyc_path(py_path, python_major_minor_version):
127 pyver_string = python_major_minor_version.replace('.', '')
128 if pyver_string.startswith('2'):
129 return py_path + 'c'
130 else:
131 directory, py_file = split(py_path)
132 basename_root, extension = splitext(py_file)
133 pyc_file = "__pycache__/%s.cpython-%s%sc" % (basename_root, pyver_string, extension)
134 return "%s/%s" % (directory, pyc_file) if directory else pyc_file
135
136
137 def missing_pyc_files(python_major_minor_version, files):
138 # returns a tuple of tuples, with the inner tuple being the .py file and the missing .pyc file
139 py_files = (f for f in files if f.endswith('.py'))
140 pyc_matches = ((py_file, pyc_path(py_file, python_major_minor_version))
141 for py_file in py_files)
142 result = tuple(match for match in pyc_matches if match[1] not in files)
143 return result
144
145
146 def parse_entry_point_def(ep_definition):
147 cmd_mod, func = ep_definition.rsplit(':', 1)
148 command, module = cmd_mod.rsplit("=", 1)
149 command, module, func = command.strip(), module.strip(), func.strip()
150 return command, module, func
151
152
153 def get_python_short_path(python_version=None):
154 if on_win:
155 return "python.exe"
156 if python_version and '.' not in python_version:
157 python_version = '.'.join(python_version)
158 return join("bin", "python%s" % (python_version or ''))
159
160
161 def get_python_site_packages_short_path(python_version):
162 if python_version is None:
163 return None
164 elif on_win:
165 return 'Lib/site-packages'
166 else:
167 py_ver = get_major_minor_version(python_version)
168 return 'lib/python%s/site-packages' % py_ver
169
170
171 def get_major_minor_version(string, with_dot=True):
172 # returns None if not found, otherwise two digits as a string
173 # should work for
174 # - 3.5.2
175 # - 27
176 # - bin/python2.7
177 # - lib/python34/site-packages/
178 # the last two are dangers because windows doesn't have version information there
179 assert isinstance(string, string_types)
180 digits = tuple(take(2, (c for c in string if c.isdigit())))
181 if len(digits) == 2:
182 return '.'.join(digits) if with_dot else ''.join(digits)
183 return None
184
185
186 def get_bin_directory_short_path():
187 return 'Scripts' if on_win else 'bin'
188
189
190 def win_path_ok(path):
191 return path.replace('/', '\\') if on_win else path
192
193
194 def win_path_double_escape(path):
195 return path.replace('\\', '\\\\') if on_win else path
196
197
198 def win_path_backout(path):
199 # replace all backslashes except those escaping spaces
200 # if we pass a file url, something like file://\\unc\path\on\win, make sure
201 # we clean that up too
202 return re.sub(r"(\\(?! ))", r"/", path).replace(':////', '://')
203
204
205 def ensure_pad(name, pad="_"):
206 """
207
208 Examples:
209 >>> ensure_pad('conda')
210 '_conda_'
211
212 """
213 return name and "%s%s%s" % (pad, name.strip(pad), pad)
214
215
216 def is_private_env_name(env_name):
217 """
218
219 Examples:
220 >>> is_private_env_name("_conda")
221 False
222 >>> is_private_env_name("_conda_")
223 True
224
225 """
226 return env_name and env_name[0] == env_name[-1] == "_"
227
228
229 def is_private_env_path(env_path):
230 """
231
232 Examples:
233 >>> is_private_env_path('/some/path/to/envs/_conda_')
234 True
235 >>> is_private_env_path('/not/an/envs_dir/_conda_')
236 False
237
238 """
239 if env_path is not None:
240 envs_directory, env_name = split(env_path)
241 if basename(envs_directory) != "envs":
242 return False
243 return is_private_env_name(env_name)
244 return False
245
246
247 def right_pad_os_sep(path):
248 return path if path.endswith(os.sep) else path + os.sep
249
250
251 def split_filename(path_or_url):
252 dn, fn = split(path_or_url)
253 return (dn or None, fn) if '.' in fn else (path_or_url, None)
254
255
256 def get_python_noarch_target_path(source_short_path, target_site_packages_short_path):
257 if source_short_path.startswith('site-packages/'):
258 sp_dir = target_site_packages_short_path
259 return source_short_path.replace('site-packages', sp_dir, 1)
260 elif source_short_path.startswith('python-scripts/'):
261 bin_dir = get_bin_directory_short_path()
262 return source_short_path.replace('python-scripts', bin_dir, 1)
263 else:
264 return source_short_path
265
266
267 def win_path_to_unix(path, root_prefix=""):
268 # If the user wishes to drive conda from MSYS2 itself while also having
269 # msys2 packages in their environment this allows the path conversion to
270 # happen relative to the actual shell. The onus is on the user to set
271 # CYGPATH to e.g. /usr/bin/cygpath.exe (this will be translated to e.g.
272 # (C:\msys32\usr\bin\cygpath.exe by MSYS2) to ensure this one is used.
273 if not path:
274 return ''
275 cygpath = os.environ.get('CYGPATH', 'cygpath.exe')
276 try:
277 path = subprocess.check_output([cygpath, '-up', path]).decode('ascii').split('\n')[0]
278 except Exception as e:
279 log.debug('%r' % e, exc_info=True)
280 # Convert a path or ;-separated string of paths into a unix representation
281 # Does not add cygdrive. If you need that, set root_prefix to "/cygdrive"
282 def _translation(found_path): # NOQA
283 found = found_path.group(1).replace("\\", "/").replace(":", "").replace("//", "/")
284 return root_prefix + "/" + found
285 path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\/\\\\]+(?:[^:*?"<>|]+[\/\\\\]+)*[^:*?"<>|;\/\\\\]+?(?![a-zA-Z]:))' # noqa
286 path = re.sub(path_re, _translation, path).replace(";/", ":/")
287 return path
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/common/path.py b/conda/common/path.py
--- a/conda/common/path.py
+++ b/conda/common/path.py
@@ -8,7 +8,7 @@
import re
import subprocess
-from .compat import on_win, string_types
+from .compat import PY2, ensure_fs_path_encoding, on_win, string_types
from .. import CondaError
from .._vendor.auxlib.decorators import memoize
@@ -46,6 +46,8 @@
def expand(path):
+ if on_win and PY2:
+ path = ensure_fs_path_encoding(path)
return abspath(expanduser(expandvars(path)))
|
{"golden_diff": "diff --git a/conda/common/path.py b/conda/common/path.py\n--- a/conda/common/path.py\n+++ b/conda/common/path.py\n@@ -8,7 +8,7 @@\n import re\n import subprocess\n \n-from .compat import on_win, string_types\n+from .compat import PY2, ensure_fs_path_encoding, on_win, string_types\n from .. import CondaError\n from .._vendor.auxlib.decorators import memoize\n \n@@ -46,6 +46,8 @@\n \n \n def expand(path):\n+ if on_win and PY2:\n+ path = ensure_fs_path_encoding(path)\n return abspath(expanduser(expandvars(path)))\n", "issue": "Conda(py2) Installation to a path (ascii characters only) with non-ascii characters in username\nCreate a user with non-ascii characters in name. And try to install Miniconda2 to a path with only ascii characters\r\n```\r\nC:\\Users\\Andr\u00e9sGarc\u00eda\\Downloads>start /wait \"\" Miniconda2-4.3.30.2-Windows-x86_64.exe \r\n/InstallationType=JustMe /AddToPath=0 /RegisterPython=0 /NoRegistry=1 /S /D=C:\\AG\\mc2\r\n```\r\nThis will throw a menuinst traceback, ignore for now. See https://github.com/ContinuumIO/menuinst/issues/54\r\n\r\nThen try to activate the base environment:\r\n```\r\nC:\\Users\\Andr\u00e9sGarc\u00eda\\Downloads>cd /d C:\\AG\\mc2\r\n\r\nC:\\AG\\mc2>Scripts\\activate\r\nTraceback (most recent call last):\r\n File \"C:\\AG\\mc2\\Scripts\\conda-script.py\", line 10, in <module>\r\n sys.exit(main())\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\cli\\main.py\", line 178, in main\r\n init_loggers()\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\cli\\main.py\", line 84, in init_loggers\r\n from ..console import setup_verbose_handlers\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\console.py\", line 9, in <module>\r\n from .base.context import context\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\base\\context.py\", line 771, in <module>\r\n context = Context(SEARCH_PATH, APP_NAME, None)\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\common\\configuration.py\", line 727, in __init__\r\n self._set_search_path(search_path)\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\common\\configuration.py\", line 745, in _set_search_path\r\n self._set_raw_data(load_file_configs(search_path))\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\common\\configuration.py\", line 364, in load_file_configs\r\n expanded_paths = tuple(expand(path) for path in search_path)\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\common\\configuration.py\", line 364, in <genexpr>\r\n expanded_paths = tuple(expand(path) for path in search_path)\r\n File \"C:\\AG\\mc2\\lib\\site-packages\\conda\\common\\path.py\", line 247, in expand\r\n return abspath(expanduser(expandvars(path)))\r\n File \"C:\\AG\\mc2\\lib\\ntpath.py\", line 311, in expanduser\r\n return userhome + path[i:]\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe9 in position 13: ordinal not in range(128)\r\n\r\nC:\\AG\\mc2>\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import reduce\nfrom logging import getLogger\nimport os\nfrom os.path import abspath, basename, expanduser, expandvars, join, normpath, split, splitext\nimport re\nimport subprocess\n\nfrom .compat import on_win, string_types\nfrom .. 
import CondaError\nfrom .._vendor.auxlib.decorators import memoize\n\ntry:\n # Python 3\n from urllib.parse import unquote, urlsplit\n from urllib.request import url2pathname\nexcept ImportError: # pragma: no cover\n # Python 2\n from urllib import unquote, url2pathname # NOQA\n from urlparse import urlsplit # NOQA\n\ntry:\n from cytoolz.itertoolz import accumulate, concat, take\nexcept ImportError: # pragma: no cover\n from .._vendor.toolz.itertoolz import accumulate, concat, take\n\nlog = getLogger(__name__)\n\nPATH_MATCH_REGEX = (\n r\"\\./\" # ./\n r\"|\\.\\.\" # ..\n r\"|~\" # ~\n r\"|/\" # /\n r\"|[a-zA-Z]:[/\\\\]\" # drive letter, colon, forward or backslash\n r\"|\\\\\\\\\" # windows UNC path\n r\"|//\" # windows UNC path\n)\n\n\ndef is_path(value):\n if '://' in value:\n return False\n return re.match(PATH_MATCH_REGEX, value)\n\n\ndef expand(path):\n return abspath(expanduser(expandvars(path)))\n\n\ndef paths_equal(path1, path2):\n \"\"\"\n Examples:\n >>> paths_equal('/a/b/c', '/a/b/c/d/..')\n True\n\n \"\"\"\n return normpath(abspath(path1)) == normpath(abspath(path2))\n\n\n@memoize\ndef url_to_path(url):\n \"\"\"Convert a file:// URL to a path.\n\n Relative file URLs (i.e. `file:relative/path`) are not supported.\n \"\"\"\n if is_path(url):\n return url\n if not url.startswith(\"file://\"): # pragma: no cover\n raise CondaError(\"You can only turn absolute file: urls into paths (not %s)\" % url)\n _, netloc, path, _, _ = urlsplit(url)\n path = unquote(path)\n if netloc not in ('', 'localhost', '127.0.0.1', '::1'):\n if not netloc.startswith('\\\\\\\\'):\n # The only net location potentially accessible is a Windows UNC path\n netloc = '//' + netloc\n else:\n netloc = ''\n # Handle Windows drive letters if present\n if re.match('^/([a-z])[:|]', path, re.I):\n path = path[1] + ':' + path[3:]\n return netloc + path\n\n\ndef tokenized_startswith(test_iterable, startswith_iterable):\n return all(t == sw for t, sw in zip(test_iterable, startswith_iterable))\n\n\ndef get_all_directories(files):\n directories = sorted(set(tuple(f.split('/')[:-1]) for f in files))\n return directories or ()\n\n\ndef get_leaf_directories(files):\n # type: (List[str]) -> List[str]\n # give this function a list of files, and it will hand back a list of leaf directories to\n # pass to os.makedirs()\n directories = get_all_directories(files)\n if not directories:\n return ()\n\n leaves = []\n\n def _process(x, y):\n if not tokenized_startswith(y, x):\n leaves.append(x)\n return y\n last = reduce(_process, directories)\n\n if not leaves:\n leaves.append(directories[-1])\n elif not tokenized_startswith(last, leaves[-1]):\n leaves.append(last)\n\n return tuple('/'.join(leaf) for leaf in leaves)\n\n\ndef explode_directories(child_directories, already_split=False):\n # get all directories including parents\n # use already_split=True for the result of get_all_directories()\n maybe_split = lambda x: x if already_split else x.split('/')\n return set(concat(accumulate(join, maybe_split(directory)) for directory in child_directories))\n\n\ndef pyc_path(py_path, python_major_minor_version):\n pyver_string = python_major_minor_version.replace('.', '')\n if pyver_string.startswith('2'):\n return py_path + 'c'\n else:\n directory, py_file = split(py_path)\n basename_root, extension = splitext(py_file)\n pyc_file = \"__pycache__/%s.cpython-%s%sc\" % (basename_root, pyver_string, extension)\n return \"%s/%s\" % (directory, pyc_file) if directory else pyc_file\n\n\ndef missing_pyc_files(python_major_minor_version, files):\n # 
returns a tuple of tuples, with the inner tuple being the .py file and the missing .pyc file\n py_files = (f for f in files if f.endswith('.py'))\n pyc_matches = ((py_file, pyc_path(py_file, python_major_minor_version))\n for py_file in py_files)\n result = tuple(match for match in pyc_matches if match[1] not in files)\n return result\n\n\ndef parse_entry_point_def(ep_definition):\n cmd_mod, func = ep_definition.rsplit(':', 1)\n command, module = cmd_mod.rsplit(\"=\", 1)\n command, module, func = command.strip(), module.strip(), func.strip()\n return command, module, func\n\n\ndef get_python_short_path(python_version=None):\n if on_win:\n return \"python.exe\"\n if python_version and '.' not in python_version:\n python_version = '.'.join(python_version)\n return join(\"bin\", \"python%s\" % (python_version or ''))\n\n\ndef get_python_site_packages_short_path(python_version):\n if python_version is None:\n return None\n elif on_win:\n return 'Lib/site-packages'\n else:\n py_ver = get_major_minor_version(python_version)\n return 'lib/python%s/site-packages' % py_ver\n\n\ndef get_major_minor_version(string, with_dot=True):\n # returns None if not found, otherwise two digits as a string\n # should work for\n # - 3.5.2\n # - 27\n # - bin/python2.7\n # - lib/python34/site-packages/\n # the last two are dangers because windows doesn't have version information there\n assert isinstance(string, string_types)\n digits = tuple(take(2, (c for c in string if c.isdigit())))\n if len(digits) == 2:\n return '.'.join(digits) if with_dot else ''.join(digits)\n return None\n\n\ndef get_bin_directory_short_path():\n return 'Scripts' if on_win else 'bin'\n\n\ndef win_path_ok(path):\n return path.replace('/', '\\\\') if on_win else path\n\n\ndef win_path_double_escape(path):\n return path.replace('\\\\', '\\\\\\\\') if on_win else path\n\n\ndef win_path_backout(path):\n # replace all backslashes except those escaping spaces\n # if we pass a file url, something like file://\\\\unc\\path\\on\\win, make sure\n # we clean that up too\n return re.sub(r\"(\\\\(?! ))\", r\"/\", path).replace(':////', '://')\n\n\ndef ensure_pad(name, pad=\"_\"):\n \"\"\"\n\n Examples:\n >>> ensure_pad('conda')\n '_conda_'\n\n \"\"\"\n return name and \"%s%s%s\" % (pad, name.strip(pad), pad)\n\n\ndef is_private_env_name(env_name):\n \"\"\"\n\n Examples:\n >>> is_private_env_name(\"_conda\")\n False\n >>> is_private_env_name(\"_conda_\")\n True\n\n \"\"\"\n return env_name and env_name[0] == env_name[-1] == \"_\"\n\n\ndef is_private_env_path(env_path):\n \"\"\"\n\n Examples:\n >>> is_private_env_path('/some/path/to/envs/_conda_')\n True\n >>> is_private_env_path('/not/an/envs_dir/_conda_')\n False\n\n \"\"\"\n if env_path is not None:\n envs_directory, env_name = split(env_path)\n if basename(envs_directory) != \"envs\":\n return False\n return is_private_env_name(env_name)\n return False\n\n\ndef right_pad_os_sep(path):\n return path if path.endswith(os.sep) else path + os.sep\n\n\ndef split_filename(path_or_url):\n dn, fn = split(path_or_url)\n return (dn or None, fn) if '.' 
in fn else (path_or_url, None)\n\n\ndef get_python_noarch_target_path(source_short_path, target_site_packages_short_path):\n if source_short_path.startswith('site-packages/'):\n sp_dir = target_site_packages_short_path\n return source_short_path.replace('site-packages', sp_dir, 1)\n elif source_short_path.startswith('python-scripts/'):\n bin_dir = get_bin_directory_short_path()\n return source_short_path.replace('python-scripts', bin_dir, 1)\n else:\n return source_short_path\n\n\ndef win_path_to_unix(path, root_prefix=\"\"):\n # If the user wishes to drive conda from MSYS2 itself while also having\n # msys2 packages in their environment this allows the path conversion to\n # happen relative to the actual shell. The onus is on the user to set\n # CYGPATH to e.g. /usr/bin/cygpath.exe (this will be translated to e.g.\n # (C:\\msys32\\usr\\bin\\cygpath.exe by MSYS2) to ensure this one is used.\n if not path:\n return ''\n cygpath = os.environ.get('CYGPATH', 'cygpath.exe')\n try:\n path = subprocess.check_output([cygpath, '-up', path]).decode('ascii').split('\\n')[0]\n except Exception as e:\n log.debug('%r' % e, exc_info=True)\n # Convert a path or ;-separated string of paths into a unix representation\n # Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n def _translation(found_path): # NOQA\n found = found_path.group(1).replace(\"\\\\\", \"/\").replace(\":\", \"\").replace(\"//\", \"/\")\n return root_prefix + \"/\" + found\n path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\\/\\\\\\\\]+(?:[^:*?\"<>|]+[\\/\\\\\\\\]+)*[^:*?\"<>|;\\/\\\\\\\\]+?(?![a-zA-Z]:))' # noqa\n path = re.sub(path_re, _translation, path).replace(\";/\", \":/\")\n return path\n", "path": "conda/common/path.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom functools import reduce\nfrom logging import getLogger\nimport os\nfrom os.path import abspath, basename, expanduser, expandvars, join, normpath, split, splitext\nimport re\nimport subprocess\n\nfrom .compat import PY2, ensure_fs_path_encoding, on_win, string_types\nfrom .. import CondaError\nfrom .._vendor.auxlib.decorators import memoize\n\ntry:\n # Python 3\n from urllib.parse import unquote, urlsplit\n from urllib.request import url2pathname\nexcept ImportError: # pragma: no cover\n # Python 2\n from urllib import unquote, url2pathname # NOQA\n from urlparse import urlsplit # NOQA\n\ntry:\n from cytoolz.itertoolz import accumulate, concat, take\nexcept ImportError: # pragma: no cover\n from .._vendor.toolz.itertoolz import accumulate, concat, take\n\nlog = getLogger(__name__)\n\nPATH_MATCH_REGEX = (\n r\"\\./\" # ./\n r\"|\\.\\.\" # ..\n r\"|~\" # ~\n r\"|/\" # /\n r\"|[a-zA-Z]:[/\\\\]\" # drive letter, colon, forward or backslash\n r\"|\\\\\\\\\" # windows UNC path\n r\"|//\" # windows UNC path\n)\n\n\ndef is_path(value):\n if '://' in value:\n return False\n return re.match(PATH_MATCH_REGEX, value)\n\n\ndef expand(path):\n if on_win and PY2:\n path = ensure_fs_path_encoding(path)\n return abspath(expanduser(expandvars(path)))\n\n\ndef paths_equal(path1, path2):\n \"\"\"\n Examples:\n >>> paths_equal('/a/b/c', '/a/b/c/d/..')\n True\n\n \"\"\"\n return normpath(abspath(path1)) == normpath(abspath(path2))\n\n\n@memoize\ndef url_to_path(url):\n \"\"\"Convert a file:// URL to a path.\n\n Relative file URLs (i.e. 
`file:relative/path`) are not supported.\n \"\"\"\n if is_path(url):\n return url\n if not url.startswith(\"file://\"): # pragma: no cover\n raise CondaError(\"You can only turn absolute file: urls into paths (not %s)\" % url)\n _, netloc, path, _, _ = urlsplit(url)\n path = unquote(path)\n if netloc not in ('', 'localhost', '127.0.0.1', '::1'):\n if not netloc.startswith('\\\\\\\\'):\n # The only net location potentially accessible is a Windows UNC path\n netloc = '//' + netloc\n else:\n netloc = ''\n # Handle Windows drive letters if present\n if re.match('^/([a-z])[:|]', path, re.I):\n path = path[1] + ':' + path[3:]\n return netloc + path\n\n\ndef tokenized_startswith(test_iterable, startswith_iterable):\n return all(t == sw for t, sw in zip(test_iterable, startswith_iterable))\n\n\ndef get_all_directories(files):\n directories = sorted(set(tuple(f.split('/')[:-1]) for f in files))\n return directories or ()\n\n\ndef get_leaf_directories(files):\n # type: (List[str]) -> List[str]\n # give this function a list of files, and it will hand back a list of leaf directories to\n # pass to os.makedirs()\n directories = get_all_directories(files)\n if not directories:\n return ()\n\n leaves = []\n\n def _process(x, y):\n if not tokenized_startswith(y, x):\n leaves.append(x)\n return y\n last = reduce(_process, directories)\n\n if not leaves:\n leaves.append(directories[-1])\n elif not tokenized_startswith(last, leaves[-1]):\n leaves.append(last)\n\n return tuple('/'.join(leaf) for leaf in leaves)\n\n\ndef explode_directories(child_directories, already_split=False):\n # get all directories including parents\n # use already_split=True for the result of get_all_directories()\n maybe_split = lambda x: x if already_split else x.split('/')\n return set(concat(accumulate(join, maybe_split(directory)) for directory in child_directories))\n\n\ndef pyc_path(py_path, python_major_minor_version):\n pyver_string = python_major_minor_version.replace('.', '')\n if pyver_string.startswith('2'):\n return py_path + 'c'\n else:\n directory, py_file = split(py_path)\n basename_root, extension = splitext(py_file)\n pyc_file = \"__pycache__/%s.cpython-%s%sc\" % (basename_root, pyver_string, extension)\n return \"%s/%s\" % (directory, pyc_file) if directory else pyc_file\n\n\ndef missing_pyc_files(python_major_minor_version, files):\n # returns a tuple of tuples, with the inner tuple being the .py file and the missing .pyc file\n py_files = (f for f in files if f.endswith('.py'))\n pyc_matches = ((py_file, pyc_path(py_file, python_major_minor_version))\n for py_file in py_files)\n result = tuple(match for match in pyc_matches if match[1] not in files)\n return result\n\n\ndef parse_entry_point_def(ep_definition):\n cmd_mod, func = ep_definition.rsplit(':', 1)\n command, module = cmd_mod.rsplit(\"=\", 1)\n command, module, func = command.strip(), module.strip(), func.strip()\n return command, module, func\n\n\ndef get_python_short_path(python_version=None):\n if on_win:\n return \"python.exe\"\n if python_version and '.' 
not in python_version:\n python_version = '.'.join(python_version)\n return join(\"bin\", \"python%s\" % (python_version or ''))\n\n\ndef get_python_site_packages_short_path(python_version):\n if python_version is None:\n return None\n elif on_win:\n return 'Lib/site-packages'\n else:\n py_ver = get_major_minor_version(python_version)\n return 'lib/python%s/site-packages' % py_ver\n\n\ndef get_major_minor_version(string, with_dot=True):\n # returns None if not found, otherwise two digits as a string\n # should work for\n # - 3.5.2\n # - 27\n # - bin/python2.7\n # - lib/python34/site-packages/\n # the last two are dangers because windows doesn't have version information there\n assert isinstance(string, string_types)\n digits = tuple(take(2, (c for c in string if c.isdigit())))\n if len(digits) == 2:\n return '.'.join(digits) if with_dot else ''.join(digits)\n return None\n\n\ndef get_bin_directory_short_path():\n return 'Scripts' if on_win else 'bin'\n\n\ndef win_path_ok(path):\n return path.replace('/', '\\\\') if on_win else path\n\n\ndef win_path_double_escape(path):\n return path.replace('\\\\', '\\\\\\\\') if on_win else path\n\n\ndef win_path_backout(path):\n # replace all backslashes except those escaping spaces\n # if we pass a file url, something like file://\\\\unc\\path\\on\\win, make sure\n # we clean that up too\n return re.sub(r\"(\\\\(?! ))\", r\"/\", path).replace(':////', '://')\n\n\ndef ensure_pad(name, pad=\"_\"):\n \"\"\"\n\n Examples:\n >>> ensure_pad('conda')\n '_conda_'\n\n \"\"\"\n return name and \"%s%s%s\" % (pad, name.strip(pad), pad)\n\n\ndef is_private_env_name(env_name):\n \"\"\"\n\n Examples:\n >>> is_private_env_name(\"_conda\")\n False\n >>> is_private_env_name(\"_conda_\")\n True\n\n \"\"\"\n return env_name and env_name[0] == env_name[-1] == \"_\"\n\n\ndef is_private_env_path(env_path):\n \"\"\"\n\n Examples:\n >>> is_private_env_path('/some/path/to/envs/_conda_')\n True\n >>> is_private_env_path('/not/an/envs_dir/_conda_')\n False\n\n \"\"\"\n if env_path is not None:\n envs_directory, env_name = split(env_path)\n if basename(envs_directory) != \"envs\":\n return False\n return is_private_env_name(env_name)\n return False\n\n\ndef right_pad_os_sep(path):\n return path if path.endswith(os.sep) else path + os.sep\n\n\ndef split_filename(path_or_url):\n dn, fn = split(path_or_url)\n return (dn or None, fn) if '.' in fn else (path_or_url, None)\n\n\ndef get_python_noarch_target_path(source_short_path, target_site_packages_short_path):\n if source_short_path.startswith('site-packages/'):\n sp_dir = target_site_packages_short_path\n return source_short_path.replace('site-packages', sp_dir, 1)\n elif source_short_path.startswith('python-scripts/'):\n bin_dir = get_bin_directory_short_path()\n return source_short_path.replace('python-scripts', bin_dir, 1)\n else:\n return source_short_path\n\n\ndef win_path_to_unix(path, root_prefix=\"\"):\n # If the user wishes to drive conda from MSYS2 itself while also having\n # msys2 packages in their environment this allows the path conversion to\n # happen relative to the actual shell. The onus is on the user to set\n # CYGPATH to e.g. 
/usr/bin/cygpath.exe (this will be translated to e.g.\n # (C:\\msys32\\usr\\bin\\cygpath.exe by MSYS2) to ensure this one is used.\n if not path:\n return ''\n cygpath = os.environ.get('CYGPATH', 'cygpath.exe')\n try:\n path = subprocess.check_output([cygpath, '-up', path]).decode('ascii').split('\\n')[0]\n except Exception as e:\n log.debug('%r' % e, exc_info=True)\n # Convert a path or ;-separated string of paths into a unix representation\n # Does not add cygdrive. If you need that, set root_prefix to \"/cygdrive\"\n def _translation(found_path): # NOQA\n found = found_path.group(1).replace(\"\\\\\", \"/\").replace(\":\", \"\").replace(\"//\", \"/\")\n return root_prefix + \"/\" + found\n path_re = '(?<![:/^a-zA-Z])([a-zA-Z]:[\\/\\\\\\\\]+(?:[^:*?\"<>|]+[\\/\\\\\\\\]+)*[^:*?\"<>|;\\/\\\\\\\\]+?(?![a-zA-Z]:))' # noqa\n path = re.sub(path_re, _translation, path).replace(\";/\", \":/\")\n return path\n", "path": "conda/common/path.py"}]}
| 4,086 | 144 |
gh_patches_debug_15978
|
rasdani/github-patches
|
git_diff
|
NVIDIA-Merlin__NVTabular-455
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEA] Warn on parquet row group sizes out of recommended bounds
**Is your feature request related to a problem? Please describe.**
We should warn on parquet files that contain row groups bigger than recommended - with actionable links and information for our customers
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvtabular/io/parquet.py`
Content:
```
1 #
2 # Copyright (c) 2020, NVIDIA CORPORATION.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 import functools
17 import logging
18 import os
19 import threading
20 import warnings
21 from io import BytesIO
22 from uuid import uuid4
23
24 import cudf
25 import dask_cudf
26 from cudf.io.parquet import ParquetWriter as pwriter
27 from dask.utils import natural_sort_key
28 from pyarrow import parquet as pq
29
30 from .dataset_engine import DatasetEngine
31 from .shuffle import Shuffle, _shuffle_gdf
32 from .writer import ThreadedWriter
33
34 LOG = logging.getLogger("nvtabular")
35
36
37 class ParquetDatasetEngine(DatasetEngine):
38 """ParquetDatasetEngine is a Dask-based version of cudf.read_parquet."""
39
40 def __init__(
41 self,
42 paths,
43 part_size,
44 storage_options,
45 row_groups_per_part=None,
46 legacy=False,
47 batch_size=None, # Ignored
48 ):
49 super().__init__(paths, part_size, storage_options)
50 if row_groups_per_part is None:
51 path0 = self._dataset.pieces[0].path
52 rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))
53 row_groups_per_part = self.part_size / rg_byte_size_0
54 if row_groups_per_part < 1.0:
55 warnings.warn(
56 f"Row group size {rg_byte_size_0} is bigger than requested part_size "
57 f"{self.part_size}"
58 )
59 row_groups_per_part = 1.0
60
61 self.row_groups_per_part = int(row_groups_per_part)
62
63 assert self.row_groups_per_part > 0
64
65 @property
66 @functools.lru_cache(1)
67 def _dataset(self):
68 paths = self.paths
69 fs = self.fs
70 if len(paths) > 1:
71 # This is a list of files
72 dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)
73 elif fs.isdir(paths[0]):
74 # This is a directory
75 dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)
76 else:
77 # This is a single file
78 dataset = pq.ParquetDataset(paths[0], filesystem=fs)
79 return dataset
80
81 @property
82 @functools.lru_cache(1)
83 def num_rows(self):
84 # TODO: Avoid parsing metadata here if we can confirm upstream dask
85 # can get the length efficiently (in all practical cases)
86 dataset = self._dataset
87 if dataset.metadata:
88 # We have a metadata file
89 return dataset.metadata.num_rows
90 else:
91 # Sum up row-group sizes manually
92 num_rows = 0
93 for piece in dataset.pieces:
94 num_rows += piece.get_metadata().num_rows
95 return num_rows
96
97 def to_ddf(self, columns=None):
98 return dask_cudf.read_parquet(
99 self.paths,
100 columns=columns,
101 # can't omit reading the index in if we aren't being passed columns
102 index=None if columns is None else False,
103 gather_statistics=False,
104 split_row_groups=self.row_groups_per_part,
105 storage_options=self.storage_options,
106 )
107
108
109 class ParquetWriter(ThreadedWriter):
110 def __init__(self, out_dir, **kwargs):
111 super().__init__(out_dir, **kwargs)
112 self.data_paths = []
113 self.data_writers = []
114 self.data_bios = []
115 self._lock = threading.RLock()
116
117 def _get_filename(self, i):
118 if self.use_guid:
119 fn = f"{i}.{guid()}.parquet"
120 else:
121 fn = f"{i}.parquet"
122
123 return os.path.join(self.out_dir, fn)
124
125 def _get_or_create_writer(self, idx):
126 # lazily initializes a writer for the given index
127 with self._lock:
128 while len(self.data_writers) <= idx:
129 path = self._get_filename(len(self.data_writers))
130 self.data_paths.append(path)
131 if self.bytes_io:
132 bio = BytesIO()
133 self.data_bios.append(bio)
134 self.data_writers.append(pwriter(bio, compression=None))
135 else:
136 self.data_writers.append(pwriter(path, compression=None))
137
138 return self.data_writers[idx]
139
140 def _write_table(self, idx, data, has_list_column=False):
141 if has_list_column:
142 # currently cudf doesn't support chunked parquet writers with list columns
143 # write out a new file, rather than stream multiple chunks to a single file
144 filename = self._get_filename(len(self.data_paths))
145 data.to_parquet(filename)
146 self.data_paths.append(filename)
147 else:
148 writer = self._get_or_create_writer(idx)
149 writer.write_table(data)
150
151 def _write_thread(self):
152 while True:
153 item = self.queue.get()
154 try:
155 if item is self._eod:
156 break
157 idx, data = item
158 with self.write_locks[idx]:
159 self._write_table(idx, data, False)
160 finally:
161 self.queue.task_done()
162
163 @classmethod
164 def write_special_metadata(cls, md, fs, out_dir):
165 # Sort metadata by file name and convert list of
166 # tuples to a list of metadata byte-blobs
167 md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]
168
169 # Aggregate metadata and write _metadata file
170 _write_pq_metadata_file(md_list, fs, out_dir)
171
172 def _close_writers(self):
173 md_dict = {}
174 for writer, path in zip(self.data_writers, self.data_paths):
175 fn = path.split(self.fs.sep)[-1]
176 md_dict[fn] = writer.close(metadata_file_path=fn)
177 return md_dict
178
179 def _bytesio_to_disk(self):
180 for bio, path in zip(self.data_bios, self.data_paths):
181 gdf = cudf.io.read_parquet(bio, index=False)
182 bio.close()
183 if self.shuffle == Shuffle.PER_WORKER:
184 gdf = _shuffle_gdf(gdf)
185 gdf.to_parquet(path, compression=None, index=False)
186 return
187
188
189 def _write_pq_metadata_file(md_list, fs, path):
190 """ Converts list of parquet metadata objects into a single shared _metadata file. """
191 if md_list:
192 metadata_path = fs.sep.join([path, "_metadata"])
193 _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]
194 with fs.open(metadata_path, "wb") as fil:
195 _meta.tofile(fil)
196 return
197
198
199 def guid():
200 """Simple utility function to get random hex string"""
201 return uuid4().hex
202
203
204 def _memory_usage(df):
205 """this function is a workaround of a problem with getting memory usage of lists
206 in cudf0.16. This can be deleted and just use `df.memory_usage(deep= True, index=True).sum()`
207 once we are using cudf 0.17 (fixed in https://github.com/rapidsai/cudf/pull/6549)"""
208 size = 0
209 for col in df._data.columns:
210 if cudf.utils.dtypes.is_list_dtype(col.dtype):
211 for child in col.base_children:
212 size += child.__sizeof__()
213 else:
214 size += col._memory_usage(deep=True)
215 size += df.index.memory_usage(deep=True)
216 return size
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py
--- a/nvtabular/io/parquet.py
+++ b/nvtabular/io/parquet.py
@@ -53,8 +53,12 @@
row_groups_per_part = self.part_size / rg_byte_size_0
if row_groups_per_part < 1.0:
warnings.warn(
- f"Row group size {rg_byte_size_0} is bigger than requested part_size "
- f"{self.part_size}"
+ f"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger"
+ f" than requested part_size ({self.part_size}) for the NVTabular dataset."
+ f"A row group memory size of 128 MB is generally recommended. You can find"
+ f" info on how to set the row group size of parquet files in "
+ f"https://nvidia.github.io/NVTabular/main/HowItWorks.html"
+ f"#getting-your-data-ready-for-nvtabular"
)
row_groups_per_part = 1.0
|
{"golden_diff": "diff --git a/nvtabular/io/parquet.py b/nvtabular/io/parquet.py\n--- a/nvtabular/io/parquet.py\n+++ b/nvtabular/io/parquet.py\n@@ -53,8 +53,12 @@\n row_groups_per_part = self.part_size / rg_byte_size_0\n if row_groups_per_part < 1.0:\n warnings.warn(\n- f\"Row group size {rg_byte_size_0} is bigger than requested part_size \"\n- f\"{self.part_size}\"\n+ f\"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger\"\n+ f\" than requested part_size ({self.part_size}) for the NVTabular dataset.\"\n+ f\"A row group memory size of 128 MB is generally recommended. You can find\"\n+ f\" info on how to set the row group size of parquet files in \"\n+ f\"https://nvidia.github.io/NVTabular/main/HowItWorks.html\"\n+ f\"#getting-your-data-ready-for-nvtabular\"\n )\n row_groups_per_part = 1.0\n", "issue": "[FEA] Warn on parquet row group sizes out of recommended bounds\n**Is your feature request related to a problem? Please describe.**\r\nWe should warn on parquet files that contain row groups bigger than recommended - with actionable links and information for our customers\r\n\n", "before_files": [{"content": "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\nimport logging\nimport os\nimport threading\nimport warnings\nfrom io import BytesIO\nfrom uuid import uuid4\n\nimport cudf\nimport dask_cudf\nfrom cudf.io.parquet import ParquetWriter as pwriter\nfrom dask.utils import natural_sort_key\nfrom pyarrow import parquet as pq\n\nfrom .dataset_engine import DatasetEngine\nfrom .shuffle import Shuffle, _shuffle_gdf\nfrom .writer import ThreadedWriter\n\nLOG = logging.getLogger(\"nvtabular\")\n\n\nclass ParquetDatasetEngine(DatasetEngine):\n \"\"\"ParquetDatasetEngine is a Dask-based version of cudf.read_parquet.\"\"\"\n\n def __init__(\n self,\n paths,\n part_size,\n storage_options,\n row_groups_per_part=None,\n legacy=False,\n batch_size=None, # Ignored\n ):\n super().__init__(paths, part_size, storage_options)\n if row_groups_per_part is None:\n path0 = self._dataset.pieces[0].path\n rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))\n row_groups_per_part = self.part_size / rg_byte_size_0\n if row_groups_per_part < 1.0:\n warnings.warn(\n f\"Row group size {rg_byte_size_0} is bigger than requested part_size \"\n f\"{self.part_size}\"\n )\n row_groups_per_part = 1.0\n\n self.row_groups_per_part = int(row_groups_per_part)\n\n assert self.row_groups_per_part > 0\n\n @property\n @functools.lru_cache(1)\n def _dataset(self):\n paths = self.paths\n fs = self.fs\n if len(paths) > 1:\n # This is a list of files\n dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)\n elif fs.isdir(paths[0]):\n # This is a directory\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)\n else:\n # This is a single file\n dataset = pq.ParquetDataset(paths[0], filesystem=fs)\n return dataset\n\n @property\n @functools.lru_cache(1)\n def num_rows(self):\n 
# TODO: Avoid parsing metadata here if we can confirm upstream dask\n # can get the length efficiently (in all practical cases)\n dataset = self._dataset\n if dataset.metadata:\n # We have a metadata file\n return dataset.metadata.num_rows\n else:\n # Sum up row-group sizes manually\n num_rows = 0\n for piece in dataset.pieces:\n num_rows += piece.get_metadata().num_rows\n return num_rows\n\n def to_ddf(self, columns=None):\n return dask_cudf.read_parquet(\n self.paths,\n columns=columns,\n # can't omit reading the index in if we aren't being passed columns\n index=None if columns is None else False,\n gather_statistics=False,\n split_row_groups=self.row_groups_per_part,\n storage_options=self.storage_options,\n )\n\n\nclass ParquetWriter(ThreadedWriter):\n def __init__(self, out_dir, **kwargs):\n super().__init__(out_dir, **kwargs)\n self.data_paths = []\n self.data_writers = []\n self.data_bios = []\n self._lock = threading.RLock()\n\n def _get_filename(self, i):\n if self.use_guid:\n fn = f\"{i}.{guid()}.parquet\"\n else:\n fn = f\"{i}.parquet\"\n\n return os.path.join(self.out_dir, fn)\n\n def _get_or_create_writer(self, idx):\n # lazily initializes a writer for the given index\n with self._lock:\n while len(self.data_writers) <= idx:\n path = self._get_filename(len(self.data_writers))\n self.data_paths.append(path)\n if self.bytes_io:\n bio = BytesIO()\n self.data_bios.append(bio)\n self.data_writers.append(pwriter(bio, compression=None))\n else:\n self.data_writers.append(pwriter(path, compression=None))\n\n return self.data_writers[idx]\n\n def _write_table(self, idx, data, has_list_column=False):\n if has_list_column:\n # currently cudf doesn't support chunked parquet writers with list columns\n # write out a new file, rather than stream multiple chunks to a single file\n filename = self._get_filename(len(self.data_paths))\n data.to_parquet(filename)\n self.data_paths.append(filename)\n else:\n writer = self._get_or_create_writer(idx)\n writer.write_table(data)\n\n def _write_thread(self):\n while True:\n item = self.queue.get()\n try:\n if item is self._eod:\n break\n idx, data = item\n with self.write_locks[idx]:\n self._write_table(idx, data, False)\n finally:\n self.queue.task_done()\n\n @classmethod\n def write_special_metadata(cls, md, fs, out_dir):\n # Sort metadata by file name and convert list of\n # tuples to a list of metadata byte-blobs\n md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]\n\n # Aggregate metadata and write _metadata file\n _write_pq_metadata_file(md_list, fs, out_dir)\n\n def _close_writers(self):\n md_dict = {}\n for writer, path in zip(self.data_writers, self.data_paths):\n fn = path.split(self.fs.sep)[-1]\n md_dict[fn] = writer.close(metadata_file_path=fn)\n return md_dict\n\n def _bytesio_to_disk(self):\n for bio, path in zip(self.data_bios, self.data_paths):\n gdf = cudf.io.read_parquet(bio, index=False)\n bio.close()\n if self.shuffle == Shuffle.PER_WORKER:\n gdf = _shuffle_gdf(gdf)\n gdf.to_parquet(path, compression=None, index=False)\n return\n\n\ndef _write_pq_metadata_file(md_list, fs, path):\n \"\"\" Converts list of parquet metadata objects into a single shared _metadata file. 
\"\"\"\n if md_list:\n metadata_path = fs.sep.join([path, \"_metadata\"])\n _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.tofile(fil)\n return\n\n\ndef guid():\n \"\"\"Simple utility function to get random hex string\"\"\"\n return uuid4().hex\n\n\ndef _memory_usage(df):\n \"\"\"this function is a workaround of a problem with getting memory usage of lists\n in cudf0.16. This can be deleted and just use `df.memory_usage(deep= True, index=True).sum()`\n once we are using cudf 0.17 (fixed in https://github.com/rapidsai/cudf/pull/6549)\"\"\"\n size = 0\n for col in df._data.columns:\n if cudf.utils.dtypes.is_list_dtype(col.dtype):\n for child in col.base_children:\n size += child.__sizeof__()\n else:\n size += col._memory_usage(deep=True)\n size += df.index.memory_usage(deep=True)\n return size\n", "path": "nvtabular/io/parquet.py"}], "after_files": [{"content": "#\n# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport functools\nimport logging\nimport os\nimport threading\nimport warnings\nfrom io import BytesIO\nfrom uuid import uuid4\n\nimport cudf\nimport dask_cudf\nfrom cudf.io.parquet import ParquetWriter as pwriter\nfrom dask.utils import natural_sort_key\nfrom pyarrow import parquet as pq\n\nfrom .dataset_engine import DatasetEngine\nfrom .shuffle import Shuffle, _shuffle_gdf\nfrom .writer import ThreadedWriter\n\nLOG = logging.getLogger(\"nvtabular\")\n\n\nclass ParquetDatasetEngine(DatasetEngine):\n \"\"\"ParquetDatasetEngine is a Dask-based version of cudf.read_parquet.\"\"\"\n\n def __init__(\n self,\n paths,\n part_size,\n storage_options,\n row_groups_per_part=None,\n legacy=False,\n batch_size=None, # Ignored\n ):\n super().__init__(paths, part_size, storage_options)\n if row_groups_per_part is None:\n path0 = self._dataset.pieces[0].path\n rg_byte_size_0 = _memory_usage(cudf.io.read_parquet(path0, row_groups=0, row_group=0))\n row_groups_per_part = self.part_size / rg_byte_size_0\n if row_groups_per_part < 1.0:\n warnings.warn(\n f\"Row group memory size ({rg_byte_size_0}) (bytes) of parquet file is bigger\"\n f\" than requested part_size ({self.part_size}) for the NVTabular dataset.\"\n f\"A row group memory size of 128 MB is generally recommended. 
You can find\"\n f\" info on how to set the row group size of parquet files in \"\n f\"https://nvidia.github.io/NVTabular/main/HowItWorks.html\"\n f\"#getting-your-data-ready-for-nvtabular\"\n )\n row_groups_per_part = 1.0\n\n self.row_groups_per_part = int(row_groups_per_part)\n\n assert self.row_groups_per_part > 0\n\n @property\n @functools.lru_cache(1)\n def _dataset(self):\n paths = self.paths\n fs = self.fs\n if len(paths) > 1:\n # This is a list of files\n dataset = pq.ParquetDataset(paths, filesystem=fs, validate_schema=False)\n elif fs.isdir(paths[0]):\n # This is a directory\n dataset = pq.ParquetDataset(paths[0], filesystem=fs, validate_schema=False)\n else:\n # This is a single file\n dataset = pq.ParquetDataset(paths[0], filesystem=fs)\n return dataset\n\n @property\n @functools.lru_cache(1)\n def num_rows(self):\n # TODO: Avoid parsing metadata here if we can confirm upstream dask\n # can get the length efficiently (in all practical cases)\n dataset = self._dataset\n if dataset.metadata:\n # We have a metadata file\n return dataset.metadata.num_rows\n else:\n # Sum up row-group sizes manually\n num_rows = 0\n for piece in dataset.pieces:\n num_rows += piece.get_metadata().num_rows\n return num_rows\n\n def to_ddf(self, columns=None):\n return dask_cudf.read_parquet(\n self.paths,\n columns=columns,\n # can't omit reading the index in if we aren't being passed columns\n index=None if columns is None else False,\n gather_statistics=False,\n split_row_groups=self.row_groups_per_part,\n storage_options=self.storage_options,\n )\n\n\nclass ParquetWriter(ThreadedWriter):\n def __init__(self, out_dir, **kwargs):\n super().__init__(out_dir, **kwargs)\n self.data_paths = []\n self.data_writers = []\n self.data_bios = []\n self._lock = threading.RLock()\n\n def _get_filename(self, i):\n if self.use_guid:\n fn = f\"{i}.{guid()}.parquet\"\n else:\n fn = f\"{i}.parquet\"\n\n return os.path.join(self.out_dir, fn)\n\n def _get_or_create_writer(self, idx):\n # lazily initializes a writer for the given index\n with self._lock:\n while len(self.data_writers) <= idx:\n path = self._get_filename(len(self.data_writers))\n self.data_paths.append(path)\n if self.bytes_io:\n bio = BytesIO()\n self.data_bios.append(bio)\n self.data_writers.append(pwriter(bio, compression=None))\n else:\n self.data_writers.append(pwriter(path, compression=None))\n\n return self.data_writers[idx]\n\n def _write_table(self, idx, data, has_list_column=False):\n if has_list_column:\n # currently cudf doesn't support chunked parquet writers with list columns\n # write out a new file, rather than stream multiple chunks to a single file\n filename = self._get_filename(len(self.data_paths))\n data.to_parquet(filename)\n self.data_paths.append(filename)\n else:\n writer = self._get_or_create_writer(idx)\n writer.write_table(data)\n\n def _write_thread(self):\n while True:\n item = self.queue.get()\n try:\n if item is self._eod:\n break\n idx, data = item\n with self.write_locks[idx]:\n self._write_table(idx, data, False)\n finally:\n self.queue.task_done()\n\n @classmethod\n def write_special_metadata(cls, md, fs, out_dir):\n # Sort metadata by file name and convert list of\n # tuples to a list of metadata byte-blobs\n md_list = [m[1] for m in sorted(list(md.items()), key=lambda x: natural_sort_key(x[0]))]\n\n # Aggregate metadata and write _metadata file\n _write_pq_metadata_file(md_list, fs, out_dir)\n\n def _close_writers(self):\n md_dict = {}\n for writer, path in zip(self.data_writers, self.data_paths):\n fn = 
path.split(self.fs.sep)[-1]\n md_dict[fn] = writer.close(metadata_file_path=fn)\n return md_dict\n\n def _bytesio_to_disk(self):\n for bio, path in zip(self.data_bios, self.data_paths):\n gdf = cudf.io.read_parquet(bio, index=False)\n bio.close()\n if self.shuffle == Shuffle.PER_WORKER:\n gdf = _shuffle_gdf(gdf)\n gdf.to_parquet(path, compression=None, index=False)\n return\n\n\ndef _write_pq_metadata_file(md_list, fs, path):\n \"\"\" Converts list of parquet metadata objects into a single shared _metadata file. \"\"\"\n if md_list:\n metadata_path = fs.sep.join([path, \"_metadata\"])\n _meta = cudf.io.merge_parquet_filemetadata(md_list) if len(md_list) > 1 else md_list[0]\n with fs.open(metadata_path, \"wb\") as fil:\n _meta.tofile(fil)\n return\n\n\ndef guid():\n \"\"\"Simple utility function to get random hex string\"\"\"\n return uuid4().hex\n\n\ndef _memory_usage(df):\n \"\"\"this function is a workaround of a problem with getting memory usage of lists\n in cudf0.16. This can be deleted and just use `df.memory_usage(deep= True, index=True).sum()`\n once we are using cudf 0.17 (fixed in https://github.com/rapidsai/cudf/pull/6549)\"\"\"\n size = 0\n for col in df._data.columns:\n if cudf.utils.dtypes.is_list_dtype(col.dtype):\n for child in col.base_children:\n size += child.__sizeof__()\n else:\n size += col._memory_usage(deep=True)\n size += df.index.memory_usage(deep=True)\n return size\n", "path": "nvtabular/io/parquet.py"}]}
| 2,623 | 255 |
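To make the row-group check in the record above easier to follow in isolation, here is a minimal standalone sketch of the same logic. The function name, the way the row-group byte size is supplied, and the shortened warning text are assumptions for this example only; the real implementation lives inside `ParquetDatasetEngine` and measures the first row group with cudf.

```python
import warnings


def row_groups_per_part(rg_byte_size_0: int, part_size: int) -> int:
    """Return how many row groups fit in one partition, warning if even one is too big."""
    ratio = part_size / rg_byte_size_0
    if ratio < 1.0:
        warnings.warn(
            f"Row group memory size ({rg_byte_size_0} bytes) is bigger than the requested "
            f"part_size ({part_size}). A row group size of about 128 MB is generally recommended."
        )
        ratio = 1.0
    return int(ratio)


# A 200 MB row group with a 100 MB part size triggers the warning and
# falls back to a single row group per partition.
print(row_groups_per_part(200 * 1024**2, 100 * 1024**2))  # -> 1
```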
gh_patches_debug_8470
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-338
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kinto start fails: no module named functools32
```
kinto start
Starting subprocess with file monitor
2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting.
Traceback (most recent call last):
File "/var/www/kinto.leplat.re/venv/bin/kinto", line 9, in <module>
load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py", line 55, in main
pserve.main(pserve_argv)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 60, in main
return command.run()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 366, in run
global_conf=vars)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 401, in loadapp
return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 247, in loadapp
return loadobj(APP, uri, name=name, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 272, in loadobj
return context.create()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 710, in create
return self.object_type.invoke(self)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 146, in invoke
return fix_call(context.object, context.global_conf, **context.local_conf)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py", line 55, in fix_call
val = callable(*args, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py", line 77, in main
config.scan("kinto.views", **kwargs)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py", line 974, in scan
ignore=ignore)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py", line 205, in scan
__import__(modname)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py", line 12, in <module>
from kinto.views.collections import Collection
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py", line 2, in <module>
import jsonschema
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py", line 12, in <module>
from jsonschema.exceptions import (
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py", line 6, in <module>
from jsonschema import _utils
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py", line 6, in <module>
from jsonschema.compat import str_types, MutableMapping, urlsplit
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py", line 39, in <module>
from functools32 import lru_cache
ImportError: No module named functools32
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import codecs
3 from setuptools import setup, find_packages
4
5 here = os.path.abspath(os.path.dirname(__file__))
6
7
8 def read_file(filename):
9 """Open a related file and return its content."""
10 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
11 content = f.read()
12 return content
13
14 README = read_file('README.rst')
15 CHANGELOG = read_file('CHANGELOG.rst')
16 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
17
18 REQUIREMENTS = [
19 'waitress',
20 'cliquet>=2.13,<3',
21 'jsonschema',
22 ]
23
24 POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
25 'cliquet[postgresql]>=2.13,<3'
26 ]
27
28 MONITORING_REQUIREMENTS = REQUIREMENTS + [
29 'cliquet[monitoring]>=2.13,<3'
30 ]
31
32 FXA_REQUIREMENTS = REQUIREMENTS + [
33 'cliquet-fxa'
34 ]
35
36 ENTRY_POINTS = {
37 'paste.app_factory': [
38 'main = kinto:main',
39 ],
40 'console_scripts': [
41 'kinto = kinto.__main__:main'
42 ],
43 }
44
45 DEPENDENCY_LINKS = [
46 ]
47
48 setup(name='kinto',
49 version='1.11.0.dev0',
50 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
51 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
52 license='Apache License (2.0)',
53 classifiers=[
54 "Programming Language :: Python",
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.4",
59 "Programming Language :: Python :: 3.5",
60 "Programming Language :: Python :: Implementation :: CPython",
61 "Programming Language :: Python :: Implementation :: PyPy",
62 "Topic :: Internet :: WWW/HTTP",
63 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
64 "License :: OSI Approved :: Apache Software License"
65 ],
66 keywords="web services",
67 author='Mozilla Services',
68 author_email='[email protected]',
69 url='https://github.com/Kinto/kinto',
70 packages=find_packages(),
71 include_package_data=True,
72 zip_safe=False,
73 install_requires=REQUIREMENTS,
74 extras_require={
75 'postgresql': POSTGRESQL_REQUIREMENTS,
76 'monitoring': MONITORING_REQUIREMENTS,
77 'fxa': FXA_REQUIREMENTS,
78 },
79 entry_points=ENTRY_POINTS,
80 dependency_links=DEPENDENCY_LINKS)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
-import os
import codecs
+import os
+import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
@@ -21,6 +22,11 @@
'jsonschema',
]
+if sys.version_info < (3,):
+ REQUIREMENTS.extend([
+ 'functools32', # not installed by jsonschema with old pip versions.
+ ])
+
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
'cliquet[postgresql]>=2.13,<3'
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,6 @@\n-import os\n import codecs\n+import os\n+import sys\n from setuptools import setup, find_packages\n \n here = os.path.abspath(os.path.dirname(__file__))\n@@ -21,6 +22,11 @@\n 'jsonschema',\n ]\n \n+if sys.version_info < (3,):\n+ REQUIREMENTS.extend([\n+ 'functools32', # not installed by jsonschema with old pip versions.\n+ ])\n+\n POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n ]\n", "issue": "kinto start fails: no module named functools32\n```\nkinto start\nStarting subprocess with file monitor\n2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting. \nTraceback (most recent call last):\n File \"/var/www/kinto.leplat.re/venv/bin/kinto\", line 9, in <module>\n load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py\", line 55, in main\n pserve.main(pserve_argv)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 60, in main\n return command.run()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 366, in run\n global_conf=vars)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 401, in loadapp\n return loadapp(app_spec, name=name, relative_to=relative_to, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 247, in loadapp\n return loadobj(APP, uri, name=name, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 272, in loadobj\n return context.create()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 710, in create\n return self.object_type.invoke(self)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 146, in invoke\n return fix_call(context.object, context.global_conf, **context.local_conf)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py\", line 55, in fix_call\n val = callable(*args, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py\", line 77, in main\n config.scan(\"kinto.views\", **kwargs)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py\", line 974, in scan\n ignore=ignore)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py\", line 205, in scan\n __import__(modname)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py\", line 12, in <module>\n from kinto.views.collections import Collection\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py\", line 2, in <module>\n import jsonschema\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py\", line 12, in <module>\n from jsonschema.exceptions import (\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py\", line 6, in <module>\n from jsonschema import _utils\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py\", line 6, in <module>\n from jsonschema.compat import str_types, MutableMapping, urlsplit\n File 
\"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py\", line 39, in <module>\n from functools32 import lru_cache\nImportError: No module named functools32\n```\n\n", "before_files": [{"content": "import os\nimport codecs\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'waitress',\n 'cliquet>=2.13,<3',\n 'jsonschema',\n]\n\nPOSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n]\n\nMONITORING_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[monitoring]>=2.13,<3'\n]\n\nFXA_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet-fxa'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\nDEPENDENCY_LINKS = [\n]\n\nsetup(name='kinto',\n version='1.11.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIREMENTS,\n 'monitoring': MONITORING_REQUIREMENTS,\n 'fxa': FXA_REQUIREMENTS,\n },\n entry_points=ENTRY_POINTS,\n dependency_links=DEPENDENCY_LINKS)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'waitress',\n 'cliquet>=2.13,<3',\n 'jsonschema',\n]\n\nif sys.version_info < (3,):\n REQUIREMENTS.extend([\n 'functools32', # not installed by jsonschema with old pip versions.\n ])\n\nPOSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n]\n\nMONITORING_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[monitoring]>=2.13,<3'\n]\n\nFXA_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet-fxa'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\nDEPENDENCY_LINKS = [\n]\n\nsetup(name='kinto',\n version='1.11.0.dev0',\n description='Kinto Web 
Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIREMENTS,\n 'monitoring': MONITORING_REQUIREMENTS,\n 'fxa': FXA_REQUIREMENTS,\n },\n entry_points=ENTRY_POINTS,\n dependency_links=DEPENDENCY_LINKS)\n", "path": "setup.py"}]}
| 1,956 | 150 |
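The core of the patch above is a version-conditional requirement in `setup.py`. The sketch below shows that pattern on its own; the project name and trimmed requirement list are illustrative rather than the full Kinto setup.

```python
import sys

from setuptools import setup, find_packages

REQUIREMENTS = [
    "waitress",
    "jsonschema",
]

# jsonschema needs the functools32 backport on Python 2, and old pip versions
# do not install it automatically, so declare it explicitly.
if sys.version_info < (3,):
    REQUIREMENTS.append("functools32")

setup(
    name="example-project",  # placeholder name for this sketch
    version="0.1.0",
    packages=find_packages(),
    install_requires=REQUIREMENTS,
)
```

On newer packaging tooling the same effect can be expressed declaratively with an environment marker such as `functools32; python_version < "3"`, which avoids running Python logic at install time.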
gh_patches_debug_21792
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2546
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Footer widget footer--description css selector not working
Have you checked closed issues? https://github.com/Textualize/textual/issues?q=is%3Aissue+is%3Aclosed
yes
Please give a brief but clear explanation of the issue. If you can, include a complete working example that demonstrates the bug. **Check it can run without modifications.**
This is my first issue submitted and I'm new to Textual so this could be user error. I'm trying to style a footer widget with CSS. I have successfully used the footer--highlight, footer--highlight-key and footer--key component classes to style the footer, but the footer--description class doesn't seem to do anything and the default css from the Footer class is used. Shouldn't footer--description override Footer in my example below? I would expect the "Quit" text to start off with a purple background. Thanks!
```
from textual.app import App, ComposeResult
from textual.widgets import Footer
class App(App):
CSS = """
Footer .footer--description {
background: purple;
}
Footer .footer--highlight {
background: red;
}
Footer .footer--highlight-key {
background: blue;
}
Footer .footer--key {
background: green;
}
"""
BINDINGS = [
("q", "quit", "Quit"),
]
def compose(self) -> ComposeResult:
yield Footer()
if __name__ == "__main__":
app = App()
app.run()
```
It will be helpful if you run the following command and paste the results:
```
textual diagnose
```
# Textual Diagnostics
## Versions
| Name | Value |
|---------|--------|
| Textual | 0.24.1 |
| Rich | 13.3.5 |
## Python
| Name | Value |
|----------------|--------------------------------------------------------|
| Version | 3.11.0 |
| Implementation | CPython |
| Compiler | Clang 14.0.0 (clang-1400.0.29.102) |
| Executable | /Users/Tom/Projects/Python/PythonDemos/venv/bin/python |
## Operating System
| Name | Value |
|---------|--------------------------------------------------------------------------------------------------|
| System | Darwin |
| Release | 22.4.0 |
| Version | Darwin Kernel Version 22.4.0: Mon Mar 6 21:00:17 PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 |
## Terminal
| Name | Value |
|----------------------|-------------|
| Terminal Application | Kitty |
| TERM | xterm-kitty |
| COLORTERM | truecolor |
| FORCE_COLOR | *Not set* |
| NO_COLOR | *Not set* |
## Rich Console options
| Name | Value |
|----------------|----------------------|
| size | width=192, height=45 |
| legacy_windows | False |
| min_width | 1 |
| max_width | 192 |
| is_terminal | True |
| encoding | utf-8 |
| max_height | 45 |
| justify | None |
| overflow | None |
| no_wrap | False |
| highlight | None |
| markup | None |
| height | None |
Feel free to add screenshots and / or videos. These can be very helpful!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_footer.py`
Content:
```
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from typing import ClassVar, Optional
5
6 import rich.repr
7 from rich.console import RenderableType
8 from rich.text import Text
9
10 from .. import events
11 from ..reactive import reactive
12 from ..widget import Widget
13
14
15 @rich.repr.auto
16 class Footer(Widget):
17 """A simple footer widget which docks itself to the bottom of the parent container."""
18
19 COMPONENT_CLASSES: ClassVar[set[str]] = {
20 "footer--description",
21 "footer--key",
22 "footer--highlight",
23 "footer--highlight-key",
24 }
25 """
26 | Class | Description |
27 | :- | :- |
28 | `footer--description` | Targets the descriptions of the key bindings. |
29 | `footer--highlight` | Targets the highlighted key binding. |
30 | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |
31 | `footer--key` | Targets the key portions of the key bindings. |
32 """
33
34 DEFAULT_CSS = """
35 Footer {
36 background: $accent;
37 color: $text;
38 dock: bottom;
39 height: 1;
40 }
41 Footer > .footer--highlight {
42 background: $accent-darken-1;
43 }
44
45 Footer > .footer--highlight-key {
46 background: $secondary;
47 text-style: bold;
48 }
49
50 Footer > .footer--key {
51 text-style: bold;
52 background: $accent-darken-2;
53 }
54 """
55
56 highlight_key: reactive[str | None] = reactive[Optional[str]](None)
57
58 def __init__(self) -> None:
59 super().__init__()
60 self._key_text: Text | None = None
61 self.auto_links = False
62
63 async def watch_highlight_key(self) -> None:
64 """If highlight key changes we need to regenerate the text."""
65 self._key_text = None
66 self.refresh()
67
68 def _on_mount(self, _: events.Mount) -> None:
69 self.watch(self.screen, "focused", self._bindings_changed)
70 self.watch(self.screen, "stack_updates", self._bindings_changed)
71
72 def _bindings_changed(self, _: Widget | None) -> None:
73 self._key_text = None
74 self.refresh()
75
76 def _on_mouse_move(self, event: events.MouseMove) -> None:
77 """Store any key we are moving over."""
78 self.highlight_key = event.style.meta.get("key")
79
80 def _on_leave(self, _: events.Leave) -> None:
81 """Clear any highlight when the mouse leaves the widget"""
82 if self.screen.is_current:
83 self.highlight_key = None
84
85 def __rich_repr__(self) -> rich.repr.Result:
86 yield from super().__rich_repr__()
87
88 def _make_key_text(self) -> Text:
89 """Create text containing all the keys."""
90 base_style = self.rich_style
91 text = Text(
92 style=self.rich_style,
93 no_wrap=True,
94 overflow="ellipsis",
95 justify="left",
96 end="",
97 )
98 highlight_style = self.get_component_rich_style("footer--highlight")
99 highlight_key_style = self.get_component_rich_style("footer--highlight-key")
100 key_style = self.get_component_rich_style("footer--key")
101
102 bindings = [
103 binding
104 for (_, binding) in self.app.namespace_bindings.values()
105 if binding.show
106 ]
107
108 action_to_bindings = defaultdict(list)
109 for binding in bindings:
110 action_to_bindings[binding.action].append(binding)
111
112 for _, bindings in action_to_bindings.items():
113 binding = bindings[0]
114 if binding.key_display is None:
115 key_display = self.app.get_key_display(binding.key)
116 if key_display is None:
117 key_display = binding.key.upper()
118 else:
119 key_display = binding.key_display
120 hovered = self.highlight_key == binding.key
121 key_text = Text.assemble(
122 (f" {key_display} ", highlight_key_style if hovered else key_style),
123 (
124 f" {binding.description} ",
125 highlight_style if hovered else base_style,
126 ),
127 meta={
128 "@click": f"app.check_bindings('{binding.key}')",
129 "key": binding.key,
130 },
131 )
132 text.append_text(key_text)
133 return text
134
135 def notify_style_update(self) -> None:
136 self._key_text = None
137
138 def post_render(self, renderable):
139 return renderable
140
141 def render(self) -> RenderableType:
142 if self._key_text is None:
143 self._key_text = self._make_key_text()
144 return self._key_text
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py
--- a/src/textual/widgets/_footer.py
+++ b/src/textual/widgets/_footer.py
@@ -98,6 +98,7 @@
highlight_style = self.get_component_rich_style("footer--highlight")
highlight_key_style = self.get_component_rich_style("footer--highlight-key")
key_style = self.get_component_rich_style("footer--key")
+ description_style = self.get_component_rich_style("footer--description")
bindings = [
binding
@@ -122,7 +123,7 @@
(f" {key_display} ", highlight_key_style if hovered else key_style),
(
f" {binding.description} ",
- highlight_style if hovered else base_style,
+ highlight_style if hovered else base_style + description_style,
),
meta={
"@click": f"app.check_bindings('{binding.key}')",
|
{"golden_diff": "diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py\n--- a/src/textual/widgets/_footer.py\n+++ b/src/textual/widgets/_footer.py\n@@ -98,6 +98,7 @@\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = self.get_component_rich_style(\"footer--key\")\n+ description_style = self.get_component_rich_style(\"footer--description\")\n \n bindings = [\n binding\n@@ -122,7 +123,7 @@\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n- highlight_style if hovered else base_style,\n+ highlight_style if hovered else base_style + description_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n", "issue": "Footer widget footer--description css selector not working\nHave you checked closed issues? https://github.com/Textualize/textual/issues?q=is%3Aissue+is%3Aclosed\r\nyes\r\n\r\nPlease give a brief but clear explanation of the issue. If you can, include a complete working example that demonstrates the bug. **Check it can run without modifications.**\r\n\r\nThis is my first issue submitted and I'm new to Textual so this could be user error. I'm trying to style a footer widget with CSS. I have successfully used the footer--highlight, footer--highlight-key and footer--key component classes to style the footer, but the footer--description class doesn't seem to do anything and the default css from the Footer class is used. Shouldn't footer--description override Footer in my example below? I would expect the \"Quit\" text to start off with a purple background. Thanks!\r\n\r\n```\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Footer\r\n\r\n\r\nclass App(App):\r\n\r\n CSS = \"\"\"\r\n Footer .footer--description {\r\n background: purple;\r\n }\r\n \r\n Footer .footer--highlight {\r\n background: red;\r\n }\r\n \r\n Footer .footer--highlight-key {\r\n background: blue;\r\n }\r\n \r\n Footer .footer--key {\r\n background: green;\r\n }\r\n \"\"\"\r\n\r\n BINDINGS = [\r\n (\"q\", \"quit\", \"Quit\"),\r\n ]\r\n\r\n def compose(self) -> ComposeResult:\r\n yield Footer()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = App()\r\n app.run()\r\n```\r\n\r\n\r\nIt will be helpful if you run the following command and paste the results:\r\n\r\n```\r\ntextual diagnose\r\n```\r\n\r\n# Textual Diagnostics\r\n\r\n## Versions\r\n\r\n| Name | Value |\r\n|---------|--------|\r\n| Textual | 0.24.1 |\r\n| Rich | 13.3.5 |\r\n\r\n## Python\r\n\r\n| Name | Value |\r\n|----------------|--------------------------------------------------------|\r\n| Version | 3.11.0 |\r\n| Implementation | CPython |\r\n| Compiler | Clang 14.0.0 (clang-1400.0.29.102) |\r\n| Executable | /Users/Tom/Projects/Python/PythonDemos/venv/bin/python |\r\n\r\n## Operating System\r\n\r\n| Name | Value |\r\n|---------|--------------------------------------------------------------------------------------------------|\r\n| System | Darwin |\r\n| Release | 22.4.0 |\r\n| Version | Darwin Kernel Version 22.4.0: Mon Mar 6 21:00:17 PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 |\r\n\r\n## Terminal\r\n\r\n| Name | Value |\r\n|----------------------|-------------|\r\n| Terminal Application | Kitty |\r\n| TERM | xterm-kitty |\r\n| COLORTERM | truecolor |\r\n| FORCE_COLOR | *Not set* |\r\n| NO_COLOR | *Not set* |\r\n\r\n## Rich Console options\r\n\r\n| Name | Value 
|\r\n|----------------|----------------------|\r\n| size | width=192, height=45 |\r\n| legacy_windows | False |\r\n| min_width | 1 |\r\n| max_width | 192 |\r\n| is_terminal | True |\r\n| encoding | utf-8 |\r\n| max_height | 45 |\r\n| justify | None |\r\n| overflow | None |\r\n| no_wrap | False |\r\n| highlight | None |\r\n| markup | None |\r\n| height | None |\r\n\r\n\r\nFeel free to add screenshots and / or videos. These can be very helpful!\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import ClassVar, Optional\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n\[email protected]\nclass Footer(Widget):\n \"\"\"A simple footer widget which docks itself to the bottom of the parent container.\"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"footer--description\",\n \"footer--key\",\n \"footer--highlight\",\n \"footer--highlight-key\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `footer--description` | Targets the descriptions of the key bindings. |\n | `footer--highlight` | Targets the highlighted key binding. |\n | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |\n | `footer--key` | Targets the key portions of the key bindings. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Footer {\n background: $accent;\n color: $text;\n dock: bottom;\n height: 1;\n }\n Footer > .footer--highlight {\n background: $accent-darken-1;\n }\n\n Footer > .footer--highlight-key {\n background: $secondary;\n text-style: bold;\n }\n\n Footer > .footer--key {\n text-style: bold;\n background: $accent-darken-2;\n }\n \"\"\"\n\n highlight_key: reactive[str | None] = reactive[Optional[str]](None)\n\n def __init__(self) -> None:\n super().__init__()\n self._key_text: Text | None = None\n self.auto_links = False\n\n async def watch_highlight_key(self) -> None:\n \"\"\"If highlight key changes we need to regenerate the text.\"\"\"\n self._key_text = None\n self.refresh()\n\n def _on_mount(self, _: events.Mount) -> None:\n self.watch(self.screen, \"focused\", self._bindings_changed)\n self.watch(self.screen, \"stack_updates\", self._bindings_changed)\n\n def _bindings_changed(self, _: Widget | None) -> None:\n self._key_text = None\n self.refresh()\n\n def _on_mouse_move(self, event: events.MouseMove) -> None:\n \"\"\"Store any key we are moving over.\"\"\"\n self.highlight_key = event.style.meta.get(\"key\")\n\n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n if self.screen.is_current:\n self.highlight_key = None\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n\n def _make_key_text(self) -> Text:\n \"\"\"Create text containing all the keys.\"\"\"\n base_style = self.rich_style\n text = Text(\n style=self.rich_style,\n no_wrap=True,\n overflow=\"ellipsis\",\n justify=\"left\",\n end=\"\",\n )\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = self.get_component_rich_style(\"footer--key\")\n\n bindings = [\n binding\n for (_, binding) in self.app.namespace_bindings.values()\n if binding.show\n ]\n\n action_to_bindings = defaultdict(list)\n for binding in bindings:\n action_to_bindings[binding.action].append(binding)\n\n for _, bindings in 
action_to_bindings.items():\n binding = bindings[0]\n if binding.key_display is None:\n key_display = self.app.get_key_display(binding.key)\n if key_display is None:\n key_display = binding.key.upper()\n else:\n key_display = binding.key_display\n hovered = self.highlight_key == binding.key\n key_text = Text.assemble(\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n highlight_style if hovered else base_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n \"key\": binding.key,\n },\n )\n text.append_text(key_text)\n return text\n\n def notify_style_update(self) -> None:\n self._key_text = None\n\n def post_render(self, renderable):\n return renderable\n\n def render(self) -> RenderableType:\n if self._key_text is None:\n self._key_text = self._make_key_text()\n return self._key_text\n", "path": "src/textual/widgets/_footer.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import ClassVar, Optional\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n\[email protected]\nclass Footer(Widget):\n \"\"\"A simple footer widget which docks itself to the bottom of the parent container.\"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"footer--description\",\n \"footer--key\",\n \"footer--highlight\",\n \"footer--highlight-key\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `footer--description` | Targets the descriptions of the key bindings. |\n | `footer--highlight` | Targets the highlighted key binding. |\n | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |\n | `footer--key` | Targets the key portions of the key bindings. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Footer {\n background: $accent;\n color: $text;\n dock: bottom;\n height: 1;\n }\n Footer > .footer--highlight {\n background: $accent-darken-1;\n }\n\n Footer > .footer--highlight-key {\n background: $secondary;\n text-style: bold;\n }\n\n Footer > .footer--key {\n text-style: bold;\n background: $accent-darken-2;\n }\n \"\"\"\n\n highlight_key: reactive[str | None] = reactive[Optional[str]](None)\n\n def __init__(self) -> None:\n super().__init__()\n self._key_text: Text | None = None\n self.auto_links = False\n\n async def watch_highlight_key(self) -> None:\n \"\"\"If highlight key changes we need to regenerate the text.\"\"\"\n self._key_text = None\n self.refresh()\n\n def _on_mount(self, _: events.Mount) -> None:\n self.watch(self.screen, \"focused\", self._bindings_changed)\n self.watch(self.screen, \"stack_updates\", self._bindings_changed)\n\n def _bindings_changed(self, _: Widget | None) -> None:\n self._key_text = None\n self.refresh()\n\n def _on_mouse_move(self, event: events.MouseMove) -> None:\n \"\"\"Store any key we are moving over.\"\"\"\n self.highlight_key = event.style.meta.get(\"key\")\n\n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n if self.screen.is_current:\n self.highlight_key = None\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n\n def _make_key_text(self) -> Text:\n \"\"\"Create text containing all the keys.\"\"\"\n base_style = self.rich_style\n text = Text(\n style=self.rich_style,\n no_wrap=True,\n overflow=\"ellipsis\",\n justify=\"left\",\n end=\"\",\n )\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = self.get_component_rich_style(\"footer--key\")\n description_style = self.get_component_rich_style(\"footer--description\")\n\n bindings = [\n binding\n for (_, binding) in self.app.namespace_bindings.values()\n if binding.show\n ]\n\n action_to_bindings = defaultdict(list)\n for binding in bindings:\n action_to_bindings[binding.action].append(binding)\n\n for _, bindings in action_to_bindings.items():\n binding = bindings[0]\n if binding.key_display is None:\n key_display = self.app.get_key_display(binding.key)\n if key_display is None:\n key_display = binding.key.upper()\n else:\n key_display = binding.key_display\n hovered = self.highlight_key == binding.key\n key_text = Text.assemble(\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n highlight_style if hovered else base_style + description_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n \"key\": binding.key,\n },\n )\n text.append_text(key_text)\n return text\n\n def notify_style_update(self) -> None:\n self._key_text = None\n\n def post_render(self, renderable):\n return renderable\n\n def render(self) -> RenderableType:\n if self._key_text is None:\n self._key_text = self._make_key_text()\n return self._key_text\n", "path": "src/textual/widgets/_footer.py"}]}
| 2,377 | 207 |
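The one-line fix above relies on Rich's ability to layer styles with `+`: attributes set by the right-hand style override the left-hand one, so the `footer--description` component style only changes what it explicitly sets. A small sketch with plain Rich objects (colour values chosen arbitrarily for the demo):

```python
from rich.console import Console
from rich.style import Style
from rich.text import Text

base_style = Style(bgcolor="dark_blue", color="white")
# Stand-in for the style resolved from a "Footer .footer--description" CSS rule.
description_style = Style(bgcolor="purple")

# Adding styles layers the right-hand style on top: the purple background wins,
# while the white foreground from the base style is preserved.
combined = base_style + description_style

Console().print(Text(" q  Quit ", style=combined))
```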
gh_patches_debug_10904
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1174
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filtering for POIs in event list not working
### Describe the Bug
<!-- A clear and concise description of what the bug is. -->
When filters for POIs in the event list are applied, all events are returned, independent from their location
### Steps to Reproduce
1. Go to event list
2. Filter for event location
### Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
Only the events with the selected location should be shown
### Actual Behavior
<!-- A clear and concise description of what actually happened. -->
All events are shown
### Additional Information
<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/events/event_form_view.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.core.exceptions import PermissionDenied
6 from django.shortcuts import render, redirect
7 from django.utils.decorators import method_decorator
8 from django.utils.translation import ugettext as _
9 from django.views.generic import TemplateView
10
11
12 from ...constants import status, translation_status
13 from ...decorators import permission_required
14 from ...forms import EventForm, EventTranslationForm, RecurrenceRuleForm
15 from ...models import Language, Event, EventTranslation, RecurrenceRule, POI
16 from .event_context_mixin import EventContextMixin
17 from ..media.media_context_mixin import MediaContextMixin
18
19
20 logger = logging.getLogger(__name__)
21
22
23 @method_decorator(permission_required("cms.view_event"), name="dispatch")
24 @method_decorator(permission_required("cms.change_event"), name="post")
25 class EventFormView(TemplateView, EventContextMixin, MediaContextMixin):
26 """
27 Class for rendering the events form
28 """
29
30 #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)
31 template_name = "events/event_form.html"
32 #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)
33 extra_context = {
34 "current_menu_item": "events_form",
35 "translation_status": translation_status,
36 }
37
38 # pylint: disable=too-many-locals
39 def get(self, request, *args, **kwargs):
40 r"""
41 Render event form for HTTP GET requests
42
43 :param request: Object representing the user call
44 :type request: ~django.http.HttpRequest
45
46 :param \*args: The supplied arguments
47 :type \*args: list
48
49 :param \**kwargs: The supplied keyword arguments
50 :type \**kwargs: dict
51
52 :return: The rendered template response
53 :rtype: ~django.template.response.TemplateResponse
54 """
55 region = request.region
56 language = region.get_language_or_404(
57 kwargs.get("language_slug"), only_active=True
58 )
59
60 # get event and event translation objects if they exist, otherwise objects are None
61 event_instance = region.events.filter(id=kwargs.get("event_id")).first()
62 event_translation_instance = language.event_translations.filter(
63 event=event_instance
64 ).first()
65 recurrence_rule_instance = RecurrenceRule.objects.filter(
66 event=event_instance
67 ).first()
68 poi_instance = region.pois.filter(events=event_instance).first()
69
70 # Make form disabled if event is archived or user doesn't have the permission to edit the event
71 if event_instance and event_instance.archived:
72 disabled = True
73 messages.warning(
74 request, _("You cannot edit this event because it is archived.")
75 )
76 elif not request.user.has_perm("cms.change_event"):
77 disabled = True
78 messages.warning(
79 request, _("You don't have the permission to edit events.")
80 )
81 else:
82 disabled = False
83
84 if not request.user.has_perm("cms.publish_event"):
85 messages.warning(
86 request,
87 _(
88 "You don't have the permission to publish events, but you can propose changes and submit them for review instead."
89 ),
90 )
91
92 event_form = EventForm(instance=event_instance, disabled=disabled)
93 event_translation_form = EventTranslationForm(
94 instance=event_translation_instance, disabled=disabled
95 )
96 recurrence_rule_form = RecurrenceRuleForm(
97 instance=recurrence_rule_instance, disabled=disabled
98 )
99 url_link = f"{settings.WEBAPP_URL}/{region.slug}/{language.slug}/events/"
100 return render(
101 request,
102 self.template_name,
103 {
104 **self.get_context_data(**kwargs),
105 "event_form": event_form,
106 "event_translation_form": event_translation_form,
107 "recurrence_rule_form": recurrence_rule_form,
108 "poi": poi_instance,
109 "language": language,
110 "languages": region.active_languages if event_instance else [language],
111 "url_link": url_link,
112 "translation_states": event_instance.translation_states
113 if event_instance
114 else [],
115 },
116 )
117
118 # pylint: disable=too-many-locals,too-many-branches
119 def post(self, request, **kwargs):
120 r"""
121         Save event and render event form for HTTP POST requests
122
123 :param request: Object representing the user call
124 :type request: ~django.http.HttpRequest
125
126 :param \**kwargs: The supplied keyword arguments
127 :type \**kwargs: dict
128
129 :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to publish events
130
131 :return: The rendered template response
132 :rtype: ~django.template.response.TemplateResponse
133 """
134 region = request.region
135 language = Language.objects.get(slug=kwargs.get("language_slug"))
136 poi = POI.objects.filter(id=request.POST.get("location")).first()
137
138 event_instance = Event.objects.filter(id=kwargs.get("event_id")).first()
139 recurrence_rule_instance = RecurrenceRule.objects.filter(
140 event=event_instance
141 ).first()
142 event_translation_instance = EventTranslation.objects.filter(
143 event=event_instance, language=language
144 ).first()
145
146 event_form = EventForm(
147 data=request.POST,
148 files=request.FILES,
149 instance=event_instance,
150 additional_instance_attributes={"region": region, "location": poi},
151 )
152 # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation
153 event_form_valid = event_form.is_valid()
154 recurrence_rule_form = RecurrenceRuleForm(
155 data=request.POST,
156 instance=recurrence_rule_instance,
157 event_start_date=event_form.cleaned_data.get("start_date", None),
158 )
159 event_translation_form = EventTranslationForm(
160 data=request.POST,
161 instance=event_translation_instance,
162 additional_instance_attributes={
163 "creator": request.user,
164 "language": language,
165 "event": event_form.instance,
166 },
167 )
168
169 if (
170 not event_form_valid
171 or not event_translation_form.is_valid()
172 or (
173 event_form.cleaned_data["is_recurring"]
174 and not recurrence_rule_form.is_valid()
175 )
176 ):
177 # Add error messages
178 event_form.add_error_messages(request)
179 event_translation_form.add_error_messages(request)
180 # do not call recurrence rule form clean method when recurrence rule is not set
181 if event_form.cleaned_data["is_recurring"]:
182 recurrence_rule_form.add_error_messages(request)
183 elif (
184 event_translation_form.instance.status == status.AUTO_SAVE
185 and not event_form.has_changed()
186 and not event_translation_form.has_changed()
187 and not recurrence_rule_form.has_changed()
188 ):
189 messages.info(request, _("No changes detected, autosave skipped"))
190 else:
191 # Check publish permissions
192 if event_translation_form.instance.status == status.PUBLIC:
193 if not request.user.has_perm("cms.publish_event"):
194 raise PermissionDenied(
195 f"{request.user!r} does not have the permission 'cms.publish_event'"
196 )
197 # Save forms
198 if event_form.cleaned_data.get("is_recurring"):
199 # If event is recurring, save recurrence rule
200 event_form.instance.recurrence_rule = recurrence_rule_form.save()
201 elif event_form.instance.recurrence_rule:
202 # If the event is not recurring but it was before, delete the associated recurrence rule
203 event_form.instance.recurrence_rule.delete()
204 event_form.instance.recurrence_rule = None
205 event_translation_form.instance.event = event_form.save()
206 event_translation_form.save()
207 # Add the success message and redirect to the edit page
208 if not event_instance:
209 messages.success(
210 request,
211 _('Event "{}" was successfully created').format(
212 event_translation_form.instance
213 ),
214 )
215 elif (
216 not event_form.has_changed()
217 and not event_translation_form.has_changed()
218 and not recurrence_rule_form.has_changed()
219 ):
220 messages.info(request, _("No changes detected, but date refreshed"))
221 else:
222 # Add the success message
223 event_translation_form.add_success_message(request)
224 return redirect(
225 "edit_event",
226 **{
227 "event_id": event_form.instance.id,
228 "region_slug": region.slug,
229 "language_slug": language.slug,
230 },
231 )
232
233 return render(
234 request,
235 self.template_name,
236 {
237 **self.get_context_data(**kwargs),
238 "event_form": event_form,
239 "event_translation_form": event_translation_form,
240 "recurrence_rule_form": recurrence_rule_form,
241 "poi": poi,
242 "language": language,
243 "languages": region.active_languages if event_instance else [language],
244 "translation_states": event_instance.translation_states
245 if event_instance
246 else [],
247 },
248 )
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/integreat_cms/cms/views/events/event_form_view.py b/integreat_cms/cms/views/events/event_form_view.py
--- a/integreat_cms/cms/views/events/event_form_view.py
+++ b/integreat_cms/cms/views/events/event_form_view.py
@@ -65,7 +65,10 @@
recurrence_rule_instance = RecurrenceRule.objects.filter(
event=event_instance
).first()
- poi_instance = region.pois.filter(events=event_instance).first()
+ if event_instance:
+ poi_instance = event_instance.location
+ else:
+ poi_instance = None
# Make form disabled if event is archived or user doesn't have the permission to edit the event
if event_instance and event_instance.archived:
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/events/event_form_view.py b/integreat_cms/cms/views/events/event_form_view.py\n--- a/integreat_cms/cms/views/events/event_form_view.py\n+++ b/integreat_cms/cms/views/events/event_form_view.py\n@@ -65,7 +65,10 @@\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n- poi_instance = region.pois.filter(events=event_instance).first()\n+ if event_instance:\n+ poi_instance = event_instance.location\n+ else:\n+ poi_instance = None\n \n # Make form disabled if event is archived or user doesn't have the permission to edit the event\n if event_instance and event_instance.archived:\n", "issue": "Filtering for POIs in event list not working\n### Describe the Bug\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen filters for POIs in the event list are applied, all events are returned, independent from their location\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to event list\r\n2. Filter for event location\r\n\r\n### Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nOnly the events with the selected location should be shown\r\n\r\n### Actual Behavior\r\n<!-- A clear and concise description of what actually happened. -->\r\nAll events are shown\r\n\r\n### Additional Information\r\n<!-- Add any other context (e.g. logs, screenshots, etc.) about the problem here. -->\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\n\nfrom ...constants import status, translation_status\nfrom ...decorators import permission_required\nfrom ...forms import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Language, Event, EventTranslation, RecurrenceRule, POI\nfrom .event_context_mixin import EventContextMixin\nfrom ..media.media_context_mixin import MediaContextMixin\n\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_event\"), name=\"dispatch\")\n@method_decorator(permission_required(\"cms.change_event\"), name=\"post\")\nclass EventFormView(TemplateView, EventContextMixin, MediaContextMixin):\n \"\"\"\n Class for rendering the events form\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"events/event_form.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n extra_context = {\n \"current_menu_item\": \"events_form\",\n \"translation_status\": translation_status,\n }\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render event form for HTTP GET requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = region.get_language_or_404(\n kwargs.get(\"language_slug\"), only_active=True\n )\n\n # get event and event translation objects if they exist, otherwise objects are 
None\n event_instance = region.events.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = language.event_translations.filter(\n event=event_instance\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n poi_instance = region.pois.filter(events=event_instance).first()\n\n # Make form disabled if event is archived or user doesn't have the permission to edit the event\n if event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n elif not request.user.has_perm(\"cms.change_event\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit events.\")\n )\n else:\n disabled = False\n\n if not request.user.has_perm(\"cms.publish_event\"):\n messages.warning(\n request,\n _(\n \"You don't have the permission to publish events, but you can propose changes and submit them for review instead.\"\n ),\n )\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n url_link = f\"{settings.WEBAPP_URL}/{region.slug}/{language.slug}/events/\"\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"url_link\": url_link,\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n r\"\"\"\n Save event and ender event form for HTTP POST requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to publish events\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = Language.objects.get(slug=kwargs.get(\"language_slug\"))\n poi = POI.objects.filter(id=request.POST.get(\"location\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n event_form = EventForm(\n data=request.POST,\n files=request.FILES,\n instance=event_instance,\n additional_instance_attributes={\"region\": region, \"location\": poi},\n )\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n additional_instance_attributes={\n \"creator\": request.user,\n 
\"language\": language,\n \"event\": event_form.instance,\n },\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n # Add error messages\n event_form.add_error_messages(request)\n event_translation_form.add_error_messages(request)\n # do not call recurrence rule form clean method when recurrence rule is not set\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule_form.add_error_messages(request)\n elif (\n event_translation_form.instance.status == status.AUTO_SAVE\n and not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, autosave skipped\"))\n else:\n # Check publish permissions\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_event\"):\n raise PermissionDenied(\n f\"{request.user!r} does not have the permission 'cms.publish_event'\"\n )\n # Save forms\n if event_form.cleaned_data.get(\"is_recurring\"):\n # If event is recurring, save recurrence rule\n event_form.instance.recurrence_rule = recurrence_rule_form.save()\n elif event_form.instance.recurrence_rule:\n # If the event is not recurring but it was before, delete the associated recurrence rule\n event_form.instance.recurrence_rule.delete()\n event_form.instance.recurrence_rule = None\n event_translation_form.instance.event = event_form.save()\n event_translation_form.save()\n # Add the success message and redirect to the edit page\n if not event_instance:\n messages.success(\n request,\n _('Event \"{}\" was successfully created').format(\n event_translation_form.instance\n ),\n )\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, but date refreshed\"))\n else:\n # Add the success message\n event_translation_form.add_success_message(request)\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event_form.instance.id,\n \"region_slug\": region.slug,\n \"language_slug\": language.slug,\n },\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n", "path": "integreat_cms/cms/views/events/event_form_view.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\n\nfrom ...constants import status, translation_status\nfrom ...decorators import permission_required\nfrom ...forms import EventForm, EventTranslationForm, RecurrenceRuleForm\nfrom ...models import Language, Event, EventTranslation, RecurrenceRule, POI\nfrom .event_context_mixin import EventContextMixin\nfrom ..media.media_context_mixin import MediaContextMixin\n\n\nlogger = 
logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_event\"), name=\"dispatch\")\n@method_decorator(permission_required(\"cms.change_event\"), name=\"post\")\nclass EventFormView(TemplateView, EventContextMixin, MediaContextMixin):\n \"\"\"\n Class for rendering the events form\n \"\"\"\n\n #: The template to render (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n template_name = \"events/event_form.html\"\n #: The context dict passed to the template (see :class:`~django.views.generic.base.ContextMixin`)\n extra_context = {\n \"current_menu_item\": \"events_form\",\n \"translation_status\": translation_status,\n }\n\n # pylint: disable=too-many-locals\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render event form for HTTP GET requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = region.get_language_or_404(\n kwargs.get(\"language_slug\"), only_active=True\n )\n\n # get event and event translation objects if they exist, otherwise objects are None\n event_instance = region.events.filter(id=kwargs.get(\"event_id\")).first()\n event_translation_instance = language.event_translations.filter(\n event=event_instance\n ).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n if event_instance:\n poi_instance = event_instance.location\n else:\n poi_instance = None\n\n # Make form disabled if event is archived or user doesn't have the permission to edit the event\n if event_instance and event_instance.archived:\n disabled = True\n messages.warning(\n request, _(\"You cannot edit this event because it is archived.\")\n )\n elif not request.user.has_perm(\"cms.change_event\"):\n disabled = True\n messages.warning(\n request, _(\"You don't have the permission to edit events.\")\n )\n else:\n disabled = False\n\n if not request.user.has_perm(\"cms.publish_event\"):\n messages.warning(\n request,\n _(\n \"You don't have the permission to publish events, but you can propose changes and submit them for review instead.\"\n ),\n )\n\n event_form = EventForm(instance=event_instance, disabled=disabled)\n event_translation_form = EventTranslationForm(\n instance=event_translation_instance, disabled=disabled\n )\n recurrence_rule_form = RecurrenceRuleForm(\n instance=recurrence_rule_instance, disabled=disabled\n )\n url_link = f\"{settings.WEBAPP_URL}/{region.slug}/{language.slug}/events/\"\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi_instance,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"url_link\": url_link,\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n\n # pylint: disable=too-many-locals,too-many-branches\n def post(self, request, **kwargs):\n r\"\"\"\n Save event and ender event form for HTTP POST requests\n\n :param request: Object representing the user call\n :type request: ~django.http.HttpRequest\n\n :param \\**kwargs: The supplied keyword arguments\n :type 
\\**kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to publish events\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n region = request.region\n language = Language.objects.get(slug=kwargs.get(\"language_slug\"))\n poi = POI.objects.filter(id=request.POST.get(\"location\")).first()\n\n event_instance = Event.objects.filter(id=kwargs.get(\"event_id\")).first()\n recurrence_rule_instance = RecurrenceRule.objects.filter(\n event=event_instance\n ).first()\n event_translation_instance = EventTranslation.objects.filter(\n event=event_instance, language=language\n ).first()\n\n event_form = EventForm(\n data=request.POST,\n files=request.FILES,\n instance=event_instance,\n additional_instance_attributes={\"region\": region, \"location\": poi},\n )\n # clean data of event form to be able to pass the cleaned start date to the recurrence form for validation\n event_form_valid = event_form.is_valid()\n recurrence_rule_form = RecurrenceRuleForm(\n data=request.POST,\n instance=recurrence_rule_instance,\n event_start_date=event_form.cleaned_data.get(\"start_date\", None),\n )\n event_translation_form = EventTranslationForm(\n data=request.POST,\n instance=event_translation_instance,\n additional_instance_attributes={\n \"creator\": request.user,\n \"language\": language,\n \"event\": event_form.instance,\n },\n )\n\n if (\n not event_form_valid\n or not event_translation_form.is_valid()\n or (\n event_form.cleaned_data[\"is_recurring\"]\n and not recurrence_rule_form.is_valid()\n )\n ):\n # Add error messages\n event_form.add_error_messages(request)\n event_translation_form.add_error_messages(request)\n # do not call recurrence rule form clean method when recurrence rule is not set\n if event_form.cleaned_data[\"is_recurring\"]:\n recurrence_rule_form.add_error_messages(request)\n elif (\n event_translation_form.instance.status == status.AUTO_SAVE\n and not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, autosave skipped\"))\n else:\n # Check publish permissions\n if event_translation_form.instance.status == status.PUBLIC:\n if not request.user.has_perm(\"cms.publish_event\"):\n raise PermissionDenied(\n f\"{request.user!r} does not have the permission 'cms.publish_event'\"\n )\n # Save forms\n if event_form.cleaned_data.get(\"is_recurring\"):\n # If event is recurring, save recurrence rule\n event_form.instance.recurrence_rule = recurrence_rule_form.save()\n elif event_form.instance.recurrence_rule:\n # If the event is not recurring but it was before, delete the associated recurrence rule\n event_form.instance.recurrence_rule.delete()\n event_form.instance.recurrence_rule = None\n event_translation_form.instance.event = event_form.save()\n event_translation_form.save()\n # Add the success message and redirect to the edit page\n if not event_instance:\n messages.success(\n request,\n _('Event \"{}\" was successfully created').format(\n event_translation_form.instance\n ),\n )\n elif (\n not event_form.has_changed()\n and not event_translation_form.has_changed()\n and not recurrence_rule_form.has_changed()\n ):\n messages.info(request, _(\"No changes detected, but date refreshed\"))\n else:\n # Add the success message\n event_translation_form.add_success_message(request)\n return redirect(\n \"edit_event\",\n **{\n \"event_id\": event_form.instance.id,\n 
\"region_slug\": region.slug,\n \"language_slug\": language.slug,\n },\n )\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"event_form\": event_form,\n \"event_translation_form\": event_translation_form,\n \"recurrence_rule_form\": recurrence_rule_form,\n \"poi\": poi,\n \"language\": language,\n \"languages\": region.active_languages if event_instance else [language],\n \"translation_states\": event_instance.translation_states\n if event_instance\n else [],\n },\n )\n", "path": "integreat_cms/cms/views/events/event_form_view.py"}]}
| 2,885 | 165 |
gh_patches_debug_43103 | rasdani/github-patches | git_diff | crytic__slither-438 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Human-summary printer: show number of lines of assembly code
The human summary printer should show if a contract uses inline assembly, and if so, show the number of lines of assembly code
--- END ISSUE ---
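For orientation, here is a short sketch of how such a count could be computed with slither's Python API; the target filename is an illustrative assumption, and the traversal mirrors the approach ultimately taken in the patch further below:

```python
# Sketch only: count lines of inline assembly across all contracts in a target.
from slither.slither import Slither
from slither.core.cfg.node import NodeType

sl = Slither("contract.sol")  # hypothetical Solidity file

total_asm_lines = 0
for contract in sl.contracts:
    for function in contract.functions:
        for node in function.nodes:
            # ASSEMBLY nodes carry the raw inline-assembly text (when available)
            if node.type == NodeType.ASSEMBLY and node.inline_asm:
                total_asm_lines += len(node.inline_asm.splitlines())

print(f"Number of assembly lines: {total_asm_lines}")
```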
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/printers/summary/human_summary.py`
Content:
```
1 """
2 Module printing summary of the contract
3 """
4 import logging
5
6 from slither.printers.abstract_printer import AbstractPrinter
7 from slither.utils import output
8 from slither.utils.code_complexity import compute_cyclomatic_complexity
9 from slither.utils.colors import green, red, yellow
10 from slither.utils.standard_libraries import is_standard_library
11
12 class PrinterHumanSummary(AbstractPrinter):
13 ARGUMENT = 'human-summary'
14 HELP = 'Print a human-readable summary of the contracts'
15
16 WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary'
17
18 @staticmethod
19 def _get_summary_erc20(contract):
20
21 functions_name = [f.name for f in contract.functions]
22 state_variables = [v.name for v in contract.state_variables]
23
24 pause = 'pause' in functions_name
25
26 if 'mint' in functions_name:
27 if not 'mintingFinished' in state_variables:
28 mint_limited = False
29 else:
30 mint_limited = True
31 else:
32 mint_limited = None # no minting
33
34 race_condition_mitigated = 'increaseApproval' in functions_name or\
35 'safeIncreaseAllowance' in functions_name
36
37 return pause, mint_limited, race_condition_mitigated
38
39
40 def get_summary_erc20(self, contract):
41 txt = ''
42
43 pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)
44
45 if pause:
46 txt += "\t\t Can be paused? : {}\n".format(yellow('Yes'))
47 else:
48 txt += "\t\t Can be paused? : {}\n".format(green('No'))
49
50 if mint_limited is None:
51 txt += "\t\t Minting restriction? : {}\n".format(green('No Minting'))
52 else:
53 if mint_limited:
54 txt += "\t\t Minting restriction? : {}\n".format(red('Yes'))
55 else:
56 txt += "\t\t Minting restriction? : {}\n".format(yellow('No'))
57
58 if race_condition_mitigated:
59 txt += "\t\t ERC20 race condition mitigation: {}\n".format(green('Yes'))
60 else:
61 txt += "\t\t ERC20 race condition mitigation: {}\n".format(red('No'))
62
63 return txt
64
65 def _get_detectors_result(self):
66 # disable detectors logger
67 logger = logging.getLogger('Detectors')
68 logger.setLevel(logging.ERROR)
69
70 checks_optimization = self.slither.detectors_optimization
71 checks_informational = self.slither.detectors_informational
72 checks_low = self.slither.detectors_low
73 checks_medium = self.slither.detectors_medium
74 checks_high = self.slither.detectors_high
75
76 issues_optimization = [c.detect() for c in checks_optimization]
77 issues_optimization = [c for c in issues_optimization if c]
78 issues_optimization = [item for sublist in issues_optimization for item in sublist]
79
80 issues_informational = [c.detect() for c in checks_informational]
81 issues_informational = [c for c in issues_informational if c]
82 issues_informational = [item for sublist in issues_informational for item in sublist]
83
84 issues_low = [c.detect() for c in checks_low]
85 issues_low = [c for c in issues_low if c]
86 issues_low = [item for sublist in issues_low for item in sublist]
87
88 issues_medium = (c.detect() for c in checks_medium)
89 issues_medium = [c for c in issues_medium if c]
90 issues_medium = [item for sublist in issues_medium for item in sublist]
91
92 issues_high = [c.detect() for c in checks_high]
93 issues_high = [c for c in issues_high if c]
94 issues_high = [item for sublist in issues_high for item in sublist]
95
96
97
98 return (len(issues_optimization),
99 len(issues_informational),
100 len(issues_low),
101 len(issues_medium),
102 len(issues_high))
103
104 def get_detectors_result(self):
105 issues_optimization, issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()
106 txt = "Number of optimization issues: {}\n".format(green(issues_optimization))
107 txt += "Number of informational issues: {}\n".format(green(issues_informational))
108 txt += "Number of low issues: {}\n".format(green(issues_low))
109 if issues_medium > 0:
110 txt += "Number of medium issues: {}\n".format(yellow(issues_medium))
111 else:
112 txt += "Number of medium issues: {}\n".format(green(issues_medium))
113 if issues_high > 0:
114 txt += "Number of high issues: {}\n".format(red(issues_high))
115 else:
116 txt += "Number of high issues: {}\n\n".format(green(issues_high))
117
118 return txt
119
120 @staticmethod
121 def _is_complex_code(contract):
122 for f in contract.functions:
123 if compute_cyclomatic_complexity(f) > 7:
124 return True
125 return False
126
127 def is_complex_code(self, contract):
128 """
129 Check if the code is complex
130 Heuristic, the code is complex if:
131 - One function has a cyclomatic complexity > 7
132 Args:
133 contract
134 """
135
136 is_complex = self._is_complex_code(contract)
137
138 result = red('Yes') if is_complex else green('No')
139
140 return "\tComplex code? {}\n".format(result)
141
142 @staticmethod
143 def _number_functions(contract):
144 return len(contract.functions)
145
146 def _lines_number(self):
147 if not self.slither.source_code:
148 return None
149 total_dep_lines = 0
150 total_lines = 0
151 for filename, source_code in self.slither.source_code.items():
152 lines = len(source_code.splitlines())
153 is_dep = False
154 if self.slither.crytic_compile:
155 is_dep = self.slither.crytic_compile.is_dependency(filename)
156 if is_dep:
157 total_dep_lines += lines
158 else:
159 total_lines += lines
160 return total_lines, total_dep_lines
161
162 def _compilation_type(self):
163 if self.slither.crytic_compile is None:
164 return 'Compilation non standard\n'
165 return f'Compiled with {self.slither.crytic_compile.type}\n'
166
167 def _number_contracts(self):
168 if self.slither.crytic_compile is None:
169 len(self.slither.contracts), 0
170 deps = [c for c in self.slither.contracts if c.is_from_dependency()]
171 contracts = [c for c in self.slither.contracts if not c.is_from_dependency()]
172 return len(contracts), len(deps)
173
174 def _standard_libraries(self):
175 libraries = []
176 for contract in self.contracts:
177 lib = is_standard_library(contract)
178 if lib:
179 libraries.append(lib)
180
181 return libraries
182
183 def _ercs(self):
184 ercs = []
185 for contract in self.contracts:
186 ercs += contract.ercs()
187 return list(set(ercs))
188
189 def output(self, _filename):
190 """
191 _filename is not used
192 Args:
193 _filename(string)
194 """
195
196 txt = "\n"
197 txt += self._compilation_type()
198
199 results = {
200 'contracts': {
201 "elements": []
202 },
203 'number_lines': 0,
204 'number_lines_in_dependencies': 0,
205 'standard_libraries': [],
206 'ercs': [],
207 }
208
209
210 lines_number = self._lines_number()
211 if lines_number:
212 total_lines, total_dep_lines = lines_number
213 txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\n'
214 results['number_lines'] = total_lines
215 results['number_lines__dependencies'] = total_dep_lines
216
217 number_contracts, number_contracts_deps = self._number_contracts()
218 txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \n\n'
219
220 txt += self.get_detectors_result()
221
222 libs = self._standard_libraries()
223 if libs:
224 txt += f'\nUse: {", ".join(libs)}\n'
225 results['standard_libraries'] = [str(l) for l in libs]
226
227 ercs = self._ercs()
228 if ercs:
229 txt += f'ERCs: {", ".join(ercs)}\n'
230 results['ercs'] = [str(e) for e in ercs]
231
232 for contract in self.slither.contracts_derived:
233 txt += "\nContract {}\n".format(contract.name)
234 txt += self.is_complex_code(contract)
235 txt += '\tNumber of functions: {}\n'.format(self._number_functions(contract))
236 ercs = contract.ercs()
237 if ercs:
238 txt += '\tERCs: ' + ','.join(ercs) + '\n'
239 is_erc20 = contract.is_erc20()
240 if is_erc20:
241 txt += '\tERC20 info:\n'
242 txt += self.get_summary_erc20(contract)
243
244 self.info(txt)
245
246 results_contract = output.Output('')
247 for contract in self.slither.contracts_derived:
248 optimization, info, low, medium, high = self._get_detectors_result()
249 contract_d = {'contract_name': contract.name,
250 'is_complex_code': self._is_complex_code(contract),
251 'optimization_issues': optimization,
252 'informational_issues': info,
253 'low_issues': low,
254 'medium_issues': medium,
255 'high_issues': high,
256 'is_erc20': contract.is_erc20(),
257 'number_functions': self._number_functions(contract)}
258 if contract_d['is_erc20']:
259 pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)
260 contract_d['erc20_pause'] = pause
261 if mint_limited is not None:
262 contract_d['erc20_can_mint'] = True
263 contract_d['erc20_mint_limited'] = mint_limited
264 else:
265 contract_d['erc20_can_mint'] = False
266 contract_d['erc20_race_condition_mitigated'] = race_condition_mitigated
267
268 results_contract.add_contract(contract, additional_fields=contract_d)
269
270 results['contracts']['elements'] = results_contract.elements
271
272 json = self.generate_output(txt, additional_fields=results)
273
274 return json
275
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/printers/summary/human_summary.py b/slither/printers/summary/human_summary.py
--- a/slither/printers/summary/human_summary.py
+++ b/slither/printers/summary/human_summary.py
@@ -8,6 +8,8 @@
from slither.utils.code_complexity import compute_cyclomatic_complexity
from slither.utils.colors import green, red, yellow
from slither.utils.standard_libraries import is_standard_library
+from slither.core.cfg.node import NodeType
+
class PrinterHumanSummary(AbstractPrinter):
ARGUMENT = 'human-summary'
@@ -29,14 +31,13 @@
else:
mint_limited = True
else:
- mint_limited = None # no minting
+ mint_limited = None # no minting
- race_condition_mitigated = 'increaseApproval' in functions_name or\
+ race_condition_mitigated = 'increaseApproval' in functions_name or \
'safeIncreaseAllowance' in functions_name
return pause, mint_limited, race_condition_mitigated
-
def get_summary_erc20(self, contract):
txt = ''
@@ -93,8 +94,6 @@
issues_high = [c for c in issues_high if c]
issues_high = [item for sublist in issues_high for item in sublist]
-
-
return (len(issues_optimization),
len(issues_informational),
len(issues_low),
@@ -159,6 +158,17 @@
total_lines += lines
return total_lines, total_dep_lines
+ def _get_number_of_assembly_lines(self):
+ total_asm_lines = 0
+ for contract in self.contracts:
+ for function in contract.functions_declared:
+ for node in function.nodes:
+ if node.type == NodeType.ASSEMBLY:
+ inline_asm = node.inline_asm
+ if inline_asm:
+ total_asm_lines += len(inline_asm.splitlines())
+ return total_asm_lines
+
def _compilation_type(self):
if self.slither.crytic_compile is None:
return 'Compilation non standard\n'
@@ -202,17 +212,20 @@
},
'number_lines': 0,
'number_lines_in_dependencies': 0,
+ 'number_lines_assembly': 0,
'standard_libraries': [],
'ercs': [],
}
-
lines_number = self._lines_number()
if lines_number:
total_lines, total_dep_lines = lines_number
txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\n'
results['number_lines'] = total_lines
results['number_lines__dependencies'] = total_dep_lines
+ total_asm_lines = self._get_number_of_assembly_lines()
+ txt += f"Number of assembly lines: {total_asm_lines}\n"
+ results['number_lines_assembly'] = total_asm_lines
number_contracts, number_contracts_deps = self._number_contracts()
txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \n\n'
@@ -272,4 +285,3 @@
json = self.generate_output(txt, additional_fields=results)
return json
-
|
{"golden_diff": "diff --git a/slither/printers/summary/human_summary.py b/slither/printers/summary/human_summary.py\n--- a/slither/printers/summary/human_summary.py\n+++ b/slither/printers/summary/human_summary.py\n@@ -8,6 +8,8 @@\n from slither.utils.code_complexity import compute_cyclomatic_complexity\n from slither.utils.colors import green, red, yellow\n from slither.utils.standard_libraries import is_standard_library\n+from slither.core.cfg.node import NodeType\n+\n \n class PrinterHumanSummary(AbstractPrinter):\n ARGUMENT = 'human-summary'\n@@ -29,14 +31,13 @@\n else:\n mint_limited = True\n else:\n- mint_limited = None # no minting\n+ mint_limited = None # no minting\n \n- race_condition_mitigated = 'increaseApproval' in functions_name or\\\n+ race_condition_mitigated = 'increaseApproval' in functions_name or \\\n 'safeIncreaseAllowance' in functions_name\n \n return pause, mint_limited, race_condition_mitigated\n \n-\n def get_summary_erc20(self, contract):\n txt = ''\n \n@@ -93,8 +94,6 @@\n issues_high = [c for c in issues_high if c]\n issues_high = [item for sublist in issues_high for item in sublist]\n \n-\n-\n return (len(issues_optimization),\n len(issues_informational),\n len(issues_low),\n@@ -159,6 +158,17 @@\n total_lines += lines\n return total_lines, total_dep_lines\n \n+ def _get_number_of_assembly_lines(self):\n+ total_asm_lines = 0\n+ for contract in self.contracts:\n+ for function in contract.functions_declared:\n+ for node in function.nodes:\n+ if node.type == NodeType.ASSEMBLY:\n+ inline_asm = node.inline_asm\n+ if inline_asm:\n+ total_asm_lines += len(inline_asm.splitlines())\n+ return total_asm_lines\n+\n def _compilation_type(self):\n if self.slither.crytic_compile is None:\n return 'Compilation non standard\\n'\n@@ -202,17 +212,20 @@\n },\n 'number_lines': 0,\n 'number_lines_in_dependencies': 0,\n+ 'number_lines_assembly': 0,\n 'standard_libraries': [],\n 'ercs': [],\n }\n \n-\n lines_number = self._lines_number()\n if lines_number:\n total_lines, total_dep_lines = lines_number\n txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\\n'\n results['number_lines'] = total_lines\n results['number_lines__dependencies'] = total_dep_lines\n+ total_asm_lines = self._get_number_of_assembly_lines()\n+ txt += f\"Number of assembly lines: {total_asm_lines}\\n\"\n+ results['number_lines_assembly'] = total_asm_lines\n \n number_contracts, number_contracts_deps = self._number_contracts()\n txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \\n\\n'\n@@ -272,4 +285,3 @@\n json = self.generate_output(txt, additional_fields=results)\n \n return json\n-\n", "issue": "Human-summary printer: show number of lines of assemly code\nThe human summary printer should show if a contract uses inline assembly, and if so, shows the number of lines of assembly code\n", "before_files": [{"content": "\"\"\"\nModule printing summary of the contract\n\"\"\"\nimport logging\n\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils import output\nfrom slither.utils.code_complexity import compute_cyclomatic_complexity\nfrom slither.utils.colors import green, red, yellow\nfrom slither.utils.standard_libraries import is_standard_library\n\nclass PrinterHumanSummary(AbstractPrinter):\n ARGUMENT = 'human-summary'\n HELP = 'Print a human-readable summary of the contracts'\n\n WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary'\n\n @staticmethod\n def 
_get_summary_erc20(contract):\n\n functions_name = [f.name for f in contract.functions]\n state_variables = [v.name for v in contract.state_variables]\n\n pause = 'pause' in functions_name\n\n if 'mint' in functions_name:\n if not 'mintingFinished' in state_variables:\n mint_limited = False\n else:\n mint_limited = True\n else:\n mint_limited = None # no minting\n\n race_condition_mitigated = 'increaseApproval' in functions_name or\\\n 'safeIncreaseAllowance' in functions_name\n\n return pause, mint_limited, race_condition_mitigated\n\n\n def get_summary_erc20(self, contract):\n txt = ''\n\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n\n if pause:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(yellow('Yes'))\n else:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(green('No'))\n\n if mint_limited is None:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(green('No Minting'))\n else:\n if mint_limited:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(red('Yes'))\n else:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(yellow('No'))\n\n if race_condition_mitigated:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(green('Yes'))\n else:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(red('No'))\n\n return txt\n\n def _get_detectors_result(self):\n # disable detectors logger\n logger = logging.getLogger('Detectors')\n logger.setLevel(logging.ERROR)\n\n checks_optimization = self.slither.detectors_optimization\n checks_informational = self.slither.detectors_informational\n checks_low = self.slither.detectors_low\n checks_medium = self.slither.detectors_medium\n checks_high = self.slither.detectors_high\n\n issues_optimization = [c.detect() for c in checks_optimization]\n issues_optimization = [c for c in issues_optimization if c]\n issues_optimization = [item for sublist in issues_optimization for item in sublist]\n\n issues_informational = [c.detect() for c in checks_informational]\n issues_informational = [c for c in issues_informational if c]\n issues_informational = [item for sublist in issues_informational for item in sublist]\n\n issues_low = [c.detect() for c in checks_low]\n issues_low = [c for c in issues_low if c]\n issues_low = [item for sublist in issues_low for item in sublist]\n\n issues_medium = (c.detect() for c in checks_medium)\n issues_medium = [c for c in issues_medium if c]\n issues_medium = [item for sublist in issues_medium for item in sublist]\n\n issues_high = [c.detect() for c in checks_high]\n issues_high = [c for c in issues_high if c]\n issues_high = [item for sublist in issues_high for item in sublist]\n\n\n\n return (len(issues_optimization),\n len(issues_informational),\n len(issues_low),\n len(issues_medium),\n len(issues_high))\n\n def get_detectors_result(self):\n issues_optimization, issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()\n txt = \"Number of optimization issues: {}\\n\".format(green(issues_optimization))\n txt += \"Number of informational issues: {}\\n\".format(green(issues_informational))\n txt += \"Number of low issues: {}\\n\".format(green(issues_low))\n if issues_medium > 0:\n txt += \"Number of medium issues: {}\\n\".format(yellow(issues_medium))\n else:\n txt += \"Number of medium issues: {}\\n\".format(green(issues_medium))\n if issues_high > 0:\n txt += \"Number of high issues: {}\\n\".format(red(issues_high))\n else:\n txt += \"Number of high issues: {}\\n\\n\".format(green(issues_high))\n\n return 
txt\n\n @staticmethod\n def _is_complex_code(contract):\n for f in contract.functions:\n if compute_cyclomatic_complexity(f) > 7:\n return True\n return False\n\n def is_complex_code(self, contract):\n \"\"\"\n Check if the code is complex\n Heuristic, the code is complex if:\n - One function has a cyclomatic complexity > 7\n Args:\n contract\n \"\"\"\n\n is_complex = self._is_complex_code(contract)\n\n result = red('Yes') if is_complex else green('No')\n\n return \"\\tComplex code? {}\\n\".format(result)\n\n @staticmethod\n def _number_functions(contract):\n return len(contract.functions)\n\n def _lines_number(self):\n if not self.slither.source_code:\n return None\n total_dep_lines = 0\n total_lines = 0\n for filename, source_code in self.slither.source_code.items():\n lines = len(source_code.splitlines())\n is_dep = False\n if self.slither.crytic_compile:\n is_dep = self.slither.crytic_compile.is_dependency(filename)\n if is_dep:\n total_dep_lines += lines\n else:\n total_lines += lines\n return total_lines, total_dep_lines\n\n def _compilation_type(self):\n if self.slither.crytic_compile is None:\n return 'Compilation non standard\\n'\n return f'Compiled with {self.slither.crytic_compile.type}\\n'\n\n def _number_contracts(self):\n if self.slither.crytic_compile is None:\n len(self.slither.contracts), 0\n deps = [c for c in self.slither.contracts if c.is_from_dependency()]\n contracts = [c for c in self.slither.contracts if not c.is_from_dependency()]\n return len(contracts), len(deps)\n\n def _standard_libraries(self):\n libraries = []\n for contract in self.contracts:\n lib = is_standard_library(contract)\n if lib:\n libraries.append(lib)\n\n return libraries\n\n def _ercs(self):\n ercs = []\n for contract in self.contracts:\n ercs += contract.ercs()\n return list(set(ercs))\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = \"\\n\"\n txt += self._compilation_type()\n\n results = {\n 'contracts': {\n \"elements\": []\n },\n 'number_lines': 0,\n 'number_lines_in_dependencies': 0,\n 'standard_libraries': [],\n 'ercs': [],\n }\n\n\n lines_number = self._lines_number()\n if lines_number:\n total_lines, total_dep_lines = lines_number\n txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\\n'\n results['number_lines'] = total_lines\n results['number_lines__dependencies'] = total_dep_lines\n\n number_contracts, number_contracts_deps = self._number_contracts()\n txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \\n\\n'\n\n txt += self.get_detectors_result()\n\n libs = self._standard_libraries()\n if libs:\n txt += f'\\nUse: {\", \".join(libs)}\\n'\n results['standard_libraries'] = [str(l) for l in libs]\n\n ercs = self._ercs()\n if ercs:\n txt += f'ERCs: {\", \".join(ercs)}\\n'\n results['ercs'] = [str(e) for e in ercs]\n\n for contract in self.slither.contracts_derived:\n txt += \"\\nContract {}\\n\".format(contract.name)\n txt += self.is_complex_code(contract)\n txt += '\\tNumber of functions: {}\\n'.format(self._number_functions(contract))\n ercs = contract.ercs()\n if ercs:\n txt += '\\tERCs: ' + ','.join(ercs) + '\\n'\n is_erc20 = contract.is_erc20()\n if is_erc20:\n txt += '\\tERC20 info:\\n'\n txt += self.get_summary_erc20(contract)\n\n self.info(txt)\n\n results_contract = output.Output('')\n for contract in self.slither.contracts_derived:\n optimization, info, low, medium, high = self._get_detectors_result()\n contract_d = {'contract_name': 
contract.name,\n 'is_complex_code': self._is_complex_code(contract),\n 'optimization_issues': optimization,\n 'informational_issues': info,\n 'low_issues': low,\n 'medium_issues': medium,\n 'high_issues': high,\n 'is_erc20': contract.is_erc20(),\n 'number_functions': self._number_functions(contract)}\n if contract_d['is_erc20']:\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n contract_d['erc20_pause'] = pause\n if mint_limited is not None:\n contract_d['erc20_can_mint'] = True\n contract_d['erc20_mint_limited'] = mint_limited\n else:\n contract_d['erc20_can_mint'] = False\n contract_d['erc20_race_condition_mitigated'] = race_condition_mitigated\n\n results_contract.add_contract(contract, additional_fields=contract_d)\n\n results['contracts']['elements'] = results_contract.elements\n\n json = self.generate_output(txt, additional_fields=results)\n\n return json\n\n", "path": "slither/printers/summary/human_summary.py"}], "after_files": [{"content": "\"\"\"\nModule printing summary of the contract\n\"\"\"\nimport logging\n\nfrom slither.printers.abstract_printer import AbstractPrinter\nfrom slither.utils import output\nfrom slither.utils.code_complexity import compute_cyclomatic_complexity\nfrom slither.utils.colors import green, red, yellow\nfrom slither.utils.standard_libraries import is_standard_library\nfrom slither.core.cfg.node import NodeType\n\n\nclass PrinterHumanSummary(AbstractPrinter):\n ARGUMENT = 'human-summary'\n HELP = 'Print a human-readable summary of the contracts'\n\n WIKI = 'https://github.com/trailofbits/slither/wiki/Printer-documentation#human-summary'\n\n @staticmethod\n def _get_summary_erc20(contract):\n\n functions_name = [f.name for f in contract.functions]\n state_variables = [v.name for v in contract.state_variables]\n\n pause = 'pause' in functions_name\n\n if 'mint' in functions_name:\n if not 'mintingFinished' in state_variables:\n mint_limited = False\n else:\n mint_limited = True\n else:\n mint_limited = None # no minting\n\n race_condition_mitigated = 'increaseApproval' in functions_name or \\\n 'safeIncreaseAllowance' in functions_name\n\n return pause, mint_limited, race_condition_mitigated\n\n def get_summary_erc20(self, contract):\n txt = ''\n\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n\n if pause:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(yellow('Yes'))\n else:\n txt += \"\\t\\t Can be paused? : {}\\n\".format(green('No'))\n\n if mint_limited is None:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(green('No Minting'))\n else:\n if mint_limited:\n txt += \"\\t\\t Minting restriction? : {}\\n\".format(red('Yes'))\n else:\n txt += \"\\t\\t Minting restriction? 
: {}\\n\".format(yellow('No'))\n\n if race_condition_mitigated:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(green('Yes'))\n else:\n txt += \"\\t\\t ERC20 race condition mitigation: {}\\n\".format(red('No'))\n\n return txt\n\n def _get_detectors_result(self):\n # disable detectors logger\n logger = logging.getLogger('Detectors')\n logger.setLevel(logging.ERROR)\n\n checks_optimization = self.slither.detectors_optimization\n checks_informational = self.slither.detectors_informational\n checks_low = self.slither.detectors_low\n checks_medium = self.slither.detectors_medium\n checks_high = self.slither.detectors_high\n\n issues_optimization = [c.detect() for c in checks_optimization]\n issues_optimization = [c for c in issues_optimization if c]\n issues_optimization = [item for sublist in issues_optimization for item in sublist]\n\n issues_informational = [c.detect() for c in checks_informational]\n issues_informational = [c for c in issues_informational if c]\n issues_informational = [item for sublist in issues_informational for item in sublist]\n\n issues_low = [c.detect() for c in checks_low]\n issues_low = [c for c in issues_low if c]\n issues_low = [item for sublist in issues_low for item in sublist]\n\n issues_medium = (c.detect() for c in checks_medium)\n issues_medium = [c for c in issues_medium if c]\n issues_medium = [item for sublist in issues_medium for item in sublist]\n\n issues_high = [c.detect() for c in checks_high]\n issues_high = [c for c in issues_high if c]\n issues_high = [item for sublist in issues_high for item in sublist]\n\n return (len(issues_optimization),\n len(issues_informational),\n len(issues_low),\n len(issues_medium),\n len(issues_high))\n\n def get_detectors_result(self):\n issues_optimization, issues_informational, issues_low, issues_medium, issues_high = self._get_detectors_result()\n txt = \"Number of optimization issues: {}\\n\".format(green(issues_optimization))\n txt += \"Number of informational issues: {}\\n\".format(green(issues_informational))\n txt += \"Number of low issues: {}\\n\".format(green(issues_low))\n if issues_medium > 0:\n txt += \"Number of medium issues: {}\\n\".format(yellow(issues_medium))\n else:\n txt += \"Number of medium issues: {}\\n\".format(green(issues_medium))\n if issues_high > 0:\n txt += \"Number of high issues: {}\\n\".format(red(issues_high))\n else:\n txt += \"Number of high issues: {}\\n\\n\".format(green(issues_high))\n\n return txt\n\n @staticmethod\n def _is_complex_code(contract):\n for f in contract.functions:\n if compute_cyclomatic_complexity(f) > 7:\n return True\n return False\n\n def is_complex_code(self, contract):\n \"\"\"\n Check if the code is complex\n Heuristic, the code is complex if:\n - One function has a cyclomatic complexity > 7\n Args:\n contract\n \"\"\"\n\n is_complex = self._is_complex_code(contract)\n\n result = red('Yes') if is_complex else green('No')\n\n return \"\\tComplex code? 
{}\\n\".format(result)\n\n @staticmethod\n def _number_functions(contract):\n return len(contract.functions)\n\n def _lines_number(self):\n if not self.slither.source_code:\n return None\n total_dep_lines = 0\n total_lines = 0\n for filename, source_code in self.slither.source_code.items():\n lines = len(source_code.splitlines())\n is_dep = False\n if self.slither.crytic_compile:\n is_dep = self.slither.crytic_compile.is_dependency(filename)\n if is_dep:\n total_dep_lines += lines\n else:\n total_lines += lines\n return total_lines, total_dep_lines\n\n def _get_number_of_assembly_lines(self):\n total_asm_lines = 0\n for contract in self.contracts:\n for function in contract.functions_declared:\n for node in function.nodes:\n if node.type == NodeType.ASSEMBLY:\n inline_asm = node.inline_asm\n if inline_asm:\n total_asm_lines += len(inline_asm.splitlines())\n return total_asm_lines\n\n def _compilation_type(self):\n if self.slither.crytic_compile is None:\n return 'Compilation non standard\\n'\n return f'Compiled with {self.slither.crytic_compile.type}\\n'\n\n def _number_contracts(self):\n if self.slither.crytic_compile is None:\n len(self.slither.contracts), 0\n deps = [c for c in self.slither.contracts if c.is_from_dependency()]\n contracts = [c for c in self.slither.contracts if not c.is_from_dependency()]\n return len(contracts), len(deps)\n\n def _standard_libraries(self):\n libraries = []\n for contract in self.contracts:\n lib = is_standard_library(contract)\n if lib:\n libraries.append(lib)\n\n return libraries\n\n def _ercs(self):\n ercs = []\n for contract in self.contracts:\n ercs += contract.ercs()\n return list(set(ercs))\n\n def output(self, _filename):\n \"\"\"\n _filename is not used\n Args:\n _filename(string)\n \"\"\"\n\n txt = \"\\n\"\n txt += self._compilation_type()\n\n results = {\n 'contracts': {\n \"elements\": []\n },\n 'number_lines': 0,\n 'number_lines_in_dependencies': 0,\n 'number_lines_assembly': 0,\n 'standard_libraries': [],\n 'ercs': [],\n }\n\n lines_number = self._lines_number()\n if lines_number:\n total_lines, total_dep_lines = lines_number\n txt += f'Number of lines: {total_lines} (+ {total_dep_lines} in dependencies)\\n'\n results['number_lines'] = total_lines\n results['number_lines__dependencies'] = total_dep_lines\n total_asm_lines = self._get_number_of_assembly_lines()\n txt += f\"Number of assembly lines: {total_asm_lines}\\n\"\n results['number_lines_assembly'] = total_asm_lines\n\n number_contracts, number_contracts_deps = self._number_contracts()\n txt += f'Number of contracts: {number_contracts} (+ {number_contracts_deps} in dependencies) \\n\\n'\n\n txt += self.get_detectors_result()\n\n libs = self._standard_libraries()\n if libs:\n txt += f'\\nUse: {\", \".join(libs)}\\n'\n results['standard_libraries'] = [str(l) for l in libs]\n\n ercs = self._ercs()\n if ercs:\n txt += f'ERCs: {\", \".join(ercs)}\\n'\n results['ercs'] = [str(e) for e in ercs]\n\n for contract in self.slither.contracts_derived:\n txt += \"\\nContract {}\\n\".format(contract.name)\n txt += self.is_complex_code(contract)\n txt += '\\tNumber of functions: {}\\n'.format(self._number_functions(contract))\n ercs = contract.ercs()\n if ercs:\n txt += '\\tERCs: ' + ','.join(ercs) + '\\n'\n is_erc20 = contract.is_erc20()\n if is_erc20:\n txt += '\\tERC20 info:\\n'\n txt += self.get_summary_erc20(contract)\n\n self.info(txt)\n\n results_contract = output.Output('')\n for contract in self.slither.contracts_derived:\n optimization, info, low, medium, high = 
self._get_detectors_result()\n contract_d = {'contract_name': contract.name,\n 'is_complex_code': self._is_complex_code(contract),\n 'optimization_issues': optimization,\n 'informational_issues': info,\n 'low_issues': low,\n 'medium_issues': medium,\n 'high_issues': high,\n 'is_erc20': contract.is_erc20(),\n 'number_functions': self._number_functions(contract)}\n if contract_d['is_erc20']:\n pause, mint_limited, race_condition_mitigated = self._get_summary_erc20(contract)\n contract_d['erc20_pause'] = pause\n if mint_limited is not None:\n contract_d['erc20_can_mint'] = True\n contract_d['erc20_mint_limited'] = mint_limited\n else:\n contract_d['erc20_can_mint'] = False\n contract_d['erc20_race_condition_mitigated'] = race_condition_mitigated\n\n results_contract.add_contract(contract, additional_fields=contract_d)\n\n results['contracts']['elements'] = results_contract.elements\n\n json = self.generate_output(txt, additional_fields=results)\n\n return json\n", "path": "slither/printers/summary/human_summary.py"}]}
| 3,333 | 738 |
gh_patches_debug_3910 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4935 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used
<!--
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
-->
### Summary of problem
It's not immediately clear from documentation if going through the trouble of patching and instrumenting SQLAlchemy provides any additional details over the auto-instrumented psycopg2 via `ddtrace-run`.
I currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However upon seeing that ddtrace additionally has an [integration to SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious if it provides any added benefit on top of the auto instrumentation. Is it meant to be complimentary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database/what psycopg2 integration would report.
I ended up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:
```
Pin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)
```
which leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using psycopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code _appears_ to indicate it adds no additional benefit over psycopg2's auto-instrumentation.
Documentation should be updated (hell, I'd love a confirmatory answer here 😃 ) on whether or not one should even bother going to the trouble of manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a postgres DB provided by psycopg2. Something to the effect, in SQLAlchemy's section:
> # SQLAlchemy
>
> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.
>
> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:
> ...
### Which version of dd-trace-py are you using?
1.7.1
### Which version of pip are you using?
22.3.1
### Which libraries and their versions are you using?
```
Flask==1.1.1
SQLAlchemy==1.3.11
ddtrace==1.7.1
psycopg2-binary==2.9.1
```
### How can we reproduce your problem?
N/A
### What is the result that you get?
N/A
### What is the result that you expected?
N/A
--- END ISSUE ---
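For context, a minimal sketch of the setup the issue describes — SQLAlchemy dispatching through psycopg2 with only the DB-API auto-instrumentation active; the DSN and credentials are illustrative assumptions, not taken from the report:

```python
# Sketch only: under ddtrace-run the patching below happens automatically;
# it is shown explicitly so the snippet stands on its own.
from ddtrace import patch_all

patch_all()  # enables the psycopg integration (among others)

from sqlalchemy import create_engine

# SQLAlchemy issues queries through psycopg2 connections, whose cursors are
# traced by the psycopg integration, producing `postgres` spans in APM.
engine = create_engine("postgresql+psycopg2://user:secret@localhost:5432/app")
with engine.connect() as conn:
    conn.execute("SELECT 1")
```

Whether the SQLAlchemy-level patch adds anything on top of those spans is exactly what the report asks to have documented.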
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/sqlalchemy/__init__.py`
Content:
```
1 """
2 To trace sqlalchemy queries, add instrumentation to the engine class
3 using the patch method that **must be called before** importing sqlalchemy::
4
5 # patch before importing `create_engine`
6 from ddtrace import Pin, patch
7 patch(sqlalchemy=True)
8
9 # use SQLAlchemy as usual
10 from sqlalchemy import create_engine
11
12 engine = create_engine('sqlite:///:memory:')
13 engine.connect().execute("SELECT COUNT(*) FROM users")
14
15 # Use a PIN to specify metadata related to this engine
16 Pin.override(engine, service='replica-db')
17 """
18 from ...internal.utils.importlib import require_modules
19
20
21 required_modules = ["sqlalchemy", "sqlalchemy.event"]
22
23 with require_modules(required_modules) as missing_modules:
24 if not missing_modules:
25 from .engine import trace_engine
26 from .patch import patch
27 from .patch import unpatch
28
29 __all__ = ["trace_engine", "patch", "unpatch"]
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py
--- a/ddtrace/contrib/sqlalchemy/__init__.py
+++ b/ddtrace/contrib/sqlalchemy/__init__.py
@@ -1,4 +1,8 @@
"""
+Enabling the SQLAlchemy integration is only necessary if there is no
+instrumentation available or enabled for the underlying database engine (e.g.
+pymysql, psycopg, mysql-connector, etc.).
+
To trace sqlalchemy queries, add instrumentation to the engine class
using the patch method that **must be called before** importing sqlalchemy::
|
{"golden_diff": "diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py\n--- a/ddtrace/contrib/sqlalchemy/__init__.py\n+++ b/ddtrace/contrib/sqlalchemy/__init__.py\n@@ -1,4 +1,8 @@\n \"\"\"\n+Enabling the SQLAlchemy integration is only necessary if there is no\n+instrumentation available or enabled for the underlying database engine (e.g.\n+pymysql, psycopg, mysql-connector, etc.).\n+\n To trace sqlalchemy queries, add instrumentation to the engine class\n using the patch method that **must be called before** importing sqlalchemy::\n", "issue": "Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\n\r\nIt's not immediately clear from documentation if going through the trouble of patching and instrumenting SQLAlchemy provides any additional details over the auto-instrumented psycopg2 via `ddtrace-run`.\r\n\r\nI currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However upon seeing that ddtrace additionally has an [integration to SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious if it provides any added benefit on top of the auto instrumentation. Is it meant to be complimentary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database/what psycopg2 integration would report.\r\n\r\nI ending up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:\r\n\r\n```\r\nPin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)\r\n```\r\n\r\nwhich leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using pscopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code _appears_ to indicate it adds no additional benefit over psychopg2's auto-instrumentation.\r\n\r\nDocumentation should be updated (hell, I'd love a confirmatory answer here \ud83d\ude03 ) on whether or not one should even bother going to the trouble of manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a postgres DB provided by psycopg2. 
Something to the effect, in SQLAlchemy's section:\r\n\r\n> # SQLAlchemy\r\n> \r\n> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.\r\n>\r\n> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:\r\n> ...\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n1.7.1\r\n\r\n### Which version of pip are you using?\r\n\r\n22.3.1\r\n\r\n### Which libraries and their versions are you using?\r\n\r\n```\r\nFlask==1.1.1\r\nSQLAlchemy==1.3.11\r\nddtrace==1.7.1\r\npsycopg2-binary==2.9.1\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\nN/A\r\n\r\n### What is the result that you get?\r\n\r\nN/A\r\n\r\n### What is the result that you expected?\r\n\r\nN/A\n", "before_files": [{"content": "\"\"\"\nTo trace sqlalchemy queries, add instrumentation to the engine class\nusing the patch method that **must be called before** importing sqlalchemy::\n\n # patch before importing `create_engine`\n from ddtrace import Pin, patch\n patch(sqlalchemy=True)\n\n # use SQLAlchemy as usual\n from sqlalchemy import create_engine\n\n engine = create_engine('sqlite:///:memory:')\n engine.connect().execute(\"SELECT COUNT(*) FROM users\")\n\n # Use a PIN to specify metadata related to this engine\n Pin.override(engine, service='replica-db')\n\"\"\"\nfrom ...internal.utils.importlib import require_modules\n\n\nrequired_modules = [\"sqlalchemy\", \"sqlalchemy.event\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .engine import trace_engine\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"trace_engine\", \"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/sqlalchemy/__init__.py"}], "after_files": [{"content": "\"\"\"\nEnabling the SQLAlchemy integration is only necessary if there is no\ninstrumentation available or enabled for the underlying database engine (e.g.\npymysql, psycopg, mysql-connector, etc.).\n\nTo trace sqlalchemy queries, add instrumentation to the engine class\nusing the patch method that **must be called before** importing sqlalchemy::\n\n # patch before importing `create_engine`\n from ddtrace import Pin, patch\n patch(sqlalchemy=True)\n\n # use SQLAlchemy as usual\n from sqlalchemy import create_engine\n\n engine = create_engine('sqlite:///:memory:')\n engine.connect().execute(\"SELECT COUNT(*) FROM users\")\n\n # Use a PIN to specify metadata related to this engine\n Pin.override(engine, service='replica-db')\n\"\"\"\nfrom ...internal.utils.importlib import require_modules\n\n\nrequired_modules = [\"sqlalchemy\", \"sqlalchemy.event\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .engine import trace_engine\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"trace_engine\", \"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/sqlalchemy/__init__.py"}]}
| 1,154 | 132 |
gh_patches_debug_19439
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError when conducting spatial join with empty polygons
Hey, not sure if this is addressed elsewhere, but a search of past issues suggests it's not.
```python
import geopandas, numpy, shapely.geometry as geom
center = geom.Point((0,0))
circle = center.buffer(1)
empty = center.buffer(0)
empty_df = geopandas.GeoDataFrame(geometry=[circle, empty])
points = [geom.Point(x) for x in numpy.random.random(size=(10,2))]
points_df = geopandas.GeoDataFrame(geometry=points)
geopandas.sjoin(points_df, empty_df, op='intersects') # raises an IndexError but completes
```
I get a result from this join, but I also get an `IndexError`:
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
_ctypes/callbacks.c in 'calling callback function'()
~/anaconda3/envs/ana/lib/python3.7/site-packages/rtree/index.py in py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length)
708 # xmin, xmax, ymin, ymax, zmin, zmax
709 for i in range(dimension):
--> 710 mins[i] = coordinates[i*2]
711 maxs[i] = coordinates[(i*2)+1]
712
IndexError: list index out of range
```
Not sure how this `IndexError` relates to the result, but I didn't see another issue mentioning this, so I thought I'd raise it.
rtree 0.8.3
geopandas 0.4.1
shapely 1.6.4.post1
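
For context, a standalone sketch of the empty-bounds behaviour (illustrative only; Shapely reports `bounds == ()` for empty geometries, which is presumably the zero-length sequence the `coordinates[i*2]` line in the traceback above chokes on):

```python
import shapely.geometry as geom
from rtree import index

empty = geom.Point((0, 0)).buffer(0)
circle = geom.Point((0, 0)).buffer(1)
print(empty.bounds)   # () -- an empty polygon has no coordinates at all
print(circle.bounds)  # (-1.0, -1.0, 1.0, 1.0) -- a normal 4-tuple

# Build a small rtree over the non-empty geometry only.
tree = index.Index()
tree.insert(0, circle.bounds)

def safe_query(tree, geometry):
    # Skip the rtree lookup entirely for empty geometries, the same guard
    # the eventual sjoin fix applies before calling tree.intersection().
    bounds = geometry.bounds
    return list(tree.intersection(bounds)) if bounds != () else []

print(safe_query(tree, circle))  # [0]
print(safe_query(tree, empty))   # []
```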
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/tools/sjoin.py`
Content:
```
1 from warnings import warn
2
3 import numpy as np
4 import pandas as pd
5
6 from shapely import prepared
7
8 from geopandas import GeoDataFrame
9
10
11 def sjoin(
12 left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
13 ):
14 """Spatial join of two GeoDataFrames.
15
16 Parameters
17 ----------
18 left_df, right_df : GeoDataFrames
19 how : string, default 'inner'
20 The type of join:
21
22 * 'left': use keys from left_df; retain only left_df geometry column
23 * 'right': use keys from right_df; retain only right_df geometry column
24 * 'inner': use intersection of keys from both dfs; retain only
25 left_df geometry column
26 op : string, default 'intersection'
27 Binary predicate, one of {'intersects', 'contains', 'within'}.
28 See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
29 lsuffix : string, default 'left'
30 Suffix to apply to overlapping column names (left GeoDataFrame).
31 rsuffix : string, default 'right'
32 Suffix to apply to overlapping column names (right GeoDataFrame).
33
34 """
35 if not isinstance(left_df, GeoDataFrame):
36 raise ValueError(
37 "'left_df' should be GeoDataFrame, got {}".format(type(left_df))
38 )
39
40 if not isinstance(right_df, GeoDataFrame):
41 raise ValueError(
42 "'right_df' should be GeoDataFrame, got {}".format(type(right_df))
43 )
44
45 allowed_hows = ["left", "right", "inner"]
46 if how not in allowed_hows:
47 raise ValueError(
48 '`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
49 )
50
51 allowed_ops = ["contains", "within", "intersects"]
52 if op not in allowed_ops:
53 raise ValueError(
54 '`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
55 )
56
57 if left_df.crs != right_df.crs:
58 warn(
59 (
60 "CRS of frames being joined does not match!"
61 "(%s != %s)" % (left_df.crs, right_df.crs)
62 )
63 )
64
65 index_left = "index_%s" % lsuffix
66 index_right = "index_%s" % rsuffix
67
68 # due to GH 352
69 if any(left_df.columns.isin([index_left, index_right])) or any(
70 right_df.columns.isin([index_left, index_right])
71 ):
72 raise ValueError(
73 "'{0}' and '{1}' cannot be names in the frames being"
74 " joined".format(index_left, index_right)
75 )
76
77 # Attempt to re-use spatial indexes, otherwise generate the spatial index
78 # for the longer dataframe
79 if right_df._sindex_generated or (
80 not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
81 ):
82 tree_idx = right_df.sindex
83 tree_idx_right = True
84 else:
85 tree_idx = left_df.sindex
86 tree_idx_right = False
87
88 # the rtree spatial index only allows limited (numeric) index types, but an
89 # index in geopandas may be any arbitrary dtype. so reset both indices now
90 # and store references to the original indices, to be reaffixed later.
91 # GH 352
92 left_df = left_df.copy(deep=True)
93 left_df.index = left_df.index.rename(index_left)
94 left_df = left_df.reset_index()
95 right_df = right_df.copy(deep=True)
96 right_df.index = right_df.index.rename(index_right)
97 right_df = right_df.reset_index()
98
99 if op == "within":
100 # within implemented as the inverse of contains; swap names
101 left_df, right_df = right_df, left_df
102 tree_idx_right = not tree_idx_right
103
104 r_idx = np.empty((0, 0))
105 l_idx = np.empty((0, 0))
106 # get rtree spatial index
107 if tree_idx_right:
108 idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
109 lambda x: list(tree_idx.intersection(x))
110 )
111 idxmatch = idxmatch[idxmatch.apply(len) > 0]
112 # indexes of overlapping boundaries
113 if idxmatch.shape[0] > 0:
114 r_idx = np.concatenate(idxmatch.values)
115 l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
116 else:
117 # tree_idx_df == 'left'
118 idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
119 lambda x: list(tree_idx.intersection(x))
120 )
121 idxmatch = idxmatch[idxmatch.apply(len) > 0]
122 if idxmatch.shape[0] > 0:
123 # indexes of overlapping boundaries
124 l_idx = np.concatenate(idxmatch.values)
125 r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
126
127 if len(r_idx) > 0 and len(l_idx) > 0:
128 # Vectorize predicate operations
129 def find_intersects(a1, a2):
130 return a1.intersects(a2)
131
132 def find_contains(a1, a2):
133 return a1.contains(a2)
134
135 predicate_d = {
136 "intersects": find_intersects,
137 "contains": find_contains,
138 "within": find_contains,
139 }
140
141 check_predicates = np.vectorize(predicate_d[op])
142
143 result = pd.DataFrame(
144 np.column_stack(
145 [
146 l_idx,
147 r_idx,
148 check_predicates(
149 left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
150 right_df[right_df.geometry.name][r_idx],
151 ),
152 ]
153 )
154 )
155
156 result.columns = ["_key_left", "_key_right", "match_bool"]
157 result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
158 "match_bool", axis=1
159 )
160
161 else:
162 # when output from the join has no overlapping geometries
163 result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
164
165 if op == "within":
166 # within implemented as the inverse of contains; swap names
167 left_df, right_df = right_df, left_df
168 result = result.rename(
169 columns={"_key_left": "_key_right", "_key_right": "_key_left"}
170 )
171
172 if how == "inner":
173 result = result.set_index("_key_left")
174 joined = left_df.merge(result, left_index=True, right_index=True).merge(
175 right_df.drop(right_df.geometry.name, axis=1),
176 left_on="_key_right",
177 right_index=True,
178 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
179 )
180 joined = joined.set_index(index_left).drop(["_key_right"], axis=1)
181 joined.index.name = None
182 elif how == "left":
183 result = result.set_index("_key_left")
184 joined = left_df.merge(
185 result, left_index=True, right_index=True, how="left"
186 ).merge(
187 right_df.drop(right_df.geometry.name, axis=1),
188 how="left",
189 left_on="_key_right",
190 right_index=True,
191 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
192 )
193 joined = joined.set_index(index_left).drop(["_key_right"], axis=1)
194 joined.index.name = None
195 else: # how == 'right':
196 joined = (
197 left_df.drop(left_df.geometry.name, axis=1)
198 .merge(
199 result.merge(
200 right_df, left_on="_key_right", right_index=True, how="right"
201 ),
202 left_index=True,
203 right_on="_key_left",
204 how="right",
205 )
206 .set_index(index_right)
207 )
208 joined = joined.drop(["_key_left", "_key_right"], axis=1)
209
210 return joined
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -106,7 +106,7 @@
# get rtree spatial index
if tree_idx_right:
idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
- lambda x: list(tree_idx.intersection(x))
+ lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
# indexes of overlapping boundaries
@@ -116,7 +116,7 @@
else:
# tree_idx_df == 'left'
idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
- lambda x: list(tree_idx.intersection(x))
+ lambda x: list(tree_idx.intersection(x)) if not x == () else []
)
idxmatch = idxmatch[idxmatch.apply(len) > 0]
if idxmatch.shape[0] > 0:
|
{"golden_diff": "diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py\n--- a/geopandas/tools/sjoin.py\n+++ b/geopandas/tools/sjoin.py\n@@ -106,7 +106,7 @@\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n- lambda x: list(tree_idx.intersection(x))\n+ lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n@@ -116,7 +116,7 @@\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n- lambda x: list(tree_idx.intersection(x))\n+ lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n", "issue": "IndexError when conducting spatial join with empty polygons\nHey, not sure if this addressed elsewhere, but a search of past results suggests it's not. \r\n\r\n```python\r\nimport geopandas, numpy, shapely.geometry as geom\r\n\r\ncenter = geom.Point((0,0))\r\ncircle = center.buffer(1)\r\nempty = center.buffer(0)\r\n\r\nempty_df = geopandas.GeoDataFrame(geometry=[circle, empty])\r\n\r\npoints = [geom.Point(x) for x in numpy.random.random(size=(10,2))]\r\npoints_df = geopandas.GeoDataFrame(geometry=points)\r\n\r\ngeopandas.sjoin(points_df, empty_df, op='intersects') # raises an IndexError but completes\r\n```\r\nI get a result from this join, but I also get an `IndexError`:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n_ctypes/callbacks.c in 'calling callback function'()\r\n\r\n~/anaconda3/envs/ana/lib/python3.7/site-packages/rtree/index.py in py_next_item(p_id, p_mins, p_maxs, p_dimension, p_data, p_length)\r\n 708 # xmin, xmax, ymin, ymax, zmin, zmax\r\n 709 for i in range(dimension):\r\n--> 710 mins[i] = coordinates[i*2]\r\n 711 maxs[i] = coordinates[(i*2)+1]\r\n 712 \r\n\r\nIndexError: list index out of range\r\n```\r\nNot sure how this `IndexError` relates to the result, but I didn't see another issue mentioning this, so I thought I'd raise it. 
\r\n\r\nrtree 0.8.3\r\ngeopandas 0.4.1\r\nshapely 1.6.4.post1\r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x))\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x))\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(result, left_index=True, right_index=True).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(\n result, left_index=True, right_index=True, how=\"left\"\n ).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n 
)\n joined = joined.drop([\"_key_left\", \"_key_right\"], axis=1)\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}], "after_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(result, left_index=True, right_index=True).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = left_df.merge(\n result, left_index=True, right_index=True, how=\"left\"\n ).merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n joined = joined.set_index(index_left).drop([\"_key_right\"], axis=1)\n joined.index.name = None\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n 
how=\"right\",\n )\n .set_index(index_right)\n )\n joined = joined.drop([\"_key_left\", \"_key_right\"], axis=1)\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}]}
| 2,919 | 237 |
gh_patches_debug_32433
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-9834
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Quickies on Xvideos
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm requesting a site-specific feature
- [X] I've verified that I'm running yt-dlp version **2023.02.17** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
Worldwide
### Example URLs
https://www.xvideos.com/profiles/firolix#quickies/a/51392867
https://www.xvideos.com/profiles/firolix#quickies/a/63386619
### Provide a description that is worded well enough to be understood
Quickies, XVideos' version of shorts, aren't currently downloadable. Can you add support for them?
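
For reference, a quick illustrative check (not part of the original report) that the extractor's current quickies pattern only admits `amateur-channels/` URLs, which is presumably why the profile-style links above fall through to the generic extractor in the verbose output below:

```python
import re

# Current XVideosQuickiesIE pattern, copied from yt_dlp/extractor/xvideos.py below.
current = r'https?://(?P<domain>(?:[^/]+\.)?xvideos2?\.com)/amateur-channels/[^#]+#quickies/a/(?P<id>\d+)'

for url in (
    'https://www.xvideos.com/profiles/firolix#quickies/a/51392867',
    'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',
):
    m = re.match(current, url)
    print(url, '->', m.group('id') if m else 'no match')
# The profiles/ URL prints 'no match'; only the amateur-channels/ URL is recognized.
```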
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-vU', '-f', 'mp4', 'https://www.xvideos.com/profiles/firolix#quickies/a/51392867']
[debug] User config "C:\Users\Noah Bangs\yt-dlp.conf": ['-o', 'H:\\youtube-dl\\Export\\%(webpage_url_domain)s\\%(title)s - %(channel,artist - album)s.%(ext)s', '--retries', 'infinite', '--download-archive', 'registry.txt']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2023.02.17 [a0a7c01] (win_exe)
[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19044-SP0 (OpenSSL 1.1.1k 25 Mar 2021)
[debug] exe versions: ffmpeg git-2020-04-03-52523b6, ffprobe git-2020-04-03-52523b6
[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4
[debug] Proxy map: {}
[debug] Loaded 1782 extractors
[debug] Loading archive file 'registry.txt'
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.02.17, Current version: 2023.02.17
yt-dlp is up to date (2023.02.17)
[generic] Extracting URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867
[generic] 51392867: Downloading webpage
WARNING: [generic] Falling back on generic information extractor
[generic] 51392867: Extracting information
[debug] Looking for embeds
ERROR: Unsupported URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867
Traceback (most recent call last):
File "yt_dlp\YoutubeDL.py", line 1502, in wrapper
File "yt_dlp\YoutubeDL.py", line 1578, in __extract_info
File "yt_dlp\extractor\common.py", line 693, in extract
File "yt_dlp\extractor\generic.py", line 2509, in _real_extract
yt_dlp.utils.UnsupportedError: Unsupported URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/xvideos.py`
Content:
```
1 import re
2
3 from .common import InfoExtractor
4 from ..compat import compat_urllib_parse_unquote
5 from ..utils import (
6 clean_html,
7 determine_ext,
8 ExtractorError,
9 int_or_none,
10 parse_duration,
11 )
12
13
14 class XVideosIE(InfoExtractor):
15 _VALID_URL = r'''(?x)
16 https?://
17 (?:
18 (?:[^/]+\.)?xvideos2?\.com/video\.?|
19 (?:www\.)?xvideos\.es/video\.?|
20 (?:www|flashservice)\.xvideos\.com/embedframe/|
21 static-hw\.xvideos\.com/swf/xv-player\.swf\?.*?\bid_video=
22 )
23 (?P<id>[0-9a-z]+)
24 '''
25 _TESTS = [{
26 'url': 'http://xvideos.com/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',
27 'md5': '396255a900a6bddb3e98985f0b86c3fd',
28 'info_dict': {
29 'id': 'ucuvbkfda4e',
30 'ext': 'mp4',
31 'title': 'A Beautiful Red-Haired Stranger Was Refused, But Still Came To My Room For Sex',
32 'duration': 1238,
33 'age_limit': 18,
34 'thumbnail': r're:^https://cdn\d+-pic.xvideos-cdn.com/.+\.jpg',
35 }
36 }, {
37 # Broken HLS formats
38 'url': 'https://www.xvideos.com/video65982001/what_s_her_name',
39 'md5': '56742808292c8fa1418e4538c262c58b',
40 'info_dict': {
41 'id': '65982001',
42 'ext': 'mp4',
43 'title': 'what\'s her name?',
44 'duration': 120,
45 'age_limit': 18,
46 'thumbnail': r're:^https://cdn\d+-pic.xvideos-cdn.com/.+\.jpg',
47 }
48 }, {
49 'url': 'https://flashservice.xvideos.com/embedframe/4588838',
50 'only_matching': True,
51 }, {
52 'url': 'https://www.xvideos.com/embedframe/4588838',
53 'only_matching': True,
54 }, {
55 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=4588838',
56 'only_matching': True,
57 }, {
58 'url': 'http://xvideos.com/video4588838/biker_takes_his_girl',
59 'only_matching': True
60 }, {
61 'url': 'https://xvideos.com/video4588838/biker_takes_his_girl',
62 'only_matching': True
63 }, {
64 'url': 'https://xvideos.es/video4588838/biker_takes_his_girl',
65 'only_matching': True
66 }, {
67 'url': 'https://www.xvideos.es/video4588838/biker_takes_his_girl',
68 'only_matching': True
69 }, {
70 'url': 'http://xvideos.es/video4588838/biker_takes_his_girl',
71 'only_matching': True
72 }, {
73 'url': 'http://www.xvideos.es/video4588838/biker_takes_his_girl',
74 'only_matching': True
75 }, {
76 'url': 'http://fr.xvideos.com/video4588838/biker_takes_his_girl',
77 'only_matching': True
78 }, {
79 'url': 'https://fr.xvideos.com/video4588838/biker_takes_his_girl',
80 'only_matching': True
81 }, {
82 'url': 'http://it.xvideos.com/video4588838/biker_takes_his_girl',
83 'only_matching': True
84 }, {
85 'url': 'https://it.xvideos.com/video4588838/biker_takes_his_girl',
86 'only_matching': True
87 }, {
88 'url': 'http://de.xvideos.com/video4588838/biker_takes_his_girl',
89 'only_matching': True
90 }, {
91 'url': 'https://de.xvideos.com/video4588838/biker_takes_his_girl',
92 'only_matching': True
93 }, {
94 'url': 'https://flashservice.xvideos.com/embedframe/ucuvbkfda4e',
95 'only_matching': True,
96 }, {
97 'url': 'https://www.xvideos.com/embedframe/ucuvbkfda4e',
98 'only_matching': True,
99 }, {
100 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=ucuvbkfda4e',
101 'only_matching': True,
102 }, {
103 'url': 'https://xvideos.es/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',
104 'only_matching': True
105 }]
106
107 def _real_extract(self, url):
108 video_id = self._match_id(url)
109 webpage = self._download_webpage(url, video_id)
110
111 mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
112 if mobj:
113 raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)
114
115 title = self._html_search_regex(
116 (r'<title>(?P<title>.+?)\s+-\s+XVID',
117 r'setVideoTitle\s*\(\s*(["\'])(?P<title>(?:(?!\1).)+)\1'),
118 webpage, 'title', default=None,
119 group='title') or self._og_search_title(webpage)
120
121 thumbnails = []
122 for preference, thumbnail in enumerate(('', '169')):
123 thumbnail_url = self._search_regex(
124 r'setThumbUrl%s\(\s*(["\'])(?P<thumbnail>(?:(?!\1).)+)\1' % thumbnail,
125 webpage, 'thumbnail', default=None, group='thumbnail')
126 if thumbnail_url:
127 thumbnails.append({
128 'url': thumbnail_url,
129 'preference': preference,
130 })
131
132 duration = int_or_none(self._og_search_property(
133 'duration', webpage, default=None)) or parse_duration(
134 self._search_regex(
135 r'<span[^>]+class=["\']duration["\'][^>]*>.*?(\d[^<]+)',
136 webpage, 'duration', fatal=False))
137
138 formats = []
139
140 video_url = compat_urllib_parse_unquote(self._search_regex(
141 r'flv_url=(.+?)&', webpage, 'video URL', default=''))
142 if video_url:
143 formats.append({
144 'url': video_url,
145 'format_id': 'flv',
146 })
147
148 for kind, _, format_url in re.findall(
149 r'setVideo([^(]+)\((["\'])(http.+?)\2\)', webpage):
150 format_id = kind.lower()
151 if format_id == 'hls':
152 hls_formats = self._extract_m3u8_formats(
153 format_url, video_id, 'mp4',
154 entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
155 self._check_formats(hls_formats, video_id)
156 formats.extend(hls_formats)
157 elif format_id in ('urllow', 'urlhigh'):
158 formats.append({
159 'url': format_url,
160 'format_id': '%s-%s' % (determine_ext(format_url, 'mp4'), format_id[3:]),
161 'quality': -2 if format_id.endswith('low') else None,
162 })
163
164 return {
165 'id': video_id,
166 'formats': formats,
167 'title': title,
168 'duration': duration,
169 'thumbnails': thumbnails,
170 'age_limit': 18,
171 }
172
173
174 class XVideosQuickiesIE(InfoExtractor):
175 IE_NAME = 'xvideos:quickies'
176 _VALID_URL = r'https?://(?P<domain>(?:[^/]+\.)?xvideos2?\.com)/amateur-channels/[^#]+#quickies/a/(?P<id>\d+)'
177 _TESTS = [{
178 'url': 'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',
179 'md5': '16e322a93282667f1963915568f782c1',
180 'info_dict': {
181 'id': '47258683',
182 'ext': 'mp4',
183 'title': 'Verification video',
184 'age_limit': 18,
185 'duration': 16,
186 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\.jpg',
187 }
188 }]
189
190 def _real_extract(self, url):
191 domain, id_ = self._match_valid_url(url).group('domain', 'id')
192 return self.url_result(f'https://{domain}/video{id_}/_', XVideosIE, id_)
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/xvideos.py b/yt_dlp/extractor/xvideos.py
--- a/yt_dlp/extractor/xvideos.py
+++ b/yt_dlp/extractor/xvideos.py
@@ -173,8 +173,41 @@
class XVideosQuickiesIE(InfoExtractor):
IE_NAME = 'xvideos:quickies'
- _VALID_URL = r'https?://(?P<domain>(?:[^/]+\.)?xvideos2?\.com)/amateur-channels/[^#]+#quickies/a/(?P<id>\d+)'
+ _VALID_URL = r'https?://(?P<domain>(?:[^/?#]+\.)?xvideos2?\.com)/(?:profiles/|amateur-channels/)?[^/?#]+#quickies/a/(?P<id>\w+)'
_TESTS = [{
+ 'url': 'https://www.xvideos.com/lili_love#quickies/a/ipdtikh1a4c',
+ 'md5': 'f9e4f518ff1de14b99a400bbd0fc5ee0',
+ 'info_dict': {
+ 'id': 'ipdtikh1a4c',
+ 'ext': 'mp4',
+ 'title': 'Mexican chichΓ³na putisima',
+ 'age_limit': 18,
+ 'duration': 81,
+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\.jpg',
+ }
+ }, {
+ 'url': 'https://www.xvideos.com/profiles/lili_love#quickies/a/ipphaob6fd1',
+ 'md5': '5340938aac6b46e19ebdd1d84535862e',
+ 'info_dict': {
+ 'id': 'ipphaob6fd1',
+ 'ext': 'mp4',
+ 'title': 'Puta chichona mexicana squirting',
+ 'age_limit': 18,
+ 'duration': 56,
+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\.jpg',
+ }
+ }, {
+ 'url': 'https://www.xvideos.com/amateur-channels/lili_love#quickies/a/hfmffmd7661',
+ 'md5': '92428518bbabcb4c513e55922e022491',
+ 'info_dict': {
+ 'id': 'hfmffmd7661',
+ 'ext': 'mp4',
+ 'title': 'Chichona mexican slut',
+ 'age_limit': 18,
+ 'duration': 9,
+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\.jpg',
+ }
+ }, {
'url': 'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',
'md5': '16e322a93282667f1963915568f782c1',
'info_dict': {
@@ -189,4 +222,4 @@
def _real_extract(self, url):
domain, id_ = self._match_valid_url(url).group('domain', 'id')
- return self.url_result(f'https://{domain}/video{id_}/_', XVideosIE, id_)
+ return self.url_result(f'https://{domain}/video{"" if id_.isdecimal() else "."}{id_}/_', XVideosIE, id_)
|
{"golden_diff": "diff --git a/yt_dlp/extractor/xvideos.py b/yt_dlp/extractor/xvideos.py\n--- a/yt_dlp/extractor/xvideos.py\n+++ b/yt_dlp/extractor/xvideos.py\n@@ -173,8 +173,41 @@\n \n class XVideosQuickiesIE(InfoExtractor):\n IE_NAME = 'xvideos:quickies'\n- _VALID_URL = r'https?://(?P<domain>(?:[^/]+\\.)?xvideos2?\\.com)/amateur-channels/[^#]+#quickies/a/(?P<id>\\d+)'\n+ _VALID_URL = r'https?://(?P<domain>(?:[^/?#]+\\.)?xvideos2?\\.com)/(?:profiles/|amateur-channels/)?[^/?#]+#quickies/a/(?P<id>\\w+)'\n _TESTS = [{\n+ 'url': 'https://www.xvideos.com/lili_love#quickies/a/ipdtikh1a4c',\n+ 'md5': 'f9e4f518ff1de14b99a400bbd0fc5ee0',\n+ 'info_dict': {\n+ 'id': 'ipdtikh1a4c',\n+ 'ext': 'mp4',\n+ 'title': 'Mexican chich\u00f3na putisima',\n+ 'age_limit': 18,\n+ 'duration': 81,\n+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n+ }\n+ }, {\n+ 'url': 'https://www.xvideos.com/profiles/lili_love#quickies/a/ipphaob6fd1',\n+ 'md5': '5340938aac6b46e19ebdd1d84535862e',\n+ 'info_dict': {\n+ 'id': 'ipphaob6fd1',\n+ 'ext': 'mp4',\n+ 'title': 'Puta chichona mexicana squirting',\n+ 'age_limit': 18,\n+ 'duration': 56,\n+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n+ }\n+ }, {\n+ 'url': 'https://www.xvideos.com/amateur-channels/lili_love#quickies/a/hfmffmd7661',\n+ 'md5': '92428518bbabcb4c513e55922e022491',\n+ 'info_dict': {\n+ 'id': 'hfmffmd7661',\n+ 'ext': 'mp4',\n+ 'title': 'Chichona mexican slut',\n+ 'age_limit': 18,\n+ 'duration': 9,\n+ 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n+ }\n+ }, {\n 'url': 'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',\n 'md5': '16e322a93282667f1963915568f782c1',\n 'info_dict': {\n@@ -189,4 +222,4 @@\n \n def _real_extract(self, url):\n domain, id_ = self._match_valid_url(url).group('domain', 'id')\n- return self.url_result(f'https://{domain}/video{id_}/_', XVideosIE, id_)\n+ return self.url_result(f'https://{domain}/video{\"\" if id_.isdecimal() else \".\"}{id_}/_', XVideosIE, id_)\n", "issue": "Quickies on Xvideos\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm requesting a site-specific feature\n- [X] I've verified that I'm running yt-dlp version **2023.02.17** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nWorldwide\n\n### Example URLs\n\nhttps://www.xvideos.com/profiles/firolix#quickies/a/51392867\r\nhttps://www.xvideos.com/profiles/firolix#quickies/a/63386619\n\n### Provide a description that is worded well enough to be understood\n\nQuickies, the version of shorts on Xvideos, aren't currently downloadable. 
Can you add support for them?\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['-vU', '-f', 'mp4', 'https://www.xvideos.com/profiles/firolix#quickies/a/51392867']\r\n[debug] User config \"C:\\Users\\Noah Bangs\\yt-dlp.conf\": ['-o', 'H:\\\\youtube-dl\\\\Export\\\\%(webpage_url_domain)s\\\\%(title)s - %(channel,artist - album)s.%(ext)s', '--retries', 'infinite', '--download-archive', 'registry.txt']\r\n[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version 2023.02.17 [a0a7c01] (win_exe)\r\n[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19044-SP0 (OpenSSL 1.1.1k 25 Mar 2021)\r\n[debug] exe versions: ffmpeg git-2020-04-03-52523b6, ffprobe git-2020-04-03-52523b6\r\n[debug] Optional libraries: Cryptodome-3.17, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1782 extractors\r\n[debug] Loading archive file 'registry.txt'\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nLatest version: 2023.02.17, Current version: 2023.02.17\r\nyt-dlp is up to date (2023.02.17)\r\n[generic] Extracting URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867\r\n[generic] 51392867: Downloading webpage\r\nWARNING: [generic] Falling back on generic information extractor\r\n[generic] 51392867: Extracting information\r\n[debug] Looking for embeds\r\nERROR: Unsupported URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1502, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1578, in __extract_info\r\n File \"yt_dlp\\extractor\\common.py\", line 693, in extract\r\n File \"yt_dlp\\extractor\\generic.py\", line 2509, in _real_extract\r\nyt_dlp.utils.UnsupportedError: Unsupported URL: https://www.xvideos.com/profiles/firolix#quickies/a/51392867\n```\n\n", "before_files": [{"content": "import re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_urllib_parse_unquote\nfrom ..utils import (\n clean_html,\n determine_ext,\n ExtractorError,\n int_or_none,\n parse_duration,\n)\n\n\nclass XVideosIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:\n (?:[^/]+\\.)?xvideos2?\\.com/video\\.?|\n (?:www\\.)?xvideos\\.es/video\\.?|\n (?:www|flashservice)\\.xvideos\\.com/embedframe/|\n static-hw\\.xvideos\\.com/swf/xv-player\\.swf\\?.*?\\bid_video=\n )\n (?P<id>[0-9a-z]+)\n '''\n _TESTS = [{\n 'url': 'http://xvideos.com/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',\n 'md5': '396255a900a6bddb3e98985f0b86c3fd',\n 'info_dict': {\n 'id': 'ucuvbkfda4e',\n 'ext': 'mp4',\n 'title': 'A Beautiful Red-Haired Stranger Was Refused, But Still Came To My Room For Sex',\n 'duration': 1238,\n 'age_limit': 18,\n 'thumbnail': r're:^https://cdn\\d+-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n # Broken HLS formats\n 'url': 'https://www.xvideos.com/video65982001/what_s_her_name',\n 'md5': '56742808292c8fa1418e4538c262c58b',\n 'info_dict': {\n 'id': '65982001',\n 'ext': 'mp4',\n 'title': 'what\\'s her name?',\n 'duration': 120,\n 'age_limit': 18,\n 'thumbnail': 
r're:^https://cdn\\d+-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n 'url': 'https://flashservice.xvideos.com/embedframe/4588838',\n 'only_matching': True,\n }, {\n 'url': 'https://www.xvideos.com/embedframe/4588838',\n 'only_matching': True,\n }, {\n 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=4588838',\n 'only_matching': True,\n }, {\n 'url': 'http://xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://www.xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://www.xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://fr.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://fr.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://it.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://it.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://de.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://de.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://flashservice.xvideos.com/embedframe/ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'https://www.xvideos.com/embedframe/ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'https://xvideos.es/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n mobj = re.search(r'<h1 class=\"inlineError\">(.+?)</h1>', webpage)\n if mobj:\n raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)\n\n title = self._html_search_regex(\n (r'<title>(?P<title>.+?)\\s+-\\s+XVID',\n r'setVideoTitle\\s*\\(\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1'),\n webpage, 'title', default=None,\n group='title') or self._og_search_title(webpage)\n\n thumbnails = []\n for preference, thumbnail in enumerate(('', '169')):\n thumbnail_url = self._search_regex(\n r'setThumbUrl%s\\(\\s*([\"\\'])(?P<thumbnail>(?:(?!\\1).)+)\\1' % thumbnail,\n webpage, 'thumbnail', default=None, group='thumbnail')\n if thumbnail_url:\n thumbnails.append({\n 'url': thumbnail_url,\n 'preference': preference,\n })\n\n duration = int_or_none(self._og_search_property(\n 'duration', webpage, default=None)) or parse_duration(\n self._search_regex(\n r'<span[^>]+class=[\"\\']duration[\"\\'][^>]*>.*?(\\d[^<]+)',\n webpage, 'duration', fatal=False))\n\n formats = []\n\n video_url = compat_urllib_parse_unquote(self._search_regex(\n r'flv_url=(.+?)&', webpage, 'video URL', default=''))\n if video_url:\n formats.append({\n 'url': video_url,\n 'format_id': 'flv',\n })\n\n for kind, _, format_url in re.findall(\n r'setVideo([^(]+)\\(([\"\\'])(http.+?)\\2\\)', webpage):\n format_id = kind.lower()\n if format_id == 'hls':\n hls_formats = self._extract_m3u8_formats(\n format_url, video_id, 'mp4',\n 
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)\n self._check_formats(hls_formats, video_id)\n formats.extend(hls_formats)\n elif format_id in ('urllow', 'urlhigh'):\n formats.append({\n 'url': format_url,\n 'format_id': '%s-%s' % (determine_ext(format_url, 'mp4'), format_id[3:]),\n 'quality': -2 if format_id.endswith('low') else None,\n })\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': title,\n 'duration': duration,\n 'thumbnails': thumbnails,\n 'age_limit': 18,\n }\n\n\nclass XVideosQuickiesIE(InfoExtractor):\n IE_NAME = 'xvideos:quickies'\n _VALID_URL = r'https?://(?P<domain>(?:[^/]+\\.)?xvideos2?\\.com)/amateur-channels/[^#]+#quickies/a/(?P<id>\\d+)'\n _TESTS = [{\n 'url': 'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',\n 'md5': '16e322a93282667f1963915568f782c1',\n 'info_dict': {\n 'id': '47258683',\n 'ext': 'mp4',\n 'title': 'Verification video',\n 'age_limit': 18,\n 'duration': 16,\n 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }]\n\n def _real_extract(self, url):\n domain, id_ = self._match_valid_url(url).group('domain', 'id')\n return self.url_result(f'https://{domain}/video{id_}/_', XVideosIE, id_)\n", "path": "yt_dlp/extractor/xvideos.py"}], "after_files": [{"content": "import re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_urllib_parse_unquote\nfrom ..utils import (\n clean_html,\n determine_ext,\n ExtractorError,\n int_or_none,\n parse_duration,\n)\n\n\nclass XVideosIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:\n (?:[^/]+\\.)?xvideos2?\\.com/video\\.?|\n (?:www\\.)?xvideos\\.es/video\\.?|\n (?:www|flashservice)\\.xvideos\\.com/embedframe/|\n static-hw\\.xvideos\\.com/swf/xv-player\\.swf\\?.*?\\bid_video=\n )\n (?P<id>[0-9a-z]+)\n '''\n _TESTS = [{\n 'url': 'http://xvideos.com/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',\n 'md5': '396255a900a6bddb3e98985f0b86c3fd',\n 'info_dict': {\n 'id': 'ucuvbkfda4e',\n 'ext': 'mp4',\n 'title': 'A Beautiful Red-Haired Stranger Was Refused, But Still Came To My Room For Sex',\n 'duration': 1238,\n 'age_limit': 18,\n 'thumbnail': r're:^https://cdn\\d+-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n # Broken HLS formats\n 'url': 'https://www.xvideos.com/video65982001/what_s_her_name',\n 'md5': '56742808292c8fa1418e4538c262c58b',\n 'info_dict': {\n 'id': '65982001',\n 'ext': 'mp4',\n 'title': 'what\\'s her name?',\n 'duration': 120,\n 'age_limit': 18,\n 'thumbnail': r're:^https://cdn\\d+-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n 'url': 'https://flashservice.xvideos.com/embedframe/4588838',\n 'only_matching': True,\n }, {\n 'url': 'https://www.xvideos.com/embedframe/4588838',\n 'only_matching': True,\n }, {\n 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=4588838',\n 'only_matching': True,\n }, {\n 'url': 'http://xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://www.xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://www.xvideos.es/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://fr.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 
'url': 'https://fr.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://it.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://it.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'http://de.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://de.xvideos.com/video4588838/biker_takes_his_girl',\n 'only_matching': True\n }, {\n 'url': 'https://flashservice.xvideos.com/embedframe/ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'https://www.xvideos.com/embedframe/ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'http://static-hw.xvideos.com/swf/xv-player.swf?id_video=ucuvbkfda4e',\n 'only_matching': True,\n }, {\n 'url': 'https://xvideos.es/video.ucuvbkfda4e/a_beautiful_red-haired_stranger_was_refused_but_still_came_to_my_room_for_sex',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n mobj = re.search(r'<h1 class=\"inlineError\">(.+?)</h1>', webpage)\n if mobj:\n raise ExtractorError('%s said: %s' % (self.IE_NAME, clean_html(mobj.group(1))), expected=True)\n\n title = self._html_search_regex(\n (r'<title>(?P<title>.+?)\\s+-\\s+XVID',\n r'setVideoTitle\\s*\\(\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1'),\n webpage, 'title', default=None,\n group='title') or self._og_search_title(webpage)\n\n thumbnails = []\n for preference, thumbnail in enumerate(('', '169')):\n thumbnail_url = self._search_regex(\n r'setThumbUrl%s\\(\\s*([\"\\'])(?P<thumbnail>(?:(?!\\1).)+)\\1' % thumbnail,\n webpage, 'thumbnail', default=None, group='thumbnail')\n if thumbnail_url:\n thumbnails.append({\n 'url': thumbnail_url,\n 'preference': preference,\n })\n\n duration = int_or_none(self._og_search_property(\n 'duration', webpage, default=None)) or parse_duration(\n self._search_regex(\n r'<span[^>]+class=[\"\\']duration[\"\\'][^>]*>.*?(\\d[^<]+)',\n webpage, 'duration', fatal=False))\n\n formats = []\n\n video_url = compat_urllib_parse_unquote(self._search_regex(\n r'flv_url=(.+?)&', webpage, 'video URL', default=''))\n if video_url:\n formats.append({\n 'url': video_url,\n 'format_id': 'flv',\n })\n\n for kind, _, format_url in re.findall(\n r'setVideo([^(]+)\\(([\"\\'])(http.+?)\\2\\)', webpage):\n format_id = kind.lower()\n if format_id == 'hls':\n hls_formats = self._extract_m3u8_formats(\n format_url, video_id, 'mp4',\n entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)\n self._check_formats(hls_formats, video_id)\n formats.extend(hls_formats)\n elif format_id in ('urllow', 'urlhigh'):\n formats.append({\n 'url': format_url,\n 'format_id': '%s-%s' % (determine_ext(format_url, 'mp4'), format_id[3:]),\n 'quality': -2 if format_id.endswith('low') else None,\n })\n\n return {\n 'id': video_id,\n 'formats': formats,\n 'title': title,\n 'duration': duration,\n 'thumbnails': thumbnails,\n 'age_limit': 18,\n }\n\n\nclass XVideosQuickiesIE(InfoExtractor):\n IE_NAME = 'xvideos:quickies'\n _VALID_URL = r'https?://(?P<domain>(?:[^/?#]+\\.)?xvideos2?\\.com)/(?:profiles/|amateur-channels/)?[^/?#]+#quickies/a/(?P<id>\\w+)'\n _TESTS = [{\n 'url': 'https://www.xvideos.com/lili_love#quickies/a/ipdtikh1a4c',\n 'md5': 'f9e4f518ff1de14b99a400bbd0fc5ee0',\n 'info_dict': {\n 'id': 'ipdtikh1a4c',\n 'ext': 'mp4',\n 'title': 'Mexican chich\u00f3na putisima',\n 'age_limit': 18,\n 'duration': 81,\n 'thumbnail': 
r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n 'url': 'https://www.xvideos.com/profiles/lili_love#quickies/a/ipphaob6fd1',\n 'md5': '5340938aac6b46e19ebdd1d84535862e',\n 'info_dict': {\n 'id': 'ipphaob6fd1',\n 'ext': 'mp4',\n 'title': 'Puta chichona mexicana squirting',\n 'age_limit': 18,\n 'duration': 56,\n 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n 'url': 'https://www.xvideos.com/amateur-channels/lili_love#quickies/a/hfmffmd7661',\n 'md5': '92428518bbabcb4c513e55922e022491',\n 'info_dict': {\n 'id': 'hfmffmd7661',\n 'ext': 'mp4',\n 'title': 'Chichona mexican slut',\n 'age_limit': 18,\n 'duration': 9,\n 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }, {\n 'url': 'https://www.xvideos.com/amateur-channels/wifeluna#quickies/a/47258683',\n 'md5': '16e322a93282667f1963915568f782c1',\n 'info_dict': {\n 'id': '47258683',\n 'ext': 'mp4',\n 'title': 'Verification video',\n 'age_limit': 18,\n 'duration': 16,\n 'thumbnail': r're:^https://cdn.*-pic.xvideos-cdn.com/.+\\.jpg',\n }\n }]\n\n def _real_extract(self, url):\n domain, id_ = self._match_valid_url(url).group('domain', 'id')\n return self.url_result(f'https://{domain}/video{\"\" if id_.isdecimal() else \".\"}{id_}/_', XVideosIE, id_)\n", "path": "yt_dlp/extractor/xvideos.py"}]}
| 4,065 | 857 |
gh_patches_debug_13466
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-4721
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to update device after deleting its device redundancy group if the device DRG priority is set
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.6.4
* Python version: 3.11.6
* Database platform, version: mysql 8.1.0
* Middleware(s): pynautobot, GUI, Swagger UI
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a device redundancy group (DRG)
2. Create a device
3. Assign device to redundancy group with a priority
4. Delete device redundancy group
5. Attempt to make an edit to the device
Note that with the DRG deleted, it's no longer listed in the device edit screen but the DRG priority remains set; since having a DRG priority set without having a corresponding DRG set fails validation, no edits can be made to the device until either the priority is removed or a new DRG is configured. Attempts to edit the device result in the error seen in Observed Behavior, below. Bonus: If you attempt to add such a device to a Virtual Chassis, you get this error:

<!-- What did you expect to happen? -->
### Expected Behavior
I can absolutely see that changing this behavior may be more trouble than it's worth; however, in a perfect world it feels to me as though either any devices using the deleted redundancy group should have their priority set to `None`, or DRG deletion should fail until/unless no devices are using it. That said, I don't know the code base well enough to know which (or if either) of those solutions is correct.
<!-- What happened instead? -->
### Observed Behavior
After the DRG was deleted, I was unable to make any updates to the device without first nulling out the device DRG priority. The returned message is:
```json
{
"device_redundancy_group_priority": [
"Must assign a redundancy group when defining a redundancy group priority."
]
}
```
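
For reference, the check that produces this message can be illustrated with a small, self-contained sketch. This is a hypothetical reconstruction, not the actual Nautobot source: the function name `validate_redundancy_fields` is invented, and only the field names and message text are taken from the error above.

```python
# Hypothetical reconstruction of the validation rule behind the error above.
# Only the field names and message text come from the error; the function is invented.
from django.core.exceptions import ValidationError


def validate_redundancy_fields(device_redundancy_group, device_redundancy_group_priority):
    if device_redundancy_group_priority is not None and device_redundancy_group is None:
        raise ValidationError({
            "device_redundancy_group_priority": [
                "Must assign a redundancy group when defining a redundancy group priority."
            ]
        })


try:
    # The state a device is left in once its redundancy group has been deleted:
    validate_redundancy_fields(device_redundancy_group=None, device_redundancy_group_priority=100)
except ValidationError as err:
    print(err.message_dict)  # {'device_redundancy_group_priority': ['Must assign a redundancy group ...']}
```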
Thoughts?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/dcim/signals.py`
Content:
```
1 import logging
2
3 from cacheops import invalidate_obj
4 from django.contrib.contenttypes.models import ContentType
5 from django.db.models.signals import m2m_changed, post_save, pre_delete
6 from django.db import transaction
7 from django.dispatch import receiver
8
9 from nautobot.core.signals import disable_for_loaddata
10 from .models import (
11 Cable,
12 CablePath,
13 Device,
14 PathEndpoint,
15 PowerPanel,
16 Rack,
17 RackGroup,
18 VirtualChassis,
19 Interface,
20 )
21 from .utils import validate_interface_tagged_vlans
22
23
24 def create_cablepath(node, rebuild=True):
25 """
26 Create CablePaths for all paths originating from the specified node.
27
28 rebuild (bool) - Used to refresh paths where this node is not an endpoint.
29 """
30 cp = CablePath.from_origin(node)
31 if cp:
32 try:
33 cp.save()
34 except Exception as e:
35 print(node, node.pk)
36 raise e
37 if rebuild:
38 rebuild_paths(node)
39
40
41 def rebuild_paths(obj):
42 """
43 Rebuild all CablePaths which traverse the specified node
44 """
45 cable_paths = CablePath.objects.filter(path__contains=obj)
46
47 with transaction.atomic():
48 for cp in cable_paths:
49 invalidate_obj(cp.origin)
50 cp.delete()
51 # Prevent looping back to rebuild_paths during the atomic transaction.
52 create_cablepath(cp.origin, rebuild=False)
53
54
55 #
56 # Site/location/rack/device assignment
57 #
58
59
60 @receiver(post_save, sender=RackGroup)
61 def handle_rackgroup_site_location_change(instance, created, raw=False, **kwargs):
62 """
63 Update child RackGroups, Racks, and PowerPanels if Site or Location assignment has changed.
64
65 We intentionally recurse through each child object instead of calling update() on the QuerySet
66 to ensure the proper change records get created for each.
67
68 Note that this is non-trivial for Location changes, since a LocationType that can contain RackGroups
69 may or may not be permitted to contain Racks or PowerPanels. If it's not permitted, rather than trying to search
70 through child locations to find the "right" one, the best we can do is simply to null out the location.
71 """
72 if raw:
73 return
74 if not created:
75 if instance.location is not None:
76 descendants = instance.location.descendants(include_self=True)
77 content_types = instance.location.location_type.content_types.all()
78 rack_groups_permitted = ContentType.objects.get_for_model(RackGroup) in content_types
79 racks_permitted = ContentType.objects.get_for_model(Rack) in content_types
80 power_panels_permitted = ContentType.objects.get_for_model(PowerPanel) in content_types
81 else:
82 descendants = None
83 rack_groups_permitted = False
84 racks_permitted = False
85 power_panels_permitted = False
86
87 for rackgroup in instance.get_children():
88 changed = False
89 if rackgroup.site != instance.site:
90 rackgroup.site = instance.site
91 changed = True
92
93 if instance.location is not None:
94 if rackgroup.location is not None and rackgroup.location not in descendants:
95 rackgroup.location = instance.location if rack_groups_permitted else None
96 changed = True
97 elif rackgroup.location is not None and rackgroup.location.base_site != instance.site:
98 rackgroup.location = None
99 changed = True
100
101 if changed:
102 rackgroup.save()
103
104 for rack in Rack.objects.filter(group=instance):
105 changed = False
106 if rack.site != instance.site:
107 rack.site = instance.site
108 changed = True
109
110 if instance.location is not None:
111 if rack.location is not None and rack.location not in descendants:
112 rack.location = instance.location if racks_permitted else None
113 changed = True
114 elif rack.location is not None and rack.location.base_site != instance.site:
115 rack.location = None
116 changed = True
117
118 if changed:
119 rack.save()
120
121 for powerpanel in PowerPanel.objects.filter(rack_group=instance):
122 changed = False
123 if powerpanel.site != instance.site:
124 powerpanel.site = instance.site
125 changed = True
126
127 if instance.location is not None:
128 if powerpanel.location is not None and powerpanel.location not in descendants:
129 powerpanel.location = instance.location if power_panels_permitted else None
130 changed = True
131 elif powerpanel.location is not None and powerpanel.location.base_site != instance.site:
132 powerpanel.location = None
133 changed = True
134
135 if changed:
136 powerpanel.save()
137
138
139 @receiver(post_save, sender=Rack)
140 def handle_rack_site_location_change(instance, created, raw=False, **kwargs):
141 """
142 Update child Devices if Site or Location assignment has changed.
143
144 Note that this is non-trivial for Location changes, since a LocationType that can contain Racks
145 may or may not be permitted to contain Devices. If it's not permitted, rather than trying to search
146 through child locations to find the "right" one, the best we can do is simply to null out the location.
147 """
148 if raw:
149 return
150 if not created:
151 if instance.location is not None:
152 devices_permitted = (
153 ContentType.objects.get_for_model(Device) in instance.location.location_type.content_types.all()
154 )
155
156 for device in Device.objects.filter(rack=instance):
157 changed = False
158 if device.site != instance.site:
159 device.site = instance.site
160 changed = True
161
162 if instance.location is not None:
163 if device.location is not None and device.location != instance.location:
164 device.location = instance.location if devices_permitted else None
165 changed = True
166 elif device.location is not None and device.location.base_site != instance.site:
167 device.location = None
168 changed = True
169
170 if changed:
171 device.save()
172
173
174 #
175 # Virtual chassis
176 #
177
178
179 @receiver(post_save, sender=VirtualChassis)
180 def assign_virtualchassis_master(instance, created, raw=False, **kwargs):
181 """
182 When a VirtualChassis is created, automatically assign its master device (if any) to the VC.
183 """
184 if raw:
185 return
186 if created and instance.master:
187 master = Device.objects.get(pk=instance.master.pk)
188 master.virtual_chassis = instance
189 if instance.master.vc_position is None:
190 master.vc_position = 1
191 master.save()
192
193
194 @receiver(pre_delete, sender=VirtualChassis)
195 def clear_virtualchassis_members(instance, **kwargs):
196 """
197 When a VirtualChassis is deleted, nullify the vc_position and vc_priority fields of its prior members.
198 """
199 devices = Device.objects.filter(virtual_chassis=instance.pk)
200 for device in devices:
201 device.vc_position = None
202 device.vc_priority = None
203 device.save()
204
205
206 #
207 # Cables
208 #
209
210
211 @receiver(post_save, sender=Cable)
212 def update_connected_endpoints(instance, created, raw=False, **kwargs):
213 """
214 When a Cable is saved, check for and update its two connected endpoints
215 """
216 logger = logging.getLogger("nautobot.dcim.cable")
217 if raw:
218 logger.debug(f"Skipping endpoint updates for imported cable {instance}")
219 return
220
221 # Cache the Cable on its two termination points
222 if instance.termination_a.cable != instance:
223 logger.debug(f"Updating termination A for cable {instance}")
224 instance.termination_a.cable = instance
225 instance.termination_a._cable_peer = instance.termination_b
226 instance.termination_a.save()
227 if instance.termination_b.cable != instance:
228 logger.debug(f"Updating termination B for cable {instance}")
229 instance.termination_b.cable = instance
230 instance.termination_b._cable_peer = instance.termination_a
231 instance.termination_b.save()
232
233 # Create/update cable paths
234 if created:
235 for termination in (instance.termination_a, instance.termination_b):
236 if isinstance(termination, PathEndpoint):
237 create_cablepath(termination)
238 else:
239 rebuild_paths(termination)
240 elif instance.status != instance._orig_status:
241 # We currently don't support modifying either termination of an existing Cable. (This
242 # may change in the future.) However, we do need to capture status changes and update
243 # any CablePaths accordingly.
244 if instance.status != Cable.STATUS_CONNECTED:
245 CablePath.objects.filter(path__contains=instance).update(is_active=False)
246 else:
247 rebuild_paths(instance)
248
249
250 @receiver(pre_delete, sender=Cable)
251 def nullify_connected_endpoints(instance, **kwargs):
252 """
253 When a Cable is deleted, check for and update its two connected endpoints
254 """
255 logger = logging.getLogger("nautobot.dcim.cable")
256
257 # Disassociate the Cable from its termination points
258 if instance.termination_a is not None:
259 logger.debug(f"Nullifying termination A for cable {instance}")
260 instance.termination_a.cable = None
261 instance.termination_a._cable_peer = None
262 instance.termination_a.save()
263 if instance.termination_b is not None:
264 logger.debug(f"Nullifying termination B for cable {instance}")
265 instance.termination_b.cable = None
266 instance.termination_b._cable_peer = None
267 instance.termination_b.save()
268
269 # Delete and retrace any dependent cable paths
270 for cablepath in CablePath.objects.filter(path__contains=instance):
271 cp = CablePath.from_origin(cablepath.origin)
272 if cp:
273 CablePath.objects.filter(pk=cablepath.pk).update(
274 path=cp.path,
275 destination_type=ContentType.objects.get_for_model(cp.destination) if cp.destination else None,
276 destination_id=cp.destination.pk if cp.destination else None,
277 is_active=cp.is_active,
278 is_split=cp.is_split,
279 )
280 else:
281 cablepath.delete()
282
283
284 #
285 # Interface tagged VLAMs
286 #
287
288
289 @receiver(m2m_changed, sender=Interface.tagged_vlans.through)
290 @disable_for_loaddata
291 def prevent_adding_tagged_vlans_with_incorrect_mode_or_site(sender, instance, action, **kwargs):
292 if action != "pre_add":
293 return
294
295 validate_interface_tagged_vlans(instance, kwargs["model"], kwargs["pk_set"])
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/dcim/signals.py b/nautobot/dcim/signals.py
--- a/nautobot/dcim/signals.py
+++ b/nautobot/dcim/signals.py
@@ -11,6 +11,7 @@
Cable,
CablePath,
Device,
+ DeviceRedundancyGroup,
PathEndpoint,
PowerPanel,
Rack,
@@ -171,6 +172,22 @@
device.save()
+#
+# Device redundancy group
+#
+
+
+@receiver(pre_delete, sender=DeviceRedundancyGroup)
+def clear_deviceredundancygroup_members(instance, **kwargs):
+ """
+ When a DeviceRedundancyGroup is deleted, nullify the device_redundancy_group_priority field of its prior members.
+ """
+ devices = Device.objects.filter(device_redundancy_group=instance.pk)
+ for device in devices:
+ device.device_redundancy_group_priority = None
+ device.save()
+
+
#
# Virtual chassis
#
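
The effect of the new receiver can be sanity-checked from a Django shell. A minimal sketch, assuming a Device and a DeviceRedundancyGroup already exist in the database (device creation boilerplate is omitted):

```python
# Minimal shell sketch, assuming one Device and one DeviceRedundancyGroup already exist.
from nautobot.dcim.models import Device, DeviceRedundancyGroup

device = Device.objects.first()
group = DeviceRedundancyGroup.objects.first()

device.device_redundancy_group = group
device.device_redundancy_group_priority = 100
device.save()

group.delete()             # fires clear_deviceredundancygroup_members via pre_delete
device.refresh_from_db()
print(device.device_redundancy_group_priority)  # None, so the device can be edited again
```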
|
{"golden_diff": "diff --git a/nautobot/dcim/signals.py b/nautobot/dcim/signals.py\n--- a/nautobot/dcim/signals.py\n+++ b/nautobot/dcim/signals.py\n@@ -11,6 +11,7 @@\n Cable,\n CablePath,\n Device,\n+ DeviceRedundancyGroup,\n PathEndpoint,\n PowerPanel,\n Rack,\n@@ -171,6 +172,22 @@\n device.save()\n \n \n+#\n+# Device redundancy group\n+#\n+\n+\n+@receiver(pre_delete, sender=DeviceRedundancyGroup)\n+def clear_deviceredundancygroup_members(instance, **kwargs):\n+ \"\"\"\n+ When a DeviceRedundancyGroup is deleted, nullify the device_redundancy_group_priority field of its prior members.\n+ \"\"\"\n+ devices = Device.objects.filter(device_redundancy_group=instance.pk)\n+ for device in devices:\n+ device.device_redundancy_group_priority = None\n+ device.save()\n+\n+\n #\n # Virtual chassis\n #\n", "issue": "Unable to update device after deleting its device redundancy group if the device DRG priority is set\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.6.4\r\n* Python version: 3.11.6\r\n* Database platform, version: mysql 8.1.0\r\n* Middleware(s): pynautobot, GUI, Swagger UI\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a device redundancy group (DRG)\r\n2. Create a device\r\n3. Assign device to redundancy group with a priority\r\n4. Delete device redundancy group\r\n5. Attempt to make an edit to the device\r\n\r\nNote that with the DRG deleted, it's no longer listed in the device edit screen but the DRG priority remains set; since having a DRG priority set without having a corresponding DRG set fails validation, no edits can be made to the device until either the priority is removed or a new DRG is configured. Attempts to edit the device result in the error see in Observed Behavior, below. Bonus: If you attempt to add such a device to a Virtual Chassis, you get this error:\r\n\r\n\r\n\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nI can absolutely see that changing this behavior may be more trouble than it's worth, however in a perfect world it feels to me as though either any devices using the deleted redundancy group should have their priority set to `None` or DRG deletion should fail until/unless no devices are using it. That said, I don't know the code base well enough to know which (or if either) of those solutions is correct. \r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nAfter the DRG was deleted and I was unable to make any updates to the device without first nulling out the device DRG priority. 
returned message is:\r\n\r\n```json\r\n{\r\n \"device_redundancy_group_priority\": [\r\n \"Must assign a redundancy group when defining a redundancy group priority.\"\r\n ]\r\n}\r\n```\r\n\r\nThoughts?\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom cacheops import invalidate_obj\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import m2m_changed, post_save, pre_delete\nfrom django.db import transaction\nfrom django.dispatch import receiver\n\nfrom nautobot.core.signals import disable_for_loaddata\nfrom .models import (\n Cable,\n CablePath,\n Device,\n PathEndpoint,\n PowerPanel,\n Rack,\n RackGroup,\n VirtualChassis,\n Interface,\n)\nfrom .utils import validate_interface_tagged_vlans\n\n\ndef create_cablepath(node, rebuild=True):\n \"\"\"\n Create CablePaths for all paths originating from the specified node.\n\n rebuild (bool) - Used to refresh paths where this node is not an endpoint.\n \"\"\"\n cp = CablePath.from_origin(node)\n if cp:\n try:\n cp.save()\n except Exception as e:\n print(node, node.pk)\n raise e\n if rebuild:\n rebuild_paths(node)\n\n\ndef rebuild_paths(obj):\n \"\"\"\n Rebuild all CablePaths which traverse the specified node\n \"\"\"\n cable_paths = CablePath.objects.filter(path__contains=obj)\n\n with transaction.atomic():\n for cp in cable_paths:\n invalidate_obj(cp.origin)\n cp.delete()\n # Prevent looping back to rebuild_paths during the atomic transaction.\n create_cablepath(cp.origin, rebuild=False)\n\n\n#\n# Site/location/rack/device assignment\n#\n\n\n@receiver(post_save, sender=RackGroup)\ndef handle_rackgroup_site_location_change(instance, created, raw=False, **kwargs):\n \"\"\"\n Update child RackGroups, Racks, and PowerPanels if Site or Location assignment has changed.\n\n We intentionally recurse through each child object instead of calling update() on the QuerySet\n to ensure the proper change records get created for each.\n\n Note that this is non-trivial for Location changes, since a LocationType that can contain RackGroups\n may or may not be permitted to contain Racks or PowerPanels. 
If it's not permitted, rather than trying to search\n through child locations to find the \"right\" one, the best we can do is simply to null out the location.\n \"\"\"\n if raw:\n return\n if not created:\n if instance.location is not None:\n descendants = instance.location.descendants(include_self=True)\n content_types = instance.location.location_type.content_types.all()\n rack_groups_permitted = ContentType.objects.get_for_model(RackGroup) in content_types\n racks_permitted = ContentType.objects.get_for_model(Rack) in content_types\n power_panels_permitted = ContentType.objects.get_for_model(PowerPanel) in content_types\n else:\n descendants = None\n rack_groups_permitted = False\n racks_permitted = False\n power_panels_permitted = False\n\n for rackgroup in instance.get_children():\n changed = False\n if rackgroup.site != instance.site:\n rackgroup.site = instance.site\n changed = True\n\n if instance.location is not None:\n if rackgroup.location is not None and rackgroup.location not in descendants:\n rackgroup.location = instance.location if rack_groups_permitted else None\n changed = True\n elif rackgroup.location is not None and rackgroup.location.base_site != instance.site:\n rackgroup.location = None\n changed = True\n\n if changed:\n rackgroup.save()\n\n for rack in Rack.objects.filter(group=instance):\n changed = False\n if rack.site != instance.site:\n rack.site = instance.site\n changed = True\n\n if instance.location is not None:\n if rack.location is not None and rack.location not in descendants:\n rack.location = instance.location if racks_permitted else None\n changed = True\n elif rack.location is not None and rack.location.base_site != instance.site:\n rack.location = None\n changed = True\n\n if changed:\n rack.save()\n\n for powerpanel in PowerPanel.objects.filter(rack_group=instance):\n changed = False\n if powerpanel.site != instance.site:\n powerpanel.site = instance.site\n changed = True\n\n if instance.location is not None:\n if powerpanel.location is not None and powerpanel.location not in descendants:\n powerpanel.location = instance.location if power_panels_permitted else None\n changed = True\n elif powerpanel.location is not None and powerpanel.location.base_site != instance.site:\n powerpanel.location = None\n changed = True\n\n if changed:\n powerpanel.save()\n\n\n@receiver(post_save, sender=Rack)\ndef handle_rack_site_location_change(instance, created, raw=False, **kwargs):\n \"\"\"\n Update child Devices if Site or Location assignment has changed.\n\n Note that this is non-trivial for Location changes, since a LocationType that can contain Racks\n may or may not be permitted to contain Devices. 
If it's not permitted, rather than trying to search\n through child locations to find the \"right\" one, the best we can do is simply to null out the location.\n \"\"\"\n if raw:\n return\n if not created:\n if instance.location is not None:\n devices_permitted = (\n ContentType.objects.get_for_model(Device) in instance.location.location_type.content_types.all()\n )\n\n for device in Device.objects.filter(rack=instance):\n changed = False\n if device.site != instance.site:\n device.site = instance.site\n changed = True\n\n if instance.location is not None:\n if device.location is not None and device.location != instance.location:\n device.location = instance.location if devices_permitted else None\n changed = True\n elif device.location is not None and device.location.base_site != instance.site:\n device.location = None\n changed = True\n\n if changed:\n device.save()\n\n\n#\n# Virtual chassis\n#\n\n\n@receiver(post_save, sender=VirtualChassis)\ndef assign_virtualchassis_master(instance, created, raw=False, **kwargs):\n \"\"\"\n When a VirtualChassis is created, automatically assign its master device (if any) to the VC.\n \"\"\"\n if raw:\n return\n if created and instance.master:\n master = Device.objects.get(pk=instance.master.pk)\n master.virtual_chassis = instance\n if instance.master.vc_position is None:\n master.vc_position = 1\n master.save()\n\n\n@receiver(pre_delete, sender=VirtualChassis)\ndef clear_virtualchassis_members(instance, **kwargs):\n \"\"\"\n When a VirtualChassis is deleted, nullify the vc_position and vc_priority fields of its prior members.\n \"\"\"\n devices = Device.objects.filter(virtual_chassis=instance.pk)\n for device in devices:\n device.vc_position = None\n device.vc_priority = None\n device.save()\n\n\n#\n# Cables\n#\n\n\n@receiver(post_save, sender=Cable)\ndef update_connected_endpoints(instance, created, raw=False, **kwargs):\n \"\"\"\n When a Cable is saved, check for and update its two connected endpoints\n \"\"\"\n logger = logging.getLogger(\"nautobot.dcim.cable\")\n if raw:\n logger.debug(f\"Skipping endpoint updates for imported cable {instance}\")\n return\n\n # Cache the Cable on its two termination points\n if instance.termination_a.cable != instance:\n logger.debug(f\"Updating termination A for cable {instance}\")\n instance.termination_a.cable = instance\n instance.termination_a._cable_peer = instance.termination_b\n instance.termination_a.save()\n if instance.termination_b.cable != instance:\n logger.debug(f\"Updating termination B for cable {instance}\")\n instance.termination_b.cable = instance\n instance.termination_b._cable_peer = instance.termination_a\n instance.termination_b.save()\n\n # Create/update cable paths\n if created:\n for termination in (instance.termination_a, instance.termination_b):\n if isinstance(termination, PathEndpoint):\n create_cablepath(termination)\n else:\n rebuild_paths(termination)\n elif instance.status != instance._orig_status:\n # We currently don't support modifying either termination of an existing Cable. (This\n # may change in the future.) 
However, we do need to capture status changes and update\n # any CablePaths accordingly.\n if instance.status != Cable.STATUS_CONNECTED:\n CablePath.objects.filter(path__contains=instance).update(is_active=False)\n else:\n rebuild_paths(instance)\n\n\n@receiver(pre_delete, sender=Cable)\ndef nullify_connected_endpoints(instance, **kwargs):\n \"\"\"\n When a Cable is deleted, check for and update its two connected endpoints\n \"\"\"\n logger = logging.getLogger(\"nautobot.dcim.cable\")\n\n # Disassociate the Cable from its termination points\n if instance.termination_a is not None:\n logger.debug(f\"Nullifying termination A for cable {instance}\")\n instance.termination_a.cable = None\n instance.termination_a._cable_peer = None\n instance.termination_a.save()\n if instance.termination_b is not None:\n logger.debug(f\"Nullifying termination B for cable {instance}\")\n instance.termination_b.cable = None\n instance.termination_b._cable_peer = None\n instance.termination_b.save()\n\n # Delete and retrace any dependent cable paths\n for cablepath in CablePath.objects.filter(path__contains=instance):\n cp = CablePath.from_origin(cablepath.origin)\n if cp:\n CablePath.objects.filter(pk=cablepath.pk).update(\n path=cp.path,\n destination_type=ContentType.objects.get_for_model(cp.destination) if cp.destination else None,\n destination_id=cp.destination.pk if cp.destination else None,\n is_active=cp.is_active,\n is_split=cp.is_split,\n )\n else:\n cablepath.delete()\n\n\n#\n# Interface tagged VLAMs\n#\n\n\n@receiver(m2m_changed, sender=Interface.tagged_vlans.through)\n@disable_for_loaddata\ndef prevent_adding_tagged_vlans_with_incorrect_mode_or_site(sender, instance, action, **kwargs):\n if action != \"pre_add\":\n return\n\n validate_interface_tagged_vlans(instance, kwargs[\"model\"], kwargs[\"pk_set\"])\n", "path": "nautobot/dcim/signals.py"}], "after_files": [{"content": "import logging\n\nfrom cacheops import invalidate_obj\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models.signals import m2m_changed, post_save, pre_delete\nfrom django.db import transaction\nfrom django.dispatch import receiver\n\nfrom nautobot.core.signals import disable_for_loaddata\nfrom .models import (\n Cable,\n CablePath,\n Device,\n DeviceRedundancyGroup,\n PathEndpoint,\n PowerPanel,\n Rack,\n RackGroup,\n VirtualChassis,\n Interface,\n)\nfrom .utils import validate_interface_tagged_vlans\n\n\ndef create_cablepath(node, rebuild=True):\n \"\"\"\n Create CablePaths for all paths originating from the specified node.\n\n rebuild (bool) - Used to refresh paths where this node is not an endpoint.\n \"\"\"\n cp = CablePath.from_origin(node)\n if cp:\n try:\n cp.save()\n except Exception as e:\n print(node, node.pk)\n raise e\n if rebuild:\n rebuild_paths(node)\n\n\ndef rebuild_paths(obj):\n \"\"\"\n Rebuild all CablePaths which traverse the specified node\n \"\"\"\n cable_paths = CablePath.objects.filter(path__contains=obj)\n\n with transaction.atomic():\n for cp in cable_paths:\n invalidate_obj(cp.origin)\n cp.delete()\n # Prevent looping back to rebuild_paths during the atomic transaction.\n create_cablepath(cp.origin, rebuild=False)\n\n\n#\n# Site/location/rack/device assignment\n#\n\n\n@receiver(post_save, sender=RackGroup)\ndef handle_rackgroup_site_location_change(instance, created, raw=False, **kwargs):\n \"\"\"\n Update child RackGroups, Racks, and PowerPanels if Site or Location assignment has changed.\n\n We intentionally recurse through each child object instead of calling update() on 
the QuerySet\n to ensure the proper change records get created for each.\n\n Note that this is non-trivial for Location changes, since a LocationType that can contain RackGroups\n may or may not be permitted to contain Racks or PowerPanels. If it's not permitted, rather than trying to search\n through child locations to find the \"right\" one, the best we can do is simply to null out the location.\n \"\"\"\n if raw:\n return\n if not created:\n if instance.location is not None:\n descendants = instance.location.descendants(include_self=True)\n content_types = instance.location.location_type.content_types.all()\n rack_groups_permitted = ContentType.objects.get_for_model(RackGroup) in content_types\n racks_permitted = ContentType.objects.get_for_model(Rack) in content_types\n power_panels_permitted = ContentType.objects.get_for_model(PowerPanel) in content_types\n else:\n descendants = None\n rack_groups_permitted = False\n racks_permitted = False\n power_panels_permitted = False\n\n for rackgroup in instance.get_children():\n changed = False\n if rackgroup.site != instance.site:\n rackgroup.site = instance.site\n changed = True\n\n if instance.location is not None:\n if rackgroup.location is not None and rackgroup.location not in descendants:\n rackgroup.location = instance.location if rack_groups_permitted else None\n changed = True\n elif rackgroup.location is not None and rackgroup.location.base_site != instance.site:\n rackgroup.location = None\n changed = True\n\n if changed:\n rackgroup.save()\n\n for rack in Rack.objects.filter(group=instance):\n changed = False\n if rack.site != instance.site:\n rack.site = instance.site\n changed = True\n\n if instance.location is not None:\n if rack.location is not None and rack.location not in descendants:\n rack.location = instance.location if racks_permitted else None\n changed = True\n elif rack.location is not None and rack.location.base_site != instance.site:\n rack.location = None\n changed = True\n\n if changed:\n rack.save()\n\n for powerpanel in PowerPanel.objects.filter(rack_group=instance):\n changed = False\n if powerpanel.site != instance.site:\n powerpanel.site = instance.site\n changed = True\n\n if instance.location is not None:\n if powerpanel.location is not None and powerpanel.location not in descendants:\n powerpanel.location = instance.location if power_panels_permitted else None\n changed = True\n elif powerpanel.location is not None and powerpanel.location.base_site != instance.site:\n powerpanel.location = None\n changed = True\n\n if changed:\n powerpanel.save()\n\n\n@receiver(post_save, sender=Rack)\ndef handle_rack_site_location_change(instance, created, raw=False, **kwargs):\n \"\"\"\n Update child Devices if Site or Location assignment has changed.\n\n Note that this is non-trivial for Location changes, since a LocationType that can contain Racks\n may or may not be permitted to contain Devices. 
If it's not permitted, rather than trying to search\n through child locations to find the \"right\" one, the best we can do is simply to null out the location.\n \"\"\"\n if raw:\n return\n if not created:\n if instance.location is not None:\n devices_permitted = (\n ContentType.objects.get_for_model(Device) in instance.location.location_type.content_types.all()\n )\n\n for device in Device.objects.filter(rack=instance):\n changed = False\n if device.site != instance.site:\n device.site = instance.site\n changed = True\n\n if instance.location is not None:\n if device.location is not None and device.location != instance.location:\n device.location = instance.location if devices_permitted else None\n changed = True\n elif device.location is not None and device.location.base_site != instance.site:\n device.location = None\n changed = True\n\n if changed:\n device.save()\n\n\n#\n# Device redundancy group\n#\n\n\n@receiver(pre_delete, sender=DeviceRedundancyGroup)\ndef clear_deviceredundancygroup_members(instance, **kwargs):\n \"\"\"\n When a DeviceRedundancyGroup is deleted, nullify the device_redundancy_group_priority field of its prior members.\n \"\"\"\n devices = Device.objects.filter(device_redundancy_group=instance.pk)\n for device in devices:\n device.device_redundancy_group_priority = None\n device.save()\n\n\n#\n# Virtual chassis\n#\n\n\n@receiver(post_save, sender=VirtualChassis)\ndef assign_virtualchassis_master(instance, created, raw=False, **kwargs):\n \"\"\"\n When a VirtualChassis is created, automatically assign its master device (if any) to the VC.\n \"\"\"\n if raw:\n return\n if created and instance.master:\n master = Device.objects.get(pk=instance.master.pk)\n master.virtual_chassis = instance\n if instance.master.vc_position is None:\n master.vc_position = 1\n master.save()\n\n\n@receiver(pre_delete, sender=VirtualChassis)\ndef clear_virtualchassis_members(instance, **kwargs):\n \"\"\"\n When a VirtualChassis is deleted, nullify the vc_position and vc_priority fields of its prior members.\n \"\"\"\n devices = Device.objects.filter(virtual_chassis=instance.pk)\n for device in devices:\n device.vc_position = None\n device.vc_priority = None\n device.save()\n\n\n#\n# Cables\n#\n\n\n@receiver(post_save, sender=Cable)\ndef update_connected_endpoints(instance, created, raw=False, **kwargs):\n \"\"\"\n When a Cable is saved, check for and update its two connected endpoints\n \"\"\"\n logger = logging.getLogger(\"nautobot.dcim.cable\")\n if raw:\n logger.debug(f\"Skipping endpoint updates for imported cable {instance}\")\n return\n\n # Cache the Cable on its two termination points\n if instance.termination_a.cable != instance:\n logger.debug(f\"Updating termination A for cable {instance}\")\n instance.termination_a.cable = instance\n instance.termination_a._cable_peer = instance.termination_b\n instance.termination_a.save()\n if instance.termination_b.cable != instance:\n logger.debug(f\"Updating termination B for cable {instance}\")\n instance.termination_b.cable = instance\n instance.termination_b._cable_peer = instance.termination_a\n instance.termination_b.save()\n\n # Create/update cable paths\n if created:\n for termination in (instance.termination_a, instance.termination_b):\n if isinstance(termination, PathEndpoint):\n create_cablepath(termination)\n else:\n rebuild_paths(termination)\n elif instance.status != instance._orig_status:\n # We currently don't support modifying either termination of an existing Cable. (This\n # may change in the future.) 
However, we do need to capture status changes and update\n # any CablePaths accordingly.\n if instance.status != Cable.STATUS_CONNECTED:\n CablePath.objects.filter(path__contains=instance).update(is_active=False)\n else:\n rebuild_paths(instance)\n\n\n@receiver(pre_delete, sender=Cable)\ndef nullify_connected_endpoints(instance, **kwargs):\n \"\"\"\n When a Cable is deleted, check for and update its two connected endpoints\n \"\"\"\n logger = logging.getLogger(\"nautobot.dcim.cable\")\n\n # Disassociate the Cable from its termination points\n if instance.termination_a is not None:\n logger.debug(f\"Nullifying termination A for cable {instance}\")\n instance.termination_a.cable = None\n instance.termination_a._cable_peer = None\n instance.termination_a.save()\n if instance.termination_b is not None:\n logger.debug(f\"Nullifying termination B for cable {instance}\")\n instance.termination_b.cable = None\n instance.termination_b._cable_peer = None\n instance.termination_b.save()\n\n # Delete and retrace any dependent cable paths\n for cablepath in CablePath.objects.filter(path__contains=instance):\n cp = CablePath.from_origin(cablepath.origin)\n if cp:\n CablePath.objects.filter(pk=cablepath.pk).update(\n path=cp.path,\n destination_type=ContentType.objects.get_for_model(cp.destination) if cp.destination else None,\n destination_id=cp.destination.pk if cp.destination else None,\n is_active=cp.is_active,\n is_split=cp.is_split,\n )\n else:\n cablepath.delete()\n\n\n#\n# Interface tagged VLAMs\n#\n\n\n@receiver(m2m_changed, sender=Interface.tagged_vlans.through)\n@disable_for_loaddata\ndef prevent_adding_tagged_vlans_with_incorrect_mode_or_site(sender, instance, action, **kwargs):\n if action != \"pre_add\":\n return\n\n validate_interface_tagged_vlans(instance, kwargs[\"model\"], kwargs[\"pk_set\"])\n", "path": "nautobot/dcim/signals.py"}]}
| 3,933 | 229 |
gh_patches_debug_7766
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-889
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Return empty json for empty bundles in ASR
Return empty JSON for empty bundles in ASR. It is unclear whether ASR can handle 204 responses, so let's default to empty JSON.
See also https://bugzilla.mozilla.org/show_bug.cgi?id=1511640
History:
- https://github.com/mozmeao/snippets-service/commit/4936f2371e341690def9be40a72abdd32205b0c9
- https://github.com/mozmeao/snippets-service/commit/cb0fc0ca63af4ed3786342e17c2f50d2fc6dc5c4
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/views.py`
Content:
```
1 import json
2 import logging
3
4 from distutils.util import strtobool
5
6 from django.conf import settings
7 from django.contrib.auth.decorators import permission_required
8 from django.core.exceptions import ValidationError
9 from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
10 from django.shortcuts import get_object_or_404, render
11 from django.utils.functional import lazy
12 from django.views.decorators.cache import cache_control
13 from django.views.decorators.csrf import csrf_exempt
14 from django.views.decorators.http import require_POST
15 from django.views.generic import TemplateView
16
17 from django_statsd.clients import statsd
18 from raven.contrib.django.models import client as sentry_client
19
20 from snippets.base import util
21 from snippets.base.bundles import ASRSnippetBundle, SnippetBundle
22 from snippets.base.decorators import access_control
23 from snippets.base.encoders import JSONSnippetEncoder
24 from snippets.base.models import ASRSnippet, Client, JSONSnippet, Snippet, SnippetTemplate
25 from snippets.base.util import get_object_or_none
26
27
28 def _bundle_timeout():
29 return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')
30 SNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa
31
32
33 class HomeView(TemplateView):
34 template_name = 'base/home.jinja'
35
36
37 @cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)
38 @access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)
39 def fetch_snippets(request, **kwargs):
40 """
41 Return one of the following responses:
42 - 200 with empty body when the bundle is empty
43 - 302 to a bundle URL after generating it if not cached.
44 """
45 statsd.incr('serve.snippets')
46
47 client = Client(**kwargs)
48 if client.startpage_version == 6:
49 bundle = ASRSnippetBundle(client)
50 else:
51 bundle = SnippetBundle(client)
52 if bundle.empty:
53 statsd.incr('bundle.empty')
54 # This is not a 204 because Activity Stream expects content, even if
55 # it's empty.
56 return HttpResponse(status=200, content='')
57 elif bundle.cached:
58 statsd.incr('bundle.cached')
59 else:
60 statsd.incr('bundle.generate')
61 bundle.generate()
62
63 return HttpResponseRedirect(bundle.url)
64
65
66 @cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)
67 @access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)
68 def fetch_json_snippets(request, **kwargs):
69 statsd.incr('serve.json_snippets')
70 client = Client(**kwargs)
71 matching_snippets = (JSONSnippet.objects
72 .filter(published=True)
73 .match_client(client)
74 .filter_by_available())
75 return HttpResponse(json.dumps(matching_snippets, cls=JSONSnippetEncoder),
76 content_type='application/json')
77
78
79 def preview_asr_snippet(request, uuid):
80 try:
81 snippet = get_object_or_404(ASRSnippet, uuid=uuid)
82 except ValidationError:
83 # Raised when UUID is a badly formed hexadecimal UUID string
84 raise Http404()
85
86 bundle_content = json.dumps({
87 'messages': [snippet.render(preview=True)],
88 })
89 return HttpResponse(bundle_content, content_type='application/json')
90
91
92 @csrf_exempt
93 @permission_required('base.change_snippet')
94 def preview_snippet(request):
95 """
96 Build a snippet using info from the POST parameters, and preview that
97 snippet on a mock about:home page.
98 """
99 try:
100 template_id = int(request.POST.get('template_id', None))
101 except (TypeError, ValueError):
102 return HttpResponseBadRequest()
103
104 template = get_object_or_none(SnippetTemplate, id=template_id)
105 data = request.POST.get('data', None)
106
107 # Validate that data is JSON.
108 try:
109 json.loads(data)
110 except (TypeError, ValueError):
111 data = None
112
113 # If your parameters are wrong, I have no sympathy for you.
114 if data is None or template is None:
115 return HttpResponseBadRequest()
116
117 # Build a snippet that isn't saved so we can render it.
118 snippet = Snippet(template=template, data=data)
119
120 if strtobool(request.POST.get('activity_stream', 'false')):
121 template_name = 'base/preview_as.jinja'
122 preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',
123 'release', 'default', 'default', 'default')
124 else:
125 template_name = 'base/preview.jinja'
126 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
127 'release', 'default', 'default', 'default')
128
129 skip_boilerplate = request.POST.get('skip_boilerplate', 'false')
130 skip_boilerplate = strtobool(skip_boilerplate)
131 if skip_boilerplate:
132 template_name = 'base/preview_without_shell.jinja'
133
134 return render(request, template_name, {
135 'snippets_json': json.dumps([snippet.to_dict()]),
136 'client': preview_client,
137 'preview': True,
138 'current_firefox_major_version': util.current_firefox_major_version(),
139 })
140
141
142 def show_snippet(request, snippet_id, uuid=False):
143 preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',
144 'release', 'default', 'default', 'default')
145
146 if uuid:
147 snippet = get_object_or_404(Snippet, uuid=snippet_id)
148 else:
149 snippet = get_object_or_404(Snippet, pk=snippet_id)
150 if not snippet.published and not request.user.is_authenticated:
151 raise Http404()
152
153 template = 'base/preview.jinja'
154 if snippet.on_startpage_5:
155 template = 'base/preview_as.jinja'
156 return render(request, template, {
157 'snippets_json': json.dumps([snippet.to_dict()]),
158 'client': preview_client,
159 'preview': True,
160 'current_firefox_major_version': util.current_firefox_major_version(),
161 })
162
163
164 @csrf_exempt
165 @require_POST
166 def csp_violation_capture(request):
167 data = sentry_client.get_data_from_request(request)
168 data.update({
169 'level': logging.INFO,
170 'logger': 'CSP',
171 })
172 try:
173 csp_data = json.loads(request.body)
174 except ValueError:
175 # Cannot decode CSP violation data, ignore
176 return HttpResponseBadRequest('Invalid CSP Report')
177
178 try:
179 blocked_uri = csp_data['csp-report']['blocked-uri']
180 except KeyError:
181 # Incomplete CSP report
182 return HttpResponseBadRequest('Incomplete CSP Report')
183
184 sentry_client.captureMessage(
185 message='CSP Violation: {}'.format(blocked_uri),
186 data=data)
187
188 return HttpResponse('Captured CSP violation, thanks for reporting.')
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/views.py b/snippets/base/views.py
--- a/snippets/base/views.py
+++ b/snippets/base/views.py
@@ -51,6 +51,11 @@
bundle = SnippetBundle(client)
if bundle.empty:
statsd.incr('bundle.empty')
+
+ if client.startpage_version == 6:
+ # Return valid JSON for Activity Stream Router
+ return HttpResponse(status=200, content='{}')
+
# This is not a 204 because Activity Stream expects content, even if
# it's empty.
return HttpResponse(status=200, content='')
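
Why the body is `'{}'` rather than an empty string: per the patch comment, the goal is to hand Activity Stream Router a parseable JSON document, and an empty body does not parse as JSON. A quick illustration, independent of the patch itself:

```python
import json

print(json.loads("{}"))     # {} -- valid JSON, safe for a client that always parses the body
try:
    json.loads("")
except json.JSONDecodeError as err:
    print(err)              # Expecting value: line 1 column 1 (char 0)
```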
|
{"golden_diff": "diff --git a/snippets/base/views.py b/snippets/base/views.py\n--- a/snippets/base/views.py\n+++ b/snippets/base/views.py\n@@ -51,6 +51,11 @@\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n+\n+ if client.startpage_version == 6:\n+ # Return valid JSON for Activity Stream Router\n+ return HttpResponse(status=200, content='{}')\n+\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n", "issue": "Return empty json for empty bundles in ASR\nReturn empty json for empty bundles in ASR. Unclear whether ASR can handle 204 responses so let's default to empty json. \r\n\r\nSee also https://bugzilla.mozilla.org/show_bug.cgi?id=1511640\r\n\r\nHistory:\r\n - https://github.com/mozmeao/snippets-service/commit/4936f2371e341690def9be40a72abdd32205b0c9\r\n - https://github.com/mozmeao/snippets-service/commit/cb0fc0ca63af4ed3786342e17c2f50d2fc6dc5c4\n", "before_files": [{"content": "import json\nimport logging\n\nfrom distutils.util import strtobool\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ValidationError\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.functional import lazy\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import TemplateView\n\nfrom django_statsd.clients import statsd\nfrom raven.contrib.django.models import client as sentry_client\n\nfrom snippets.base import util\nfrom snippets.base.bundles import ASRSnippetBundle, SnippetBundle\nfrom snippets.base.decorators import access_control\nfrom snippets.base.encoders import JSONSnippetEncoder\nfrom snippets.base.models import ASRSnippet, Client, JSONSnippet, Snippet, SnippetTemplate\nfrom snippets.base.util import get_object_or_none\n\n\ndef _bundle_timeout():\n return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')\nSNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa\n\n\nclass HomeView(TemplateView):\n template_name = 'base/home.jinja'\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_snippets(request, **kwargs):\n \"\"\"\n Return one of the following responses:\n - 200 with empty body when the bundle is empty\n - 302 to a bundle URL after generating it if not cached.\n \"\"\"\n statsd.incr('serve.snippets')\n\n client = Client(**kwargs)\n if client.startpage_version == 6:\n bundle = ASRSnippetBundle(client)\n else:\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n elif bundle.cached:\n statsd.incr('bundle.cached')\n else:\n statsd.incr('bundle.generate')\n bundle.generate()\n\n return HttpResponseRedirect(bundle.url)\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_json_snippets(request, **kwargs):\n statsd.incr('serve.json_snippets')\n client = Client(**kwargs)\n matching_snippets = (JSONSnippet.objects\n .filter(published=True)\n .match_client(client)\n .filter_by_available())\n return HttpResponse(json.dumps(matching_snippets, 
cls=JSONSnippetEncoder),\n content_type='application/json')\n\n\ndef preview_asr_snippet(request, uuid):\n try:\n snippet = get_object_or_404(ASRSnippet, uuid=uuid)\n except ValidationError:\n # Raised when UUID is a badly formed hexadecimal UUID string\n raise Http404()\n\n bundle_content = json.dumps({\n 'messages': [snippet.render(preview=True)],\n })\n return HttpResponse(bundle_content, content_type='application/json')\n\n\n@csrf_exempt\n@permission_required('base.change_snippet')\ndef preview_snippet(request):\n \"\"\"\n Build a snippet using info from the POST parameters, and preview that\n snippet on a mock about:home page.\n \"\"\"\n try:\n template_id = int(request.POST.get('template_id', None))\n except (TypeError, ValueError):\n return HttpResponseBadRequest()\n\n template = get_object_or_none(SnippetTemplate, id=template_id)\n data = request.POST.get('data', None)\n\n # Validate that data is JSON.\n try:\n json.loads(data)\n except (TypeError, ValueError):\n data = None\n\n # If your parameters are wrong, I have no sympathy for you.\n if data is None or template is None:\n return HttpResponseBadRequest()\n\n # Build a snippet that isn't saved so we can render it.\n snippet = Snippet(template=template, data=data)\n\n if strtobool(request.POST.get('activity_stream', 'false')):\n template_name = 'base/preview_as.jinja'\n preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n else:\n template_name = 'base/preview.jinja'\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n skip_boilerplate = request.POST.get('skip_boilerplate', 'false')\n skip_boilerplate = strtobool(skip_boilerplate)\n if skip_boilerplate:\n template_name = 'base/preview_without_shell.jinja'\n\n return render(request, template_name, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\ndef show_snippet(request, snippet_id, uuid=False):\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n if uuid:\n snippet = get_object_or_404(Snippet, uuid=snippet_id)\n else:\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n if not snippet.published and not request.user.is_authenticated:\n raise Http404()\n\n template = 'base/preview.jinja'\n if snippet.on_startpage_5:\n template = 'base/preview_as.jinja'\n return render(request, template, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\n@csrf_exempt\n@require_POST\ndef csp_violation_capture(request):\n data = sentry_client.get_data_from_request(request)\n data.update({\n 'level': logging.INFO,\n 'logger': 'CSP',\n })\n try:\n csp_data = json.loads(request.body)\n except ValueError:\n # Cannot decode CSP violation data, ignore\n return HttpResponseBadRequest('Invalid CSP Report')\n\n try:\n blocked_uri = csp_data['csp-report']['blocked-uri']\n except KeyError:\n # Incomplete CSP report\n return HttpResponseBadRequest('Incomplete CSP Report')\n\n sentry_client.captureMessage(\n message='CSP Violation: {}'.format(blocked_uri),\n data=data)\n\n return HttpResponse('Captured CSP violation, thanks for reporting.')\n", "path": "snippets/base/views.py"}], "after_files": [{"content": "import json\nimport 
logging\n\nfrom distutils.util import strtobool\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ValidationError\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.utils.functional import lazy\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import TemplateView\n\nfrom django_statsd.clients import statsd\nfrom raven.contrib.django.models import client as sentry_client\n\nfrom snippets.base import util\nfrom snippets.base.bundles import ASRSnippetBundle, SnippetBundle\nfrom snippets.base.decorators import access_control\nfrom snippets.base.encoders import JSONSnippetEncoder\nfrom snippets.base.models import ASRSnippet, Client, JSONSnippet, Snippet, SnippetTemplate\nfrom snippets.base.util import get_object_or_none\n\n\ndef _bundle_timeout():\n return getattr(settings, 'SNIPPET_BUNDLE_TIMEOUT')\nSNIPPET_BUNDLE_TIMEOUT = lazy(_bundle_timeout, int)() # noqa\n\n\nclass HomeView(TemplateView):\n template_name = 'base/home.jinja'\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_snippets(request, **kwargs):\n \"\"\"\n Return one of the following responses:\n - 200 with empty body when the bundle is empty\n - 302 to a bundle URL after generating it if not cached.\n \"\"\"\n statsd.incr('serve.snippets')\n\n client = Client(**kwargs)\n if client.startpage_version == 6:\n bundle = ASRSnippetBundle(client)\n else:\n bundle = SnippetBundle(client)\n if bundle.empty:\n statsd.incr('bundle.empty')\n\n if client.startpage_version == 6:\n # Return valid JSON for Activity Stream Router\n return HttpResponse(status=200, content='{}')\n\n # This is not a 204 because Activity Stream expects content, even if\n # it's empty.\n return HttpResponse(status=200, content='')\n elif bundle.cached:\n statsd.incr('bundle.cached')\n else:\n statsd.incr('bundle.generate')\n bundle.generate()\n\n return HttpResponseRedirect(bundle.url)\n\n\n@cache_control(public=True, max_age=SNIPPET_BUNDLE_TIMEOUT)\n@access_control(max_age=SNIPPET_BUNDLE_TIMEOUT)\ndef fetch_json_snippets(request, **kwargs):\n statsd.incr('serve.json_snippets')\n client = Client(**kwargs)\n matching_snippets = (JSONSnippet.objects\n .filter(published=True)\n .match_client(client)\n .filter_by_available())\n return HttpResponse(json.dumps(matching_snippets, cls=JSONSnippetEncoder),\n content_type='application/json')\n\n\ndef preview_asr_snippet(request, uuid):\n try:\n snippet = get_object_or_404(ASRSnippet, uuid=uuid)\n except ValidationError:\n # Raised when UUID is a badly formed hexadecimal UUID string\n raise Http404()\n\n bundle_content = json.dumps({\n 'messages': [snippet.render(preview=True)],\n })\n return HttpResponse(bundle_content, content_type='application/json')\n\n\n@csrf_exempt\n@permission_required('base.change_snippet')\ndef preview_snippet(request):\n \"\"\"\n Build a snippet using info from the POST parameters, and preview that\n snippet on a mock about:home page.\n \"\"\"\n try:\n template_id = int(request.POST.get('template_id', None))\n except (TypeError, ValueError):\n return HttpResponseBadRequest()\n\n template = get_object_or_none(SnippetTemplate, id=template_id)\n data = request.POST.get('data', None)\n\n # Validate 
that data is JSON.\n try:\n json.loads(data)\n except (TypeError, ValueError):\n data = None\n\n # If your parameters are wrong, I have no sympathy for you.\n if data is None or template is None:\n return HttpResponseBadRequest()\n\n # Build a snippet that isn't saved so we can render it.\n snippet = Snippet(template=template, data=data)\n\n if strtobool(request.POST.get('activity_stream', 'false')):\n template_name = 'base/preview_as.jinja'\n preview_client = Client(5, 'Firefox', '57.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n else:\n template_name = 'base/preview.jinja'\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n skip_boilerplate = request.POST.get('skip_boilerplate', 'false')\n skip_boilerplate = strtobool(skip_boilerplate)\n if skip_boilerplate:\n template_name = 'base/preview_without_shell.jinja'\n\n return render(request, template_name, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\ndef show_snippet(request, snippet_id, uuid=False):\n preview_client = Client(4, 'Firefox', '24.0', 'default', 'default', 'en-US',\n 'release', 'default', 'default', 'default')\n\n if uuid:\n snippet = get_object_or_404(Snippet, uuid=snippet_id)\n else:\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n if not snippet.published and not request.user.is_authenticated:\n raise Http404()\n\n template = 'base/preview.jinja'\n if snippet.on_startpage_5:\n template = 'base/preview_as.jinja'\n return render(request, template, {\n 'snippets_json': json.dumps([snippet.to_dict()]),\n 'client': preview_client,\n 'preview': True,\n 'current_firefox_major_version': util.current_firefox_major_version(),\n })\n\n\n@csrf_exempt\n@require_POST\ndef csp_violation_capture(request):\n data = sentry_client.get_data_from_request(request)\n data.update({\n 'level': logging.INFO,\n 'logger': 'CSP',\n })\n try:\n csp_data = json.loads(request.body)\n except ValueError:\n # Cannot decode CSP violation data, ignore\n return HttpResponseBadRequest('Invalid CSP Report')\n\n try:\n blocked_uri = csp_data['csp-report']['blocked-uri']\n except KeyError:\n # Incomplete CSP report\n return HttpResponseBadRequest('Incomplete CSP Report')\n\n sentry_client.captureMessage(\n message='CSP Violation: {}'.format(blocked_uri),\n data=data)\n\n return HttpResponse('Captured CSP violation, thanks for reporting.')\n", "path": "snippets/base/views.py"}]}
| 2,367 | 141 |