problem_id
stringlengths 18
22
| source
stringclasses 1
value | task_type
stringclasses 1
value | in_source_id
stringlengths 13
58
| prompt
stringlengths 1.1k
25.4k
| golden_diff
stringlengths 145
5.13k
| verification_info
stringlengths 582
39.1k
| num_tokens
int64 271
4.1k
| num_tokens_diff
int64 47
1.02k
|
---|---|---|---|---|---|---|---|---|
gh_patches_debug_16805
|
rasdani/github-patches
|
git_diff
|
cython__cython-5332
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fatal Python error: PyEval_SaveThread when creating ufunc with nogil [BUG]
### Describe the bug
I'm generating ufuncs for my fortran library by the new version 3.0.0b1 of cython, using the @cython.ufunc function decorator.
I declare the function as nogil. and call it in python script. It works fine when the input array length is less than 500. When the input array length is greater than 500, it stops and throws a thread error.
```
Fatal Python error: PyEval_SaveThread: the function must be called with the GIL held, but the GIL is released (the current Python thread state is NULL)
Python runtime state: initialized
```
I'm using the mingw-w64 compiler provided by msys2 on windows, but that doesn't seem to be the cause of the error. I transferred the program to an ubuntu machine and found the same error.
### Code to reproduce the behaviour:
```cython
# test_ufunc.pyx
cimport cython
@cython.ufunc
cdef double add_one(double x) nogil:
return x+1
```
```python
import test_ufunc
import numpy as np
x=np.linspace(-1,1,501)
x_add=test_ufunc.add_one(x)
print(x_add)
```
### Expected behaviour
No errors, result returns successfully.
### OS
Ubuntu and Windows
### Python version
3.10.6 (Ubuntu), 3.11.2 (Windows)
### Cython version
3.0.0b1
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Cython/Compiler/UFuncs.py`
Content:
```
1 from . import (
2 Nodes,
3 ExprNodes,
4 FusedNode,
5 TreeFragment,
6 Pipeline,
7 ParseTreeTransforms,
8 Naming,
9 UtilNodes,
10 )
11 from .Errors import error
12 from . import PyrexTypes
13 from .UtilityCode import CythonUtilityCode
14 from .Code import TempitaUtilityCode, UtilityCode
15 from .Visitor import PrintTree, TreeVisitor, VisitorTransform
16
17 numpy_int_types = [
18 "NPY_BYTE",
19 "NPY_INT8",
20 "NPY_SHORT",
21 "NPY_INT16",
22 "NPY_INT",
23 "NPY_INT32",
24 "NPY_LONG",
25 "NPY_LONGLONG",
26 "NPY_INT64",
27 ]
28 numpy_uint_types = [tp.replace("NPY_", "NPY_U") for tp in numpy_int_types]
29 # note: half float type is deliberately omitted
30 numpy_numeric_types = (
31 numpy_int_types
32 + numpy_uint_types
33 + [
34 "NPY_FLOAT",
35 "NPY_FLOAT32",
36 "NPY_DOUBLE",
37 "NPY_FLOAT64",
38 "NPY_LONGDOUBLE",
39 ]
40 )
41
42
43 def _get_type_constant(pos, type_):
44 if type_.is_complex:
45 # 'is' checks don't seem to work for complex types
46 if type_ == PyrexTypes.c_float_complex_type:
47 return "NPY_CFLOAT"
48 elif type_ == PyrexTypes.c_double_complex_type:
49 return "NPY_CDOUBLE"
50 elif type_ == PyrexTypes.c_longdouble_complex_type:
51 return "NPY_CLONGDOUBLE"
52 elif type_.is_numeric:
53 postfix = type_.empty_declaration_code().upper().replace(" ", "")
54 typename = "NPY_%s" % postfix
55 if typename in numpy_numeric_types:
56 return typename
57 elif type_.is_pyobject:
58 return "NPY_OBJECT"
59 # TODO possible NPY_BOOL to bint but it needs a cast?
60 # TODO NPY_DATETIME, NPY_TIMEDELTA, NPY_STRING, NPY_UNICODE and maybe NPY_VOID might be handleable
61 error(pos, "Type '%s' cannot be used as a ufunc argument" % type_)
62
63
64 class _FindCFuncDefNode(TreeVisitor):
65 """
66 Finds the CFuncDefNode in the tree
67
68 The assumption is that there's only one CFuncDefNode
69 """
70
71 found_node = None
72
73 def visit_Node(self, node):
74 if self.found_node:
75 return
76 else:
77 self.visitchildren(node)
78
79 def visit_CFuncDefNode(self, node):
80 self.found_node = node
81
82 def __call__(self, tree):
83 self.visit(tree)
84 return self.found_node
85
86
87 def get_cfunc_from_tree(tree):
88 return _FindCFuncDefNode()(tree)
89
90
91 class _ArgumentInfo(object):
92 """
93 Everything related to defining an input/output argument for a ufunc
94
95 type - PyrexType
96 type_constant - str such as "NPY_INT8" representing numpy dtype constants
97 """
98
99 def __init__(self, type, type_constant):
100 self.type = type
101 self.type_constant = type_constant
102
103
104 class UFuncConversion(object):
105 def __init__(self, node):
106 self.node = node
107 self.global_scope = node.local_scope.global_scope()
108
109 self.in_definitions = self.get_in_type_info()
110 self.out_definitions = self.get_out_type_info()
111
112 def get_in_type_info(self):
113 definitions = []
114 for n, arg in enumerate(self.node.args):
115 type_const = _get_type_constant(self.node.pos, arg.type)
116 definitions.append(_ArgumentInfo(arg.type, type_const))
117 return definitions
118
119 def get_out_type_info(self):
120 if self.node.return_type.is_ctuple:
121 components = self.node.return_type.components
122 else:
123 components = [self.node.return_type]
124 definitions = []
125 for n, type in enumerate(components):
126 definitions.append(
127 _ArgumentInfo(type, _get_type_constant(self.node.pos, type))
128 )
129 return definitions
130
131 def generate_cy_utility_code(self):
132 arg_types = [a.type for a in self.in_definitions]
133 out_types = [a.type for a in self.out_definitions]
134 inline_func_decl = self.node.entry.type.declaration_code(
135 self.node.entry.cname, pyrex=True
136 )
137 self.node.entry.used = True
138
139 ufunc_cname = self.global_scope.next_id(self.node.entry.name + "_ufunc_def")
140
141 context = dict(
142 func_cname=ufunc_cname,
143 in_types=arg_types,
144 out_types=out_types,
145 inline_func_call=self.node.entry.cname,
146 inline_func_declaration=inline_func_decl,
147 nogil=self.node.entry.type.nogil,
148 )
149
150 code = CythonUtilityCode.load(
151 "UFuncDefinition",
152 "UFuncs.pyx",
153 context=context,
154 outer_module_scope=self.global_scope,
155 )
156
157 tree = code.get_tree(entries_only=True)
158 return tree
159
160 def use_generic_utility_code(self):
161 # use the invariant C utility code
162 self.global_scope.use_utility_code(
163 UtilityCode.load_cached("UFuncsInit", "UFuncs_C.c")
164 )
165 self.global_scope.use_utility_code(
166 UtilityCode.load_cached("NumpyImportUFunc", "NumpyImportArray.c")
167 )
168
169
170 def convert_to_ufunc(node):
171 if isinstance(node, Nodes.CFuncDefNode):
172 if node.local_scope.parent_scope.is_c_class_scope:
173 error(node.pos, "Methods cannot currently be converted to a ufunc")
174 return node
175 converters = [UFuncConversion(node)]
176 original_node = node
177 elif isinstance(node, FusedNode.FusedCFuncDefNode) and isinstance(
178 node.node, Nodes.CFuncDefNode
179 ):
180 if node.node.local_scope.parent_scope.is_c_class_scope:
181 error(node.pos, "Methods cannot currently be converted to a ufunc")
182 return node
183 converters = [UFuncConversion(n) for n in node.nodes]
184 original_node = node.node
185 else:
186 error(node.pos, "Only C functions can be converted to a ufunc")
187 return node
188
189 if not converters:
190 return # this path probably shouldn't happen
191
192 del converters[0].global_scope.entries[original_node.entry.name]
193 # the generic utility code is generic, so there's no reason to do it multiple times
194 converters[0].use_generic_utility_code()
195 return [node] + _generate_stats_from_converters(converters, original_node)
196
197
198 def generate_ufunc_initialization(converters, cfunc_nodes, original_node):
199 global_scope = converters[0].global_scope
200 ufunc_funcs_name = global_scope.next_id(Naming.pyrex_prefix + "funcs")
201 ufunc_types_name = global_scope.next_id(Naming.pyrex_prefix + "types")
202 ufunc_data_name = global_scope.next_id(Naming.pyrex_prefix + "data")
203 type_constants = []
204 narg_in = None
205 narg_out = None
206 for c in converters:
207 in_const = [d.type_constant for d in c.in_definitions]
208 if narg_in is not None:
209 assert narg_in == len(in_const)
210 else:
211 narg_in = len(in_const)
212 type_constants.extend(in_const)
213 out_const = [d.type_constant for d in c.out_definitions]
214 if narg_out is not None:
215 assert narg_out == len(out_const)
216 else:
217 narg_out = len(out_const)
218 type_constants.extend(out_const)
219
220 func_cnames = [cfnode.entry.cname for cfnode in cfunc_nodes]
221
222 context = dict(
223 ufunc_funcs_name=ufunc_funcs_name,
224 func_cnames=func_cnames,
225 ufunc_types_name=ufunc_types_name,
226 type_constants=type_constants,
227 ufunc_data_name=ufunc_data_name,
228 )
229 global_scope.use_utility_code(
230 TempitaUtilityCode.load("UFuncConsts", "UFuncs_C.c", context=context)
231 )
232
233 pos = original_node.pos
234 func_name = original_node.entry.name
235 docstr = original_node.doc
236 args_to_func = '%s(), %s, %s(), %s, %s, %s, PyUFunc_None, "%s", %s, 0' % (
237 ufunc_funcs_name,
238 ufunc_data_name,
239 ufunc_types_name,
240 len(func_cnames),
241 narg_in,
242 narg_out,
243 func_name,
244 docstr.as_c_string_literal() if docstr else "NULL",
245 )
246
247 call_node = ExprNodes.PythonCapiCallNode(
248 pos,
249 function_name="PyUFunc_FromFuncAndData",
250 # use a dummy type because it's honestly too fiddly
251 func_type=PyrexTypes.CFuncType(
252 PyrexTypes.py_object_type,
253 [PyrexTypes.CFuncTypeArg("dummy", PyrexTypes.c_void_ptr_type, None)],
254 ),
255 args=[
256 ExprNodes.ConstNode(
257 pos, type=PyrexTypes.c_void_ptr_type, value=args_to_func
258 )
259 ],
260 )
261 lhs_entry = global_scope.declare_var(func_name, PyrexTypes.py_object_type, pos)
262 assgn_node = Nodes.SingleAssignmentNode(
263 pos,
264 lhs=ExprNodes.NameNode(
265 pos, name=func_name, type=PyrexTypes.py_object_type, entry=lhs_entry
266 ),
267 rhs=call_node,
268 )
269 return assgn_node
270
271
272 def _generate_stats_from_converters(converters, node):
273 stats = []
274 for converter in converters:
275 tree = converter.generate_cy_utility_code()
276 ufunc_node = get_cfunc_from_tree(tree)
277 # merge in any utility code
278 converter.global_scope.utility_code_list.extend(tree.scope.utility_code_list)
279 stats.append(ufunc_node)
280
281 stats.append(generate_ufunc_initialization(converters, stats, node))
282 return stats
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Cython/Compiler/UFuncs.py b/Cython/Compiler/UFuncs.py
--- a/Cython/Compiler/UFuncs.py
+++ b/Cython/Compiler/UFuncs.py
@@ -138,6 +138,9 @@
ufunc_cname = self.global_scope.next_id(self.node.entry.name + "_ufunc_def")
+ will_be_called_without_gil = not (any(t.is_pyobject for t in arg_types) or
+ any(t.is_pyobject for t in out_types))
+
context = dict(
func_cname=ufunc_cname,
in_types=arg_types,
@@ -145,6 +148,7 @@
inline_func_call=self.node.entry.cname,
inline_func_declaration=inline_func_decl,
nogil=self.node.entry.type.nogil,
+ will_be_called_without_gil=will_be_called_without_gil,
)
code = CythonUtilityCode.load(
|
{"golden_diff": "diff --git a/Cython/Compiler/UFuncs.py b/Cython/Compiler/UFuncs.py\n--- a/Cython/Compiler/UFuncs.py\n+++ b/Cython/Compiler/UFuncs.py\n@@ -138,6 +138,9 @@\n \n ufunc_cname = self.global_scope.next_id(self.node.entry.name + \"_ufunc_def\")\n \n+ will_be_called_without_gil = not (any(t.is_pyobject for t in arg_types) or\n+ any(t.is_pyobject for t in out_types))\n+\n context = dict(\n func_cname=ufunc_cname,\n in_types=arg_types,\n@@ -145,6 +148,7 @@\n inline_func_call=self.node.entry.cname,\n inline_func_declaration=inline_func_decl,\n nogil=self.node.entry.type.nogil,\n+ will_be_called_without_gil=will_be_called_without_gil,\n )\n \n code = CythonUtilityCode.load(\n", "issue": "Fatal Python error: PyEval_SaveThread when creating ufunc with nogil [BUG] \n### Describe the bug\n\nI'm generating ufuncs for my fortran library by the new version 3.0.0b1 of cython, using the @cython.ufunc function decorator.\r\nI declare the function as nogil. and call it in python script. It works fine when the input array length is less than 500. When the input array length is greater than 500, it stops and throws a thread error.\r\n```\r\nFatal Python error: PyEval_SaveThread: the function must be called with the GIL held, but the GIL is released (the current Python thread state is NULL)\r\nPython runtime state: initialized\r\n```\r\nI'm using the mingw-w64 compiler provided by msys2 on windows, but that doesn't seem to be the cause of the error. I transferred the program to an ubuntu machine and found the same error.\n\n### Code to reproduce the behaviour:\n\n```cython\r\n# test_ufunc.pyx\r\ncimport cython\r\n\r\[email protected]\r\ncdef double add_one(double x) nogil:\r\n return x+1\r\n```\r\n\r\n```python\r\nimport test_ufunc\r\nimport numpy as np\r\n\r\nx=np.linspace(-1,1,501)\r\nx_add=test_ufunc.add_one(x)\r\nprint(x_add)\r\n```\n\n### Expected behaviour\n\nNo errors, result returns successfully.\n\n### OS\n\nUbuntu and Windows\n\n### Python version\n\n3.10.6 (Ubuntu), 3.11.2 (Windows)\n\n### Cython version\n\n3.0.0b1\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from . import (\n Nodes,\n ExprNodes,\n FusedNode,\n TreeFragment,\n Pipeline,\n ParseTreeTransforms,\n Naming,\n UtilNodes,\n)\nfrom .Errors import error\nfrom . import PyrexTypes\nfrom .UtilityCode import CythonUtilityCode\nfrom .Code import TempitaUtilityCode, UtilityCode\nfrom .Visitor import PrintTree, TreeVisitor, VisitorTransform\n\nnumpy_int_types = [\n \"NPY_BYTE\",\n \"NPY_INT8\",\n \"NPY_SHORT\",\n \"NPY_INT16\",\n \"NPY_INT\",\n \"NPY_INT32\",\n \"NPY_LONG\",\n \"NPY_LONGLONG\",\n \"NPY_INT64\",\n]\nnumpy_uint_types = [tp.replace(\"NPY_\", \"NPY_U\") for tp in numpy_int_types]\n# note: half float type is deliberately omitted\nnumpy_numeric_types = (\n numpy_int_types\n + numpy_uint_types\n + [\n \"NPY_FLOAT\",\n \"NPY_FLOAT32\",\n \"NPY_DOUBLE\",\n \"NPY_FLOAT64\",\n \"NPY_LONGDOUBLE\",\n ]\n)\n\n\ndef _get_type_constant(pos, type_):\n if type_.is_complex:\n # 'is' checks don't seem to work for complex types\n if type_ == PyrexTypes.c_float_complex_type:\n return \"NPY_CFLOAT\"\n elif type_ == PyrexTypes.c_double_complex_type:\n return \"NPY_CDOUBLE\"\n elif type_ == PyrexTypes.c_longdouble_complex_type:\n return \"NPY_CLONGDOUBLE\"\n elif type_.is_numeric:\n postfix = type_.empty_declaration_code().upper().replace(\" \", \"\")\n typename = \"NPY_%s\" % postfix\n if typename in numpy_numeric_types:\n return typename\n elif type_.is_pyobject:\n return \"NPY_OBJECT\"\n # TODO possible NPY_BOOL to bint but it needs a cast?\n # TODO NPY_DATETIME, NPY_TIMEDELTA, NPY_STRING, NPY_UNICODE and maybe NPY_VOID might be handleable\n error(pos, \"Type '%s' cannot be used as a ufunc argument\" % type_)\n\n\nclass _FindCFuncDefNode(TreeVisitor):\n \"\"\"\n Finds the CFuncDefNode in the tree\n\n The assumption is that there's only one CFuncDefNode\n \"\"\"\n\n found_node = None\n\n def visit_Node(self, node):\n if self.found_node:\n return\n else:\n self.visitchildren(node)\n\n def visit_CFuncDefNode(self, node):\n self.found_node = node\n\n def __call__(self, tree):\n self.visit(tree)\n return self.found_node\n\n\ndef get_cfunc_from_tree(tree):\n return _FindCFuncDefNode()(tree)\n\n\nclass _ArgumentInfo(object):\n \"\"\"\n Everything related to defining an input/output argument for a ufunc\n\n type - PyrexType\n type_constant - str such as \"NPY_INT8\" representing numpy dtype constants\n \"\"\"\n\n def __init__(self, type, type_constant):\n self.type = type\n self.type_constant = type_constant\n\n\nclass UFuncConversion(object):\n def __init__(self, node):\n self.node = node\n self.global_scope = node.local_scope.global_scope()\n\n self.in_definitions = self.get_in_type_info()\n self.out_definitions = self.get_out_type_info()\n\n def get_in_type_info(self):\n definitions = []\n for n, arg in enumerate(self.node.args):\n type_const = _get_type_constant(self.node.pos, arg.type)\n definitions.append(_ArgumentInfo(arg.type, type_const))\n return definitions\n\n def get_out_type_info(self):\n if self.node.return_type.is_ctuple:\n components = self.node.return_type.components\n else:\n components = [self.node.return_type]\n definitions = []\n for n, type in enumerate(components):\n definitions.append(\n _ArgumentInfo(type, _get_type_constant(self.node.pos, type))\n )\n return definitions\n\n def generate_cy_utility_code(self):\n arg_types = [a.type for a in self.in_definitions]\n out_types = [a.type for a in self.out_definitions]\n inline_func_decl = self.node.entry.type.declaration_code(\n self.node.entry.cname, pyrex=True\n )\n self.node.entry.used = True\n\n ufunc_cname = self.global_scope.next_id(self.node.entry.name + \"_ufunc_def\")\n\n context = dict(\n func_cname=ufunc_cname,\n in_types=arg_types,\n out_types=out_types,\n inline_func_call=self.node.entry.cname,\n inline_func_declaration=inline_func_decl,\n nogil=self.node.entry.type.nogil,\n )\n\n code = CythonUtilityCode.load(\n \"UFuncDefinition\",\n \"UFuncs.pyx\",\n context=context,\n outer_module_scope=self.global_scope,\n )\n\n tree = code.get_tree(entries_only=True)\n return tree\n\n def use_generic_utility_code(self):\n # use the invariant C utility code\n self.global_scope.use_utility_code(\n UtilityCode.load_cached(\"UFuncsInit\", \"UFuncs_C.c\")\n )\n self.global_scope.use_utility_code(\n UtilityCode.load_cached(\"NumpyImportUFunc\", \"NumpyImportArray.c\")\n )\n\n\ndef convert_to_ufunc(node):\n if isinstance(node, Nodes.CFuncDefNode):\n if node.local_scope.parent_scope.is_c_class_scope:\n error(node.pos, \"Methods cannot currently be converted to a ufunc\")\n return node\n converters = [UFuncConversion(node)]\n original_node = node\n elif isinstance(node, FusedNode.FusedCFuncDefNode) and isinstance(\n node.node, Nodes.CFuncDefNode\n ):\n if node.node.local_scope.parent_scope.is_c_class_scope:\n error(node.pos, \"Methods cannot currently be converted to a ufunc\")\n return node\n converters = [UFuncConversion(n) for n in node.nodes]\n original_node = node.node\n else:\n error(node.pos, \"Only C functions can be converted to a ufunc\")\n return node\n\n if not converters:\n return # this path probably shouldn't happen\n\n del converters[0].global_scope.entries[original_node.entry.name]\n # the generic utility code is generic, so there's no reason to do it multiple times\n converters[0].use_generic_utility_code()\n return [node] + _generate_stats_from_converters(converters, original_node)\n\n\ndef generate_ufunc_initialization(converters, cfunc_nodes, original_node):\n global_scope = converters[0].global_scope\n ufunc_funcs_name = global_scope.next_id(Naming.pyrex_prefix + \"funcs\")\n ufunc_types_name = global_scope.next_id(Naming.pyrex_prefix + \"types\")\n ufunc_data_name = global_scope.next_id(Naming.pyrex_prefix + \"data\")\n type_constants = []\n narg_in = None\n narg_out = None\n for c in converters:\n in_const = [d.type_constant for d in c.in_definitions]\n if narg_in is not None:\n assert narg_in == len(in_const)\n else:\n narg_in = len(in_const)\n type_constants.extend(in_const)\n out_const = [d.type_constant for d in c.out_definitions]\n if narg_out is not None:\n assert narg_out == len(out_const)\n else:\n narg_out = len(out_const)\n type_constants.extend(out_const)\n\n func_cnames = [cfnode.entry.cname for cfnode in cfunc_nodes]\n\n context = dict(\n ufunc_funcs_name=ufunc_funcs_name,\n func_cnames=func_cnames,\n ufunc_types_name=ufunc_types_name,\n type_constants=type_constants,\n ufunc_data_name=ufunc_data_name,\n )\n global_scope.use_utility_code(\n TempitaUtilityCode.load(\"UFuncConsts\", \"UFuncs_C.c\", context=context)\n )\n\n pos = original_node.pos\n func_name = original_node.entry.name\n docstr = original_node.doc\n args_to_func = '%s(), %s, %s(), %s, %s, %s, PyUFunc_None, \"%s\", %s, 0' % (\n ufunc_funcs_name,\n ufunc_data_name,\n ufunc_types_name,\n len(func_cnames),\n narg_in,\n narg_out,\n func_name,\n docstr.as_c_string_literal() if docstr else \"NULL\",\n )\n\n call_node = ExprNodes.PythonCapiCallNode(\n pos,\n function_name=\"PyUFunc_FromFuncAndData\",\n # use a dummy type because it's honestly too fiddly\n func_type=PyrexTypes.CFuncType(\n PyrexTypes.py_object_type,\n [PyrexTypes.CFuncTypeArg(\"dummy\", PyrexTypes.c_void_ptr_type, None)],\n ),\n args=[\n ExprNodes.ConstNode(\n pos, type=PyrexTypes.c_void_ptr_type, value=args_to_func\n )\n ],\n )\n lhs_entry = global_scope.declare_var(func_name, PyrexTypes.py_object_type, pos)\n assgn_node = Nodes.SingleAssignmentNode(\n pos,\n lhs=ExprNodes.NameNode(\n pos, name=func_name, type=PyrexTypes.py_object_type, entry=lhs_entry\n ),\n rhs=call_node,\n )\n return assgn_node\n\n\ndef _generate_stats_from_converters(converters, node):\n stats = []\n for converter in converters:\n tree = converter.generate_cy_utility_code()\n ufunc_node = get_cfunc_from_tree(tree)\n # merge in any utility code\n converter.global_scope.utility_code_list.extend(tree.scope.utility_code_list)\n stats.append(ufunc_node)\n\n stats.append(generate_ufunc_initialization(converters, stats, node))\n return stats\n", "path": "Cython/Compiler/UFuncs.py"}], "after_files": [{"content": "from . import (\n Nodes,\n ExprNodes,\n FusedNode,\n TreeFragment,\n Pipeline,\n ParseTreeTransforms,\n Naming,\n UtilNodes,\n)\nfrom .Errors import error\nfrom . import PyrexTypes\nfrom .UtilityCode import CythonUtilityCode\nfrom .Code import TempitaUtilityCode, UtilityCode\nfrom .Visitor import PrintTree, TreeVisitor, VisitorTransform\n\nnumpy_int_types = [\n \"NPY_BYTE\",\n \"NPY_INT8\",\n \"NPY_SHORT\",\n \"NPY_INT16\",\n \"NPY_INT\",\n \"NPY_INT32\",\n \"NPY_LONG\",\n \"NPY_LONGLONG\",\n \"NPY_INT64\",\n]\nnumpy_uint_types = [tp.replace(\"NPY_\", \"NPY_U\") for tp in numpy_int_types]\n# note: half float type is deliberately omitted\nnumpy_numeric_types = (\n numpy_int_types\n + numpy_uint_types\n + [\n \"NPY_FLOAT\",\n \"NPY_FLOAT32\",\n \"NPY_DOUBLE\",\n \"NPY_FLOAT64\",\n \"NPY_LONGDOUBLE\",\n ]\n)\n\n\ndef _get_type_constant(pos, type_):\n if type_.is_complex:\n # 'is' checks don't seem to work for complex types\n if type_ == PyrexTypes.c_float_complex_type:\n return \"NPY_CFLOAT\"\n elif type_ == PyrexTypes.c_double_complex_type:\n return \"NPY_CDOUBLE\"\n elif type_ == PyrexTypes.c_longdouble_complex_type:\n return \"NPY_CLONGDOUBLE\"\n elif type_.is_numeric:\n postfix = type_.empty_declaration_code().upper().replace(\" \", \"\")\n typename = \"NPY_%s\" % postfix\n if typename in numpy_numeric_types:\n return typename\n elif type_.is_pyobject:\n return \"NPY_OBJECT\"\n # TODO possible NPY_BOOL to bint but it needs a cast?\n # TODO NPY_DATETIME, NPY_TIMEDELTA, NPY_STRING, NPY_UNICODE and maybe NPY_VOID might be handleable\n error(pos, \"Type '%s' cannot be used as a ufunc argument\" % type_)\n\n\nclass _FindCFuncDefNode(TreeVisitor):\n \"\"\"\n Finds the CFuncDefNode in the tree\n\n The assumption is that there's only one CFuncDefNode\n \"\"\"\n\n found_node = None\n\n def visit_Node(self, node):\n if self.found_node:\n return\n else:\n self.visitchildren(node)\n\n def visit_CFuncDefNode(self, node):\n self.found_node = node\n\n def __call__(self, tree):\n self.visit(tree)\n return self.found_node\n\n\ndef get_cfunc_from_tree(tree):\n return _FindCFuncDefNode()(tree)\n\n\nclass _ArgumentInfo(object):\n \"\"\"\n Everything related to defining an input/output argument for a ufunc\n\n type - PyrexType\n type_constant - str such as \"NPY_INT8\" representing numpy dtype constants\n \"\"\"\n\n def __init__(self, type, type_constant):\n self.type = type\n self.type_constant = type_constant\n\n\nclass UFuncConversion(object):\n def __init__(self, node):\n self.node = node\n self.global_scope = node.local_scope.global_scope()\n\n self.in_definitions = self.get_in_type_info()\n self.out_definitions = self.get_out_type_info()\n\n def get_in_type_info(self):\n definitions = []\n for n, arg in enumerate(self.node.args):\n type_const = _get_type_constant(self.node.pos, arg.type)\n definitions.append(_ArgumentInfo(arg.type, type_const))\n return definitions\n\n def get_out_type_info(self):\n if self.node.return_type.is_ctuple:\n components = self.node.return_type.components\n else:\n components = [self.node.return_type]\n definitions = []\n for n, type in enumerate(components):\n definitions.append(\n _ArgumentInfo(type, _get_type_constant(self.node.pos, type))\n )\n return definitions\n\n def generate_cy_utility_code(self):\n arg_types = [a.type for a in self.in_definitions]\n out_types = [a.type for a in self.out_definitions]\n inline_func_decl = self.node.entry.type.declaration_code(\n self.node.entry.cname, pyrex=True\n )\n self.node.entry.used = True\n\n ufunc_cname = self.global_scope.next_id(self.node.entry.name + \"_ufunc_def\")\n\n will_be_called_without_gil = not (any(t.is_pyobject for t in arg_types) or\n any(t.is_pyobject for t in out_types))\n\n context = dict(\n func_cname=ufunc_cname,\n in_types=arg_types,\n out_types=out_types,\n inline_func_call=self.node.entry.cname,\n inline_func_declaration=inline_func_decl,\n nogil=self.node.entry.type.nogil,\n will_be_called_without_gil=will_be_called_without_gil,\n )\n\n code = CythonUtilityCode.load(\n \"UFuncDefinition\",\n \"UFuncs.pyx\",\n context=context,\n outer_module_scope=self.global_scope,\n )\n\n tree = code.get_tree(entries_only=True)\n return tree\n\n def use_generic_utility_code(self):\n # use the invariant C utility code\n self.global_scope.use_utility_code(\n UtilityCode.load_cached(\"UFuncsInit\", \"UFuncs_C.c\")\n )\n self.global_scope.use_utility_code(\n UtilityCode.load_cached(\"NumpyImportUFunc\", \"NumpyImportArray.c\")\n )\n\n\ndef convert_to_ufunc(node):\n if isinstance(node, Nodes.CFuncDefNode):\n if node.local_scope.parent_scope.is_c_class_scope:\n error(node.pos, \"Methods cannot currently be converted to a ufunc\")\n return node\n converters = [UFuncConversion(node)]\n original_node = node\n elif isinstance(node, FusedNode.FusedCFuncDefNode) and isinstance(\n node.node, Nodes.CFuncDefNode\n ):\n if node.node.local_scope.parent_scope.is_c_class_scope:\n error(node.pos, \"Methods cannot currently be converted to a ufunc\")\n return node\n converters = [UFuncConversion(n) for n in node.nodes]\n original_node = node.node\n else:\n error(node.pos, \"Only C functions can be converted to a ufunc\")\n return node\n\n if not converters:\n return # this path probably shouldn't happen\n\n del converters[0].global_scope.entries[original_node.entry.name]\n # the generic utility code is generic, so there's no reason to do it multiple times\n converters[0].use_generic_utility_code()\n return [node] + _generate_stats_from_converters(converters, original_node)\n\n\ndef generate_ufunc_initialization(converters, cfunc_nodes, original_node):\n global_scope = converters[0].global_scope\n ufunc_funcs_name = global_scope.next_id(Naming.pyrex_prefix + \"funcs\")\n ufunc_types_name = global_scope.next_id(Naming.pyrex_prefix + \"types\")\n ufunc_data_name = global_scope.next_id(Naming.pyrex_prefix + \"data\")\n type_constants = []\n narg_in = None\n narg_out = None\n for c in converters:\n in_const = [d.type_constant for d in c.in_definitions]\n if narg_in is not None:\n assert narg_in == len(in_const)\n else:\n narg_in = len(in_const)\n type_constants.extend(in_const)\n out_const = [d.type_constant for d in c.out_definitions]\n if narg_out is not None:\n assert narg_out == len(out_const)\n else:\n narg_out = len(out_const)\n type_constants.extend(out_const)\n\n func_cnames = [cfnode.entry.cname for cfnode in cfunc_nodes]\n\n context = dict(\n ufunc_funcs_name=ufunc_funcs_name,\n func_cnames=func_cnames,\n ufunc_types_name=ufunc_types_name,\n type_constants=type_constants,\n ufunc_data_name=ufunc_data_name,\n )\n global_scope.use_utility_code(\n TempitaUtilityCode.load(\"UFuncConsts\", \"UFuncs_C.c\", context=context)\n )\n\n pos = original_node.pos\n func_name = original_node.entry.name\n docstr = original_node.doc\n args_to_func = '%s(), %s, %s(), %s, %s, %s, PyUFunc_None, \"%s\", %s, 0' % (\n ufunc_funcs_name,\n ufunc_data_name,\n ufunc_types_name,\n len(func_cnames),\n narg_in,\n narg_out,\n func_name,\n docstr.as_c_string_literal() if docstr else \"NULL\",\n )\n\n call_node = ExprNodes.PythonCapiCallNode(\n pos,\n function_name=\"PyUFunc_FromFuncAndData\",\n # use a dummy type because it's honestly too fiddly\n func_type=PyrexTypes.CFuncType(\n PyrexTypes.py_object_type,\n [PyrexTypes.CFuncTypeArg(\"dummy\", PyrexTypes.c_void_ptr_type, None)],\n ),\n args=[\n ExprNodes.ConstNode(\n pos, type=PyrexTypes.c_void_ptr_type, value=args_to_func\n )\n ],\n )\n lhs_entry = global_scope.declare_var(func_name, PyrexTypes.py_object_type, pos)\n assgn_node = Nodes.SingleAssignmentNode(\n pos,\n lhs=ExprNodes.NameNode(\n pos, name=func_name, type=PyrexTypes.py_object_type, entry=lhs_entry\n ),\n rhs=call_node,\n )\n return assgn_node\n\n\ndef _generate_stats_from_converters(converters, node):\n stats = []\n for converter in converters:\n tree = converter.generate_cy_utility_code()\n ufunc_node = get_cfunc_from_tree(tree)\n # merge in any utility code\n converter.global_scope.utility_code_list.extend(tree.scope.utility_code_list)\n stats.append(ufunc_node)\n\n stats.append(generate_ufunc_initialization(converters, stats, node))\n return stats\n", "path": "Cython/Compiler/UFuncs.py"}]}
| 3,535 | 213 |
gh_patches_debug_12302
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aiopg instrumentation creating nested traces
**Describe your environment**
Python 3.6.12
Macos 11.1
opentelemetry-python-contrib: Tried released version, as well as `main`.
aiohttp==3.7.3
aiopg==1.1.0
Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
**Steps to reproduce**
I am trying the `aiopg` instrumentation. I am using it along aiohttp. This may be an usage issue, but I have created a minimal example in [here](https://github.com/cdvv7788/aiopg-instrumentor-bug) (Just install requirements, run, and visit `localhost:4000`...you will need a postgres database named test and jaeger running somewhere).
The trace will keep growing with every request, nesting new traces every time:

**What is the expected behavior?**
Every connection has a single trace (no nesting)
**What is the actual behavior?**
Every time a request is made, a new trace is nested and sends the whole stack to jaeger
**Notes**
This may be a bad usage issue. If that is the case, please let me know what may be causing it and how to use this properly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py`
Content:
```
1 import typing
2
3 import wrapt
4 from aiopg.utils import _ContextManager, _PoolAcquireContextManager
5
6 from opentelemetry.instrumentation.dbapi import (
7 CursorTracer,
8 DatabaseApiIntegration,
9 )
10 from opentelemetry.trace import SpanKind
11
12
13 # pylint: disable=abstract-method
14 class AsyncProxyObject(wrapt.ObjectProxy):
15 def __aiter__(self):
16 return self.__wrapped__.__aiter__()
17
18 async def __anext__(self):
19 result = await self.__wrapped__.__anext__()
20 return result
21
22 async def __aenter__(self):
23 return await self.__wrapped__.__aenter__()
24
25 async def __aexit__(self, exc_type, exc_val, exc_tb):
26 return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)
27
28 def __await__(self):
29 return self.__wrapped__.__await__()
30
31
32 class AiopgIntegration(DatabaseApiIntegration):
33 async def wrapped_connection(
34 self,
35 connect_method: typing.Callable[..., typing.Any],
36 args: typing.Tuple[typing.Any, typing.Any],
37 kwargs: typing.Dict[typing.Any, typing.Any],
38 ):
39 """Add object proxy to connection object."""
40 connection = await connect_method(*args, **kwargs)
41 # pylint: disable=protected-access
42 self.get_connection_attributes(connection._conn)
43 return get_traced_connection_proxy(connection, self)
44
45 async def wrapped_pool(self, create_pool_method, args, kwargs):
46 pool = await create_pool_method(*args, **kwargs)
47 async with pool.acquire() as connection:
48 # pylint: disable=protected-access
49 self.get_connection_attributes(connection._conn)
50 return get_traced_pool_proxy(pool, self)
51
52
53 def get_traced_connection_proxy(
54 connection, db_api_integration, *args, **kwargs
55 ):
56 # pylint: disable=abstract-method
57 class TracedConnectionProxy(AsyncProxyObject):
58 # pylint: disable=unused-argument
59 def __init__(self, connection, *args, **kwargs):
60 super().__init__(connection)
61
62 def cursor(self, *args, **kwargs):
63 coro = self._cursor(*args, **kwargs)
64 return _ContextManager(coro)
65
66 async def _cursor(self, *args, **kwargs):
67 # pylint: disable=protected-access
68 cursor = await self.__wrapped__._cursor(*args, **kwargs)
69 return get_traced_cursor_proxy(cursor, db_api_integration)
70
71 return TracedConnectionProxy(connection, *args, **kwargs)
72
73
74 def get_traced_pool_proxy(pool, db_api_integration, *args, **kwargs):
75 # pylint: disable=abstract-method
76 class TracedPoolProxy(AsyncProxyObject):
77 # pylint: disable=unused-argument
78 def __init__(self, pool, *args, **kwargs):
79 super().__init__(pool)
80
81 def acquire(self):
82 """Acquire free connection from the pool."""
83 coro = self._acquire()
84 return _PoolAcquireContextManager(coro, self)
85
86 async def _acquire(self):
87 # pylint: disable=protected-access
88 connection = await self.__wrapped__._acquire()
89 return get_traced_connection_proxy(
90 connection, db_api_integration, *args, **kwargs
91 )
92
93 return TracedPoolProxy(pool, *args, **kwargs)
94
95
96 class AsyncCursorTracer(CursorTracer):
97 async def traced_execution(
98 self,
99 cursor,
100 query_method: typing.Callable[..., typing.Any],
101 *args: typing.Tuple[typing.Any, typing.Any],
102 **kwargs: typing.Dict[typing.Any, typing.Any]
103 ):
104 name = ""
105 if args:
106 name = self.get_operation_name(cursor, args)
107
108 if not name:
109 name = (
110 self._db_api_integration.database
111 if self._db_api_integration.database
112 else self._db_api_integration.name
113 )
114
115 with self._db_api_integration.get_tracer().start_as_current_span(
116 name, kind=SpanKind.CLIENT
117 ) as span:
118 self._populate_span(span, cursor, *args)
119 return await query_method(*args, **kwargs)
120
121
122 def get_traced_cursor_proxy(cursor, db_api_integration, *args, **kwargs):
123 _traced_cursor = AsyncCursorTracer(db_api_integration)
124
125 # pylint: disable=abstract-method
126 class AsyncCursorTracerProxy(AsyncProxyObject):
127
128 # pylint: disable=unused-argument
129 def __init__(self, cursor, *args, **kwargs):
130 super().__init__(cursor)
131
132 async def execute(self, *args, **kwargs):
133 result = await _traced_cursor.traced_execution(
134 self, self.__wrapped__.execute, *args, **kwargs
135 )
136 return result
137
138 async def executemany(self, *args, **kwargs):
139 result = await _traced_cursor.traced_execution(
140 self, self.__wrapped__.executemany, *args, **kwargs
141 )
142 return result
143
144 async def callproc(self, *args, **kwargs):
145 result = await _traced_cursor.traced_execution(
146 self, self.__wrapped__.callproc, *args, **kwargs
147 )
148 return result
149
150 return AsyncCursorTracerProxy(cursor, *args, **kwargs)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py
--- a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py
+++ b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py
@@ -86,9 +86,11 @@
async def _acquire(self):
# pylint: disable=protected-access
connection = await self.__wrapped__._acquire()
- return get_traced_connection_proxy(
- connection, db_api_integration, *args, **kwargs
- )
+ if not isinstance(connection, AsyncProxyObject):
+ connection = get_traced_connection_proxy(
+ connection, db_api_integration, *args, **kwargs
+ )
+ return connection
return TracedPoolProxy(pool, *args, **kwargs)
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py\n--- a/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py\n+++ b/instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py\n@@ -86,9 +86,11 @@\n async def _acquire(self):\n # pylint: disable=protected-access\n connection = await self.__wrapped__._acquire()\n- return get_traced_connection_proxy(\n- connection, db_api_integration, *args, **kwargs\n- )\n+ if not isinstance(connection, AsyncProxyObject):\n+ connection = get_traced_connection_proxy(\n+ connection, db_api_integration, *args, **kwargs\n+ )\n+ return connection\n \n return TracedPoolProxy(pool, *args, **kwargs)\n", "issue": "aiopg instrumentation creating nested traces\n**Describe your environment**\r\nPython 3.6.12\r\nMacos 11.1\r\nopentelemetry-python-contrib: Tried released version, as well as `main`.\r\naiohttp==3.7.3\r\naiopg==1.1.0\r\n\r\n Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.\r\n\r\n**Steps to reproduce**\r\nI am trying the `aiopg` instrumentation. I am using it along aiohttp. This may be an usage issue, but I have created a minimal example in [here](https://github.com/cdvv7788/aiopg-instrumentor-bug) (Just install requirements, run, and visit `localhost:4000`...you will need a postgres database named test and jaeger running somewhere).\r\nThe trace will keep growing with every request, nesting new traces every time:\r\n\r\n\r\n\r\n**What is the expected behavior?**\r\nEvery connection has a single trace (no nesting)\r\n\r\n**What is the actual behavior?**\r\nEvery time a request is made, a new trace is nested and sends the whole stack to jaeger\r\n\r\n**Notes**\r\nThis may be a bad usage issue. If that is the case, please let me know what may be causing it and how to use this properly.\r\n\n", "before_files": [{"content": "import typing\n\nimport wrapt\nfrom aiopg.utils import _ContextManager, _PoolAcquireContextManager\n\nfrom opentelemetry.instrumentation.dbapi import (\n CursorTracer,\n DatabaseApiIntegration,\n)\nfrom opentelemetry.trace import SpanKind\n\n\n# pylint: disable=abstract-method\nclass AsyncProxyObject(wrapt.ObjectProxy):\n def __aiter__(self):\n return self.__wrapped__.__aiter__()\n\n async def __anext__(self):\n result = await self.__wrapped__.__anext__()\n return result\n\n async def __aenter__(self):\n return await self.__wrapped__.__aenter__()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)\n\n def __await__(self):\n return self.__wrapped__.__await__()\n\n\nclass AiopgIntegration(DatabaseApiIntegration):\n async def wrapped_connection(\n self,\n connect_method: typing.Callable[..., typing.Any],\n args: typing.Tuple[typing.Any, typing.Any],\n kwargs: typing.Dict[typing.Any, typing.Any],\n ):\n \"\"\"Add object proxy to connection object.\"\"\"\n connection = await connect_method(*args, **kwargs)\n # pylint: disable=protected-access\n self.get_connection_attributes(connection._conn)\n return get_traced_connection_proxy(connection, self)\n\n async def wrapped_pool(self, create_pool_method, args, kwargs):\n pool = await create_pool_method(*args, **kwargs)\n async with pool.acquire() as connection:\n # pylint: disable=protected-access\n self.get_connection_attributes(connection._conn)\n return get_traced_pool_proxy(pool, self)\n\n\ndef get_traced_connection_proxy(\n connection, db_api_integration, *args, **kwargs\n):\n # pylint: disable=abstract-method\n class TracedConnectionProxy(AsyncProxyObject):\n # pylint: disable=unused-argument\n def __init__(self, connection, *args, **kwargs):\n super().__init__(connection)\n\n def cursor(self, *args, **kwargs):\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n async def _cursor(self, *args, **kwargs):\n # pylint: disable=protected-access\n cursor = await self.__wrapped__._cursor(*args, **kwargs)\n return get_traced_cursor_proxy(cursor, db_api_integration)\n\n return TracedConnectionProxy(connection, *args, **kwargs)\n\n\ndef get_traced_pool_proxy(pool, db_api_integration, *args, **kwargs):\n # pylint: disable=abstract-method\n class TracedPoolProxy(AsyncProxyObject):\n # pylint: disable=unused-argument\n def __init__(self, pool, *args, **kwargs):\n super().__init__(pool)\n\n def acquire(self):\n \"\"\"Acquire free connection from the pool.\"\"\"\n coro = self._acquire()\n return _PoolAcquireContextManager(coro, self)\n\n async def _acquire(self):\n # pylint: disable=protected-access\n connection = await self.__wrapped__._acquire()\n return get_traced_connection_proxy(\n connection, db_api_integration, *args, **kwargs\n )\n\n return TracedPoolProxy(pool, *args, **kwargs)\n\n\nclass AsyncCursorTracer(CursorTracer):\n async def traced_execution(\n self,\n cursor,\n query_method: typing.Callable[..., typing.Any],\n *args: typing.Tuple[typing.Any, typing.Any],\n **kwargs: typing.Dict[typing.Any, typing.Any]\n ):\n name = \"\"\n if args:\n name = self.get_operation_name(cursor, args)\n\n if not name:\n name = (\n self._db_api_integration.database\n if self._db_api_integration.database\n else self._db_api_integration.name\n )\n\n with self._db_api_integration.get_tracer().start_as_current_span(\n name, kind=SpanKind.CLIENT\n ) as span:\n self._populate_span(span, cursor, *args)\n return await query_method(*args, **kwargs)\n\n\ndef get_traced_cursor_proxy(cursor, db_api_integration, *args, **kwargs):\n _traced_cursor = AsyncCursorTracer(db_api_integration)\n\n # pylint: disable=abstract-method\n class AsyncCursorTracerProxy(AsyncProxyObject):\n\n # pylint: disable=unused-argument\n def __init__(self, cursor, *args, **kwargs):\n super().__init__(cursor)\n\n async def execute(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.execute, *args, **kwargs\n )\n return result\n\n async def executemany(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.executemany, *args, **kwargs\n )\n return result\n\n async def callproc(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.callproc, *args, **kwargs\n )\n return result\n\n return AsyncCursorTracerProxy(cursor, *args, **kwargs)\n", "path": "instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py"}], "after_files": [{"content": "import typing\n\nimport wrapt\nfrom aiopg.utils import _ContextManager, _PoolAcquireContextManager\n\nfrom opentelemetry.instrumentation.dbapi import (\n CursorTracer,\n DatabaseApiIntegration,\n)\nfrom opentelemetry.trace import SpanKind\n\n\n# pylint: disable=abstract-method\nclass AsyncProxyObject(wrapt.ObjectProxy):\n def __aiter__(self):\n return self.__wrapped__.__aiter__()\n\n async def __anext__(self):\n result = await self.__wrapped__.__anext__()\n return result\n\n async def __aenter__(self):\n return await self.__wrapped__.__aenter__()\n\n async def __aexit__(self, exc_type, exc_val, exc_tb):\n return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)\n\n def __await__(self):\n return self.__wrapped__.__await__()\n\n\nclass AiopgIntegration(DatabaseApiIntegration):\n async def wrapped_connection(\n self,\n connect_method: typing.Callable[..., typing.Any],\n args: typing.Tuple[typing.Any, typing.Any],\n kwargs: typing.Dict[typing.Any, typing.Any],\n ):\n \"\"\"Add object proxy to connection object.\"\"\"\n connection = await connect_method(*args, **kwargs)\n # pylint: disable=protected-access\n self.get_connection_attributes(connection._conn)\n return get_traced_connection_proxy(connection, self)\n\n async def wrapped_pool(self, create_pool_method, args, kwargs):\n pool = await create_pool_method(*args, **kwargs)\n async with pool.acquire() as connection:\n # pylint: disable=protected-access\n self.get_connection_attributes(connection._conn)\n return get_traced_pool_proxy(pool, self)\n\n\ndef get_traced_connection_proxy(\n connection, db_api_integration, *args, **kwargs\n):\n # pylint: disable=abstract-method\n class TracedConnectionProxy(AsyncProxyObject):\n # pylint: disable=unused-argument\n def __init__(self, connection, *args, **kwargs):\n super().__init__(connection)\n\n def cursor(self, *args, **kwargs):\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n async def _cursor(self, *args, **kwargs):\n # pylint: disable=protected-access\n cursor = await self.__wrapped__._cursor(*args, **kwargs)\n return get_traced_cursor_proxy(cursor, db_api_integration)\n\n return TracedConnectionProxy(connection, *args, **kwargs)\n\n\ndef get_traced_pool_proxy(pool, db_api_integration, *args, **kwargs):\n # pylint: disable=abstract-method\n class TracedPoolProxy(AsyncProxyObject):\n # pylint: disable=unused-argument\n def __init__(self, pool, *args, **kwargs):\n super().__init__(pool)\n\n def acquire(self):\n \"\"\"Acquire free connection from the pool.\"\"\"\n coro = self._acquire()\n return _PoolAcquireContextManager(coro, self)\n\n async def _acquire(self):\n # pylint: disable=protected-access\n connection = await self.__wrapped__._acquire()\n if not isinstance(connection, AsyncProxyObject):\n connection = get_traced_connection_proxy(\n connection, db_api_integration, *args, **kwargs\n )\n return connection\n\n return TracedPoolProxy(pool, *args, **kwargs)\n\n\nclass AsyncCursorTracer(CursorTracer):\n async def traced_execution(\n self,\n cursor,\n query_method: typing.Callable[..., typing.Any],\n *args: typing.Tuple[typing.Any, typing.Any],\n **kwargs: typing.Dict[typing.Any, typing.Any]\n ):\n name = \"\"\n if args:\n name = self.get_operation_name(cursor, args)\n\n if not name:\n name = (\n self._db_api_integration.database\n if self._db_api_integration.database\n else self._db_api_integration.name\n )\n\n with self._db_api_integration.get_tracer().start_as_current_span(\n name, kind=SpanKind.CLIENT\n ) as span:\n self._populate_span(span, cursor, *args)\n return await query_method(*args, **kwargs)\n\n\ndef get_traced_cursor_proxy(cursor, db_api_integration, *args, **kwargs):\n _traced_cursor = AsyncCursorTracer(db_api_integration)\n\n # pylint: disable=abstract-method\n class AsyncCursorTracerProxy(AsyncProxyObject):\n\n # pylint: disable=unused-argument\n def __init__(self, cursor, *args, **kwargs):\n super().__init__(cursor)\n\n async def execute(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.execute, *args, **kwargs\n )\n return result\n\n async def executemany(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.executemany, *args, **kwargs\n )\n return result\n\n async def callproc(self, *args, **kwargs):\n result = await _traced_cursor.traced_execution(\n self, self.__wrapped__.callproc, *args, **kwargs\n )\n return result\n\n return AsyncCursorTracerProxy(cursor, *args, **kwargs)\n", "path": "instrumentation/opentelemetry-instrumentation-aiopg/src/opentelemetry/instrumentation/aiopg/aiopg_integration.py"}]}
| 2,165 | 257 |
gh_patches_debug_39156
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-576
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_stat_map (and probably plot_*) fails when display_mode axis has singleton dimension
Fuzzing the plotting code, I find the following testcase fails
```
for axis, direction in enumerate("xyz"):
shape = [5, 6, 7]
shape[axis] = 1
img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))
plot_stat_map(img, None, display_mode=direction)
```
`ValueError: Too many cuts requested for the data: n_cuts=7, data size=1`
The culprit is the find_cut_slices function.
A fix would be to return all the slices along the axis if there are at least as many requested n_slices as there are slices.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/plotting/find_cuts.py`
Content:
```
1 """
2 Tools to find activations and cut on maps
3 """
4
5 # Author: Gael Varoquaux
6 # License: BSD
7
8 import numpy as np
9 from scipy import ndimage
10
11 # Local imports
12 from .._utils.ndimage import largest_connected_component
13 from .._utils import new_img_like
14 from .._utils.extmath import fast_abs_percentile
15 from .._utils.numpy_conversions import as_ndarray
16 from ..image.resampling import get_mask_bounds, coord_transform
17 from ..image.image import _smooth_array
18
19 ################################################################################
20 # Functions for automatic choice of cuts coordinates
21 ################################################################################
22
23
24 def find_xyz_cut_coords(img, mask=None, activation_threshold=None):
25 """ Find the center of the largest activation connected component.
26
27 Parameters
28 -----------
29 img : 3D Nifti1Image
30 The brain map.
31 mask : 3D ndarray, boolean, optional
32 An optional brain mask.
33 activation_threshold : float, optional
34 The lower threshold to the positive activation. If None, the
35 activation threshold is computed using the 80% percentile of
36 the absolute value of the map.
37
38 Returns
39 -------
40 x : float
41 the x world coordinate.
42 y : float
43 the y world coordinate.
44 z : float
45 the z world coordinate.
46 """
47 data = img.get_data()
48 # To speed up computations, we work with partial views of the array,
49 # and keep track of the offset
50 offset = np.zeros(3)
51
52 # Deal with masked arrays:
53 if hasattr(data, 'mask'):
54 not_mask = np.logical_not(data.mask)
55 if mask is None:
56 mask = not_mask
57 else:
58 mask *= not_mask
59 data = np.asarray(data)
60
61 # Get rid of potential memmapping
62 data = as_ndarray(data)
63 my_map = data.copy()
64 if mask is not None:
65 slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
66 my_map = my_map[slice_x, slice_y, slice_z]
67 mask = mask[slice_x, slice_y, slice_z]
68 my_map *= mask
69 offset += [slice_x.start, slice_y.start, slice_z.start]
70
71 # Testing min and max is faster than np.all(my_map == 0)
72 if (my_map.max() == 0) and (my_map.min() == 0):
73 return .5 * np.array(data.shape)
74 if activation_threshold is None:
75 activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(),
76 80)
77 mask = np.abs(my_map) > activation_threshold - 1.e-15
78 # mask may be zero everywhere in rare cases
79 if mask.max() == 0:
80 return .5 * np.array(data.shape)
81 mask = largest_connected_component(mask)
82 slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
83 my_map = my_map[slice_x, slice_y, slice_z]
84 mask = mask[slice_x, slice_y, slice_z]
85 my_map *= mask
86 offset += [slice_x.start, slice_y.start, slice_z.start]
87
88 # For the second threshold, we use a mean, as it is much faster,
89 # althought it is less robust
90 second_threshold = np.abs(np.mean(my_map[mask]))
91 second_mask = (np.abs(my_map) > second_threshold)
92 if second_mask.sum() > 50:
93 my_map *= largest_connected_component(second_mask)
94 cut_coords = ndimage.center_of_mass(np.abs(my_map))
95 x_map, y_map, z_map = cut_coords + offset
96
97 # Return as a list of scalars
98 return np.asarray(coord_transform(x_map, y_map, z_map,
99 img.get_affine())).tolist()
100
101
102 def _get_auto_mask_bounds(img):
103 """ Compute the bounds of the data with an automaticaly computed mask
104 """
105 data = img.get_data().copy()
106 affine = img.get_affine()
107 if hasattr(data, 'mask'):
108 # Masked array
109 mask = np.logical_not(data.mask)
110 data = np.asarray(data)
111 else:
112 # The mask will be anything that is fairly different
113 # from the values in the corners
114 edge_value = float(data[0, 0, 0] + data[0, -1, 0]
115 + data[-1, 0, 0] + data[0, 0, -1]
116 + data[-1, -1, 0] + data[-1, 0, -1]
117 + data[0, -1, -1] + data[-1, -1, -1]
118 )
119 edge_value /= 6
120 mask = np.abs(data - edge_value) > .005*data.ptp()
121 xmin, xmax, ymin, ymax, zmin, zmax = \
122 get_mask_bounds(new_img_like(img, mask, affine))
123 return (xmin, xmax), (ymin, ymax), (zmin, zmax)
124
125
126 def find_cut_slices(img, direction='z', n_cuts=12, spacing='auto'):
127 """ Find 'good' cross-section slicing positions along a given axis.
128
129 Parameters
130 ----------
131 img: 3D Nifti1Image
132 the brain map
133 direction: string, optional (default "z")
134 sectional direction; possible values are "x", "y", or "z"
135 n_cuts: int, optional (default 12)
136 number of cuts in the plot
137 spacing: 'auto' or int, optional (default 'auto')
138 minimum spacing between cuts (in voxels, not milimeters)
139 if 'auto', the spacing is .5 / n_cuts * img_length
140
141 Returns
142 -------
143 cut_coords: 1D array of length n_cuts
144 the computed cut_coords
145
146 Notes
147 -----
148 This code works by iteratively locating peak activations that are
149 separated by a distance of at least 'spacing'. If n_cuts is very
150 large and all the activated regions are covered, cuts with a spacing
151 less than 'spacing' will be returned.
152 """
153
154 assert direction in 'xyz'
155
156 axis = 'xyz'.index(direction)
157
158 affine = img.get_affine()
159 orig_data = np.abs(img.get_data())
160 this_shape = orig_data.shape[axis]
161 if n_cuts > this_shape:
162 raise ValueError('Too many cuts requested for the data: '
163 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))
164
165 data = orig_data.copy()
166 if data.dtype.kind == 'i':
167 data = data.astype(np.float)
168
169 data = _smooth_array(data, affine, fwhm='fast')
170
171 if spacing == 'auto':
172 spacing = max(int(.5 / n_cuts * data.shape[axis]), 1)
173
174 slices = [slice(None, None), slice(None, None), slice(None, None)]
175
176 cut_coords = list()
177
178 for _ in range(n_cuts):
179 # Find a peak
180 max_along_axis = np.unravel_index(np.abs(data).argmax(),
181 data.shape)[axis]
182
183 # cancel out the surroundings of the peak
184 start = max(0, max_along_axis - spacing)
185 stop = max_along_axis + spacing
186 slices[axis] = slice(start, stop)
187 # We don't actually fully zero the neighborhood, to avoid ending
188 # up with fully zeros if n_cuts is too big: we can do multiple
189 # passes on the data
190 data[slices] *= 1.e-3
191
192 cut_coords.append(max_along_axis)
193
194 # We sometimes get duplicated cuts, so we add cuts at the beginning
195 # and the end
196 cut_coords = np.unique(cut_coords).tolist()
197 while len(cut_coords) < n_cuts:
198 # Candidates for new cuts:
199 slice_below = min(cut_coords) - 2
200 slice_above = max(cut_coords) + 2
201 candidates = [slice_above]
202 # One slice where there is the biggest gap in the existing
203 # cut_coords
204 if len(cut_coords) > 1:
205 middle_idx = np.argmax(np.diff(cut_coords))
206 slice_middle = int(.5 * (cut_coords[middle_idx]
207 + cut_coords[middle_idx + 1]))
208 if not slice_middle in cut_coords:
209 candidates.append(slice_middle)
210 if slice_below >= 0:
211 # We need positive slice to avoid having negative
212 # indices, which would work, but not the way we think of them
213 candidates.append(slice_below)
214 best_weight = -10
215 for candidate in candidates:
216 if candidate >= this_shape:
217 this_weight = 0
218 else:
219 this_weight = np.sum(np.rollaxis(orig_data, axis)[candidate])
220 if this_weight > best_weight:
221 best_candidate = candidate
222 best_weight = this_weight
223
224 cut_coords.append(best_candidate)
225 cut_coords = np.unique(cut_coords).tolist()
226
227 cut_coords = np.array(cut_coords)
228 cut_coords.sort()
229
230 # Transform this back in image space
231 kwargs = dict()
232 for name in 'xyz':
233 kwargs[name] = np.zeros(len(cut_coords))
234 kwargs[direction] = cut_coords
235 kwargs['affine'] = affine
236
237 cut_coords = coord_transform(**kwargs)[axis]
238 # We need to atleast_1d to make sure that when n_cuts is 1 we do
239 # get an iterable
240 return np.atleast_1d(cut_coords)
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/plotting/find_cuts.py b/nilearn/plotting/find_cuts.py
--- a/nilearn/plotting/find_cuts.py
+++ b/nilearn/plotting/find_cuts.py
@@ -5,6 +5,7 @@
# Author: Gael Varoquaux
# License: BSD
+import warnings
import numpy as np
from scipy import ndimage
@@ -123,6 +124,39 @@
return (xmin, xmax), (ymin, ymax), (zmin, zmax)
+def _transform_cut_coords(cut_coords, direction, affine):
+ """Transforms cut_coords back in image space
+
+ Parameters
+ ----------
+ cut_coords: 1D array of length n_cuts
+ The coordinates to be transformed.
+
+ direction: string, optional (default "z")
+ sectional direction; possible values are "x", "y", or "z"
+
+ affine: 2D array of shape (4, 4)
+ The affine for the image.
+
+ Returns
+ -------
+ cut_coords: 1D array of length n_cuts
+ The original cut_coords transformed image space.
+ """
+ # make kwargs
+ axis = 'xyz'.index(direction)
+ kwargs = {}
+ for name in 'xyz':
+ kwargs[name] = np.zeros(len(cut_coords))
+ kwargs[direction] = cut_coords
+ kwargs['affine'] = affine
+
+ # We need atleast_1d to make sure that when n_cuts is 1 we do
+ # get an iterable
+ cut_coords = coord_transform(**kwargs)[axis]
+ return np.atleast_1d(cut_coords)
+
+
def find_cut_slices(img, direction='z', n_cuts=12, spacing='auto'):
""" Find 'good' cross-section slicing positions along a given axis.
@@ -151,16 +185,23 @@
less than 'spacing' will be returned.
"""
- assert direction in 'xyz'
-
+ # misc
+ if not direction in 'xyz':
+ raise ValueError(
+ "'direction' must be one of 'x', 'y', or 'z'. Got '%s'" % (
+ direction))
axis = 'xyz'.index(direction)
-
affine = img.get_affine()
orig_data = np.abs(img.get_data())
this_shape = orig_data.shape[axis]
+
+ # BF issue #575: Return all the slices along and axis if this axis
+ # is the display mode and there are at least as many requested
+ # n_slices as there are slices.
if n_cuts > this_shape:
- raise ValueError('Too many cuts requested for the data: '
- 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))
+ warnings.warn('Too many cuts requested for the data: '
+ 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))
+ return _transform_cut_coords(np.arange(this_shape), direction, affine)
data = orig_data.copy()
if data.dtype.kind == 'i':
@@ -227,14 +268,4 @@
cut_coords = np.array(cut_coords)
cut_coords.sort()
- # Transform this back in image space
- kwargs = dict()
- for name in 'xyz':
- kwargs[name] = np.zeros(len(cut_coords))
- kwargs[direction] = cut_coords
- kwargs['affine'] = affine
-
- cut_coords = coord_transform(**kwargs)[axis]
- # We need to atleast_1d to make sure that when n_cuts is 1 we do
- # get an iterable
- return np.atleast_1d(cut_coords)
+ return _transform_cut_coords(cut_coords, direction, affine)
|
{"golden_diff": "diff --git a/nilearn/plotting/find_cuts.py b/nilearn/plotting/find_cuts.py\n--- a/nilearn/plotting/find_cuts.py\n+++ b/nilearn/plotting/find_cuts.py\n@@ -5,6 +5,7 @@\n # Author: Gael Varoquaux\n # License: BSD\n \n+import warnings\n import numpy as np\n from scipy import ndimage\n \n@@ -123,6 +124,39 @@\n return (xmin, xmax), (ymin, ymax), (zmin, zmax)\n \n \n+def _transform_cut_coords(cut_coords, direction, affine):\n+ \"\"\"Transforms cut_coords back in image space\n+\n+ Parameters\n+ ----------\n+ cut_coords: 1D array of length n_cuts\n+ The coordinates to be transformed.\n+\n+ direction: string, optional (default \"z\")\n+ sectional direction; possible values are \"x\", \"y\", or \"z\"\n+\n+ affine: 2D array of shape (4, 4)\n+ The affine for the image.\n+\n+ Returns\n+ -------\n+ cut_coords: 1D array of length n_cuts\n+ The original cut_coords transformed image space.\n+ \"\"\"\n+ # make kwargs\n+ axis = 'xyz'.index(direction)\n+ kwargs = {}\n+ for name in 'xyz':\n+ kwargs[name] = np.zeros(len(cut_coords))\n+ kwargs[direction] = cut_coords\n+ kwargs['affine'] = affine\n+\n+ # We need atleast_1d to make sure that when n_cuts is 1 we do\n+ # get an iterable\n+ cut_coords = coord_transform(**kwargs)[axis]\n+ return np.atleast_1d(cut_coords)\n+\n+\n def find_cut_slices(img, direction='z', n_cuts=12, spacing='auto'):\n \"\"\" Find 'good' cross-section slicing positions along a given axis.\n \n@@ -151,16 +185,23 @@\n less than 'spacing' will be returned.\n \"\"\"\n \n- assert direction in 'xyz'\n-\n+ # misc\n+ if not direction in 'xyz':\n+ raise ValueError(\n+ \"'direction' must be one of 'x', 'y', or 'z'. Got '%s'\" % (\n+ direction))\n axis = 'xyz'.index(direction)\n-\n affine = img.get_affine()\n orig_data = np.abs(img.get_data())\n this_shape = orig_data.shape[axis]\n+\n+ # BF issue #575: Return all the slices along and axis if this axis\n+ # is the display mode and there are at least as many requested\n+ # n_slices as there are slices.\n if n_cuts > this_shape:\n- raise ValueError('Too many cuts requested for the data: '\n- 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))\n+ warnings.warn('Too many cuts requested for the data: '\n+ 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))\n+ return _transform_cut_coords(np.arange(this_shape), direction, affine)\n \n data = orig_data.copy()\n if data.dtype.kind == 'i':\n@@ -227,14 +268,4 @@\n cut_coords = np.array(cut_coords)\n cut_coords.sort()\n \n- # Transform this back in image space\n- kwargs = dict()\n- for name in 'xyz':\n- kwargs[name] = np.zeros(len(cut_coords))\n- kwargs[direction] = cut_coords\n- kwargs['affine'] = affine\n-\n- cut_coords = coord_transform(**kwargs)[axis]\n- # We need to atleast_1d to make sure that when n_cuts is 1 we do\n- # get an iterable\n- return np.atleast_1d(cut_coords)\n+ return _transform_cut_coords(cut_coords, direction, affine)\n", "issue": "plot_stat_map (and probably plot_*) fails when display_mode axis has singleton dimension\nFuzzing the plotting code, I find the following testcase fails\n\n```\n for axis, direction in enumerate(\"xyz\"):\n shape = [5, 6, 7]\n shape[axis] = 1\n img = nibabel.Nifti1Image(np.ones(shape), np.eye(4))\n plot_stat_map(img, None, display_mode=direction)\n```\n\n`ValueError: Too many cuts requested for the data: n_cuts=7, data size=1`\n\nThe culprit is the find_cut_slices function.\n\nA fix would be to return all the slices along the axis if there are at least as many requested n_slices as there are slices.\n\n", "before_files": [{"content": "\"\"\"\nTools to find activations and cut on maps\n\"\"\"\n\n# Author: Gael Varoquaux\n# License: BSD\n\nimport numpy as np\nfrom scipy import ndimage\n\n# Local imports\nfrom .._utils.ndimage import largest_connected_component\nfrom .._utils import new_img_like\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.numpy_conversions import as_ndarray\nfrom ..image.resampling import get_mask_bounds, coord_transform\nfrom ..image.image import _smooth_array\n\n################################################################################\n# Functions for automatic choice of cuts coordinates\n################################################################################\n\n\ndef find_xyz_cut_coords(img, mask=None, activation_threshold=None):\n \"\"\" Find the center of the largest activation connected component.\n\n Parameters\n -----------\n img : 3D Nifti1Image\n The brain map.\n mask : 3D ndarray, boolean, optional\n An optional brain mask.\n activation_threshold : float, optional\n The lower threshold to the positive activation. If None, the\n activation threshold is computed using the 80% percentile of\n the absolute value of the map.\n\n Returns\n -------\n x : float\n the x world coordinate.\n y : float\n the y world coordinate.\n z : float\n the z world coordinate.\n \"\"\"\n data = img.get_data()\n # To speed up computations, we work with partial views of the array,\n # and keep track of the offset\n offset = np.zeros(3)\n\n # Deal with masked arrays:\n if hasattr(data, 'mask'):\n not_mask = np.logical_not(data.mask)\n if mask is None:\n mask = not_mask\n else:\n mask *= not_mask\n data = np.asarray(data)\n\n # Get rid of potential memmapping\n data = as_ndarray(data)\n my_map = data.copy()\n if mask is not None:\n slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]\n my_map = my_map[slice_x, slice_y, slice_z]\n mask = mask[slice_x, slice_y, slice_z]\n my_map *= mask\n offset += [slice_x.start, slice_y.start, slice_z.start]\n\n # Testing min and max is faster than np.all(my_map == 0)\n if (my_map.max() == 0) and (my_map.min() == 0):\n return .5 * np.array(data.shape)\n if activation_threshold is None:\n activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(),\n 80)\n mask = np.abs(my_map) > activation_threshold - 1.e-15\n # mask may be zero everywhere in rare cases\n if mask.max() == 0:\n return .5 * np.array(data.shape)\n mask = largest_connected_component(mask)\n slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]\n my_map = my_map[slice_x, slice_y, slice_z]\n mask = mask[slice_x, slice_y, slice_z]\n my_map *= mask\n offset += [slice_x.start, slice_y.start, slice_z.start]\n\n # For the second threshold, we use a mean, as it is much faster,\n # althought it is less robust\n second_threshold = np.abs(np.mean(my_map[mask]))\n second_mask = (np.abs(my_map) > second_threshold)\n if second_mask.sum() > 50:\n my_map *= largest_connected_component(second_mask)\n cut_coords = ndimage.center_of_mass(np.abs(my_map))\n x_map, y_map, z_map = cut_coords + offset\n\n # Return as a list of scalars\n return np.asarray(coord_transform(x_map, y_map, z_map,\n img.get_affine())).tolist()\n\n\ndef _get_auto_mask_bounds(img):\n \"\"\" Compute the bounds of the data with an automaticaly computed mask\n \"\"\"\n data = img.get_data().copy()\n affine = img.get_affine()\n if hasattr(data, 'mask'):\n # Masked array\n mask = np.logical_not(data.mask)\n data = np.asarray(data)\n else:\n # The mask will be anything that is fairly different\n # from the values in the corners\n edge_value = float(data[0, 0, 0] + data[0, -1, 0]\n + data[-1, 0, 0] + data[0, 0, -1]\n + data[-1, -1, 0] + data[-1, 0, -1]\n + data[0, -1, -1] + data[-1, -1, -1]\n )\n edge_value /= 6\n mask = np.abs(data - edge_value) > .005*data.ptp()\n xmin, xmax, ymin, ymax, zmin, zmax = \\\n get_mask_bounds(new_img_like(img, mask, affine))\n return (xmin, xmax), (ymin, ymax), (zmin, zmax)\n\n\ndef find_cut_slices(img, direction='z', n_cuts=12, spacing='auto'):\n \"\"\" Find 'good' cross-section slicing positions along a given axis.\n\n Parameters\n ----------\n img: 3D Nifti1Image\n the brain map\n direction: string, optional (default \"z\")\n sectional direction; possible values are \"x\", \"y\", or \"z\"\n n_cuts: int, optional (default 12)\n number of cuts in the plot\n spacing: 'auto' or int, optional (default 'auto')\n minimum spacing between cuts (in voxels, not milimeters)\n if 'auto', the spacing is .5 / n_cuts * img_length\n\n Returns\n -------\n cut_coords: 1D array of length n_cuts\n the computed cut_coords\n\n Notes\n -----\n This code works by iteratively locating peak activations that are\n separated by a distance of at least 'spacing'. If n_cuts is very\n large and all the activated regions are covered, cuts with a spacing\n less than 'spacing' will be returned.\n \"\"\"\n\n assert direction in 'xyz'\n\n axis = 'xyz'.index(direction)\n\n affine = img.get_affine()\n orig_data = np.abs(img.get_data())\n this_shape = orig_data.shape[axis]\n if n_cuts > this_shape:\n raise ValueError('Too many cuts requested for the data: '\n 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))\n\n data = orig_data.copy()\n if data.dtype.kind == 'i':\n data = data.astype(np.float)\n\n data = _smooth_array(data, affine, fwhm='fast')\n\n if spacing == 'auto':\n spacing = max(int(.5 / n_cuts * data.shape[axis]), 1)\n\n slices = [slice(None, None), slice(None, None), slice(None, None)]\n\n cut_coords = list()\n\n for _ in range(n_cuts):\n # Find a peak\n max_along_axis = np.unravel_index(np.abs(data).argmax(),\n data.shape)[axis]\n\n # cancel out the surroundings of the peak\n start = max(0, max_along_axis - spacing)\n stop = max_along_axis + spacing\n slices[axis] = slice(start, stop)\n # We don't actually fully zero the neighborhood, to avoid ending\n # up with fully zeros if n_cuts is too big: we can do multiple\n # passes on the data\n data[slices] *= 1.e-3\n\n cut_coords.append(max_along_axis)\n\n # We sometimes get duplicated cuts, so we add cuts at the beginning\n # and the end\n cut_coords = np.unique(cut_coords).tolist()\n while len(cut_coords) < n_cuts:\n # Candidates for new cuts:\n slice_below = min(cut_coords) - 2\n slice_above = max(cut_coords) + 2\n candidates = [slice_above]\n # One slice where there is the biggest gap in the existing\n # cut_coords\n if len(cut_coords) > 1:\n middle_idx = np.argmax(np.diff(cut_coords))\n slice_middle = int(.5 * (cut_coords[middle_idx]\n + cut_coords[middle_idx + 1]))\n if not slice_middle in cut_coords:\n candidates.append(slice_middle)\n if slice_below >= 0:\n # We need positive slice to avoid having negative\n # indices, which would work, but not the way we think of them\n candidates.append(slice_below)\n best_weight = -10\n for candidate in candidates:\n if candidate >= this_shape:\n this_weight = 0\n else:\n this_weight = np.sum(np.rollaxis(orig_data, axis)[candidate])\n if this_weight > best_weight:\n best_candidate = candidate\n best_weight = this_weight\n\n cut_coords.append(best_candidate)\n cut_coords = np.unique(cut_coords).tolist()\n\n cut_coords = np.array(cut_coords)\n cut_coords.sort()\n\n # Transform this back in image space\n kwargs = dict()\n for name in 'xyz':\n kwargs[name] = np.zeros(len(cut_coords))\n kwargs[direction] = cut_coords\n kwargs['affine'] = affine\n\n cut_coords = coord_transform(**kwargs)[axis]\n # We need to atleast_1d to make sure that when n_cuts is 1 we do\n # get an iterable\n return np.atleast_1d(cut_coords)\n", "path": "nilearn/plotting/find_cuts.py"}], "after_files": [{"content": "\"\"\"\nTools to find activations and cut on maps\n\"\"\"\n\n# Author: Gael Varoquaux\n# License: BSD\n\nimport warnings\nimport numpy as np\nfrom scipy import ndimage\n\n# Local imports\nfrom .._utils.ndimage import largest_connected_component\nfrom .._utils import new_img_like\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.numpy_conversions import as_ndarray\nfrom ..image.resampling import get_mask_bounds, coord_transform\nfrom ..image.image import _smooth_array\n\n################################################################################\n# Functions for automatic choice of cuts coordinates\n################################################################################\n\n\ndef find_xyz_cut_coords(img, mask=None, activation_threshold=None):\n \"\"\" Find the center of the largest activation connected component.\n\n Parameters\n -----------\n img : 3D Nifti1Image\n The brain map.\n mask : 3D ndarray, boolean, optional\n An optional brain mask.\n activation_threshold : float, optional\n The lower threshold to the positive activation. If None, the\n activation threshold is computed using the 80% percentile of\n the absolute value of the map.\n\n Returns\n -------\n x : float\n the x world coordinate.\n y : float\n the y world coordinate.\n z : float\n the z world coordinate.\n \"\"\"\n data = img.get_data()\n # To speed up computations, we work with partial views of the array,\n # and keep track of the offset\n offset = np.zeros(3)\n\n # Deal with masked arrays:\n if hasattr(data, 'mask'):\n not_mask = np.logical_not(data.mask)\n if mask is None:\n mask = not_mask\n else:\n mask *= not_mask\n data = np.asarray(data)\n\n # Get rid of potential memmapping\n data = as_ndarray(data)\n my_map = data.copy()\n if mask is not None:\n slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]\n my_map = my_map[slice_x, slice_y, slice_z]\n mask = mask[slice_x, slice_y, slice_z]\n my_map *= mask\n offset += [slice_x.start, slice_y.start, slice_z.start]\n\n # Testing min and max is faster than np.all(my_map == 0)\n if (my_map.max() == 0) and (my_map.min() == 0):\n return .5 * np.array(data.shape)\n if activation_threshold is None:\n activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(),\n 80)\n mask = np.abs(my_map) > activation_threshold - 1.e-15\n # mask may be zero everywhere in rare cases\n if mask.max() == 0:\n return .5 * np.array(data.shape)\n mask = largest_connected_component(mask)\n slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]\n my_map = my_map[slice_x, slice_y, slice_z]\n mask = mask[slice_x, slice_y, slice_z]\n my_map *= mask\n offset += [slice_x.start, slice_y.start, slice_z.start]\n\n # For the second threshold, we use a mean, as it is much faster,\n # althought it is less robust\n second_threshold = np.abs(np.mean(my_map[mask]))\n second_mask = (np.abs(my_map) > second_threshold)\n if second_mask.sum() > 50:\n my_map *= largest_connected_component(second_mask)\n cut_coords = ndimage.center_of_mass(np.abs(my_map))\n x_map, y_map, z_map = cut_coords + offset\n\n # Return as a list of scalars\n return np.asarray(coord_transform(x_map, y_map, z_map,\n img.get_affine())).tolist()\n\n\ndef _get_auto_mask_bounds(img):\n \"\"\" Compute the bounds of the data with an automaticaly computed mask\n \"\"\"\n data = img.get_data().copy()\n affine = img.get_affine()\n if hasattr(data, 'mask'):\n # Masked array\n mask = np.logical_not(data.mask)\n data = np.asarray(data)\n else:\n # The mask will be anything that is fairly different\n # from the values in the corners\n edge_value = float(data[0, 0, 0] + data[0, -1, 0]\n + data[-1, 0, 0] + data[0, 0, -1]\n + data[-1, -1, 0] + data[-1, 0, -1]\n + data[0, -1, -1] + data[-1, -1, -1]\n )\n edge_value /= 6\n mask = np.abs(data - edge_value) > .005*data.ptp()\n xmin, xmax, ymin, ymax, zmin, zmax = \\\n get_mask_bounds(new_img_like(img, mask, affine))\n return (xmin, xmax), (ymin, ymax), (zmin, zmax)\n\n\ndef _transform_cut_coords(cut_coords, direction, affine):\n \"\"\"Transforms cut_coords back in image space\n\n Parameters\n ----------\n cut_coords: 1D array of length n_cuts\n The coordinates to be transformed.\n\n direction: string, optional (default \"z\")\n sectional direction; possible values are \"x\", \"y\", or \"z\"\n\n affine: 2D array of shape (4, 4)\n The affine for the image.\n\n Returns\n -------\n cut_coords: 1D array of length n_cuts\n The original cut_coords transformed image space.\n \"\"\"\n # make kwargs\n axis = 'xyz'.index(direction)\n kwargs = {}\n for name in 'xyz':\n kwargs[name] = np.zeros(len(cut_coords))\n kwargs[direction] = cut_coords\n kwargs['affine'] = affine\n\n # We need atleast_1d to make sure that when n_cuts is 1 we do\n # get an iterable\n cut_coords = coord_transform(**kwargs)[axis]\n return np.atleast_1d(cut_coords)\n\n\ndef find_cut_slices(img, direction='z', n_cuts=12, spacing='auto'):\n \"\"\" Find 'good' cross-section slicing positions along a given axis.\n\n Parameters\n ----------\n img: 3D Nifti1Image\n the brain map\n direction: string, optional (default \"z\")\n sectional direction; possible values are \"x\", \"y\", or \"z\"\n n_cuts: int, optional (default 12)\n number of cuts in the plot\n spacing: 'auto' or int, optional (default 'auto')\n minimum spacing between cuts (in voxels, not milimeters)\n if 'auto', the spacing is .5 / n_cuts * img_length\n\n Returns\n -------\n cut_coords: 1D array of length n_cuts\n the computed cut_coords\n\n Notes\n -----\n This code works by iteratively locating peak activations that are\n separated by a distance of at least 'spacing'. If n_cuts is very\n large and all the activated regions are covered, cuts with a spacing\n less than 'spacing' will be returned.\n \"\"\"\n\n # misc\n if not direction in 'xyz':\n raise ValueError(\n \"'direction' must be one of 'x', 'y', or 'z'. Got '%s'\" % (\n direction))\n axis = 'xyz'.index(direction)\n affine = img.get_affine()\n orig_data = np.abs(img.get_data())\n this_shape = orig_data.shape[axis]\n\n # BF issue #575: Return all the slices along and axis if this axis\n # is the display mode and there are at least as many requested\n # n_slices as there are slices.\n if n_cuts > this_shape:\n warnings.warn('Too many cuts requested for the data: '\n 'n_cuts=%i, data size=%i' % (n_cuts, this_shape))\n return _transform_cut_coords(np.arange(this_shape), direction, affine)\n\n data = orig_data.copy()\n if data.dtype.kind == 'i':\n data = data.astype(np.float)\n\n data = _smooth_array(data, affine, fwhm='fast')\n\n if spacing == 'auto':\n spacing = max(int(.5 / n_cuts * data.shape[axis]), 1)\n\n slices = [slice(None, None), slice(None, None), slice(None, None)]\n\n cut_coords = list()\n\n for _ in range(n_cuts):\n # Find a peak\n max_along_axis = np.unravel_index(np.abs(data).argmax(),\n data.shape)[axis]\n\n # cancel out the surroundings of the peak\n start = max(0, max_along_axis - spacing)\n stop = max_along_axis + spacing\n slices[axis] = slice(start, stop)\n # We don't actually fully zero the neighborhood, to avoid ending\n # up with fully zeros if n_cuts is too big: we can do multiple\n # passes on the data\n data[slices] *= 1.e-3\n\n cut_coords.append(max_along_axis)\n\n # We sometimes get duplicated cuts, so we add cuts at the beginning\n # and the end\n cut_coords = np.unique(cut_coords).tolist()\n while len(cut_coords) < n_cuts:\n # Candidates for new cuts:\n slice_below = min(cut_coords) - 2\n slice_above = max(cut_coords) + 2\n candidates = [slice_above]\n # One slice where there is the biggest gap in the existing\n # cut_coords\n if len(cut_coords) > 1:\n middle_idx = np.argmax(np.diff(cut_coords))\n slice_middle = int(.5 * (cut_coords[middle_idx]\n + cut_coords[middle_idx + 1]))\n if not slice_middle in cut_coords:\n candidates.append(slice_middle)\n if slice_below >= 0:\n # We need positive slice to avoid having negative\n # indices, which would work, but not the way we think of them\n candidates.append(slice_below)\n best_weight = -10\n for candidate in candidates:\n if candidate >= this_shape:\n this_weight = 0\n else:\n this_weight = np.sum(np.rollaxis(orig_data, axis)[candidate])\n if this_weight > best_weight:\n best_candidate = candidate\n best_weight = this_weight\n\n cut_coords.append(best_candidate)\n cut_coords = np.unique(cut_coords).tolist()\n\n cut_coords = np.array(cut_coords)\n cut_coords.sort()\n\n return _transform_cut_coords(cut_coords, direction, affine)\n", "path": "nilearn/plotting/find_cuts.py"}]}
| 3,130 | 880 |
gh_patches_debug_418
|
rasdani/github-patches
|
git_diff
|
holoviz__holoviews-5924
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unpin matplotlib 3.8
In https://github.com/holoviz/holoviews/pull/5895 I pinned matplotlib 3.8, this should be removed before the next release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import json
4 import os
5 import sys
6 import shutil
7
8 from setuptools import setup, find_packages
9
10 import pyct.build
11
12 setup_args = {}
13 install_requires = [
14 "param >=1.12.0,<3.0",
15 "numpy >=1.0",
16 "pyviz_comms >=0.7.4",
17 "panel >=1.0",
18 "colorcet",
19 "packaging",
20 "pandas >=0.20.0",
21 ]
22
23 extras_require = {}
24
25 extras_require['lint'] = [
26 'ruff',
27 'pre-commit',
28 ]
29
30 # Test requirements
31 extras_require['tests_core'] = [
32 'pytest',
33 'pytest-cov',
34 'pytest-xdist',
35 'flaky',
36 'matplotlib >=3, <3.8', # 3.8 breaks tests
37 'nbconvert',
38 'bokeh >=3.1',
39 'pillow',
40 'plotly >=4.0',
41 'ipython >=5.4.0',
42 'contourpy',
43 ]
44
45 # Optional tests dependencies, i.e. one should be able
46 # to run and pass the test suite without installing any
47 # of those.
48 extras_require['tests'] = extras_require['tests_core'] + [
49 'dask',
50 'ibis-framework', # Mapped to ibis-sqlite in setup.cfg for conda
51 'xarray >=0.10.4',
52 'networkx',
53 'shapely',
54 'ffmpeg',
55 'cftime',
56 'scipy',
57 'selenium',
58 'spatialpandas',
59 'datashader >=0.11.1',
60 'dash >=1.16',
61 ]
62
63 extras_require['tests_ci'] = [
64 'codecov',
65 "pytest-github-actions-annotate-failures",
66 ]
67
68 extras_require['tests_gpu'] = extras_require['tests'] + [
69 'cudf',
70 ]
71
72 extras_require['tests_nb'] = ['nbval']
73 extras_require['ui'] = ['playwright', 'pytest-playwright']
74
75 # Notebook dependencies
76 extras_require["notebook"] = ["ipython >=5.4.0", "notebook"]
77
78 # IPython Notebook + pandas + matplotlib + bokeh
79 extras_require["recommended"] = extras_require["notebook"] + [
80 "matplotlib >=3",
81 "bokeh >=3.1",
82 ]
83
84 # Requirements to run all examples
85 extras_require["examples"] = extras_require["recommended"] + [
86 "networkx",
87 "pillow",
88 "xarray >=0.10.4",
89 "plotly >=4.0",
90 'dash >=1.16',
91 "streamz >=0.5.0",
92 "ffmpeg",
93 "cftime",
94 "netcdf4",
95 "dask",
96 "scipy",
97 "shapely",
98 "scikit-image",
99 "pyarrow",
100 "pooch",
101 "datashader >=0.11.1",
102 ]
103
104
105 extras_require["examples_tests"] = extras_require["examples"] + extras_require['tests_nb']
106
107 # Extra third-party libraries
108 extras_require["extras"] = extras_require["examples"] + [
109 "pscript ==0.7.1",
110 ]
111
112 # Not used in tox.ini or elsewhere, kept for backwards compatibility.
113 extras_require["unit_tests"] = extras_require["examples"] + extras_require["tests"] + extras_require['lint']
114
115 extras_require['doc'] = extras_require['examples'] + [
116 'nbsite >=0.8.2,<0.9.0',
117 'mpl_sample_data >=3.1.3',
118 'pscript',
119 'graphviz',
120 'bokeh >=3.1',
121 'pooch',
122 'selenium',
123 ]
124
125 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
126
127 extras_require["build"] = [
128 "param >=1.7.0",
129 "setuptools >=30.3.0",
130 "pyct >=0.4.4",
131 ]
132
133 def get_setup_version(reponame):
134 """
135 Helper to get the current version from either git describe or the
136 .version file (if available).
137 """
138 basepath = os.path.split(__file__)[0]
139 version_file_path = os.path.join(basepath, reponame, ".version")
140 try:
141 from param import version
142 except ImportError:
143 version = None
144 if version is not None:
145 return version.Version.setup_version(
146 basepath, reponame, archive_commit="$Format:%h$"
147 )
148 else:
149 print(
150 "WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0."
151 )
152 return json.load(open(version_file_path))["version_string"]
153
154
155 setup_args.update(
156 dict(
157 name="holoviews",
158 version=get_setup_version("holoviews"),
159 python_requires=">=3.9",
160 install_requires=install_requires,
161 extras_require=extras_require,
162 description="Stop plotting your data - annotate your data and let it visualize itself.",
163 long_description=open("README.md").read(),
164 long_description_content_type="text/markdown",
165 author="Jean-Luc Stevens and Philipp Rudiger",
166 author_email="[email protected]",
167 maintainer="HoloViz Developers",
168 maintainer_email="[email protected]",
169 platforms=["Windows", "Mac OS X", "Linux"],
170 license="BSD",
171 url="https://www.holoviews.org",
172 project_urls={
173 "Source": "https://github.com/holoviz/holoviews",
174 },
175 entry_points={"console_scripts": ["holoviews = holoviews.util.command:main"]},
176 packages=find_packages(),
177 include_package_data=True,
178 classifiers=[
179 "License :: OSI Approved :: BSD License",
180 "Development Status :: 5 - Production/Stable",
181 "Programming Language :: Python :: 3.9",
182 "Programming Language :: Python :: 3.10",
183 "Programming Language :: Python :: 3.11",
184 "Programming Language :: Python :: 3.12",
185 "Operating System :: OS Independent",
186 "Intended Audience :: Science/Research",
187 "Intended Audience :: Developers",
188 "Natural Language :: English",
189 "Framework :: Matplotlib",
190 "Topic :: Scientific/Engineering",
191 "Topic :: Software Development :: Libraries",
192 ],
193 )
194 )
195
196
197 if __name__ == "__main__":
198 example_path = os.path.join(
199 os.path.dirname(os.path.abspath(__file__)), "holoviews/examples"
200 )
201
202 if "develop" not in sys.argv and "egg_info" not in sys.argv:
203 pyct.build.examples(example_path, __file__, force=True)
204
205 if "install" in sys.argv:
206 header = "HOLOVIEWS INSTALLATION INFORMATION"
207 bars = "=" * len(header)
208
209 extras = "\n".join("holoviews[%s]" % e for e in setup_args["extras_require"])
210
211 print("%s\n%s\n%s" % (bars, header, bars))
212
213 print("\nHoloViews supports the following installation types:\n")
214 print("%s\n" % extras)
215 print("Users should consider using one of these options.\n")
216 print("By default only a core installation is performed and ")
217 print("only the minimal set of dependencies are fetched.\n\n")
218 print("For more information please visit http://holoviews.org/install.html\n")
219 print(bars + "\n")
220
221 setup(**setup_args)
222
223 if os.path.isdir(example_path):
224 shutil.rmtree(example_path)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
'pytest-cov',
'pytest-xdist',
'flaky',
- 'matplotlib >=3, <3.8', # 3.8 breaks tests
+ 'matplotlib >=3',
'nbconvert',
'bokeh >=3.1',
'pillow',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n 'pytest-cov',\n 'pytest-xdist',\n 'flaky',\n- 'matplotlib >=3, <3.8', # 3.8 breaks tests\n+ 'matplotlib >=3',\n 'nbconvert',\n 'bokeh >=3.1',\n 'pillow',\n", "issue": "Unpin matplotlib 3.8\nIn https://github.com/holoviz/holoviews/pull/5895 I pinned matplotlib 3.8, this should be removed before the next release. \n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\nsetup_args = {}\ninstall_requires = [\n \"param >=1.12.0,<3.0\",\n \"numpy >=1.0\",\n \"pyviz_comms >=0.7.4\",\n \"panel >=1.0\",\n \"colorcet\",\n \"packaging\",\n \"pandas >=0.20.0\",\n]\n\nextras_require = {}\n\nextras_require['lint'] = [\n 'ruff',\n 'pre-commit',\n]\n\n# Test requirements\nextras_require['tests_core'] = [\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'flaky',\n 'matplotlib >=3, <3.8', # 3.8 breaks tests\n 'nbconvert',\n 'bokeh >=3.1',\n 'pillow',\n 'plotly >=4.0',\n 'ipython >=5.4.0',\n 'contourpy',\n]\n\n# Optional tests dependencies, i.e. one should be able\n# to run and pass the test suite without installing any\n# of those.\nextras_require['tests'] = extras_require['tests_core'] + [\n 'dask',\n 'ibis-framework', # Mapped to ibis-sqlite in setup.cfg for conda\n 'xarray >=0.10.4',\n 'networkx',\n 'shapely',\n 'ffmpeg',\n 'cftime',\n 'scipy',\n 'selenium',\n 'spatialpandas',\n 'datashader >=0.11.1',\n 'dash >=1.16',\n]\n\nextras_require['tests_ci'] = [\n 'codecov',\n \"pytest-github-actions-annotate-failures\",\n]\n\nextras_require['tests_gpu'] = extras_require['tests'] + [\n 'cudf',\n]\n\nextras_require['tests_nb'] = ['nbval']\nextras_require['ui'] = ['playwright', 'pytest-playwright']\n\n# Notebook dependencies\nextras_require[\"notebook\"] = [\"ipython >=5.4.0\", \"notebook\"]\n\n# IPython Notebook + pandas + matplotlib + bokeh\nextras_require[\"recommended\"] = extras_require[\"notebook\"] + [\n \"matplotlib >=3\",\n \"bokeh >=3.1\",\n]\n\n# Requirements to run all examples\nextras_require[\"examples\"] = extras_require[\"recommended\"] + [\n \"networkx\",\n \"pillow\",\n \"xarray >=0.10.4\",\n \"plotly >=4.0\",\n 'dash >=1.16',\n \"streamz >=0.5.0\",\n \"ffmpeg\",\n \"cftime\",\n \"netcdf4\",\n \"dask\",\n \"scipy\",\n \"shapely\",\n \"scikit-image\",\n \"pyarrow\",\n \"pooch\",\n \"datashader >=0.11.1\",\n]\n\n\nextras_require[\"examples_tests\"] = extras_require[\"examples\"] + extras_require['tests_nb']\n\n# Extra third-party libraries\nextras_require[\"extras\"] = extras_require[\"examples\"] + [\n \"pscript ==0.7.1\",\n]\n\n# Not used in tox.ini or elsewhere, kept for backwards compatibility.\nextras_require[\"unit_tests\"] = extras_require[\"examples\"] + extras_require[\"tests\"] + extras_require['lint']\n\nextras_require['doc'] = extras_require['examples'] + [\n 'nbsite >=0.8.2,<0.9.0',\n 'mpl_sample_data >=3.1.3',\n 'pscript',\n 'graphviz',\n 'bokeh >=3.1',\n 'pooch',\n 'selenium',\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextras_require[\"build\"] = [\n \"param >=1.7.0\",\n \"setuptools >=30.3.0\",\n \"pyct >=0.4.4\",\n]\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, \".version\")\n try:\n from param import version\n except ImportError:\n version = None\n if version is not None:\n return version.Version.setup_version(\n basepath, reponame, archive_commit=\"$Format:%h$\"\n )\n else:\n print(\n \"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\"\n )\n return json.load(open(version_file_path))[\"version_string\"]\n\n\nsetup_args.update(\n dict(\n name=\"holoviews\",\n version=get_setup_version(\"holoviews\"),\n python_requires=\">=3.9\",\n install_requires=install_requires,\n extras_require=extras_require,\n description=\"Stop plotting your data - annotate your data and let it visualize itself.\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Jean-Luc Stevens and Philipp Rudiger\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz Developers\",\n maintainer_email=\"[email protected]\",\n platforms=[\"Windows\", \"Mac OS X\", \"Linux\"],\n license=\"BSD\",\n url=\"https://www.holoviews.org\",\n project_urls={\n \"Source\": \"https://github.com/holoviz/holoviews\",\n },\n entry_points={\"console_scripts\": [\"holoviews = holoviews.util.command:main\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Framework :: Matplotlib\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\",\n ],\n )\n)\n\n\nif __name__ == \"__main__\":\n example_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"holoviews/examples\"\n )\n\n if \"develop\" not in sys.argv and \"egg_info\" not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n if \"install\" in sys.argv:\n header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n bars = \"=\" * len(header)\n\n extras = \"\\n\".join(\"holoviews[%s]\" % e for e in setup_args[\"extras_require\"])\n\n print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n\n print(\"\\nHoloViews supports the following installation types:\\n\")\n print(\"%s\\n\" % extras)\n print(\"Users should consider using one of these options.\\n\")\n print(\"By default only a core installation is performed and \")\n print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n print(\"For more information please visit http://holoviews.org/install.html\\n\")\n print(bars + \"\\n\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\nsetup_args = {}\ninstall_requires = [\n \"param >=1.12.0,<3.0\",\n \"numpy >=1.0\",\n \"pyviz_comms >=0.7.4\",\n \"panel >=1.0\",\n \"colorcet\",\n \"packaging\",\n \"pandas >=0.20.0\",\n]\n\nextras_require = {}\n\nextras_require['lint'] = [\n 'ruff',\n 'pre-commit',\n]\n\n# Test requirements\nextras_require['tests_core'] = [\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'flaky',\n 'matplotlib >=3',\n 'nbconvert',\n 'bokeh >=3.1',\n 'pillow',\n 'plotly >=4.0',\n 'ipython >=5.4.0',\n 'contourpy',\n]\n\n# Optional tests dependencies, i.e. one should be able\n# to run and pass the test suite without installing any\n# of those.\nextras_require['tests'] = extras_require['tests_core'] + [\n 'dask',\n 'ibis-framework', # Mapped to ibis-sqlite in setup.cfg for conda\n 'xarray >=0.10.4',\n 'networkx',\n 'shapely',\n 'ffmpeg',\n 'cftime',\n 'scipy',\n 'selenium',\n 'spatialpandas',\n 'datashader >=0.11.1',\n 'dash >=1.16',\n]\n\nextras_require['tests_ci'] = [\n 'codecov',\n \"pytest-github-actions-annotate-failures\",\n]\n\nextras_require['tests_gpu'] = extras_require['tests'] + [\n 'cudf',\n]\n\nextras_require['tests_nb'] = ['nbval']\nextras_require['ui'] = ['playwright', 'pytest-playwright']\n\n# Notebook dependencies\nextras_require[\"notebook\"] = [\"ipython >=5.4.0\", \"notebook\"]\n\n# IPython Notebook + pandas + matplotlib + bokeh\nextras_require[\"recommended\"] = extras_require[\"notebook\"] + [\n \"matplotlib >=3\",\n \"bokeh >=3.1\",\n]\n\n# Requirements to run all examples\nextras_require[\"examples\"] = extras_require[\"recommended\"] + [\n \"networkx\",\n \"pillow\",\n \"xarray >=0.10.4\",\n \"plotly >=4.0\",\n 'dash >=1.16',\n \"streamz >=0.5.0\",\n \"ffmpeg\",\n \"cftime\",\n \"netcdf4\",\n \"dask\",\n \"scipy\",\n \"shapely\",\n \"scikit-image\",\n \"pyarrow\",\n \"pooch\",\n \"datashader >=0.11.1\",\n]\n\n\nextras_require[\"examples_tests\"] = extras_require[\"examples\"] + extras_require['tests_nb']\n\n# Extra third-party libraries\nextras_require[\"extras\"] = extras_require[\"examples\"] + [\n \"pscript ==0.7.1\",\n]\n\n# Not used in tox.ini or elsewhere, kept for backwards compatibility.\nextras_require[\"unit_tests\"] = extras_require[\"examples\"] + extras_require[\"tests\"] + extras_require['lint']\n\nextras_require['doc'] = extras_require['examples'] + [\n 'nbsite >=0.8.2,<0.9.0',\n 'mpl_sample_data >=3.1.3',\n 'pscript',\n 'graphviz',\n 'bokeh >=3.1',\n 'pooch',\n 'selenium',\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextras_require[\"build\"] = [\n \"param >=1.7.0\",\n \"setuptools >=30.3.0\",\n \"pyct >=0.4.4\",\n]\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, \".version\")\n try:\n from param import version\n except ImportError:\n version = None\n if version is not None:\n return version.Version.setup_version(\n basepath, reponame, archive_commit=\"$Format:%h$\"\n )\n else:\n print(\n \"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\"\n )\n return json.load(open(version_file_path))[\"version_string\"]\n\n\nsetup_args.update(\n dict(\n name=\"holoviews\",\n version=get_setup_version(\"holoviews\"),\n python_requires=\">=3.9\",\n install_requires=install_requires,\n extras_require=extras_require,\n description=\"Stop plotting your data - annotate your data and let it visualize itself.\",\n long_description=open(\"README.md\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Jean-Luc Stevens and Philipp Rudiger\",\n author_email=\"[email protected]\",\n maintainer=\"HoloViz Developers\",\n maintainer_email=\"[email protected]\",\n platforms=[\"Windows\", \"Mac OS X\", \"Linux\"],\n license=\"BSD\",\n url=\"https://www.holoviews.org\",\n project_urls={\n \"Source\": \"https://github.com/holoviz/holoviews\",\n },\n entry_points={\"console_scripts\": [\"holoviews = holoviews.util.command:main\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: 3.12\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Framework :: Matplotlib\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\",\n ],\n )\n)\n\n\nif __name__ == \"__main__\":\n example_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"holoviews/examples\"\n )\n\n if \"develop\" not in sys.argv and \"egg_info\" not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n if \"install\" in sys.argv:\n header = \"HOLOVIEWS INSTALLATION INFORMATION\"\n bars = \"=\" * len(header)\n\n extras = \"\\n\".join(\"holoviews[%s]\" % e for e in setup_args[\"extras_require\"])\n\n print(\"%s\\n%s\\n%s\" % (bars, header, bars))\n\n print(\"\\nHoloViews supports the following installation types:\\n\")\n print(\"%s\\n\" % extras)\n print(\"Users should consider using one of these options.\\n\")\n print(\"By default only a core installation is performed and \")\n print(\"only the minimal set of dependencies are fetched.\\n\\n\")\n print(\"For more information please visit http://holoviews.org/install.html\\n\")\n print(bars + \"\\n\")\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}]}
| 2,566 | 96 |
gh_patches_debug_7500
|
rasdani/github-patches
|
git_diff
|
conda-forge__conda-smithy-138
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve lint messages on whitespace
The message about the spacing for selectors isn't clear to see the problem in the number of spaces:
`Selectors are suggested to take a " # []" form.`
It would be clearer to have a message like: `<two spaces> # <one space> [selector]`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/lint_recipe.py`
Content:
```
1 import os
2 import re
3
4 import jinja2
5 import ruamel.yaml
6
7
8 EXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',
9 'test', 'app', 'about', 'extra']
10
11 REQUIREMENTS_ORDER = ['build', 'run']
12
13
14 class NullUndefined(jinja2.Undefined):
15 def __unicode__(self):
16 return unicode(self._undefined_name)
17
18
19 def get_section(parent, name, lints):
20 section = parent.get(name, {})
21 if not isinstance(section, dict):
22 lints.append('The "{}" section was expected to be a dictionary, but '
23 'got a {}.'.format(name, type(section).__name__))
24 section = {}
25 return section
26
27
28 def lintify(meta, recipe_dir=None):
29 lints = []
30 major_sections = list(meta.keys())
31
32 # If the recipe_dir exists (no guarantee within this function) , we can
33 # find the meta.yaml within it.
34 meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
35
36 source_section = get_section(meta, 'source', lints)
37 build_section = get_section(meta, 'build', lints)
38 requirements_section = get_section(meta, 'requirements', lints)
39 about_section = get_section(meta, 'about', lints)
40 extra_section = get_section(meta, 'extra', lints)
41
42 # 1: Top level meta.yaml keys should have a specific order.
43 section_order_sorted = sorted(major_sections,
44 key=EXPECTED_SECTION_ORDER.index)
45 if major_sections != section_order_sorted:
46 lints.append('The top level meta keys are in an unexpected order. '
47 'Expecting {}.'.format(section_order_sorted))
48
49 # 2: The about section should have a home, license and summary.
50 for about_item in ['home', 'license', 'summary']:
51 # if the section doesn't exist, or is just empty, lint it.
52 if not about_section.get(about_item, ''):
53 lints.append('The {} item is expected in the about section.'
54 ''.format(about_item))
55
56 # 3: The recipe should have some maintainers.
57 if not extra_section.get('recipe-maintainers', []):
58 lints.append('The recipe could do with some maintainers listed in '
59 'the "extra/recipe-maintainers" section.')
60
61 # 4: The recipe should have some tests.
62 if 'test' not in major_sections:
63 test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',
64 'run_test.pl']
65 a_test_file_exists = (recipe_dir is not None and
66 any(os.path.exists(os.path.join(recipe_dir,
67 test_file))
68 for test_file in test_files))
69 if not a_test_file_exists:
70 lints.append('The recipe must have some tests.')
71
72 # 5: License cannot be 'unknown.'
73 license = about_section.get('license', '').lower()
74 if 'unknown' == license.strip():
75 lints.append('The recipe license cannot be unknown.')
76
77 # 6: Selectors should be in a tidy form.
78 if recipe_dir is not None and os.path.exists(meta_fname):
79 bad_selectors = []
80 # Good selectors look like ".*\s\s#\s[...]"
81 good_selectors_pat = re.compile(r'(.+?)\s{2,}#\s\[(.+)\](?(2).*)$')
82 with open(meta_fname, 'r') as fh:
83 for selector_line in selector_lines(fh):
84 if not good_selectors_pat.match(selector_line):
85 bad_selectors.append(selector_line)
86 if bad_selectors:
87 lints.append('Selectors are suggested to take a '
88 '" # [<selector>]" form.')
89
90 # 7: The build section should have a build number.
91 if build_section.get('number', None) is None:
92 lints.append('The recipe must have a `build/number` section.')
93
94 # 8: The build section should be before the run section in requirements.
95 requirements_order_sorted = sorted(requirements_section,
96 key=REQUIREMENTS_ORDER.index)
97 if requirements_section.keys() != requirements_order_sorted:
98 lints.append('The `requirements/build` section should be defined '
99 'before the `requirements/run` section.')
100
101 # 9: Files downloaded should have a hash.
102 if ('url' in source_section and
103 not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
104 lints.append('When defining a source/url please add a sha256, sha1 '
105 'or md5 checksum (sha256 preferably).')
106
107 return lints
108
109
110 def selector_lines(lines):
111 # Using the same pattern defined in conda-build (metadata.py),
112 # we identify selectors.
113 sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')
114
115 for line in lines:
116 line = line.rstrip()
117 if line.lstrip().startswith('#'):
118 # Don't bother with comment only lines
119 continue
120 m = sel_pat.match(line)
121 if m:
122 m.group(3)
123 yield line
124
125
126 def main(recipe_dir):
127 recipe_dir = os.path.abspath(recipe_dir)
128 recipe_meta = os.path.join(recipe_dir, 'meta.yaml')
129 if not os.path.exists(recipe_dir):
130 raise IOError('Feedstock has no recipe/meta.yaml.')
131
132 env = jinja2.Environment(undefined=NullUndefined)
133
134 with open(recipe_meta, 'r') as fh:
135 content = env.from_string(''.join(fh)).render()
136 meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
137 results = lintify(meta, recipe_dir)
138 return results
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py
--- a/conda_smithy/lint_recipe.py
+++ b/conda_smithy/lint_recipe.py
@@ -85,7 +85,7 @@
bad_selectors.append(selector_line)
if bad_selectors:
lints.append('Selectors are suggested to take a '
- '" # [<selector>]" form.')
+ '"<two spaces>#<one space>[<selector>]" form.')
# 7: The build section should have a build number.
if build_section.get('number', None) is None:
|
{"golden_diff": "diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py\n--- a/conda_smithy/lint_recipe.py\n+++ b/conda_smithy/lint_recipe.py\n@@ -85,7 +85,7 @@\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n- '\" # [<selector>]\" form.')\n+ '\"<two spaces>#<one space>[<selector>]\" form.')\n \n # 7: The build section should have a build number.\n if build_section.get('number', None) is None:\n", "issue": "Improve lint messages on whitespace\nThe message about the spacing for selectors isn't clear to see the problem in the number of spaces:\n\n`Selectors are suggested to take a \" # []\" form.`\n\nIt would be clearer to have a message like: `<two spaces> # <one space> [selector]`\n\n", "before_files": [{"content": "import os\nimport re\n\nimport jinja2\nimport ruamel.yaml\n\n\nEXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',\n 'test', 'app', 'about', 'extra']\n\nREQUIREMENTS_ORDER = ['build', 'run']\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return unicode(self._undefined_name)\n\n\ndef get_section(parent, name, lints):\n section = parent.get(name, {})\n if not isinstance(section, dict):\n lints.append('The \"{}\" section was expected to be a dictionary, but '\n 'got a {}.'.format(name, type(section).__name__))\n section = {}\n return section\n\n\ndef lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n\n # If the recipe_dir exists (no guarantee within this function) , we can\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n\n source_section = get_section(meta, 'source', lints)\n build_section = get_section(meta, 'build', lints)\n requirements_section = get_section(meta, 'requirements', lints)\n about_section = get_section(meta, 'about', lints)\n extra_section = get_section(meta, 'extra', lints)\n\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n if major_sections != section_order_sorted:\n lints.append('The top level meta keys are in an unexpected order. '\n 'Expecting {}.'.format(section_order_sorted))\n\n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n\n # 3: The recipe should have some maintainers.\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n\n # 4: The recipe should have some tests.\n if 'test' not in major_sections:\n test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',\n 'run_test.pl']\n a_test_file_exists = (recipe_dir is not None and\n any(os.path.exists(os.path.join(recipe_dir,\n test_file))\n for test_file in test_files))\n if not a_test_file_exists:\n lints.append('The recipe must have some tests.')\n\n # 5: License cannot be 'unknown.'\n license = about_section.get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n\n # 6: Selectors should be in a tidy form.\n if recipe_dir is not None and os.path.exists(meta_fname):\n bad_selectors = []\n # Good selectors look like \".*\\s\\s#\\s[...]\"\n good_selectors_pat = re.compile(r'(.+?)\\s{2,}#\\s\\[(.+)\\](?(2).*)$')\n with open(meta_fname, 'r') as fh:\n for selector_line in selector_lines(fh):\n if not good_selectors_pat.match(selector_line):\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n '\" # [<selector>]\" form.')\n\n # 7: The build section should have a build number.\n if build_section.get('number', None) is None:\n lints.append('The recipe must have a `build/number` section.')\n\n # 8: The build section should be before the run section in requirements.\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n lints.append('The `requirements/build` section should be defined '\n 'before the `requirements/run` section.')\n\n # 9: Files downloaded should have a hash.\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n 'or md5 checksum (sha256 preferably).')\n\n return lints\n\n\ndef selector_lines(lines):\n # Using the same pattern defined in conda-build (metadata.py),\n # we identify selectors.\n sel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\n\n for line in lines:\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n m.group(3)\n yield line\n\n\ndef main(recipe_dir):\n recipe_dir = os.path.abspath(recipe_dir)\n recipe_meta = os.path.join(recipe_dir, 'meta.yaml')\n if not os.path.exists(recipe_dir):\n raise IOError('Feedstock has no recipe/meta.yaml.')\n\n env = jinja2.Environment(undefined=NullUndefined)\n\n with open(recipe_meta, 'r') as fh:\n content = env.from_string(''.join(fh)).render()\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\n results = lintify(meta, recipe_dir)\n return results\n", "path": "conda_smithy/lint_recipe.py"}], "after_files": [{"content": "import os\nimport re\n\nimport jinja2\nimport ruamel.yaml\n\n\nEXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',\n 'test', 'app', 'about', 'extra']\n\nREQUIREMENTS_ORDER = ['build', 'run']\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return unicode(self._undefined_name)\n\n\ndef get_section(parent, name, lints):\n section = parent.get(name, {})\n if not isinstance(section, dict):\n lints.append('The \"{}\" section was expected to be a dictionary, but '\n 'got a {}.'.format(name, type(section).__name__))\n section = {}\n return section\n\n\ndef lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n\n # If the recipe_dir exists (no guarantee within this function) , we can\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n\n source_section = get_section(meta, 'source', lints)\n build_section = get_section(meta, 'build', lints)\n requirements_section = get_section(meta, 'requirements', lints)\n about_section = get_section(meta, 'about', lints)\n extra_section = get_section(meta, 'extra', lints)\n\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n if major_sections != section_order_sorted:\n lints.append('The top level meta keys are in an unexpected order. '\n 'Expecting {}.'.format(section_order_sorted))\n\n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n\n # 3: The recipe should have some maintainers.\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n\n # 4: The recipe should have some tests.\n if 'test' not in major_sections:\n test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',\n 'run_test.pl']\n a_test_file_exists = (recipe_dir is not None and\n any(os.path.exists(os.path.join(recipe_dir,\n test_file))\n for test_file in test_files))\n if not a_test_file_exists:\n lints.append('The recipe must have some tests.')\n\n # 5: License cannot be 'unknown.'\n license = about_section.get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n\n # 6: Selectors should be in a tidy form.\n if recipe_dir is not None and os.path.exists(meta_fname):\n bad_selectors = []\n # Good selectors look like \".*\\s\\s#\\s[...]\"\n good_selectors_pat = re.compile(r'(.+?)\\s{2,}#\\s\\[(.+)\\](?(2).*)$')\n with open(meta_fname, 'r') as fh:\n for selector_line in selector_lines(fh):\n if not good_selectors_pat.match(selector_line):\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n '\"<two spaces>#<one space>[<selector>]\" form.')\n\n # 7: The build section should have a build number.\n if build_section.get('number', None) is None:\n lints.append('The recipe must have a `build/number` section.')\n\n # 8: The build section should be before the run section in requirements.\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n lints.append('The `requirements/build` section should be defined '\n 'before the `requirements/run` section.')\n\n # 9: Files downloaded should have a hash.\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n 'or md5 checksum (sha256 preferably).')\n\n return lints\n\n\ndef selector_lines(lines):\n # Using the same pattern defined in conda-build (metadata.py),\n # we identify selectors.\n sel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\n\n for line in lines:\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n m.group(3)\n yield line\n\n\ndef main(recipe_dir):\n recipe_dir = os.path.abspath(recipe_dir)\n recipe_meta = os.path.join(recipe_dir, 'meta.yaml')\n if not os.path.exists(recipe_dir):\n raise IOError('Feedstock has no recipe/meta.yaml.')\n\n env = jinja2.Environment(undefined=NullUndefined)\n\n with open(recipe_meta, 'r') as fh:\n content = env.from_string(''.join(fh)).render()\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\n results = lintify(meta, recipe_dir)\n return results\n", "path": "conda_smithy/lint_recipe.py"}]}
| 1,881 | 142 |
gh_patches_debug_50581
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-6828
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`
### Checklist
- [x] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`.
In the past I submitted a similar issue for the left-side label (see https://github.com/streamlit/streamlit/issues/5898); now it is the right-side label that is misbehaving.
### Reproducible Code Example
[](https://issues.streamlitapp.com/?issue=gh-6297)
```Python
import streamlit as st
st.title("Right-side label of slider and select_slider overflows when inside expander")
with st.expander('Example st.expander'):
single_value = st.slider(
label='Example st.slider',
min_value=9_500_000,
max_value=10_000_000,
value=10_000_000
)
first_value,last_value = st.slider(
label='Example st.slider (range mode)',
min_value=9_500_000,
max_value=10_000_000,
value=(9_500_000,10_000_000)
)
single_value = st.select_slider(
label='Example st.select_slider',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value='This is a very, very long label'
)
first_value,last_value = st.select_slider(
label='Example st.select_slider (range mode)',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value=['Maradona','This is a very, very long label']
)
```
### Steps To Reproduce
1. Run the reproducible code example
2. Open the expander and see that all the right side labels are overflowing
### Expected Behavior
Labels should not overflow beyond the widgets width.
### Current Behavior
_No response_
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.20.0
- Python version: 3.11
- Operating System: macOS
- Browser: Brave
- Virtual environment: conda
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_select_slider.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import numpy as np
16 import pandas as pd
17
18 import streamlit as st
19 from streamlit import runtime
20
21 w1 = st.select_slider(
22 "Label 1",
23 value=("orange", "blue"),
24 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
25 )
26 st.write("Value 1:", w1)
27
28 w2 = st.select_slider(
29 "Label 2",
30 options=np.array([1, 2, 3, 4, 5]),
31 )
32 st.write("Value 2:", w2)
33
34 w3 = st.select_slider(
35 "Label 3",
36 value=[2, 5],
37 options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),
38 )
39 st.write("Value 3:", w3)
40
41 w4 = st.select_slider(
42 "Label 4",
43 value=5,
44 options=pd.DataFrame(
45 {
46 "first column": [1, 2, 3, 4, 5],
47 "second column": [10, 20, 30, 40, 50],
48 }
49 ),
50 )
51 st.write("Value 4:", w4)
52
53 w5 = st.select_slider(
54 "Label 5",
55 value=("orange", "blue"),
56 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
57 disabled=True,
58 )
59 st.write("Value 5:", w5)
60
61 w6 = st.select_slider(
62 "Label 6",
63 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
64 label_visibility="hidden",
65 )
66
67 st.write("Value 6:", w6)
68
69
70 w7 = st.select_slider(
71 "Label 7",
72 options=["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
73 label_visibility="collapsed",
74 )
75
76 st.write("Value 7:", w7)
77
78 if runtime.exists():
79
80 def on_change():
81 st.session_state.select_slider_changed = True
82
83 st.select_slider(
84 "Label 8",
85 options=np.array([1, 2, 3, 4, 5]),
86 key="select_slider8",
87 on_change=on_change,
88 )
89 st.write("Value 8:", st.session_state.select_slider8)
90 st.write("Select slider changed:", "select_slider_changed" in st.session_state)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/e2e/scripts/st_select_slider.py b/e2e/scripts/st_select_slider.py
--- a/e2e/scripts/st_select_slider.py
+++ b/e2e/scripts/st_select_slider.py
@@ -88,3 +88,12 @@
)
st.write("Value 8:", st.session_state.select_slider8)
st.write("Select slider changed:", "select_slider_changed" in st.session_state)
+
+with st.expander("Expander", expanded=True):
+ w9 = st.select_slider(
+ label="Label 9",
+ options=["foo", "bar", "baz", "This is a very, very long option"],
+ value="This is a very, very long option",
+ )
+
+ st.write("Value 9:", w9)
|
{"golden_diff": "diff --git a/e2e/scripts/st_select_slider.py b/e2e/scripts/st_select_slider.py\n--- a/e2e/scripts/st_select_slider.py\n+++ b/e2e/scripts/st_select_slider.py\n@@ -88,3 +88,12 @@\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n+\n+with st.expander(\"Expander\", expanded=True):\n+ w9 = st.select_slider(\n+ label=\"Label 9\",\n+ options=[\"foo\", \"bar\", \"baz\", \"This is a very, very long option\"],\n+ value=\"This is a very, very long option\",\n+ )\n+\n+ st.write(\"Value 9:\", w9)\n", "issue": "Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`\n### Checklist\r\n\r\n- [x] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nRight-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`.\r\n\r\nIn the past I submitted a similar issue for the left-side label (see https://github.com/streamlit/streamlit/issues/5898); now it is the right-side label that is misbehaving.\r\n\r\n### Reproducible Code Example\r\n\r\n[](https://issues.streamlitapp.com/?issue=gh-6297)\r\n\r\n```Python\r\nimport streamlit as st\r\n\r\nst.title(\"Right-side label of slider and select_slider overflows when inside expander\")\r\n\r\nwith st.expander('Example st.expander'):\r\n\r\n single_value = st.slider(\r\n label='Example st.slider',\r\n min_value=9_500_000,\r\n max_value=10_000_000,\r\n value=10_000_000\r\n )\r\n\r\n first_value,last_value = st.slider(\r\n label='Example st.slider (range mode)',\r\n min_value=9_500_000,\r\n max_value=10_000_000,\r\n value=(9_500_000,10_000_000)\r\n )\r\n\r\n single_value = st.select_slider(\r\n label='Example st.select_slider',\r\n options=['Maradona','Ronaldo','Pele','This is a very, very long label'],\r\n value='This is a very, very long label'\r\n )\r\n\r\n first_value,last_value = st.select_slider(\r\n label='Example st.select_slider (range mode)',\r\n options=['Maradona','Ronaldo','Pele','This is a very, very long label'],\r\n value=['Maradona','This is a very, very long label']\r\n )\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\n1. Run the reproducible code example\r\n2. Open the expander and see that all the right side labels are overflowing\r\n\r\n### Expected Behavior\r\n\r\nLabels should not overflow beyond the widgets width.\r\n\r\n### Current Behavior\r\n\r\n_No response_\r\n\r\n### Is this a regression?\r\n\r\n- [X] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.20.0\r\n- Python version: 3.11\r\n- Operating System: macOS\r\n- Browser: Brave\r\n- Virtual environment: conda\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n", "path": "e2e/scripts/st_select_slider.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n\nwith st.expander(\"Expander\", expanded=True):\n w9 = st.select_slider(\n label=\"Label 9\",\n options=[\"foo\", \"bar\", \"baz\", \"This is a very, very long option\"],\n value=\"This is a very, very long option\",\n )\n\n st.write(\"Value 9:\", w9)\n", "path": "e2e/scripts/st_select_slider.py"}]}
| 1,781 | 173 |
gh_patches_debug_25216
|
rasdani/github-patches
|
git_diff
|
TileDB-Inc__TileDB-Py-167
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tiledb.open doesn't accept an optional ctx
it accepts a config which creates a ctx, would it make sense to have both?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tiledb/highlevel.py`
Content:
```
1 import tiledb
2 from tiledb.libtiledb import *
3
4 import numpy as np
5
6 def open(uri, key=None, attr=None, mode='r', config=None):
7 """
8 Open a TileDB array at the given URI
9
10 :param uri: any TileDB supported URI
11 :param key: encryption key, str or None
12 :param str mode: (default 'r') Open the array object in read 'r' or write 'w' mode
13 :param attr: attribute name to select from a multi-attribute array, str or None
14 :param config: TileDB config dictionary, dict or None
15 :return:
16 """
17 if config:
18 cfg = tiledb.Config(config)
19 ctx = tiledb.Ctx(cfg)
20 else:
21 ctx = default_ctx()
22
23 schema = ArraySchema.load(uri, ctx=ctx)
24 if not schema:
25 raise Exception("Unable to load tiledb ArraySchema from URI: '{}'".format(uri))
26
27 if schema.sparse:
28 return tiledb.SparseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)
29 elif not schema.sparse:
30 return tiledb.DenseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)
31 else:
32 raise Exception("Unknown TileDB array type")
33
34
35 def save(uri, array, config=None, **kw):
36 """
37 Save array-like object at the given URI.
38
39 :param uri: str or None
40 :param array: array-like object convertible to NumPy
41 :param config: TileDB config dictionary, dict or None
42 :param kw: optional keyword args will be forwarded to tiledb.Array constructor
43 :return:
44 """
45 if not isinstance(array, np.ndarray):
46 raise ValueError("expected NumPy ndarray, not '{}'".format(type(array)))
47 if config:
48 cfg = Config(config)
49 ctx = tiledb.Ctx(cfg)
50 else:
51 ctx = default_ctx()
52
53 return tiledb.from_numpy(uri, array, ctx=ctx)
54
55
56 def empty_like(uri, arr, config=None, key=None, tile=None):
57 """
58 Create and return an empty, writeable DenseArray with schema based on
59 a NumPy-array like object.
60
61 :param uri:
62 :param arr: NumPy ndarray, or shape tuple
63 :param ctx:
64 :param kw:
65 :return:
66 """
67 if config:
68 cfg = tiledb.Config(config)
69 ctx = tiledb.Ctx(cfg)
70 else:
71 ctx = default_ctx()
72
73 if arr is ArraySchema:
74 schema = arr
75 else:
76 schema = schema_like(arr, tile=tile, ctx=ctx)
77
78 tiledb.DenseArray.create(uri, key=key, schema=schema)
79 return tiledb.DenseArray(uri, mode='w', key=key, ctx=ctx)
80
81
82 def from_numpy(uri, array, ctx=default_ctx(), **kw):
83 """
84 Convenience method, see `tiledb.DenseArray.from_numpy`
85 """
86 if not isinstance(array, np.ndarray):
87 raise Exception("from_numpy is only currently supported for numpy.ndarray")
88
89 return DenseArray.from_numpy(uri, array, ctx=ctx, **kw)
90
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tiledb/highlevel.py b/tiledb/highlevel.py
--- a/tiledb/highlevel.py
+++ b/tiledb/highlevel.py
@@ -3,7 +3,7 @@
import numpy as np
-def open(uri, key=None, attr=None, mode='r', config=None):
+def open(uri, key=None, attr=None, mode='r', config=None, ctx=None):
"""
Open a TileDB array at the given URI
@@ -14,10 +14,14 @@
:param config: TileDB config dictionary, dict or None
:return:
"""
+ if ctx and config:
+ raise ValueError("Received extra Ctx or Config argument: either one may be provided, but not both")
+
if config:
cfg = tiledb.Config(config)
ctx = tiledb.Ctx(cfg)
- else:
+
+ if ctx is None:
ctx = default_ctx()
schema = ArraySchema.load(uri, ctx=ctx)
@@ -88,3 +92,23 @@
return DenseArray.from_numpy(uri, array, ctx=ctx, **kw)
+def array_exists(uri, isdense=False, issparse=False):
+ """
+ Check if arrays exists and is open-able at the given URI
+
+ Optionally restrict to `isdense` or `issparse` array types.
+ """
+ try:
+ a = tiledb.open(uri)
+ except TileDBError as exc:
+ return False
+
+ if isdense:
+ rval = not a.schema.sparse
+ elif issparse:
+ rval = a.schema.sparse
+ else:
+ rval = True
+
+ a.close()
+ return rval
|
{"golden_diff": "diff --git a/tiledb/highlevel.py b/tiledb/highlevel.py\n--- a/tiledb/highlevel.py\n+++ b/tiledb/highlevel.py\n@@ -3,7 +3,7 @@\n \n import numpy as np\n \n-def open(uri, key=None, attr=None, mode='r', config=None):\n+def open(uri, key=None, attr=None, mode='r', config=None, ctx=None):\n \"\"\"\n Open a TileDB array at the given URI\n \n@@ -14,10 +14,14 @@\n :param config: TileDB config dictionary, dict or None\n :return:\n \"\"\"\n+ if ctx and config:\n+ raise ValueError(\"Received extra Ctx or Config argument: either one may be provided, but not both\")\n+\n if config:\n cfg = tiledb.Config(config)\n ctx = tiledb.Ctx(cfg)\n- else:\n+\n+ if ctx is None:\n ctx = default_ctx()\n \n schema = ArraySchema.load(uri, ctx=ctx)\n@@ -88,3 +92,23 @@\n \n return DenseArray.from_numpy(uri, array, ctx=ctx, **kw)\n \n+def array_exists(uri, isdense=False, issparse=False):\n+ \"\"\"\n+ Check if arrays exists and is open-able at the given URI\n+\n+ Optionally restrict to `isdense` or `issparse` array types.\n+ \"\"\"\n+ try:\n+ a = tiledb.open(uri)\n+ except TileDBError as exc:\n+ return False\n+\n+ if isdense:\n+ rval = not a.schema.sparse\n+ elif issparse:\n+ rval = a.schema.sparse\n+ else:\n+ rval = True\n+\n+ a.close()\n+ return rval\n", "issue": "tiledb.open doesn't accept an optional ctx\nit accepts a config which creates a ctx, would it make sense to have both?\n", "before_files": [{"content": "import tiledb\nfrom tiledb.libtiledb import *\n\nimport numpy as np\n\ndef open(uri, key=None, attr=None, mode='r', config=None):\n \"\"\"\n Open a TileDB array at the given URI\n\n :param uri: any TileDB supported URI\n :param key: encryption key, str or None\n :param str mode: (default 'r') Open the array object in read 'r' or write 'w' mode\n :param attr: attribute name to select from a multi-attribute array, str or None\n :param config: TileDB config dictionary, dict or None\n :return:\n \"\"\"\n if config:\n cfg = tiledb.Config(config)\n ctx = tiledb.Ctx(cfg)\n else:\n ctx = default_ctx()\n\n schema = ArraySchema.load(uri, ctx=ctx)\n if not schema:\n raise Exception(\"Unable to load tiledb ArraySchema from URI: '{}'\".format(uri))\n\n if schema.sparse:\n return tiledb.SparseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)\n elif not schema.sparse:\n return tiledb.DenseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)\n else:\n raise Exception(\"Unknown TileDB array type\")\n\n\ndef save(uri, array, config=None, **kw):\n \"\"\"\n Save array-like object at the given URI.\n\n :param uri: str or None\n :param array: array-like object convertible to NumPy\n :param config: TileDB config dictionary, dict or None\n :param kw: optional keyword args will be forwarded to tiledb.Array constructor\n :return:\n \"\"\"\n if not isinstance(array, np.ndarray):\n raise ValueError(\"expected NumPy ndarray, not '{}'\".format(type(array)))\n if config:\n cfg = Config(config)\n ctx = tiledb.Ctx(cfg)\n else:\n ctx = default_ctx()\n\n return tiledb.from_numpy(uri, array, ctx=ctx)\n\n\ndef empty_like(uri, arr, config=None, key=None, tile=None):\n \"\"\"\n Create and return an empty, writeable DenseArray with schema based on\n a NumPy-array like object.\n\n :param uri:\n :param arr: NumPy ndarray, or shape tuple\n :param ctx:\n :param kw:\n :return:\n \"\"\"\n if config:\n cfg = tiledb.Config(config)\n ctx = tiledb.Ctx(cfg)\n else:\n ctx = default_ctx()\n\n if arr is ArraySchema:\n schema = arr\n else:\n schema = schema_like(arr, tile=tile, ctx=ctx)\n\n tiledb.DenseArray.create(uri, key=key, schema=schema)\n return tiledb.DenseArray(uri, mode='w', key=key, ctx=ctx)\n\n\ndef from_numpy(uri, array, ctx=default_ctx(), **kw):\n \"\"\"\n Convenience method, see `tiledb.DenseArray.from_numpy`\n \"\"\"\n if not isinstance(array, np.ndarray):\n raise Exception(\"from_numpy is only currently supported for numpy.ndarray\")\n\n return DenseArray.from_numpy(uri, array, ctx=ctx, **kw)\n\n", "path": "tiledb/highlevel.py"}], "after_files": [{"content": "import tiledb\nfrom tiledb.libtiledb import *\n\nimport numpy as np\n\ndef open(uri, key=None, attr=None, mode='r', config=None, ctx=None):\n \"\"\"\n Open a TileDB array at the given URI\n\n :param uri: any TileDB supported URI\n :param key: encryption key, str or None\n :param str mode: (default 'r') Open the array object in read 'r' or write 'w' mode\n :param attr: attribute name to select from a multi-attribute array, str or None\n :param config: TileDB config dictionary, dict or None\n :return:\n \"\"\"\n if ctx and config:\n raise ValueError(\"Received extra Ctx or Config argument: either one may be provided, but not both\")\n\n if config:\n cfg = tiledb.Config(config)\n ctx = tiledb.Ctx(cfg)\n\n if ctx is None:\n ctx = default_ctx()\n\n schema = ArraySchema.load(uri, ctx=ctx)\n if not schema:\n raise Exception(\"Unable to load tiledb ArraySchema from URI: '{}'\".format(uri))\n\n if schema.sparse:\n return tiledb.SparseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)\n elif not schema.sparse:\n return tiledb.DenseArray(uri, mode=mode, key=key, attr=attr, ctx=ctx)\n else:\n raise Exception(\"Unknown TileDB array type\")\n\n\ndef save(uri, array, config=None, **kw):\n \"\"\"\n Save array-like object at the given URI.\n\n :param uri: str or None\n :param array: array-like object convertible to NumPy\n :param config: TileDB config dictionary, dict or None\n :param kw: optional keyword args will be forwarded to tiledb.Array constructor\n :return:\n \"\"\"\n if not isinstance(array, np.ndarray):\n raise ValueError(\"expected NumPy ndarray, not '{}'\".format(type(array)))\n if config:\n cfg = Config(config)\n ctx = tiledb.Ctx(cfg)\n else:\n ctx = default_ctx()\n\n return tiledb.from_numpy(uri, array, ctx=ctx)\n\n\ndef empty_like(uri, arr, config=None, key=None, tile=None):\n \"\"\"\n Create and return an empty, writeable DenseArray with schema based on\n a NumPy-array like object.\n\n :param uri:\n :param arr: NumPy ndarray, or shape tuple\n :param ctx:\n :param kw:\n :return:\n \"\"\"\n if config:\n cfg = tiledb.Config(config)\n ctx = tiledb.Ctx(cfg)\n else:\n ctx = default_ctx()\n\n if arr is ArraySchema:\n schema = arr\n else:\n schema = schema_like(arr, tile=tile, ctx=ctx)\n\n tiledb.DenseArray.create(uri, key=key, schema=schema)\n return tiledb.DenseArray(uri, mode='w', key=key, ctx=ctx)\n\n\ndef from_numpy(uri, array, ctx=default_ctx(), **kw):\n \"\"\"\n Convenience method, see `tiledb.DenseArray.from_numpy`\n \"\"\"\n if not isinstance(array, np.ndarray):\n raise Exception(\"from_numpy is only currently supported for numpy.ndarray\")\n\n return DenseArray.from_numpy(uri, array, ctx=ctx, **kw)\n\ndef array_exists(uri, isdense=False, issparse=False):\n \"\"\"\n Check if arrays exists and is open-able at the given URI\n\n Optionally restrict to `isdense` or `issparse` array types.\n \"\"\"\n try:\n a = tiledb.open(uri)\n except TileDBError as exc:\n return False\n\n if isdense:\n rval = not a.schema.sparse\n elif issparse:\n rval = a.schema.sparse\n else:\n rval = True\n\n a.close()\n return rval\n", "path": "tiledb/highlevel.py"}]}
| 1,142 | 384 |
gh_patches_debug_13516
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-19895
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dropout2d
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/mindspore/ops/function/nn_func.py`
Content:
```
1 """Includes Mindspore Frontend functions listed in the TODO list
2 https://github.com/unifyai/ivy/issues/14951."""
3
4 # local
5 import ivy
6 from ivy.func_wrapper import with_supported_dtypes
7 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
8
9
10 @with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
11 @to_ivy_arrays_and_back
12 def selu(input_x):
13 return ivy.selu(input_x)
14
15
16 @with_supported_dtypes({"2.0 and below": ("float16", "float32")}, "mindspore")
17 @to_ivy_arrays_and_back
18 def softsign(x):
19 return ivy.divide(x, ivy.add(1, ivy.abs(x)))
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py
+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py
@@ -6,6 +6,23 @@
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
+@with_supported_dtypes(
+ {
+ "2.0.0 and below": (
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "float16",
+ "float32",
+ "float64",
+ )
+ },
+ "mindspore",
+)
+@to_ivy_arrays_and_back
+def dropout2d(input, p=0.5, training=True):
+ return ivy.dropout2d(input, p, training=training, data_format="NCHW")
@with_supported_dtypes({"2.0.0 and below": ("float16", "float32")}, "mindspore")
@to_ivy_arrays_and_back
|
{"golden_diff": "diff --git a/ivy/functional/frontends/mindspore/ops/function/nn_func.py b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n--- a/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n+++ b/ivy/functional/frontends/mindspore/ops/function/nn_func.py\n@@ -6,6 +6,23 @@\n from ivy.func_wrapper import with_supported_dtypes\n from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n \n+@with_supported_dtypes(\n+ {\n+ \"2.0.0 and below\": (\n+ \"int8\",\n+ \"int16\",\n+ \"int32\",\n+ \"int64\",\n+ \"float16\",\n+ \"float32\",\n+ \"float64\",\n+ )\n+ },\n+ \"mindspore\",\n+)\n+@to_ivy_arrays_and_back\n+def dropout2d(input, p=0.5, training=True):\n+ return ivy.dropout2d(input, p, training=training, data_format=\"NCHW\")\n \n @with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n @to_ivy_arrays_and_back\n", "issue": "dropout2d\n\n", "before_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef selu(input_x):\n return ivy.selu(input_x)\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}], "after_files": [{"content": "\"\"\"Includes Mindspore Frontend functions listed in the TODO list\nhttps://github.com/unifyai/ivy/issues/14951.\"\"\"\n\n# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n@with_supported_dtypes(\n {\n \"2.0.0 and below\": (\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float16\",\n \"float32\",\n \"float64\",\n )\n },\n \"mindspore\",\n)\n@to_ivy_arrays_and_back\ndef dropout2d(input, p=0.5, training=True):\n return ivy.dropout2d(input, p, training=training, data_format=\"NCHW\")\n\n@with_supported_dtypes({\"2.0.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef selu(input_x):\n return ivy.selu(input_x)\n\n\n@with_supported_dtypes({\"2.0 and below\": (\"float16\", \"float32\")}, \"mindspore\")\n@to_ivy_arrays_and_back\ndef softsign(x):\n return ivy.divide(x, ivy.add(1, ivy.abs(x)))\n", "path": "ivy/functional/frontends/mindspore/ops/function/nn_func.py"}]}
| 489 | 292 |
gh_patches_debug_39053
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2444
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create a Deployment type 1 specific script.
Create a script to download docker-compose from the Mathesar repo and help the user in setting up Mathesar.
- [ ] The script should be self-documenting, i.e., the prompts during interactive portions should help the user understand the implications of what they're doing, and any error states should give enough info for the user to find solutions in the troubleshooting documentation on docs.mathesar.org
- [ ] The script should also add help text in the installation CLI that would list the database Privileges needed for Mathesar to function effectively.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install.py`
Content:
```
1 """
2 This script installs functions and types for Mathesar onto the configured DB.
3 """
4 import getopt
5 import sys
6
7 import getpass
8
9 import django
10 from decouple import UndefinedValueError, config as decouple_config
11 from django.contrib.auth import get_user_model
12 from django.core import management
13
14 from django.conf import settings
15 from db import install
16
17
18 def main():
19 skip_confirm = False
20 (opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"])
21 for (opt, value) in opts:
22 if (opt == "-s") or (opt == "--skip-confirm"):
23 skip_confirm = True
24 check_missing_dj_config()
25 django.setup()
26 management.call_command('migrate')
27 debug_mode = decouple_config('DEBUG', default=False, cast=bool)
28 #
29 if not debug_mode:
30 management.call_command('collectstatic', no_input='y')
31 if not superuser_exists():
32 print("------------Setting up Admin user------------")
33 print("Admin user does not exists. We need at least one admin")
34 create_superuser(skip_confirm)
35
36 print("------------Setting up User Databases------------")
37 user_databases = [key for key in settings.DATABASES if key != "default"]
38 for database_key in user_databases:
39 install_on_db_with_key(database_key, skip_confirm)
40
41
42 def superuser_exists():
43 return get_user_model().objects.filter(is_superuser=True).exists()
44
45
46 def create_superuser(skip_confirm):
47 # TODO Replace argument name used for default admin user creation.
48 if not skip_confirm:
49 print("Please enter the details to create a new admin user ")
50 username = input("Username: ")
51 email = input("Email: ")
52 password = getpass.getpass('Password: ')
53 else:
54 username = "admin"
55 email = "[email protected]"
56 password = "password"
57 get_user_model().objects.create_superuser(username, email, password)
58 print(f"Admin user with username {username} was created successfully")
59
60
61 def check_missing_dj_config():
62 # TODO Add documentation link
63 documentation_link = ""
64 try:
65 decouple_config('ALLOWED_HOSTS')
66 decouple_config('SECRET_KEY')
67 decouple_config('DJANGO_DATABASE_KEY')
68 decouple_config('DJANGO_SETTINGS_MODULE')
69 decouple_config('DJANGO_DATABASE_URL')
70 decouple_config('MATHESAR_DATABASES')
71 except UndefinedValueError as e:
72 missing_config_key = e.args[0]
73 raise Exception(f"{missing_config_key} environment variable is missing."
74 f" Please follow the documentation {documentation_link} to add the missing environment variable.")
75
76
77 def install_on_db_with_key(database_key, skip_confirm):
78 install.install_mathesar(
79 database_name=settings.DATABASES[database_key]["NAME"],
80 username=settings.DATABASES[database_key]["USER"],
81 password=settings.DATABASES[database_key]["PASSWORD"],
82 hostname=settings.DATABASES[database_key]["HOST"],
83 port=settings.DATABASES[database_key]["PORT"],
84 skip_confirm=skip_confirm
85 )
86
87
88 if __name__ == "__main__":
89 main()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/install.py b/install.py
--- a/install.py
+++ b/install.py
@@ -4,11 +4,7 @@
import getopt
import sys
-import getpass
-
import django
-from decouple import UndefinedValueError, config as decouple_config
-from django.contrib.auth import get_user_model
from django.core import management
from django.conf import settings
@@ -16,64 +12,20 @@
def main():
- skip_confirm = False
+ # skip_confirm is temporarily enabled by default as we don't have any use for interactive prompts with docker only deployments
+ skip_confirm = True
(opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"])
for (opt, value) in opts:
if (opt == "-s") or (opt == "--skip-confirm"):
skip_confirm = True
- check_missing_dj_config()
django.setup()
management.call_command('migrate')
- debug_mode = decouple_config('DEBUG', default=False, cast=bool)
- #
- if not debug_mode:
- management.call_command('collectstatic', no_input='y')
- if not superuser_exists():
- print("------------Setting up Admin user------------")
- print("Admin user does not exists. We need at least one admin")
- create_superuser(skip_confirm)
-
print("------------Setting up User Databases------------")
user_databases = [key for key in settings.DATABASES if key != "default"]
for database_key in user_databases:
install_on_db_with_key(database_key, skip_confirm)
-def superuser_exists():
- return get_user_model().objects.filter(is_superuser=True).exists()
-
-
-def create_superuser(skip_confirm):
- # TODO Replace argument name used for default admin user creation.
- if not skip_confirm:
- print("Please enter the details to create a new admin user ")
- username = input("Username: ")
- email = input("Email: ")
- password = getpass.getpass('Password: ')
- else:
- username = "admin"
- email = "[email protected]"
- password = "password"
- get_user_model().objects.create_superuser(username, email, password)
- print(f"Admin user with username {username} was created successfully")
-
-
-def check_missing_dj_config():
- # TODO Add documentation link
- documentation_link = ""
- try:
- decouple_config('ALLOWED_HOSTS')
- decouple_config('SECRET_KEY')
- decouple_config('DJANGO_DATABASE_KEY')
- decouple_config('DJANGO_SETTINGS_MODULE')
- decouple_config('DJANGO_DATABASE_URL')
- decouple_config('MATHESAR_DATABASES')
- except UndefinedValueError as e:
- missing_config_key = e.args[0]
- raise Exception(f"{missing_config_key} environment variable is missing."
- f" Please follow the documentation {documentation_link} to add the missing environment variable.")
-
-
def install_on_db_with_key(database_key, skip_confirm):
install.install_mathesar(
database_name=settings.DATABASES[database_key]["NAME"],
|
{"golden_diff": "diff --git a/install.py b/install.py\n--- a/install.py\n+++ b/install.py\n@@ -4,11 +4,7 @@\n import getopt\n import sys\n \n-import getpass\n-\n import django\n-from decouple import UndefinedValueError, config as decouple_config\n-from django.contrib.auth import get_user_model\n from django.core import management\n \n from django.conf import settings\n@@ -16,64 +12,20 @@\n \n \n def main():\n- skip_confirm = False\n+ # skip_confirm is temporarily enabled by default as we don't have any use for interactive prompts with docker only deployments\n+ skip_confirm = True\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n- check_missing_dj_config()\n django.setup()\n management.call_command('migrate')\n- debug_mode = decouple_config('DEBUG', default=False, cast=bool)\n- #\n- if not debug_mode:\n- management.call_command('collectstatic', no_input='y')\n- if not superuser_exists():\n- print(\"------------Setting up Admin user------------\")\n- print(\"Admin user does not exists. We need at least one admin\")\n- create_superuser(skip_confirm)\n-\n print(\"------------Setting up User Databases------------\")\n user_databases = [key for key in settings.DATABASES if key != \"default\"]\n for database_key in user_databases:\n install_on_db_with_key(database_key, skip_confirm)\n \n \n-def superuser_exists():\n- return get_user_model().objects.filter(is_superuser=True).exists()\n-\n-\n-def create_superuser(skip_confirm):\n- # TODO Replace argument name used for default admin user creation.\n- if not skip_confirm:\n- print(\"Please enter the details to create a new admin user \")\n- username = input(\"Username: \")\n- email = input(\"Email: \")\n- password = getpass.getpass('Password: ')\n- else:\n- username = \"admin\"\n- email = \"[email protected]\"\n- password = \"password\"\n- get_user_model().objects.create_superuser(username, email, password)\n- print(f\"Admin user with username {username} was created successfully\")\n-\n-\n-def check_missing_dj_config():\n- # TODO Add documentation link\n- documentation_link = \"\"\n- try:\n- decouple_config('ALLOWED_HOSTS')\n- decouple_config('SECRET_KEY')\n- decouple_config('DJANGO_DATABASE_KEY')\n- decouple_config('DJANGO_SETTINGS_MODULE')\n- decouple_config('DJANGO_DATABASE_URL')\n- decouple_config('MATHESAR_DATABASES')\n- except UndefinedValueError as e:\n- missing_config_key = e.args[0]\n- raise Exception(f\"{missing_config_key} environment variable is missing.\"\n- f\" Please follow the documentation {documentation_link} to add the missing environment variable.\")\n-\n-\n def install_on_db_with_key(database_key, skip_confirm):\n install.install_mathesar(\n database_name=settings.DATABASES[database_key][\"NAME\"],\n", "issue": "Create a Deployment type 1 specific script.\nCreate a script to download docker-compose from the Mathesar repo and help the user in setting up Mathesar.\r\n\r\n- [ ] The script should be self-documenting, i.e., the prompts during interactive portions should help the user understand the implications of what they're doing, and any error states should give enough info for the user to find solutions in the troubleshooting documentation on docs.mathesar.org\r\n \r\n- [ ] The script should also add help text in the installation CLI that would list the database Privileges needed for Mathesar to function effectively.\n", "before_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nimport getopt\nimport sys\n\nimport getpass\n\nimport django\nfrom decouple import UndefinedValueError, config as decouple_config\nfrom django.contrib.auth import get_user_model\nfrom django.core import management\n\nfrom django.conf import settings\nfrom db import install\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n check_missing_dj_config()\n django.setup()\n management.call_command('migrate')\n debug_mode = decouple_config('DEBUG', default=False, cast=bool)\n #\n if not debug_mode:\n management.call_command('collectstatic', no_input='y')\n if not superuser_exists():\n print(\"------------Setting up Admin user------------\")\n print(\"Admin user does not exists. We need at least one admin\")\n create_superuser(skip_confirm)\n\n print(\"------------Setting up User Databases------------\")\n user_databases = [key for key in settings.DATABASES if key != \"default\"]\n for database_key in user_databases:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef superuser_exists():\n return get_user_model().objects.filter(is_superuser=True).exists()\n\n\ndef create_superuser(skip_confirm):\n # TODO Replace argument name used for default admin user creation.\n if not skip_confirm:\n print(\"Please enter the details to create a new admin user \")\n username = input(\"Username: \")\n email = input(\"Email: \")\n password = getpass.getpass('Password: ')\n else:\n username = \"admin\"\n email = \"[email protected]\"\n password = \"password\"\n get_user_model().objects.create_superuser(username, email, password)\n print(f\"Admin user with username {username} was created successfully\")\n\n\ndef check_missing_dj_config():\n # TODO Add documentation link\n documentation_link = \"\"\n try:\n decouple_config('ALLOWED_HOSTS')\n decouple_config('SECRET_KEY')\n decouple_config('DJANGO_DATABASE_KEY')\n decouple_config('DJANGO_SETTINGS_MODULE')\n decouple_config('DJANGO_DATABASE_URL')\n decouple_config('MATHESAR_DATABASES')\n except UndefinedValueError as e:\n missing_config_key = e.args[0]\n raise Exception(f\"{missing_config_key} environment variable is missing.\"\n f\" Please follow the documentation {documentation_link} to add the missing environment variable.\")\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n install.install_mathesar(\n database_name=settings.DATABASES[database_key][\"NAME\"],\n username=settings.DATABASES[database_key][\"USER\"],\n password=settings.DATABASES[database_key][\"PASSWORD\"],\n hostname=settings.DATABASES[database_key][\"HOST\"],\n port=settings.DATABASES[database_key][\"PORT\"],\n skip_confirm=skip_confirm\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}], "after_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nimport getopt\nimport sys\n\nimport django\nfrom django.core import management\n\nfrom django.conf import settings\nfrom db import install\n\n\ndef main():\n # skip_confirm is temporarily enabled by default as we don't have any use for interactive prompts with docker only deployments\n skip_confirm = True\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n django.setup()\n management.call_command('migrate')\n print(\"------------Setting up User Databases------------\")\n user_databases = [key for key in settings.DATABASES if key != \"default\"]\n for database_key in user_databases:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n install.install_mathesar(\n database_name=settings.DATABASES[database_key][\"NAME\"],\n username=settings.DATABASES[database_key][\"USER\"],\n password=settings.DATABASES[database_key][\"PASSWORD\"],\n hostname=settings.DATABASES[database_key][\"HOST\"],\n port=settings.DATABASES[database_key][\"PORT\"],\n skip_confirm=skip_confirm\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}]}
| 1,208 | 680 |
gh_patches_debug_11189
|
rasdani/github-patches
|
git_diff
|
nf-core__tools-1520
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
modules install to also print/write `include` statements
### Description of feature
print `include` statements to terminal for easy copy to destination files
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/modules/install.py`
Content:
```
1 import os
2 import questionary
3 import logging
4
5 import nf_core.utils
6 import nf_core.modules.module_utils
7
8 from .modules_command import ModuleCommand
9 from .module_utils import get_module_git_log, module_exist_in_repo
10
11 log = logging.getLogger(__name__)
12
13
14 class ModuleInstall(ModuleCommand):
15 def __init__(self, pipeline_dir, force=False, prompt=False, sha=None, update_all=False):
16 super().__init__(pipeline_dir)
17 self.force = force
18 self.prompt = prompt
19 self.sha = sha
20 self.update_all = update_all
21
22 def install(self, module):
23 if self.repo_type == "modules":
24 log.error("You cannot install a module in a clone of nf-core/modules")
25 return False
26 # Check whether pipelines is valid
27 if not self.has_valid_directory():
28 return False
29
30 # Verify that 'modules.json' is consistent with the installed modules
31 self.modules_json_up_to_date()
32
33 # Get the available modules
34 try:
35 self.modules_repo.get_modules_file_tree()
36 except LookupError as e:
37 log.error(e)
38 return False
39
40 if self.prompt and self.sha is not None:
41 log.error("Cannot use '--sha' and '--prompt' at the same time!")
42 return False
43
44 # Verify that the provided SHA exists in the repo
45 if self.sha:
46 try:
47 nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)
48 except UserWarning:
49 log.error(f"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'")
50 return False
51 except LookupError as e:
52 log.error(e)
53 return False
54
55 if module is None:
56 module = questionary.autocomplete(
57 "Tool name:",
58 choices=self.modules_repo.modules_avail_module_names,
59 style=nf_core.utils.nfcore_question_style,
60 ).unsafe_ask()
61
62 # Check that the supplied name is an available module
63 if module and module not in self.modules_repo.modules_avail_module_names:
64 log.error("Module '{}' not found in list of available modules.".format(module))
65 log.info("Use the command 'nf-core modules list' to view available software")
66 return False
67
68 # Load 'modules.json'
69 modules_json = self.load_modules_json()
70 if not modules_json:
71 return False
72
73 if not module_exist_in_repo(module, self.modules_repo):
74 warn_msg = f"Module '{module}' not found in remote '{self.modules_repo.name}' ({self.modules_repo.branch})"
75 log.warning(warn_msg)
76 return False
77
78 if self.modules_repo.name in modules_json["repos"]:
79 current_entry = modules_json["repos"][self.modules_repo.name].get(module)
80 else:
81 current_entry = None
82
83 # Set the install folder based on the repository name
84 install_folder = [self.dir, "modules", self.modules_repo.owner, self.modules_repo.repo]
85
86 # Compute the module directory
87 module_dir = os.path.join(*install_folder, module)
88
89 # Check that the module is not already installed
90 if (current_entry is not None and os.path.exists(module_dir)) and not self.force:
91
92 log.error(f"Module is already installed.")
93 repo_flag = "" if self.modules_repo.name == "nf-core/modules" else f"-g {self.modules_repo.name} "
94 branch_flag = "" if self.modules_repo.branch == "master" else f"-b {self.modules_repo.branch} "
95
96 log.info(
97 f"To update '{module}' run 'nf-core modules {repo_flag}{branch_flag}update {module}'. To force reinstallation use '--force'"
98 )
99 return False
100
101 if self.sha:
102 version = self.sha
103 elif self.prompt:
104 try:
105 version = nf_core.modules.module_utils.prompt_module_version_sha(
106 module,
107 installed_sha=current_entry["git_sha"] if not current_entry is None else None,
108 modules_repo=self.modules_repo,
109 )
110 except SystemError as e:
111 log.error(e)
112 return False
113 else:
114 # Fetch the latest commit for the module
115 try:
116 git_log = get_module_git_log(module, modules_repo=self.modules_repo, per_page=1, page_nbr=1)
117 except UserWarning:
118 log.error(f"Was unable to fetch version of module '{module}'")
119 return False
120 version = git_log[0]["git_sha"]
121
122 if self.force:
123 log.info(f"Removing installed version of '{self.modules_repo.name}/{module}'")
124 self.clear_module_dir(module, module_dir)
125
126 log.info(f"{'Rei' if self.force else 'I'}nstalling '{module}'")
127 log.debug(f"Installing module '{module}' at modules hash {version} from {self.modules_repo.name}")
128
129 # Download module files
130 if not self.download_module_file(module, version, self.modules_repo, install_folder):
131 return False
132
133 # Update module.json with newly installed module
134 self.update_modules_json(modules_json, self.modules_repo.name, module, version)
135 return True
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nf_core/modules/install.py b/nf_core/modules/install.py
--- a/nf_core/modules/install.py
+++ b/nf_core/modules/install.py
@@ -130,6 +130,10 @@
if not self.download_module_file(module, version, self.modules_repo, install_folder):
return False
+ # Print include statement
+ module_name = "_".join(module.upper().split("/"))
+ log.info(f"Include statement: include {{ {module_name} }} from '.{os.path.join(*install_folder, module)}/main’")
+
# Update module.json with newly installed module
self.update_modules_json(modules_json, self.modules_repo.name, module, version)
return True
|
{"golden_diff": "diff --git a/nf_core/modules/install.py b/nf_core/modules/install.py\n--- a/nf_core/modules/install.py\n+++ b/nf_core/modules/install.py\n@@ -130,6 +130,10 @@\n if not self.download_module_file(module, version, self.modules_repo, install_folder):\n return False\n \n+ # Print include statement\n+ module_name = \"_\".join(module.upper().split(\"/\"))\n+ log.info(f\"Include statement: include {{ {module_name} }} from '.{os.path.join(*install_folder, module)}/main\u2019\")\n+\n # Update module.json with newly installed module\n self.update_modules_json(modules_json, self.modules_repo.name, module, version)\n return True\n", "issue": "modules install to also print/write `include` statements\n### Description of feature\n\nprint `include` statements to terminal for easy copy to destination files\n", "before_files": [{"content": "import os\nimport questionary\nimport logging\n\nimport nf_core.utils\nimport nf_core.modules.module_utils\n\nfrom .modules_command import ModuleCommand\nfrom .module_utils import get_module_git_log, module_exist_in_repo\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleInstall(ModuleCommand):\n def __init__(self, pipeline_dir, force=False, prompt=False, sha=None, update_all=False):\n super().__init__(pipeline_dir)\n self.force = force\n self.prompt = prompt\n self.sha = sha\n self.update_all = update_all\n\n def install(self, module):\n if self.repo_type == \"modules\":\n log.error(\"You cannot install a module in a clone of nf-core/modules\")\n return False\n # Check whether pipelines is valid\n if not self.has_valid_directory():\n return False\n\n # Verify that 'modules.json' is consistent with the installed modules\n self.modules_json_up_to_date()\n\n # Get the available modules\n try:\n self.modules_repo.get_modules_file_tree()\n except LookupError as e:\n log.error(e)\n return False\n\n if self.prompt and self.sha is not None:\n log.error(\"Cannot use '--sha' and '--prompt' at the same time!\")\n return False\n\n # Verify that the provided SHA exists in the repo\n if self.sha:\n try:\n nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)\n except UserWarning:\n log.error(f\"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'\")\n return False\n except LookupError as e:\n log.error(e)\n return False\n\n if module is None:\n module = questionary.autocomplete(\n \"Tool name:\",\n choices=self.modules_repo.modules_avail_module_names,\n style=nf_core.utils.nfcore_question_style,\n ).unsafe_ask()\n\n # Check that the supplied name is an available module\n if module and module not in self.modules_repo.modules_avail_module_names:\n log.error(\"Module '{}' not found in list of available modules.\".format(module))\n log.info(\"Use the command 'nf-core modules list' to view available software\")\n return False\n\n # Load 'modules.json'\n modules_json = self.load_modules_json()\n if not modules_json:\n return False\n\n if not module_exist_in_repo(module, self.modules_repo):\n warn_msg = f\"Module '{module}' not found in remote '{self.modules_repo.name}' ({self.modules_repo.branch})\"\n log.warning(warn_msg)\n return False\n\n if self.modules_repo.name in modules_json[\"repos\"]:\n current_entry = modules_json[\"repos\"][self.modules_repo.name].get(module)\n else:\n current_entry = None\n\n # Set the install folder based on the repository name\n install_folder = [self.dir, \"modules\", self.modules_repo.owner, self.modules_repo.repo]\n\n # Compute the module directory\n module_dir = os.path.join(*install_folder, module)\n\n # Check that the module is not already installed\n if (current_entry is not None and os.path.exists(module_dir)) and not self.force:\n\n log.error(f\"Module is already installed.\")\n repo_flag = \"\" if self.modules_repo.name == \"nf-core/modules\" else f\"-g {self.modules_repo.name} \"\n branch_flag = \"\" if self.modules_repo.branch == \"master\" else f\"-b {self.modules_repo.branch} \"\n\n log.info(\n f\"To update '{module}' run 'nf-core modules {repo_flag}{branch_flag}update {module}'. To force reinstallation use '--force'\"\n )\n return False\n\n if self.sha:\n version = self.sha\n elif self.prompt:\n try:\n version = nf_core.modules.module_utils.prompt_module_version_sha(\n module,\n installed_sha=current_entry[\"git_sha\"] if not current_entry is None else None,\n modules_repo=self.modules_repo,\n )\n except SystemError as e:\n log.error(e)\n return False\n else:\n # Fetch the latest commit for the module\n try:\n git_log = get_module_git_log(module, modules_repo=self.modules_repo, per_page=1, page_nbr=1)\n except UserWarning:\n log.error(f\"Was unable to fetch version of module '{module}'\")\n return False\n version = git_log[0][\"git_sha\"]\n\n if self.force:\n log.info(f\"Removing installed version of '{self.modules_repo.name}/{module}'\")\n self.clear_module_dir(module, module_dir)\n\n log.info(f\"{'Rei' if self.force else 'I'}nstalling '{module}'\")\n log.debug(f\"Installing module '{module}' at modules hash {version} from {self.modules_repo.name}\")\n\n # Download module files\n if not self.download_module_file(module, version, self.modules_repo, install_folder):\n return False\n\n # Update module.json with newly installed module\n self.update_modules_json(modules_json, self.modules_repo.name, module, version)\n return True\n", "path": "nf_core/modules/install.py"}], "after_files": [{"content": "import os\nimport questionary\nimport logging\n\nimport nf_core.utils\nimport nf_core.modules.module_utils\n\nfrom .modules_command import ModuleCommand\nfrom .module_utils import get_module_git_log, module_exist_in_repo\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleInstall(ModuleCommand):\n def __init__(self, pipeline_dir, force=False, prompt=False, sha=None, update_all=False):\n super().__init__(pipeline_dir)\n self.force = force\n self.prompt = prompt\n self.sha = sha\n self.update_all = update_all\n\n def install(self, module):\n if self.repo_type == \"modules\":\n log.error(\"You cannot install a module in a clone of nf-core/modules\")\n return False\n # Check whether pipelines is valid\n if not self.has_valid_directory():\n return False\n\n # Verify that 'modules.json' is consistent with the installed modules\n self.modules_json_up_to_date()\n\n # Get the available modules\n try:\n self.modules_repo.get_modules_file_tree()\n except LookupError as e:\n log.error(e)\n return False\n\n if self.prompt and self.sha is not None:\n log.error(\"Cannot use '--sha' and '--prompt' at the same time!\")\n return False\n\n # Verify that the provided SHA exists in the repo\n if self.sha:\n try:\n nf_core.modules.module_utils.sha_exists(self.sha, self.modules_repo)\n except UserWarning:\n log.error(f\"Commit SHA '{self.sha}' doesn't exist in '{self.modules_repo.name}'\")\n return False\n except LookupError as e:\n log.error(e)\n return False\n\n if module is None:\n module = questionary.autocomplete(\n \"Tool name:\",\n choices=self.modules_repo.modules_avail_module_names,\n style=nf_core.utils.nfcore_question_style,\n ).unsafe_ask()\n\n # Check that the supplied name is an available module\n if module and module not in self.modules_repo.modules_avail_module_names:\n log.error(\"Module '{}' not found in list of available modules.\".format(module))\n log.info(\"Use the command 'nf-core modules list' to view available software\")\n return False\n\n # Load 'modules.json'\n modules_json = self.load_modules_json()\n if not modules_json:\n return False\n\n if not module_exist_in_repo(module, self.modules_repo):\n warn_msg = f\"Module '{module}' not found in remote '{self.modules_repo.name}' ({self.modules_repo.branch})\"\n log.warning(warn_msg)\n return False\n\n if self.modules_repo.name in modules_json[\"repos\"]:\n current_entry = modules_json[\"repos\"][self.modules_repo.name].get(module)\n else:\n current_entry = None\n\n # Set the install folder based on the repository name\n install_folder = [self.dir, \"modules\", self.modules_repo.owner, self.modules_repo.repo]\n\n # Compute the module directory\n module_dir = os.path.join(*install_folder, module)\n\n # Check that the module is not already installed\n if (current_entry is not None and os.path.exists(module_dir)) and not self.force:\n\n log.error(f\"Module is already installed.\")\n repo_flag = \"\" if self.modules_repo.name == \"nf-core/modules\" else f\"-g {self.modules_repo.name} \"\n branch_flag = \"\" if self.modules_repo.branch == \"master\" else f\"-b {self.modules_repo.branch} \"\n\n log.info(\n f\"To update '{module}' run 'nf-core modules {repo_flag}{branch_flag}update {module}'. To force reinstallation use '--force'\"\n )\n return False\n\n if self.sha:\n version = self.sha\n elif self.prompt:\n try:\n version = nf_core.modules.module_utils.prompt_module_version_sha(\n module,\n installed_sha=current_entry[\"git_sha\"] if not current_entry is None else None,\n modules_repo=self.modules_repo,\n )\n except SystemError as e:\n log.error(e)\n return False\n else:\n # Fetch the latest commit for the module\n try:\n git_log = get_module_git_log(module, modules_repo=self.modules_repo, per_page=1, page_nbr=1)\n except UserWarning:\n log.error(f\"Was unable to fetch version of module '{module}'\")\n return False\n version = git_log[0][\"git_sha\"]\n\n if self.force:\n log.info(f\"Removing installed version of '{self.modules_repo.name}/{module}'\")\n self.clear_module_dir(module, module_dir)\n\n log.info(f\"{'Rei' if self.force else 'I'}nstalling '{module}'\")\n log.debug(f\"Installing module '{module}' at modules hash {version} from {self.modules_repo.name}\")\n\n # Download module files\n if not self.download_module_file(module, version, self.modules_repo, install_folder):\n return False\n\n # Print include statement\n module_name = \"_\".join(module.upper().split(\"/\"))\n log.info(f\"Include statement: include {{ {module_name} }} from '.{os.path.join(*install_folder, module)}/main\u2019\")\n\n # Update module.json with newly installed module\n self.update_modules_json(modules_json, self.modules_repo.name, module, version)\n return True\n", "path": "nf_core/modules/install.py"}]}
| 1,666 | 159 |
gh_patches_debug_1274
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-1510
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Divide by Zero error
Its in `netlib/strutils.py`
This line # around 126 :
``` python
for i in six.iterbytes(s[:100])
) / len(s[:100]) > 0.3
```
if s is empty, it gives this error in the mitmproxy, (doesn't crash though due to recent improvements in mitmproxy i guess..)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netlib/strutils.py`
Content:
```
1 from __future__ import absolute_import, print_function, division
2 import re
3 import codecs
4
5 import six
6
7
8 def always_bytes(unicode_or_bytes, *encode_args):
9 if isinstance(unicode_or_bytes, six.text_type):
10 return unicode_or_bytes.encode(*encode_args)
11 return unicode_or_bytes
12
13
14 def native(s, *encoding_opts):
15 """
16 Convert :py:class:`bytes` or :py:class:`unicode` to the native
17 :py:class:`str` type, using latin1 encoding if conversion is necessary.
18
19 https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
20 """
21 if not isinstance(s, (six.binary_type, six.text_type)):
22 raise TypeError("%r is neither bytes nor unicode" % s)
23 if six.PY2:
24 if isinstance(s, six.text_type):
25 return s.encode(*encoding_opts)
26 else:
27 if isinstance(s, six.binary_type):
28 return s.decode(*encoding_opts)
29 return s
30
31
32 # Translate control characters to "safe" characters. This implementation initially
33 # replaced them with the matching control pictures (http://unicode.org/charts/PDF/U2400.pdf),
34 # but that turned out to render badly with monospace fonts. We are back to "." therefore.
35 _control_char_trans = {
36 x: ord(".") # x + 0x2400 for unicode control group pictures
37 for x in range(32)
38 }
39 _control_char_trans[127] = ord(".") # 0x2421
40 _control_char_trans_newline = _control_char_trans.copy()
41 for x in ("\r", "\n", "\t"):
42 del _control_char_trans_newline[ord(x)]
43
44
45 if six.PY2:
46 pass
47 else:
48 _control_char_trans = str.maketrans(_control_char_trans)
49 _control_char_trans_newline = str.maketrans(_control_char_trans_newline)
50
51
52 def escape_control_characters(text, keep_spacing=True):
53 """
54 Replace all unicode C1 control characters from the given text with a single "."
55
56 Args:
57 keep_spacing: If True, tabs and newlines will not be replaced.
58 """
59 # type: (six.string_types) -> six.text_type
60 if not isinstance(text, six.string_types):
61 raise ValueError("text type must be unicode but is {}".format(type(text).__name__))
62
63 trans = _control_char_trans_newline if keep_spacing else _control_char_trans
64 if six.PY2:
65 return u"".join(
66 six.unichr(trans.get(ord(ch), ord(ch)))
67 for ch in text
68 )
69 return text.translate(trans)
70
71
72 def bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):
73 """
74 Take bytes and return a safe string that can be displayed to the user.
75
76 Single quotes are always escaped, double quotes are never escaped:
77 "'" + bytes_to_escaped_str(...) + "'"
78 gives a valid Python string.
79
80 Args:
81 keep_spacing: If True, tabs and newlines will not be escaped.
82 """
83
84 if not isinstance(data, bytes):
85 raise ValueError("data must be bytes, but is {}".format(data.__class__.__name__))
86 # We always insert a double-quote here so that we get a single-quoted string back
87 # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their
88 ret = repr(b'"' + data).lstrip("b")[2:-1]
89 if not escape_single_quotes:
90 ret = re.sub(r"(?<!\\)(\\\\)*\\'", lambda m: (m.group(1) or "") + "'", ret)
91 if keep_spacing:
92 ret = re.sub(
93 r"(?<!\\)(\\\\)*\\([nrt])",
94 lambda m: (m.group(1) or "") + dict(n="\n", r="\r", t="\t")[m.group(2)],
95 ret
96 )
97 return ret
98
99
100 def escaped_str_to_bytes(data):
101 """
102 Take an escaped string and return the unescaped bytes equivalent.
103
104 Raises:
105 ValueError, if the escape sequence is invalid.
106 """
107 if not isinstance(data, six.string_types):
108 if six.PY2:
109 raise ValueError("data must be str or unicode, but is {}".format(data.__class__.__name__))
110 raise ValueError("data must be str, but is {}".format(data.__class__.__name__))
111
112 if six.PY2:
113 if isinstance(data, unicode):
114 data = data.encode("utf8")
115 return data.decode("string-escape")
116
117 # This one is difficult - we use an undocumented Python API here
118 # as per http://stackoverflow.com/a/23151714/934719
119 return codecs.escape_decode(data)[0]
120
121
122 def is_mostly_bin(s):
123 # type: (bytes) -> bool
124 return sum(
125 i < 9 or 13 < i < 32 or 126 < i
126 for i in six.iterbytes(s[:100])
127 ) / len(s[:100]) > 0.3
128
129
130 def is_xml(s):
131 # type: (bytes) -> bool
132 return s.strip().startswith(b"<")
133
134
135 def clean_hanging_newline(t):
136 """
137 Many editors will silently add a newline to the final line of a
138 document (I'm looking at you, Vim). This function fixes this common
139 problem at the risk of removing a hanging newline in the rare cases
140 where the user actually intends it.
141 """
142 if t and t[-1] == "\n":
143 return t[:-1]
144 return t
145
146
147 def hexdump(s):
148 """
149 Returns:
150 A generator of (offset, hex, str) tuples
151 """
152 for i in range(0, len(s), 16):
153 offset = "{:0=10x}".format(i)
154 part = s[i:i + 16]
155 x = " ".join("{:0=2x}".format(i) for i in six.iterbytes(part))
156 x = x.ljust(47) # 16*2 + 15
157 part_repr = native(escape_control_characters(
158 part.decode("ascii", "replace").replace(u"\ufffd", u"."),
159 False
160 ))
161 yield (offset, x, part_repr)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netlib/strutils.py b/netlib/strutils.py
--- a/netlib/strutils.py
+++ b/netlib/strutils.py
@@ -121,6 +121,9 @@
def is_mostly_bin(s):
# type: (bytes) -> bool
+ if not s or len(s) == 0:
+ return False
+
return sum(
i < 9 or 13 < i < 32 or 126 < i
for i in six.iterbytes(s[:100])
|
{"golden_diff": "diff --git a/netlib/strutils.py b/netlib/strutils.py\n--- a/netlib/strutils.py\n+++ b/netlib/strutils.py\n@@ -121,6 +121,9 @@\n \n def is_mostly_bin(s):\n # type: (bytes) -> bool\n+ if not s or len(s) == 0:\n+ return False\n+\n return sum(\n i < 9 or 13 < i < 32 or 126 < i\n for i in six.iterbytes(s[:100])\n", "issue": "Divide by Zero error\nIts in `netlib/strutils.py`\n\nThis line # around 126 :\n\n``` python\n for i in six.iterbytes(s[:100])\n) / len(s[:100]) > 0.3\n```\n\nif s is empty, it gives this error in the mitmproxy, (doesn't crash though due to recent improvements in mitmproxy i guess..)\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport re\nimport codecs\n\nimport six\n\n\ndef always_bytes(unicode_or_bytes, *encode_args):\n if isinstance(unicode_or_bytes, six.text_type):\n return unicode_or_bytes.encode(*encode_args)\n return unicode_or_bytes\n\n\ndef native(s, *encoding_opts):\n \"\"\"\n Convert :py:class:`bytes` or :py:class:`unicode` to the native\n :py:class:`str` type, using latin1 encoding if conversion is necessary.\n\n https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types\n \"\"\"\n if not isinstance(s, (six.binary_type, six.text_type)):\n raise TypeError(\"%r is neither bytes nor unicode\" % s)\n if six.PY2:\n if isinstance(s, six.text_type):\n return s.encode(*encoding_opts)\n else:\n if isinstance(s, six.binary_type):\n return s.decode(*encoding_opts)\n return s\n\n\n# Translate control characters to \"safe\" characters. This implementation initially\n# replaced them with the matching control pictures (http://unicode.org/charts/PDF/U2400.pdf),\n# but that turned out to render badly with monospace fonts. We are back to \".\" therefore.\n_control_char_trans = {\n x: ord(\".\") # x + 0x2400 for unicode control group pictures\n for x in range(32)\n}\n_control_char_trans[127] = ord(\".\") # 0x2421\n_control_char_trans_newline = _control_char_trans.copy()\nfor x in (\"\\r\", \"\\n\", \"\\t\"):\n del _control_char_trans_newline[ord(x)]\n\n\nif six.PY2:\n pass\nelse:\n _control_char_trans = str.maketrans(_control_char_trans)\n _control_char_trans_newline = str.maketrans(_control_char_trans_newline)\n\n\ndef escape_control_characters(text, keep_spacing=True):\n \"\"\"\n Replace all unicode C1 control characters from the given text with a single \".\"\n\n Args:\n keep_spacing: If True, tabs and newlines will not be replaced.\n \"\"\"\n # type: (six.string_types) -> six.text_type\n if not isinstance(text, six.string_types):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n if six.PY2:\n return u\"\".join(\n six.unichr(trans.get(ord(ch), ord(ch)))\n for ch in text\n )\n return text.translate(trans)\n\n\ndef bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):\n \"\"\"\n Take bytes and return a safe string that can be displayed to the user.\n\n Single quotes are always escaped, double quotes are never escaped:\n \"'\" + bytes_to_escaped_str(...) + \"'\"\n gives a valid Python string.\n\n Args:\n keep_spacing: If True, tabs and newlines will not be escaped.\n \"\"\"\n\n if not isinstance(data, bytes):\n raise ValueError(\"data must be bytes, but is {}\".format(data.__class__.__name__))\n # We always insert a double-quote here so that we get a single-quoted string back\n # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their\n ret = repr(b'\"' + data).lstrip(\"b\")[2:-1]\n if not escape_single_quotes:\n ret = re.sub(r\"(?<!\\\\)(\\\\\\\\)*\\\\'\", lambda m: (m.group(1) or \"\") + \"'\", ret)\n if keep_spacing:\n ret = re.sub(\n r\"(?<!\\\\)(\\\\\\\\)*\\\\([nrt])\",\n lambda m: (m.group(1) or \"\") + dict(n=\"\\n\", r=\"\\r\", t=\"\\t\")[m.group(2)],\n ret\n )\n return ret\n\n\ndef escaped_str_to_bytes(data):\n \"\"\"\n Take an escaped string and return the unescaped bytes equivalent.\n\n Raises:\n ValueError, if the escape sequence is invalid.\n \"\"\"\n if not isinstance(data, six.string_types):\n if six.PY2:\n raise ValueError(\"data must be str or unicode, but is {}\".format(data.__class__.__name__))\n raise ValueError(\"data must be str, but is {}\".format(data.__class__.__name__))\n\n if six.PY2:\n if isinstance(data, unicode):\n data = data.encode(\"utf8\")\n return data.decode(\"string-escape\")\n\n # This one is difficult - we use an undocumented Python API here\n # as per http://stackoverflow.com/a/23151714/934719\n return codecs.escape_decode(data)[0]\n\n\ndef is_mostly_bin(s):\n # type: (bytes) -> bool\n return sum(\n i < 9 or 13 < i < 32 or 126 < i\n for i in six.iterbytes(s[:100])\n ) / len(s[:100]) > 0.3\n\n\ndef is_xml(s):\n # type: (bytes) -> bool\n return s.strip().startswith(b\"<\")\n\n\ndef clean_hanging_newline(t):\n \"\"\"\n Many editors will silently add a newline to the final line of a\n document (I'm looking at you, Vim). This function fixes this common\n problem at the risk of removing a hanging newline in the rare cases\n where the user actually intends it.\n \"\"\"\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t\n\n\ndef hexdump(s):\n \"\"\"\n Returns:\n A generator of (offset, hex, str) tuples\n \"\"\"\n for i in range(0, len(s), 16):\n offset = \"{:0=10x}\".format(i)\n part = s[i:i + 16]\n x = \" \".join(\"{:0=2x}\".format(i) for i in six.iterbytes(part))\n x = x.ljust(47) # 16*2 + 15\n part_repr = native(escape_control_characters(\n part.decode(\"ascii\", \"replace\").replace(u\"\\ufffd\", u\".\"),\n False\n ))\n yield (offset, x, part_repr)\n", "path": "netlib/strutils.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function, division\nimport re\nimport codecs\n\nimport six\n\n\ndef always_bytes(unicode_or_bytes, *encode_args):\n if isinstance(unicode_or_bytes, six.text_type):\n return unicode_or_bytes.encode(*encode_args)\n return unicode_or_bytes\n\n\ndef native(s, *encoding_opts):\n \"\"\"\n Convert :py:class:`bytes` or :py:class:`unicode` to the native\n :py:class:`str` type, using latin1 encoding if conversion is necessary.\n\n https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types\n \"\"\"\n if not isinstance(s, (six.binary_type, six.text_type)):\n raise TypeError(\"%r is neither bytes nor unicode\" % s)\n if six.PY2:\n if isinstance(s, six.text_type):\n return s.encode(*encoding_opts)\n else:\n if isinstance(s, six.binary_type):\n return s.decode(*encoding_opts)\n return s\n\n\n# Translate control characters to \"safe\" characters. This implementation initially\n# replaced them with the matching control pictures (http://unicode.org/charts/PDF/U2400.pdf),\n# but that turned out to render badly with monospace fonts. We are back to \".\" therefore.\n_control_char_trans = {\n x: ord(\".\") # x + 0x2400 for unicode control group pictures\n for x in range(32)\n}\n_control_char_trans[127] = ord(\".\") # 0x2421\n_control_char_trans_newline = _control_char_trans.copy()\nfor x in (\"\\r\", \"\\n\", \"\\t\"):\n del _control_char_trans_newline[ord(x)]\n\n\nif six.PY2:\n pass\nelse:\n _control_char_trans = str.maketrans(_control_char_trans)\n _control_char_trans_newline = str.maketrans(_control_char_trans_newline)\n\n\ndef escape_control_characters(text, keep_spacing=True):\n \"\"\"\n Replace all unicode C1 control characters from the given text with a single \".\"\n\n Args:\n keep_spacing: If True, tabs and newlines will not be replaced.\n \"\"\"\n # type: (six.string_types) -> six.text_type\n if not isinstance(text, six.string_types):\n raise ValueError(\"text type must be unicode but is {}\".format(type(text).__name__))\n\n trans = _control_char_trans_newline if keep_spacing else _control_char_trans\n if six.PY2:\n return u\"\".join(\n six.unichr(trans.get(ord(ch), ord(ch)))\n for ch in text\n )\n return text.translate(trans)\n\n\ndef bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):\n \"\"\"\n Take bytes and return a safe string that can be displayed to the user.\n\n Single quotes are always escaped, double quotes are never escaped:\n \"'\" + bytes_to_escaped_str(...) + \"'\"\n gives a valid Python string.\n\n Args:\n keep_spacing: If True, tabs and newlines will not be escaped.\n \"\"\"\n\n if not isinstance(data, bytes):\n raise ValueError(\"data must be bytes, but is {}\".format(data.__class__.__name__))\n # We always insert a double-quote here so that we get a single-quoted string back\n # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their\n ret = repr(b'\"' + data).lstrip(\"b\")[2:-1]\n if not escape_single_quotes:\n ret = re.sub(r\"(?<!\\\\)(\\\\\\\\)*\\\\'\", lambda m: (m.group(1) or \"\") + \"'\", ret)\n if keep_spacing:\n ret = re.sub(\n r\"(?<!\\\\)(\\\\\\\\)*\\\\([nrt])\",\n lambda m: (m.group(1) or \"\") + dict(n=\"\\n\", r=\"\\r\", t=\"\\t\")[m.group(2)],\n ret\n )\n return ret\n\n\ndef escaped_str_to_bytes(data):\n \"\"\"\n Take an escaped string and return the unescaped bytes equivalent.\n\n Raises:\n ValueError, if the escape sequence is invalid.\n \"\"\"\n if not isinstance(data, six.string_types):\n if six.PY2:\n raise ValueError(\"data must be str or unicode, but is {}\".format(data.__class__.__name__))\n raise ValueError(\"data must be str, but is {}\".format(data.__class__.__name__))\n\n if six.PY2:\n if isinstance(data, unicode):\n data = data.encode(\"utf8\")\n return data.decode(\"string-escape\")\n\n # This one is difficult - we use an undocumented Python API here\n # as per http://stackoverflow.com/a/23151714/934719\n return codecs.escape_decode(data)[0]\n\n\ndef is_mostly_bin(s):\n # type: (bytes) -> bool\n if not s or len(s) == 0:\n return False\n\n return sum(\n i < 9 or 13 < i < 32 or 126 < i\n for i in six.iterbytes(s[:100])\n ) / len(s[:100]) > 0.3\n\n\ndef is_xml(s):\n # type: (bytes) -> bool\n return s.strip().startswith(b\"<\")\n\n\ndef clean_hanging_newline(t):\n \"\"\"\n Many editors will silently add a newline to the final line of a\n document (I'm looking at you, Vim). This function fixes this common\n problem at the risk of removing a hanging newline in the rare cases\n where the user actually intends it.\n \"\"\"\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t\n\n\ndef hexdump(s):\n \"\"\"\n Returns:\n A generator of (offset, hex, str) tuples\n \"\"\"\n for i in range(0, len(s), 16):\n offset = \"{:0=10x}\".format(i)\n part = s[i:i + 16]\n x = \" \".join(\"{:0=2x}\".format(i) for i in six.iterbytes(part))\n x = x.ljust(47) # 16*2 + 15\n part_repr = native(escape_control_characters(\n part.decode(\"ascii\", \"replace\").replace(u\"\\ufffd\", u\".\"),\n False\n ))\n yield (offset, x, part_repr)\n", "path": "netlib/strutils.py"}]}
| 2,136 | 127 |
gh_patches_debug_64335
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1442
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[wiktionary] Relax case sensitivity a bit?
Looking up anything other than the exact capitalization in Wiktionary's database appears to fail. It might be worthwhile, for user experience, to always try the all-lowercase version of a query if no results are returned for it as entered.
That said, the MediaWiki API would allow querying automatically for several variants (using `action=query&titles=variant1|Variant2|etc.`) and then fetching the first one that exists for display, if making multiple requests is OK in the plugin.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/wiktionary.py`
Content:
```
1 # coding=utf-8
2 """
3 wiktionary.py - Sopel Wiktionary Module
4 Copyright 2009, Sean B. Palmer, inamidst.com
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10
11 import re
12 import requests
13 from sopel import web
14 from sopel.module import commands, example
15
16 uri = 'https://en.wiktionary.org/w/index.php?title=%s&printable=yes'
17 r_sup = re.compile(r'<sup[^>]+>.+</sup>') # Superscripts that are references only, not ordinal indicators, etc...
18 r_tag = re.compile(r'<[^>]+>')
19 r_ul = re.compile(r'(?ims)<ul>.*?</ul>')
20
21
22 def text(html):
23 text = r_sup.sub('', html) # Remove superscripts that are references from definition
24 text = r_tag.sub('', text).strip()
25 text = text.replace('\n', ' ')
26 text = text.replace('\r', '')
27 text = text.replace('(intransitive', '(intr.')
28 text = text.replace('(transitive', '(trans.')
29 text = web.decode(text)
30 return text
31
32
33 def wikt(word):
34 bytes = requests.get(uri % web.quote(word)).text
35 bytes = r_ul.sub('', bytes)
36
37 mode = None
38 etymology = None
39 definitions = {}
40 for line in bytes.splitlines():
41 if 'id="Etymology"' in line:
42 mode = 'etymology'
43 elif 'id="Noun"' in line:
44 mode = 'noun'
45 elif 'id="Verb"' in line:
46 mode = 'verb'
47 elif 'id="Adjective"' in line:
48 mode = 'adjective'
49 elif 'id="Adverb"' in line:
50 mode = 'adverb'
51 elif 'id="Interjection"' in line:
52 mode = 'interjection'
53 elif 'id="Particle"' in line:
54 mode = 'particle'
55 elif 'id="Preposition"' in line:
56 mode = 'preposition'
57 elif 'id="Prefix"' in line:
58 mode = 'prefix'
59 elif 'id="Suffix"' in line:
60 mode = 'suffix'
61 # 'id="' can occur in definition lines <li> when <sup> tag is used for references;
62 # make sure those are not excluded (see e.g., abecedarian).
63 elif ('id="' in line) and ('<li>' not in line):
64 mode = None
65
66 elif (mode == 'etmyology') and ('<p>' in line):
67 etymology = text(line)
68 elif (mode is not None) and ('<li>' in line):
69 definitions.setdefault(mode, []).append(text(line))
70
71 if '<hr' in line:
72 break
73 return etymology, definitions
74
75
76 parts = ('preposition', 'particle', 'noun', 'verb',
77 'adjective', 'adverb', 'interjection',
78 'prefix', 'suffix')
79
80
81 def format(result, definitions, number=2):
82 for part in parts:
83 if part in definitions:
84 defs = definitions[part][:number]
85 result += u' — {}: '.format(part)
86 n = ['%s. %s' % (i + 1, e.strip(' .')) for i, e in enumerate(defs)]
87 result += ', '.join(n)
88 return result.strip(' .,')
89
90
91 @commands('wt', 'define', 'dict')
92 @example('.wt bailiwick')
93 def wiktionary(bot, trigger):
94 """Look up a word on Wiktionary."""
95 word = trigger.group(2)
96 if word is None:
97 bot.reply('You must tell me what to look up!')
98 return
99
100 _etymology, definitions = wikt(word)
101 if not definitions:
102 bot.say("Couldn't get any definitions for %s." % word)
103 return
104
105 result = format(word, definitions)
106 if len(result) < 150:
107 result = format(word, definitions, 3)
108 if len(result) < 150:
109 result = format(word, definitions, 5)
110
111 if len(result) > 300:
112 result = result[:295] + '[...]'
113 bot.say(result)
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/modules/wiktionary.py b/sopel/modules/wiktionary.py
--- a/sopel/modules/wiktionary.py
+++ b/sopel/modules/wiktionary.py
@@ -99,8 +99,11 @@
_etymology, definitions = wikt(word)
if not definitions:
- bot.say("Couldn't get any definitions for %s." % word)
- return
+ # Cast word to lower to check in case of mismatched user input
+ _etymology, definitions = wikt(word.lower())
+ if not definitions:
+ bot.say("Couldn't get any definitions for %s." % word)
+ return
result = format(word, definitions)
if len(result) < 150:
|
{"golden_diff": "diff --git a/sopel/modules/wiktionary.py b/sopel/modules/wiktionary.py\n--- a/sopel/modules/wiktionary.py\n+++ b/sopel/modules/wiktionary.py\n@@ -99,8 +99,11 @@\n \n _etymology, definitions = wikt(word)\n if not definitions:\n- bot.say(\"Couldn't get any definitions for %s.\" % word)\n- return\n+ # Cast word to lower to check in case of mismatched user input\n+ _etymology, definitions = wikt(word.lower())\n+ if not definitions:\n+ bot.say(\"Couldn't get any definitions for %s.\" % word)\n+ return\n \n result = format(word, definitions)\n if len(result) < 150:\n", "issue": "[wiktionary] Relax case sensitivity a bit?\nLooking up anything other than the exact capitalization in Wiktionary's database appears to fail. It might be worthwhile, for user experience, to always try the all-lowercase version of a query if no results are returned for it as entered.\r\n\r\nThat said, the MediaWiki API would allow querying automatically for several variants (using `action=query&titles=variant1|Variant2|etc.`) and then fetching the first one that exists for display, if making multiple requests is OK in the plugin.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nwiktionary.py - Sopel Wiktionary Module\nCopyright 2009, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nimport requests\nfrom sopel import web\nfrom sopel.module import commands, example\n\nuri = 'https://en.wiktionary.org/w/index.php?title=%s&printable=yes'\nr_sup = re.compile(r'<sup[^>]+>.+</sup>') # Superscripts that are references only, not ordinal indicators, etc...\nr_tag = re.compile(r'<[^>]+>')\nr_ul = re.compile(r'(?ims)<ul>.*?</ul>')\n\n\ndef text(html):\n text = r_sup.sub('', html) # Remove superscripts that are references from definition\n text = r_tag.sub('', text).strip()\n text = text.replace('\\n', ' ')\n text = text.replace('\\r', '')\n text = text.replace('(intransitive', '(intr.')\n text = text.replace('(transitive', '(trans.')\n text = web.decode(text)\n return text\n\n\ndef wikt(word):\n bytes = requests.get(uri % web.quote(word)).text\n bytes = r_ul.sub('', bytes)\n\n mode = None\n etymology = None\n definitions = {}\n for line in bytes.splitlines():\n if 'id=\"Etymology\"' in line:\n mode = 'etymology'\n elif 'id=\"Noun\"' in line:\n mode = 'noun'\n elif 'id=\"Verb\"' in line:\n mode = 'verb'\n elif 'id=\"Adjective\"' in line:\n mode = 'adjective'\n elif 'id=\"Adverb\"' in line:\n mode = 'adverb'\n elif 'id=\"Interjection\"' in line:\n mode = 'interjection'\n elif 'id=\"Particle\"' in line:\n mode = 'particle'\n elif 'id=\"Preposition\"' in line:\n mode = 'preposition'\n elif 'id=\"Prefix\"' in line:\n mode = 'prefix'\n elif 'id=\"Suffix\"' in line:\n mode = 'suffix'\n # 'id=\"' can occur in definition lines <li> when <sup> tag is used for references;\n # make sure those are not excluded (see e.g., abecedarian).\n elif ('id=\"' in line) and ('<li>' not in line):\n mode = None\n\n elif (mode == 'etmyology') and ('<p>' in line):\n etymology = text(line)\n elif (mode is not None) and ('<li>' in line):\n definitions.setdefault(mode, []).append(text(line))\n\n if '<hr' in line:\n break\n return etymology, definitions\n\n\nparts = ('preposition', 'particle', 'noun', 'verb',\n 'adjective', 'adverb', 'interjection',\n 'prefix', 'suffix')\n\n\ndef format(result, definitions, number=2):\n for part in parts:\n if part in definitions:\n defs = definitions[part][:number]\n result += u' \u2014 {}: '.format(part)\n n = ['%s. %s' % (i + 1, e.strip(' .')) for i, e in enumerate(defs)]\n result += ', '.join(n)\n return result.strip(' .,')\n\n\n@commands('wt', 'define', 'dict')\n@example('.wt bailiwick')\ndef wiktionary(bot, trigger):\n \"\"\"Look up a word on Wiktionary.\"\"\"\n word = trigger.group(2)\n if word is None:\n bot.reply('You must tell me what to look up!')\n return\n\n _etymology, definitions = wikt(word)\n if not definitions:\n bot.say(\"Couldn't get any definitions for %s.\" % word)\n return\n\n result = format(word, definitions)\n if len(result) < 150:\n result = format(word, definitions, 3)\n if len(result) < 150:\n result = format(word, definitions, 5)\n\n if len(result) > 300:\n result = result[:295] + '[...]'\n bot.say(result)\n", "path": "sopel/modules/wiktionary.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nwiktionary.py - Sopel Wiktionary Module\nCopyright 2009, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nimport requests\nfrom sopel import web\nfrom sopel.module import commands, example\n\nuri = 'https://en.wiktionary.org/w/index.php?title=%s&printable=yes'\nr_sup = re.compile(r'<sup[^>]+>.+</sup>') # Superscripts that are references only, not ordinal indicators, etc...\nr_tag = re.compile(r'<[^>]+>')\nr_ul = re.compile(r'(?ims)<ul>.*?</ul>')\n\n\ndef text(html):\n text = r_sup.sub('', html) # Remove superscripts that are references from definition\n text = r_tag.sub('', text).strip()\n text = text.replace('\\n', ' ')\n text = text.replace('\\r', '')\n text = text.replace('(intransitive', '(intr.')\n text = text.replace('(transitive', '(trans.')\n text = web.decode(text)\n return text\n\n\ndef wikt(word):\n bytes = requests.get(uri % web.quote(word)).text\n bytes = r_ul.sub('', bytes)\n\n mode = None\n etymology = None\n definitions = {}\n for line in bytes.splitlines():\n if 'id=\"Etymology\"' in line:\n mode = 'etymology'\n elif 'id=\"Noun\"' in line:\n mode = 'noun'\n elif 'id=\"Verb\"' in line:\n mode = 'verb'\n elif 'id=\"Adjective\"' in line:\n mode = 'adjective'\n elif 'id=\"Adverb\"' in line:\n mode = 'adverb'\n elif 'id=\"Interjection\"' in line:\n mode = 'interjection'\n elif 'id=\"Particle\"' in line:\n mode = 'particle'\n elif 'id=\"Preposition\"' in line:\n mode = 'preposition'\n elif 'id=\"Prefix\"' in line:\n mode = 'prefix'\n elif 'id=\"Suffix\"' in line:\n mode = 'suffix'\n # 'id=\"' can occur in definition lines <li> when <sup> tag is used for references;\n # make sure those are not excluded (see e.g., abecedarian).\n elif ('id=\"' in line) and ('<li>' not in line):\n mode = None\n\n elif (mode == 'etmyology') and ('<p>' in line):\n etymology = text(line)\n elif (mode is not None) and ('<li>' in line):\n definitions.setdefault(mode, []).append(text(line))\n\n if '<hr' in line:\n break\n return etymology, definitions\n\n\nparts = ('preposition', 'particle', 'noun', 'verb',\n 'adjective', 'adverb', 'interjection',\n 'prefix', 'suffix')\n\n\ndef format(result, definitions, number=2):\n for part in parts:\n if part in definitions:\n defs = definitions[part][:number]\n result += u' \u2014 {}: '.format(part)\n n = ['%s. %s' % (i + 1, e.strip(' .')) for i, e in enumerate(defs)]\n result += ', '.join(n)\n return result.strip(' .,')\n\n\n@commands('wt', 'define', 'dict')\n@example('.wt bailiwick')\ndef wiktionary(bot, trigger):\n \"\"\"Look up a word on Wiktionary.\"\"\"\n word = trigger.group(2)\n if word is None:\n bot.reply('You must tell me what to look up!')\n return\n\n _etymology, definitions = wikt(word)\n if not definitions:\n # Cast word to lower to check in case of mismatched user input\n _etymology, definitions = wikt(word.lower())\n if not definitions:\n bot.say(\"Couldn't get any definitions for %s.\" % word)\n return\n\n result = format(word, definitions)\n if len(result) < 150:\n result = format(word, definitions, 3)\n if len(result) < 150:\n result = format(word, definitions, 5)\n\n if len(result) > 300:\n result = result[:295] + '[...]'\n bot.say(result)\n", "path": "sopel/modules/wiktionary.py"}]}
| 1,553 | 175 |
gh_patches_debug_9388
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-418
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
macro_avg_f_score bug
In the file flair/training_utils.py, when calculating macro_avg_f_score, I think 'recall' method should be called but 'precision' is called.

i think it needs to write like this:
`def macro_avg_f_score(self):
class_precisions = [self.precision(class_name) for class_name in self.get_classes()]
class_recalls = [self.recall(class_name) for class_name in self.get_classes()]`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/training_utils.py`
Content:
```
1 import itertools
2 import random
3 import logging
4 from collections import defaultdict
5 from enum import Enum
6 from pathlib import Path
7 from typing import List
8 from flair.data import Dictionary, Sentence
9 from functools import reduce
10
11
12 class Metric(object):
13
14 def __init__(self, name):
15 self.name = name
16
17 self._tps = defaultdict(int)
18 self._fps = defaultdict(int)
19 self._tns = defaultdict(int)
20 self._fns = defaultdict(int)
21
22 def add_tp(self, class_name):
23 self._tps[class_name] += 1
24
25 def add_tn(self, class_name):
26 self._tns[class_name] += 1
27
28 def add_fp(self, class_name):
29 self._fps[class_name] += 1
30
31 def add_fn(self, class_name):
32 self._fns[class_name] += 1
33
34 def get_tp(self, class_name=None):
35 if class_name is None:
36 return sum([self._tps[class_name] for class_name in self.get_classes()])
37 return self._tps[class_name]
38
39 def get_tn(self, class_name):
40 if class_name is None:
41 return sum([self._tns[class_name] for class_name in self.get_classes()])
42 return self._tns[class_name]
43
44 def get_fp(self, class_name):
45 if class_name is None:
46 return sum([self._fps[class_name] for class_name in self.get_classes()])
47 return self._fps[class_name]
48
49 def get_fn(self, class_name):
50 if class_name is None:
51 return sum([self._fns[class_name] for class_name in self.get_classes()])
52 return self._fns[class_name]
53
54 def precision(self, class_name=None):
55 if self.get_tp(class_name) + self.get_fp(class_name) > 0:
56 return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fp(class_name)), 4)
57 return 0.0
58
59 def recall(self, class_name=None):
60 if self.get_tp(class_name) + self.get_fn(class_name) > 0:
61 return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fn(class_name)), 4)
62 return 0.0
63
64 def f_score(self, class_name=None):
65 if self.precision(class_name) + self.recall(class_name) > 0:
66 return round(2 * (self.precision(class_name) * self.recall(class_name))
67 / (self.precision(class_name) + self.recall(class_name)), 4)
68 return 0.0
69
70 def accuracy(self, class_name=None):
71 if self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name) > 0:
72 return round(
73 (self.get_tp(class_name) + self.get_tn(class_name))
74 / (self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name)),
75 4)
76 return 0.0
77
78 def micro_avg_f_score(self):
79 return self.f_score(None)
80
81 def macro_avg_f_score(self):
82 class_precisions = [self.precision(class_name) for class_name in self.get_classes()]
83 class_recalls = [self.precision(class_name) for class_name in self.get_classes()]
84
85 if len(class_precisions) > 0 and len(class_recalls) > 0:
86 macro_precision = sum(class_precisions) / len(class_precisions)
87 macro_recall = sum(class_recalls) / len(class_recalls)
88
89 if macro_precision + macro_recall > 0:
90 return round(2 * (macro_precision * macro_recall) / (macro_precision + macro_recall), 4)
91
92 return 0.0
93
94 def micro_avg_accuracy(self):
95 return self.accuracy(None)
96
97 def macro_avg_accuracy(self):
98 class_accuracy = [self.accuracy(class_name) for class_name in self.get_classes()]
99
100 if len(class_accuracy) > 0:
101 return round(sum(class_accuracy) / len(class_accuracy), 4)
102
103 return 0.0
104
105 def get_classes(self) -> List:
106 all_classes = set(itertools.chain(*[list(keys) for keys
107 in [self._tps.keys(), self._fps.keys(), self._tns.keys(),
108 self._fns.keys()]]))
109 all_classes = [class_name for class_name in all_classes if class_name is not None]
110 all_classes.sort()
111 return all_classes
112
113 def to_tsv(self):
114 return '{}\t{}\t{}\t{}'.format(
115 self.precision(),
116 self.recall(),
117 self.accuracy(),
118 self.micro_avg_f_score(),
119 )
120
121 @staticmethod
122 def tsv_header(prefix=None):
123 if prefix:
124 return '{0}_PRECISION\t{0}_RECALL\t{0}_ACCURACY\t{0}_F-SCORE'.format(
125 prefix)
126
127 return 'PRECISION\tRECALL\tACCURACY\tF-SCORE'
128
129 @staticmethod
130 def to_empty_tsv():
131 return '\t_\t_\t_\t_'
132
133 def __str__(self):
134 all_classes = self.get_classes()
135 all_classes = [None] + all_classes
136 all_lines = [
137 '{0:<10}\ttp: {1} - fp: {2} - fn: {3} - tn: {4} - precision: {5:.4f} - recall: {6:.4f} - accuracy: {7:.4f} - f1-score: {8:.4f}'.format(
138 self.name if class_name is None else class_name,
139 self.get_tp(class_name), self.get_fp(class_name), self.get_fn(class_name), self.get_tn(class_name),
140 self.precision(class_name), self.recall(class_name), self.accuracy(class_name),
141 self.f_score(class_name))
142 for class_name in all_classes]
143 return '\n'.join(all_lines)
144
145
146 class EvaluationMetric(Enum):
147 MICRO_ACCURACY = 'micro-average accuracy'
148 MICRO_F1_SCORE = 'micro-average f1-score'
149 MACRO_ACCURACY = 'macro-average accuracy'
150 MACRO_F1_SCORE = 'macro-average f1-score'
151
152
153 class WeightExtractor(object):
154
155 def __init__(self, directory: Path, number_of_weights: int = 10):
156 self.weights_file = init_output_file(directory, 'weights.txt')
157 self.weights_dict = defaultdict(lambda: defaultdict(lambda: list()))
158 self.number_of_weights = number_of_weights
159
160 def extract_weights(self, state_dict, iteration):
161 for key in state_dict.keys():
162
163 vec = state_dict[key]
164 weights_to_watch = min(self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size())))
165
166 if key not in self.weights_dict:
167 self._init_weights_index(key, state_dict, weights_to_watch)
168
169 for i in range(weights_to_watch):
170 vec = state_dict[key]
171 for index in self.weights_dict[key][i]:
172 vec = vec[index]
173
174 value = vec.item()
175
176 with open(self.weights_file, 'a') as f:
177 f.write('{}\t{}\t{}\t{}\n'.format(iteration, key, i, float(value)))
178
179 def _init_weights_index(self, key, state_dict, weights_to_watch):
180 indices = {}
181
182 i = 0
183 while len(indices) < weights_to_watch:
184 vec = state_dict[key]
185 cur_indices = []
186
187 for x in range(len(vec.size())):
188 index = random.randint(0, len(vec) - 1)
189 vec = vec[index]
190 cur_indices.append(index)
191
192 if cur_indices not in list(indices.values()):
193 indices[i] = cur_indices
194 i += 1
195
196 self.weights_dict[key] = indices
197
198
199 def clear_embeddings(sentences: List[Sentence], also_clear_word_embeddings=False):
200 """
201 Clears the embeddings from all given sentences.
202 :param sentences: list of sentences
203 """
204 for sentence in sentences:
205 sentence.clear_embeddings(also_clear_word_embeddings=also_clear_word_embeddings)
206
207
208 def init_output_file(base_path: Path, file_name: str) -> Path:
209 """
210 Creates a local file.
211 :param base_path: the path to the directory
212 :param file_name: the file name
213 :return: the created file
214 """
215 base_path.mkdir(parents=True, exist_ok=True)
216
217 file = base_path / file_name
218 open(file, "w", encoding='utf-8').close()
219 return file
220
221
222 def convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:
223 """
224 Convert list of labels (strings) to a one hot list.
225 :param label_list: list of labels
226 :param label_dict: label dictionary
227 :return: converted label list
228 """
229 return [[1 if l in labels else 0 for l in label_dict.get_items()] for labels in label_list]
230
231
232 def log_line(log):
233 log.info('-' * 100)
234
235
236 def add_file_handler(log, output_file):
237 init_output_file(output_file.parents[0], output_file.name)
238 fh = logging.FileHandler(output_file)
239 fh.setLevel(logging.INFO)
240 formatter = logging.Formatter('%(asctime)-15s %(message)s')
241 fh.setFormatter(formatter)
242 log.addHandler(fh)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/training_utils.py b/flair/training_utils.py
--- a/flair/training_utils.py
+++ b/flair/training_utils.py
@@ -80,7 +80,7 @@
def macro_avg_f_score(self):
class_precisions = [self.precision(class_name) for class_name in self.get_classes()]
- class_recalls = [self.precision(class_name) for class_name in self.get_classes()]
+ class_recalls = [self.recall(class_name) for class_name in self.get_classes()]
if len(class_precisions) > 0 and len(class_recalls) > 0:
macro_precision = sum(class_precisions) / len(class_precisions)
|
{"golden_diff": "diff --git a/flair/training_utils.py b/flair/training_utils.py\n--- a/flair/training_utils.py\n+++ b/flair/training_utils.py\n@@ -80,7 +80,7 @@\n \n def macro_avg_f_score(self):\n class_precisions = [self.precision(class_name) for class_name in self.get_classes()]\n- class_recalls = [self.precision(class_name) for class_name in self.get_classes()]\n+ class_recalls = [self.recall(class_name) for class_name in self.get_classes()]\n \n if len(class_precisions) > 0 and len(class_recalls) > 0:\n macro_precision = sum(class_precisions) / len(class_precisions)\n", "issue": "macro_avg_f_score bug\nIn the file flair/training_utils.py, when calculating macro_avg_f_score, I think 'recall' method should be called but 'precision' is called.\r\n\r\n\r\n\r\ni think it needs to write like this:\r\n`def macro_avg_f_score(self): \r\n class_precisions = [self.precision(class_name) for class_name in self.get_classes()] \r\n class_recalls = [self.recall(class_name) for class_name in self.get_classes()]`\r\n\r\n\n", "before_files": [{"content": "import itertools\nimport random\nimport logging\nfrom collections import defaultdict\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\nfrom flair.data import Dictionary, Sentence\nfrom functools import reduce\n\n\nclass Metric(object):\n\n def __init__(self, name):\n self.name = name\n\n self._tps = defaultdict(int)\n self._fps = defaultdict(int)\n self._tns = defaultdict(int)\n self._fns = defaultdict(int)\n\n def add_tp(self, class_name):\n self._tps[class_name] += 1\n\n def add_tn(self, class_name):\n self._tns[class_name] += 1\n\n def add_fp(self, class_name):\n self._fps[class_name] += 1\n\n def add_fn(self, class_name):\n self._fns[class_name] += 1\n\n def get_tp(self, class_name=None):\n if class_name is None:\n return sum([self._tps[class_name] for class_name in self.get_classes()])\n return self._tps[class_name]\n\n def get_tn(self, class_name):\n if class_name is None:\n return sum([self._tns[class_name] for class_name in self.get_classes()])\n return self._tns[class_name]\n\n def get_fp(self, class_name):\n if class_name is None:\n return sum([self._fps[class_name] for class_name in self.get_classes()])\n return self._fps[class_name]\n\n def get_fn(self, class_name):\n if class_name is None:\n return sum([self._fns[class_name] for class_name in self.get_classes()])\n return self._fns[class_name]\n\n def precision(self, class_name=None):\n if self.get_tp(class_name) + self.get_fp(class_name) > 0:\n return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fp(class_name)), 4)\n return 0.0\n\n def recall(self, class_name=None):\n if self.get_tp(class_name) + self.get_fn(class_name) > 0:\n return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fn(class_name)), 4)\n return 0.0\n\n def f_score(self, class_name=None):\n if self.precision(class_name) + self.recall(class_name) > 0:\n return round(2 * (self.precision(class_name) * self.recall(class_name))\n / (self.precision(class_name) + self.recall(class_name)), 4)\n return 0.0\n\n def accuracy(self, class_name=None):\n if self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name) > 0:\n return round(\n (self.get_tp(class_name) + self.get_tn(class_name))\n / (self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name)),\n 4)\n return 0.0\n\n def micro_avg_f_score(self):\n return self.f_score(None)\n\n def macro_avg_f_score(self):\n class_precisions = [self.precision(class_name) for class_name in self.get_classes()]\n class_recalls = [self.precision(class_name) for class_name in self.get_classes()]\n\n if len(class_precisions) > 0 and len(class_recalls) > 0:\n macro_precision = sum(class_precisions) / len(class_precisions)\n macro_recall = sum(class_recalls) / len(class_recalls)\n\n if macro_precision + macro_recall > 0:\n return round(2 * (macro_precision * macro_recall) / (macro_precision + macro_recall), 4)\n\n return 0.0\n\n def micro_avg_accuracy(self):\n return self.accuracy(None)\n\n def macro_avg_accuracy(self):\n class_accuracy = [self.accuracy(class_name) for class_name in self.get_classes()]\n\n if len(class_accuracy) > 0:\n return round(sum(class_accuracy) / len(class_accuracy), 4)\n\n return 0.0\n\n def get_classes(self) -> List:\n all_classes = set(itertools.chain(*[list(keys) for keys\n in [self._tps.keys(), self._fps.keys(), self._tns.keys(),\n self._fns.keys()]]))\n all_classes = [class_name for class_name in all_classes if class_name is not None]\n all_classes.sort()\n return all_classes\n\n def to_tsv(self):\n return '{}\\t{}\\t{}\\t{}'.format(\n self.precision(),\n self.recall(),\n self.accuracy(),\n self.micro_avg_f_score(),\n )\n\n @staticmethod\n def tsv_header(prefix=None):\n if prefix:\n return '{0}_PRECISION\\t{0}_RECALL\\t{0}_ACCURACY\\t{0}_F-SCORE'.format(\n prefix)\n\n return 'PRECISION\\tRECALL\\tACCURACY\\tF-SCORE'\n\n @staticmethod\n def to_empty_tsv():\n return '\\t_\\t_\\t_\\t_'\n\n def __str__(self):\n all_classes = self.get_classes()\n all_classes = [None] + all_classes\n all_lines = [\n '{0:<10}\\ttp: {1} - fp: {2} - fn: {3} - tn: {4} - precision: {5:.4f} - recall: {6:.4f} - accuracy: {7:.4f} - f1-score: {8:.4f}'.format(\n self.name if class_name is None else class_name,\n self.get_tp(class_name), self.get_fp(class_name), self.get_fn(class_name), self.get_tn(class_name),\n self.precision(class_name), self.recall(class_name), self.accuracy(class_name),\n self.f_score(class_name))\n for class_name in all_classes]\n return '\\n'.join(all_lines)\n\n\nclass EvaluationMetric(Enum):\n MICRO_ACCURACY = 'micro-average accuracy'\n MICRO_F1_SCORE = 'micro-average f1-score'\n MACRO_ACCURACY = 'macro-average accuracy'\n MACRO_F1_SCORE = 'macro-average f1-score'\n\n\nclass WeightExtractor(object):\n\n def __init__(self, directory: Path, number_of_weights: int = 10):\n self.weights_file = init_output_file(directory, 'weights.txt')\n self.weights_dict = defaultdict(lambda: defaultdict(lambda: list()))\n self.number_of_weights = number_of_weights\n\n def extract_weights(self, state_dict, iteration):\n for key in state_dict.keys():\n\n vec = state_dict[key]\n weights_to_watch = min(self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size())))\n\n if key not in self.weights_dict:\n self._init_weights_index(key, state_dict, weights_to_watch)\n\n for i in range(weights_to_watch):\n vec = state_dict[key]\n for index in self.weights_dict[key][i]:\n vec = vec[index]\n\n value = vec.item()\n\n with open(self.weights_file, 'a') as f:\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(iteration, key, i, float(value)))\n\n def _init_weights_index(self, key, state_dict, weights_to_watch):\n indices = {}\n\n i = 0\n while len(indices) < weights_to_watch:\n vec = state_dict[key]\n cur_indices = []\n\n for x in range(len(vec.size())):\n index = random.randint(0, len(vec) - 1)\n vec = vec[index]\n cur_indices.append(index)\n\n if cur_indices not in list(indices.values()):\n indices[i] = cur_indices\n i += 1\n\n self.weights_dict[key] = indices\n\n\ndef clear_embeddings(sentences: List[Sentence], also_clear_word_embeddings=False):\n \"\"\"\n Clears the embeddings from all given sentences.\n :param sentences: list of sentences\n \"\"\"\n for sentence in sentences:\n sentence.clear_embeddings(also_clear_word_embeddings=also_clear_word_embeddings)\n\n\ndef init_output_file(base_path: Path, file_name: str) -> Path:\n \"\"\"\n Creates a local file.\n :param base_path: the path to the directory\n :param file_name: the file name\n :return: the created file\n \"\"\"\n base_path.mkdir(parents=True, exist_ok=True)\n\n file = base_path / file_name\n open(file, \"w\", encoding='utf-8').close()\n return file\n\n\ndef convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:\n \"\"\"\n Convert list of labels (strings) to a one hot list.\n :param label_list: list of labels\n :param label_dict: label dictionary\n :return: converted label list\n \"\"\"\n return [[1 if l in labels else 0 for l in label_dict.get_items()] for labels in label_list]\n\n\ndef log_line(log):\n log.info('-' * 100)\n\n\ndef add_file_handler(log, output_file):\n init_output_file(output_file.parents[0], output_file.name)\n fh = logging.FileHandler(output_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)-15s %(message)s')\n fh.setFormatter(formatter)\n log.addHandler(fh)\n", "path": "flair/training_utils.py"}], "after_files": [{"content": "import itertools\nimport random\nimport logging\nfrom collections import defaultdict\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import List\nfrom flair.data import Dictionary, Sentence\nfrom functools import reduce\n\n\nclass Metric(object):\n\n def __init__(self, name):\n self.name = name\n\n self._tps = defaultdict(int)\n self._fps = defaultdict(int)\n self._tns = defaultdict(int)\n self._fns = defaultdict(int)\n\n def add_tp(self, class_name):\n self._tps[class_name] += 1\n\n def add_tn(self, class_name):\n self._tns[class_name] += 1\n\n def add_fp(self, class_name):\n self._fps[class_name] += 1\n\n def add_fn(self, class_name):\n self._fns[class_name] += 1\n\n def get_tp(self, class_name=None):\n if class_name is None:\n return sum([self._tps[class_name] for class_name in self.get_classes()])\n return self._tps[class_name]\n\n def get_tn(self, class_name):\n if class_name is None:\n return sum([self._tns[class_name] for class_name in self.get_classes()])\n return self._tns[class_name]\n\n def get_fp(self, class_name):\n if class_name is None:\n return sum([self._fps[class_name] for class_name in self.get_classes()])\n return self._fps[class_name]\n\n def get_fn(self, class_name):\n if class_name is None:\n return sum([self._fns[class_name] for class_name in self.get_classes()])\n return self._fns[class_name]\n\n def precision(self, class_name=None):\n if self.get_tp(class_name) + self.get_fp(class_name) > 0:\n return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fp(class_name)), 4)\n return 0.0\n\n def recall(self, class_name=None):\n if self.get_tp(class_name) + self.get_fn(class_name) > 0:\n return round(self.get_tp(class_name) / (self.get_tp(class_name) + self.get_fn(class_name)), 4)\n return 0.0\n\n def f_score(self, class_name=None):\n if self.precision(class_name) + self.recall(class_name) > 0:\n return round(2 * (self.precision(class_name) * self.recall(class_name))\n / (self.precision(class_name) + self.recall(class_name)), 4)\n return 0.0\n\n def accuracy(self, class_name=None):\n if self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name) > 0:\n return round(\n (self.get_tp(class_name) + self.get_tn(class_name))\n / (self.get_tp(class_name) + self.get_tn(class_name) + self.get_fp(class_name) + self.get_fn(class_name)),\n 4)\n return 0.0\n\n def micro_avg_f_score(self):\n return self.f_score(None)\n\n def macro_avg_f_score(self):\n class_precisions = [self.precision(class_name) for class_name in self.get_classes()]\n class_recalls = [self.recall(class_name) for class_name in self.get_classes()]\n\n if len(class_precisions) > 0 and len(class_recalls) > 0:\n macro_precision = sum(class_precisions) / len(class_precisions)\n macro_recall = sum(class_recalls) / len(class_recalls)\n\n if macro_precision + macro_recall > 0:\n return round(2 * (macro_precision * macro_recall) / (macro_precision + macro_recall), 4)\n\n return 0.0\n\n def micro_avg_accuracy(self):\n return self.accuracy(None)\n\n def macro_avg_accuracy(self):\n class_accuracy = [self.accuracy(class_name) for class_name in self.get_classes()]\n\n if len(class_accuracy) > 0:\n return round(sum(class_accuracy) / len(class_accuracy), 4)\n\n return 0.0\n\n def get_classes(self) -> List:\n all_classes = set(itertools.chain(*[list(keys) for keys\n in [self._tps.keys(), self._fps.keys(), self._tns.keys(),\n self._fns.keys()]]))\n all_classes = [class_name for class_name in all_classes if class_name is not None]\n all_classes.sort()\n return all_classes\n\n def to_tsv(self):\n return '{}\\t{}\\t{}\\t{}'.format(\n self.precision(),\n self.recall(),\n self.accuracy(),\n self.micro_avg_f_score(),\n )\n\n @staticmethod\n def tsv_header(prefix=None):\n if prefix:\n return '{0}_PRECISION\\t{0}_RECALL\\t{0}_ACCURACY\\t{0}_F-SCORE'.format(\n prefix)\n\n return 'PRECISION\\tRECALL\\tACCURACY\\tF-SCORE'\n\n @staticmethod\n def to_empty_tsv():\n return '\\t_\\t_\\t_\\t_'\n\n def __str__(self):\n all_classes = self.get_classes()\n all_classes = [None] + all_classes\n all_lines = [\n '{0:<10}\\ttp: {1} - fp: {2} - fn: {3} - tn: {4} - precision: {5:.4f} - recall: {6:.4f} - accuracy: {7:.4f} - f1-score: {8:.4f}'.format(\n self.name if class_name is None else class_name,\n self.get_tp(class_name), self.get_fp(class_name), self.get_fn(class_name), self.get_tn(class_name),\n self.precision(class_name), self.recall(class_name), self.accuracy(class_name),\n self.f_score(class_name))\n for class_name in all_classes]\n return '\\n'.join(all_lines)\n\n\nclass EvaluationMetric(Enum):\n MICRO_ACCURACY = 'micro-average accuracy'\n MICRO_F1_SCORE = 'micro-average f1-score'\n MACRO_ACCURACY = 'macro-average accuracy'\n MACRO_F1_SCORE = 'macro-average f1-score'\n\n\nclass WeightExtractor(object):\n\n def __init__(self, directory: Path, number_of_weights: int = 10):\n self.weights_file = init_output_file(directory, 'weights.txt')\n self.weights_dict = defaultdict(lambda: defaultdict(lambda: list()))\n self.number_of_weights = number_of_weights\n\n def extract_weights(self, state_dict, iteration):\n for key in state_dict.keys():\n\n vec = state_dict[key]\n weights_to_watch = min(self.number_of_weights, reduce(lambda x, y: x * y, list(vec.size())))\n\n if key not in self.weights_dict:\n self._init_weights_index(key, state_dict, weights_to_watch)\n\n for i in range(weights_to_watch):\n vec = state_dict[key]\n for index in self.weights_dict[key][i]:\n vec = vec[index]\n\n value = vec.item()\n\n with open(self.weights_file, 'a') as f:\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(iteration, key, i, float(value)))\n\n def _init_weights_index(self, key, state_dict, weights_to_watch):\n indices = {}\n\n i = 0\n while len(indices) < weights_to_watch:\n vec = state_dict[key]\n cur_indices = []\n\n for x in range(len(vec.size())):\n index = random.randint(0, len(vec) - 1)\n vec = vec[index]\n cur_indices.append(index)\n\n if cur_indices not in list(indices.values()):\n indices[i] = cur_indices\n i += 1\n\n self.weights_dict[key] = indices\n\n\ndef clear_embeddings(sentences: List[Sentence], also_clear_word_embeddings=False):\n \"\"\"\n Clears the embeddings from all given sentences.\n :param sentences: list of sentences\n \"\"\"\n for sentence in sentences:\n sentence.clear_embeddings(also_clear_word_embeddings=also_clear_word_embeddings)\n\n\ndef init_output_file(base_path: Path, file_name: str) -> Path:\n \"\"\"\n Creates a local file.\n :param base_path: the path to the directory\n :param file_name: the file name\n :return: the created file\n \"\"\"\n base_path.mkdir(parents=True, exist_ok=True)\n\n file = base_path / file_name\n open(file, \"w\", encoding='utf-8').close()\n return file\n\n\ndef convert_labels_to_one_hot(label_list: List[List[str]], label_dict: Dictionary) -> List[List[int]]:\n \"\"\"\n Convert list of labels (strings) to a one hot list.\n :param label_list: list of labels\n :param label_dict: label dictionary\n :return: converted label list\n \"\"\"\n return [[1 if l in labels else 0 for l in label_dict.get_items()] for labels in label_list]\n\n\ndef log_line(log):\n log.info('-' * 100)\n\n\ndef add_file_handler(log, output_file):\n init_output_file(output_file.parents[0], output_file.name)\n fh = logging.FileHandler(output_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)-15s %(message)s')\n fh.setFormatter(formatter)\n log.addHandler(fh)\n", "path": "flair/training_utils.py"}]}
| 3,088 | 153 |
gh_patches_debug_16750
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-348
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError when trying to load Mathesar without any tables
## Description
<!-- A clear and concise description of what the bug is. -->
Mathesar's UI does not load if there are no tables in the UI. There's an error with an index being out of range.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Mathesar's UI should load.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Ensure you have no tables in your connected DB and try to load the Mathesar homepage.
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
Traceback:
```
Environment:
Request Method: GET
Request URL: http://localhost:8000/mathesar_tables
Django Version: 3.1.7
Python Version: 3.9.6
Installed Applications:
['django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'django_property_filter',
'mathesar']
Installed Middleware:
['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware']
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/code/mathesar/views/frontend.py", line 42, in index
"schemas": get_schemas(request),
File "/code/mathesar/views/frontend.py", line 17, in get_schemas
return schema_serializer.data
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 745, in data
ret = super().data
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 246, in data
self._data = self.to_representation(self.instance)
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 663, in to_representation
return [
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 664, in <listcomp>
self.child.to_representation(item) for item in iterable
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 515, in to_representation
ret[field.field_name] = field.to_representation(attribute)
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 663, in to_representation
return [
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 664, in <listcomp>
self.child.to_representation(item) for item in iterable
File "/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py", line 502, in to_representation
attribute = field.get_attribute(instance)
File "/usr/local/lib/python3.9/site-packages/rest_framework/fields.py", line 457, in get_attribute
return get_attribute(instance, self.source_attrs)
File "/usr/local/lib/python3.9/site-packages/rest_framework/fields.py", line 97, in get_attribute
instance = getattr(instance, attr)
File "/usr/local/lib/python3.9/site-packages/django/utils/functional.py", line 48, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/code/mathesar/models.py", line 89, in name
return self._sa_table.name
File "/usr/local/lib/python3.9/site-packages/django/utils/functional.py", line 48, in __get__
res = instance.__dict__[self.name] = self.func(instance)
File "/code/mathesar/models.py", line 75, in _sa_table
table = tables.reflect_table_from_oid(
File "/code/db/tables.py", line 358, in reflect_table_from_oid
schema, table_name = conn.execute(sel).fetchall()[0]
Exception Type: IndexError at /mathesar_tables
Exception Value: list index out of range
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/models.py`
Content:
```
1 from django.contrib.auth.models import User
2 from django.core.cache import cache
3 from django.db import models
4 from django.utils.functional import cached_property
5
6 from mathesar.database.base import create_mathesar_engine
7 from mathesar.utils import models as model_utils
8 from db import tables, records, schemas, columns
9
10 NAME_CACHE_INTERVAL = 60 * 5
11
12
13 class BaseModel(models.Model):
14 created_at = models.DateTimeField(auto_now_add=True)
15 updated_at = models.DateTimeField(auto_now=True)
16
17 class Meta:
18 abstract = True
19
20
21 class DatabaseObject(BaseModel):
22 oid = models.IntegerField()
23
24 class Meta:
25 abstract = True
26
27 def __str__(self):
28 return f"{self.__class__.__name__}: {self.oid}"
29
30
31 # TODO: Replace with a proper form of caching
32 # See: https://github.com/centerofci/mathesar/issues/280
33 _engines = {}
34
35
36 class Schema(DatabaseObject):
37 database = models.CharField(max_length=128)
38
39 @property
40 def _sa_engine(self):
41 global _engines
42 # We're caching this since the engine is used frequently.
43 if self.database not in _engines:
44 _engines[self.database] = create_mathesar_engine(self.database)
45 return _engines[self.database]
46
47 @cached_property
48 def name(self):
49 cache_key = f"{self.database}_schema_name_{self.oid}"
50 try:
51 schema_name = cache.get(cache_key)
52 if schema_name is None:
53 schema_name = schemas.get_schema_name_from_oid(
54 self.oid, self._sa_engine
55 )
56 cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)
57 return schema_name
58 # We catch this error, since it lets us decouple the cadence of
59 # overall DB reflection from the cadence of cache expiration for
60 # schema names. Also, it makes it obvious when the DB layer has
61 # been altered, as opposed to other reasons for a 404 when
62 # requesting a schema.
63 except TypeError:
64 return 'MISSING'
65
66
67 class Table(DatabaseObject):
68 schema = models.ForeignKey('Schema', on_delete=models.CASCADE,
69 related_name='tables')
70 import_verified = models.BooleanField(blank=True, null=True)
71
72 @cached_property
73 def _sa_table(self):
74 try:
75 table = tables.reflect_table_from_oid(
76 self.oid, self.schema._sa_engine,
77 )
78 # We catch this error, since it lets us decouple the cadence of
79 # overall DB reflection from the cadence of cache expiration for
80 # table names. Also, it makes it obvious when the DB layer has
81 # been altered, as opposed to other reasons for a 404 when
82 # requesting a table.
83 except TypeError:
84 table = tables.create_empty_table("MISSING")
85 return table
86
87 @cached_property
88 def name(self):
89 return self._sa_table.name
90
91 @property
92 def sa_columns(self):
93 return self._sa_table.columns
94
95 @property
96 def sa_column_names(self):
97 return self.sa_columns.keys()
98
99 def add_column(self, column_data):
100 return columns.create_column(
101 self.schema._sa_engine,
102 self.oid,
103 column_data,
104 )
105
106 def alter_column(self, column_index, column_data):
107 return columns.alter_column(
108 self.schema._sa_engine,
109 self.oid,
110 column_index,
111 column_data,
112 )
113
114 def drop_column(self, column_index):
115 columns.drop_column(
116 self.schema._sa_engine,
117 self.oid,
118 column_index,
119 )
120
121 @property
122 def sa_all_records(self):
123 return records.get_records(self._sa_table, self.schema._sa_engine)
124
125 def sa_num_records(self, filters=[]):
126 return tables.get_count(self._sa_table, self.schema._sa_engine, filters=filters)
127
128 def get_record(self, id_value):
129 return records.get_record(self._sa_table, self.schema._sa_engine, id_value)
130
131 def get_records(self, limit=None, offset=None, filters=[], order_by=[]):
132 return records.get_records(self._sa_table, self.schema._sa_engine, limit,
133 offset, filters=filters, order_by=order_by)
134
135 def get_group_counts(
136 self, group_by, limit=None, offset=None, filters=[], order_by=[]
137 ):
138 return records.get_group_counts(self._sa_table, self.schema._sa_engine,
139 group_by, limit, offset, filters=filters,
140 order_by=order_by)
141
142 def create_record_or_records(self, record_data):
143 return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)
144
145 def update_record(self, id_value, record_data):
146 return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)
147
148 def delete_record(self, id_value):
149 return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)
150
151
152 class DataFile(BaseModel):
153 file = models.FileField(
154 upload_to=model_utils.user_directory_path,
155 )
156 user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)
157 table_imported_to = models.ForeignKey(Table, related_name="data_files", blank=True,
158 null=True, on_delete=models.SET_NULL)
159 delimiter = models.CharField(max_length=1, default=',', blank=True)
160 escapechar = models.CharField(max_length=1, blank=True)
161 quotechar = models.CharField(max_length=1, default='"', blank=True)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/models.py b/mathesar/models.py
--- a/mathesar/models.py
+++ b/mathesar/models.py
@@ -75,12 +75,12 @@
table = tables.reflect_table_from_oid(
self.oid, self.schema._sa_engine,
)
- # We catch this error, since it lets us decouple the cadence of
+ # We catch these errors, since it lets us decouple the cadence of
# overall DB reflection from the cadence of cache expiration for
# table names. Also, it makes it obvious when the DB layer has
# been altered, as opposed to other reasons for a 404 when
# requesting a table.
- except TypeError:
+ except (TypeError, IndexError):
table = tables.create_empty_table("MISSING")
return table
|
{"golden_diff": "diff --git a/mathesar/models.py b/mathesar/models.py\n--- a/mathesar/models.py\n+++ b/mathesar/models.py\n@@ -75,12 +75,12 @@\n table = tables.reflect_table_from_oid(\n self.oid, self.schema._sa_engine,\n )\n- # We catch this error, since it lets us decouple the cadence of\n+ # We catch these errors, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # table names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a table.\n- except TypeError:\n+ except (TypeError, IndexError):\n table = tables.create_empty_table(\"MISSING\")\n return table\n", "issue": "IndexError when trying to load Mathesar without any tables\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nMathesar's UI does not load if there are no tables in the UI. There's an error with an index being out of range.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nMathesar's UI should load.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\nEnsure you have no tables in your connected DB and try to load the Mathesar homepage.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. -->\r\n\r\nTraceback:\r\n\r\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/mathesar_tables\r\n\r\nDjango Version: 3.1.7\r\nPython Version: 3.9.6\r\nInstalled Applications:\r\n['django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'rest_framework',\r\n 'django_filters',\r\n 'django_property_filter',\r\n 'mathesar']\r\nInstalled Middleware:\r\n['django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware']\r\n\r\n\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/code/mathesar/views/frontend.py\", line 42, in index\r\n \"schemas\": get_schemas(request),\r\n File \"/code/mathesar/views/frontend.py\", line 17, in get_schemas\r\n return schema_serializer.data\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 745, in data\r\n ret = super().data\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 246, in data\r\n self._data = self.to_representation(self.instance)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 663, in to_representation\r\n return [\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 664, in <listcomp>\r\n self.child.to_representation(item) for item in iterable\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 515, in to_representation\r\n ret[field.field_name] = field.to_representation(attribute)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 663, in to_representation\r\n return [\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 664, in <listcomp>\r\n self.child.to_representation(item) for item in iterable\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/serializers.py\", line 502, in to_representation\r\n attribute = field.get_attribute(instance)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/fields.py\", line 457, in get_attribute\r\n return get_attribute(instance, self.source_attrs)\r\n File \"/usr/local/lib/python3.9/site-packages/rest_framework/fields.py\", line 97, in get_attribute\r\n instance = getattr(instance, attr)\r\n File \"/usr/local/lib/python3.9/site-packages/django/utils/functional.py\", line 48, in __get__\r\n res = instance.__dict__[self.name] = self.func(instance)\r\n File \"/code/mathesar/models.py\", line 89, in name\r\n return self._sa_table.name\r\n File \"/usr/local/lib/python3.9/site-packages/django/utils/functional.py\", line 48, in __get__\r\n res = instance.__dict__[self.name] = self.func(instance)\r\n File \"/code/mathesar/models.py\", line 75, in _sa_table\r\n table = tables.reflect_table_from_oid(\r\n File \"/code/db/tables.py\", line 358, in reflect_table_from_oid\r\n schema, table_name = conn.execute(sel).fetchall()[0]\r\n\r\nException Type: IndexError at /mathesar_tables\r\nException Value: list index out of range\r\n```\n", "before_files": [{"content": "from django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.utils import models as model_utils\nfrom db import tables, records, schemas, columns\n\nNAME_CACHE_INTERVAL = 60 * 5\n\n\nclass BaseModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DatabaseObject(BaseModel):\n oid = models.IntegerField()\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.oid}\"\n\n\n# TODO: Replace with a proper form of caching\n# See: https://github.com/centerofci/mathesar/issues/280\n_engines = {}\n\n\nclass Schema(DatabaseObject):\n database = models.CharField(max_length=128)\n\n @property\n def _sa_engine(self):\n global _engines\n # We're caching this since the engine is used frequently.\n if self.database not in _engines:\n _engines[self.database] = create_mathesar_engine(self.database)\n return _engines[self.database]\n\n @cached_property\n def name(self):\n cache_key = f\"{self.database}_schema_name_{self.oid}\"\n try:\n schema_name = cache.get(cache_key)\n if schema_name is None:\n schema_name = schemas.get_schema_name_from_oid(\n self.oid, self._sa_engine\n )\n cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)\n return schema_name\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # schema names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a schema.\n except TypeError:\n return 'MISSING'\n\n\nclass Table(DatabaseObject):\n schema = models.ForeignKey('Schema', on_delete=models.CASCADE,\n related_name='tables')\n import_verified = models.BooleanField(blank=True, null=True)\n\n @cached_property\n def _sa_table(self):\n try:\n table = tables.reflect_table_from_oid(\n self.oid, self.schema._sa_engine,\n )\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # table names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a table.\n except TypeError:\n table = tables.create_empty_table(\"MISSING\")\n return table\n\n @cached_property\n def name(self):\n return self._sa_table.name\n\n @property\n def sa_columns(self):\n return self._sa_table.columns\n\n @property\n def sa_column_names(self):\n return self.sa_columns.keys()\n\n def add_column(self, column_data):\n return columns.create_column(\n self.schema._sa_engine,\n self.oid,\n column_data,\n )\n\n def alter_column(self, column_index, column_data):\n return columns.alter_column(\n self.schema._sa_engine,\n self.oid,\n column_index,\n column_data,\n )\n\n def drop_column(self, column_index):\n columns.drop_column(\n self.schema._sa_engine,\n self.oid,\n column_index,\n )\n\n @property\n def sa_all_records(self):\n return records.get_records(self._sa_table, self.schema._sa_engine)\n\n def sa_num_records(self, filters=[]):\n return tables.get_count(self._sa_table, self.schema._sa_engine, filters=filters)\n\n def get_record(self, id_value):\n return records.get_record(self._sa_table, self.schema._sa_engine, id_value)\n\n def get_records(self, limit=None, offset=None, filters=[], order_by=[]):\n return records.get_records(self._sa_table, self.schema._sa_engine, limit,\n offset, filters=filters, order_by=order_by)\n\n def get_group_counts(\n self, group_by, limit=None, offset=None, filters=[], order_by=[]\n ):\n return records.get_group_counts(self._sa_table, self.schema._sa_engine,\n group_by, limit, offset, filters=filters,\n order_by=order_by)\n\n def create_record_or_records(self, record_data):\n return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)\n\n def update_record(self, id_value, record_data):\n return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)\n\n def delete_record(self, id_value):\n return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)\n\n\nclass DataFile(BaseModel):\n file = models.FileField(\n upload_to=model_utils.user_directory_path,\n )\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n table_imported_to = models.ForeignKey(Table, related_name=\"data_files\", blank=True,\n null=True, on_delete=models.SET_NULL)\n delimiter = models.CharField(max_length=1, default=',', blank=True)\n escapechar = models.CharField(max_length=1, blank=True)\n quotechar = models.CharField(max_length=1, default='\"', blank=True)\n", "path": "mathesar/models.py"}], "after_files": [{"content": "from django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.utils import models as model_utils\nfrom db import tables, records, schemas, columns\n\nNAME_CACHE_INTERVAL = 60 * 5\n\n\nclass BaseModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DatabaseObject(BaseModel):\n oid = models.IntegerField()\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.oid}\"\n\n\n# TODO: Replace with a proper form of caching\n# See: https://github.com/centerofci/mathesar/issues/280\n_engines = {}\n\n\nclass Schema(DatabaseObject):\n database = models.CharField(max_length=128)\n\n @property\n def _sa_engine(self):\n global _engines\n # We're caching this since the engine is used frequently.\n if self.database not in _engines:\n _engines[self.database] = create_mathesar_engine(self.database)\n return _engines[self.database]\n\n @cached_property\n def name(self):\n cache_key = f\"{self.database}_schema_name_{self.oid}\"\n try:\n schema_name = cache.get(cache_key)\n if schema_name is None:\n schema_name = schemas.get_schema_name_from_oid(\n self.oid, self._sa_engine\n )\n cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)\n return schema_name\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # schema names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a schema.\n except TypeError:\n return 'MISSING'\n\n\nclass Table(DatabaseObject):\n schema = models.ForeignKey('Schema', on_delete=models.CASCADE,\n related_name='tables')\n import_verified = models.BooleanField(blank=True, null=True)\n\n @cached_property\n def _sa_table(self):\n try:\n table = tables.reflect_table_from_oid(\n self.oid, self.schema._sa_engine,\n )\n # We catch these errors, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # table names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a table.\n except (TypeError, IndexError):\n table = tables.create_empty_table(\"MISSING\")\n return table\n\n @cached_property\n def name(self):\n return self._sa_table.name\n\n @property\n def sa_columns(self):\n return self._sa_table.columns\n\n @property\n def sa_column_names(self):\n return self.sa_columns.keys()\n\n def add_column(self, column_data):\n return columns.create_column(\n self.schema._sa_engine,\n self.oid,\n column_data,\n )\n\n def alter_column(self, column_index, column_data):\n return columns.alter_column(\n self.schema._sa_engine,\n self.oid,\n column_index,\n column_data,\n )\n\n def drop_column(self, column_index):\n columns.drop_column(\n self.schema._sa_engine,\n self.oid,\n column_index,\n )\n\n @property\n def sa_all_records(self):\n return records.get_records(self._sa_table, self.schema._sa_engine)\n\n def sa_num_records(self, filters=[]):\n return tables.get_count(self._sa_table, self.schema._sa_engine, filters=filters)\n\n def get_record(self, id_value):\n return records.get_record(self._sa_table, self.schema._sa_engine, id_value)\n\n def get_records(self, limit=None, offset=None, filters=[], order_by=[]):\n return records.get_records(self._sa_table, self.schema._sa_engine, limit,\n offset, filters=filters, order_by=order_by)\n\n def get_group_counts(\n self, group_by, limit=None, offset=None, filters=[], order_by=[]\n ):\n return records.get_group_counts(self._sa_table, self.schema._sa_engine,\n group_by, limit, offset, filters=filters,\n order_by=order_by)\n\n def create_record_or_records(self, record_data):\n return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)\n\n def update_record(self, id_value, record_data):\n return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)\n\n def delete_record(self, id_value):\n return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)\n\n\nclass DataFile(BaseModel):\n file = models.FileField(\n upload_to=model_utils.user_directory_path,\n )\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n table_imported_to = models.ForeignKey(Table, related_name=\"data_files\", blank=True,\n null=True, on_delete=models.SET_NULL)\n delimiter = models.CharField(max_length=1, default=',', blank=True)\n escapechar = models.CharField(max_length=1, blank=True)\n quotechar = models.CharField(max_length=1, default='\"', blank=True)\n", "path": "mathesar/models.py"}]}
| 2,874 | 186 |
gh_patches_debug_62717
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-2761
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dark Reaper rse_id crash
Motivation
----------
The dark reaper raises an error in 1.20.3
Modification
------------
Fix the query in core/quarantined_replica.py -> list_rses()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/core/quarantined_replica.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Vincent Garonne <[email protected]>, 2016-2017
18 # - Hannes Hansen <[email protected]>, 2018
19 # - Dimitrios Christidis <[email protected]>, 2018
20 # - Andrew Lister <[email protected]>, 2019
21 #
22 # PY3K COMPATIBLE
23
24 import datetime
25
26 from sqlalchemy import and_, or_, exists, not_
27 from sqlalchemy.sql.expression import bindparam, text, select, false
28
29 from rucio.common.utils import chunks
30 from rucio.db.sqla import models
31 from rucio.db.sqla.session import read_session, transactional_session
32
33
34 @transactional_session
35 def add_quarantined_replicas(rse_id, replicas, session=None):
36 """
37 Bulk add quarantined file replicas.
38
39 :param rse_id: The rse id.
40 :param replicas: A list of dicts with the replica information.
41 :param session: The database session in use.
42 """
43
44 for chunk in chunks(replicas, 100):
45 # Exlude files that have a registered replica. This is a
46 # safeguard against potential issues in the Auditor.
47 file_clause = []
48 for replica in chunk:
49 file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),
50 models.RSEFileAssociation.name == replica.get('name', None),
51 models.RSEFileAssociation.rse_id == rse_id))
52 file_query = session.query(models.RSEFileAssociation.scope,
53 models.RSEFileAssociation.name,
54 models.RSEFileAssociation.rse_id).\
55 with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
56 filter(or_(*file_clause))
57 existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]
58 chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]
59
60 # Exclude files that have already been added to the quarantined
61 # replica table.
62 quarantine_clause = []
63 for replica in chunk:
64 quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],
65 models.QuarantinedReplica.rse_id == rse_id))
66 quarantine_query = session.query(models.QuarantinedReplica.path,
67 models.QuarantinedReplica.rse_id).\
68 filter(or_(*quarantine_clause))
69 quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]
70 chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]
71
72 session.bulk_insert_mappings(
73 models.QuarantinedReplica,
74 [{'rse_id': rse_id, 'path': file['path'],
75 'scope': file.get('scope'), 'name': file.get('name'),
76 'bytes': file.get('bytes')} for file in chunk])
77
78
79 @transactional_session
80 def delete_quarantined_replicas(rse_id, replicas, session=None):
81 """
82 Delete file replicas.
83
84 :param rse_id: the rse id.
85 :param files: the list of files to delete.
86 :param ignore_availability: Ignore the RSE blacklisting.
87 :param session: The database session in use.
88 """
89
90 conditions = []
91 for replica in replicas:
92 conditions.append(models.QuarantinedReplica.path == replica['path'])
93
94 if conditions:
95 session.query(models.QuarantinedReplica).\
96 filter(models.QuarantinedReplica.rse_id == rse_id).\
97 filter(or_(*conditions)).\
98 delete(synchronize_session=False)
99
100 session.\
101 bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,
102 [{'rse_id': rse_id, 'path': replica['path'],
103 'bytes': replica.get('bytes'),
104 'created_at': replica.get('created_at'),
105 'deleted_at': datetime.datetime.utcnow()}
106 for replica in replicas])
107
108
109 @read_session
110 def list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):
111 """
112 List RSE Quarantined File replicas.
113
114 :param rse_id: the rse id.
115 :param limit: The maximum number of replicas returned.
116 :param worker_number: id of the executing worker.
117 :param total_workers: Number of total workers.
118 :param session: The database session in use.
119
120 :returns: a list of dictionary replica.
121 """
122
123 query = session.query(models.QuarantinedReplica.path,
124 models.QuarantinedReplica.bytes,
125 models.QuarantinedReplica.scope,
126 models.QuarantinedReplica.name,
127 models.QuarantinedReplica.created_at).\
128 filter(models.QuarantinedReplica.rse_id == rse_id)
129
130 # do no delete valid replicas
131 stmt = exists(select([1]).prefix_with("/*+ index(REPLICAS REPLICAS_PK) */", dialect='oracle')).\
132 where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,
133 models.RSEFileAssociation.name == models.QuarantinedReplica.name,
134 models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))
135 query = query.filter(not_(stmt))
136
137 if worker_number and total_workers and total_workers - 1 > 0:
138 if session.bind.dialect.name == 'oracle':
139 bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]
140 query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))
141 elif session.bind.dialect.name == 'mysql':
142 query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))
143 elif session.bind.dialect.name == 'postgresql':
144 query = query.filter('mod(abs((\'x\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))
145
146 return [{'path': path,
147 'rse_id': rse_id,
148 'created_at': created_at,
149 'scope': scope,
150 'name': name,
151 'bytes': bytes}
152 for path, bytes, scope, name, created_at in query.limit(limit)]
153
154
155 @read_session
156 def list_rses(session=None):
157 """
158 List RSEs in the Quarantined Queues.
159
160 :param session: The database session in use.
161
162 :returns: a list of RSEs.
163 """
164 query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\
165 filter(models.QuarantinedReplica.rse_id == models.RSE.id).\
166 filter(models.RSE.deleted == false())
167 return [rse for (rse,) in query]
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/rucio/core/quarantined_replica.py b/lib/rucio/core/quarantined_replica.py
--- a/lib/rucio/core/quarantined_replica.py
+++ b/lib/rucio/core/quarantined_replica.py
@@ -161,7 +161,7 @@
:returns: a list of RSEs.
"""
- query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\
+ query = session.query(models.RSE.id).distinct(models.RSE.id).\
filter(models.QuarantinedReplica.rse_id == models.RSE.id).\
filter(models.RSE.deleted == false())
return [rse for (rse,) in query]
|
{"golden_diff": "diff --git a/lib/rucio/core/quarantined_replica.py b/lib/rucio/core/quarantined_replica.py\n--- a/lib/rucio/core/quarantined_replica.py\n+++ b/lib/rucio/core/quarantined_replica.py\n@@ -161,7 +161,7 @@\n \n :returns: a list of RSEs.\n \"\"\"\n- query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\\\n+ query = session.query(models.RSE.id).distinct(models.RSE.id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "issue": "Dark Reaper rse_id crash\nMotivation\r\n----------\r\nThe dark reaper raises an error in 1.20.3\r\n\r\n\r\nModification\r\n------------\r\nFix the query in core/quarantined_replica.py -> list_rses()\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2017\n# - Hannes Hansen <[email protected]>, 2018\n# - Dimitrios Christidis <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nimport datetime\n\nfrom sqlalchemy import and_, or_, exists, not_\nfrom sqlalchemy.sql.expression import bindparam, text, select, false\n\nfrom rucio.common.utils import chunks\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.session import read_session, transactional_session\n\n\n@transactional_session\ndef add_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Bulk add quarantined file replicas.\n\n :param rse_id: The rse id.\n :param replicas: A list of dicts with the replica information.\n :param session: The database session in use.\n \"\"\"\n\n for chunk in chunks(replicas, 100):\n # Exlude files that have a registered replica. This is a\n # safeguard against potential issues in the Auditor.\n file_clause = []\n for replica in chunk:\n file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),\n models.RSEFileAssociation.name == replica.get('name', None),\n models.RSEFileAssociation.rse_id == rse_id))\n file_query = session.query(models.RSEFileAssociation.scope,\n models.RSEFileAssociation.name,\n models.RSEFileAssociation.rse_id).\\\n with_hint(models.RSEFileAssociation, \"index(REPLICAS REPLICAS_PK)\", 'oracle').\\\n filter(or_(*file_clause))\n existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]\n chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]\n\n # Exclude files that have already been added to the quarantined\n # replica table.\n quarantine_clause = []\n for replica in chunk:\n quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],\n models.QuarantinedReplica.rse_id == rse_id))\n quarantine_query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.rse_id).\\\n filter(or_(*quarantine_clause))\n quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]\n chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]\n\n session.bulk_insert_mappings(\n models.QuarantinedReplica,\n [{'rse_id': rse_id, 'path': file['path'],\n 'scope': file.get('scope'), 'name': file.get('name'),\n 'bytes': file.get('bytes')} for file in chunk])\n\n\n@transactional_session\ndef delete_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blacklisting.\n :param session: The database session in use.\n \"\"\"\n\n conditions = []\n for replica in replicas:\n conditions.append(models.QuarantinedReplica.path == replica['path'])\n\n if conditions:\n session.query(models.QuarantinedReplica).\\\n filter(models.QuarantinedReplica.rse_id == rse_id).\\\n filter(or_(*conditions)).\\\n delete(synchronize_session=False)\n\n session.\\\n bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,\n [{'rse_id': rse_id, 'path': replica['path'],\n 'bytes': replica.get('bytes'),\n 'created_at': replica.get('created_at'),\n 'deleted_at': datetime.datetime.utcnow()}\n for replica in replicas])\n\n\n@read_session\ndef list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):\n \"\"\"\n List RSE Quarantined File replicas.\n\n :param rse_id: the rse id.\n :param limit: The maximum number of replicas returned.\n :param worker_number: id of the executing worker.\n :param total_workers: Number of total workers.\n :param session: The database session in use.\n\n :returns: a list of dictionary replica.\n \"\"\"\n\n query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.bytes,\n models.QuarantinedReplica.scope,\n models.QuarantinedReplica.name,\n models.QuarantinedReplica.created_at).\\\n filter(models.QuarantinedReplica.rse_id == rse_id)\n\n # do no delete valid replicas\n stmt = exists(select([1]).prefix_with(\"/*+ index(REPLICAS REPLICAS_PK) */\", dialect='oracle')).\\\n where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,\n models.RSEFileAssociation.name == models.QuarantinedReplica.name,\n models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))\n query = query.filter(not_(stmt))\n\n if worker_number and total_workers and total_workers - 1 > 0:\n if session.bind.dialect.name == 'oracle':\n bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]\n query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))\n elif session.bind.dialect.name == 'mysql':\n query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))\n elif session.bind.dialect.name == 'postgresql':\n query = query.filter('mod(abs((\\'x\\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))\n\n return [{'path': path,\n 'rse_id': rse_id,\n 'created_at': created_at,\n 'scope': scope,\n 'name': name,\n 'bytes': bytes}\n for path, bytes, scope, name, created_at in query.limit(limit)]\n\n\n@read_session\ndef list_rses(session=None):\n \"\"\"\n List RSEs in the Quarantined Queues.\n\n :param session: The database session in use.\n\n :returns: a list of RSEs.\n \"\"\"\n query = session.query(models.RSE.rse_id).distinct(models.RSE.rse_id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "path": "lib/rucio/core/quarantined_replica.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2017\n# - Hannes Hansen <[email protected]>, 2018\n# - Dimitrios Christidis <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nimport datetime\n\nfrom sqlalchemy import and_, or_, exists, not_\nfrom sqlalchemy.sql.expression import bindparam, text, select, false\n\nfrom rucio.common.utils import chunks\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.session import read_session, transactional_session\n\n\n@transactional_session\ndef add_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Bulk add quarantined file replicas.\n\n :param rse_id: The rse id.\n :param replicas: A list of dicts with the replica information.\n :param session: The database session in use.\n \"\"\"\n\n for chunk in chunks(replicas, 100):\n # Exlude files that have a registered replica. This is a\n # safeguard against potential issues in the Auditor.\n file_clause = []\n for replica in chunk:\n file_clause.append(and_(models.RSEFileAssociation.scope == replica.get('scope', None),\n models.RSEFileAssociation.name == replica.get('name', None),\n models.RSEFileAssociation.rse_id == rse_id))\n file_query = session.query(models.RSEFileAssociation.scope,\n models.RSEFileAssociation.name,\n models.RSEFileAssociation.rse_id).\\\n with_hint(models.RSEFileAssociation, \"index(REPLICAS REPLICAS_PK)\", 'oracle').\\\n filter(or_(*file_clause))\n existing_replicas = [(scope, name, rseid) for scope, name, rseid in file_query]\n chunk = [replica for replica in chunk if (replica.get('scope', None), replica.get('name', None), rse_id) not in existing_replicas]\n\n # Exclude files that have already been added to the quarantined\n # replica table.\n quarantine_clause = []\n for replica in chunk:\n quarantine_clause.append(and_(models.QuarantinedReplica.path == replica['path'],\n models.QuarantinedReplica.rse_id == rse_id))\n quarantine_query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.rse_id).\\\n filter(or_(*quarantine_clause))\n quarantine_replicas = [(path, rseid) for path, rseid in quarantine_query]\n chunk = [replica for replica in chunk if (replica['path'], rse_id) not in quarantine_replicas]\n\n session.bulk_insert_mappings(\n models.QuarantinedReplica,\n [{'rse_id': rse_id, 'path': file['path'],\n 'scope': file.get('scope'), 'name': file.get('name'),\n 'bytes': file.get('bytes')} for file in chunk])\n\n\n@transactional_session\ndef delete_quarantined_replicas(rse_id, replicas, session=None):\n \"\"\"\n Delete file replicas.\n\n :param rse_id: the rse id.\n :param files: the list of files to delete.\n :param ignore_availability: Ignore the RSE blacklisting.\n :param session: The database session in use.\n \"\"\"\n\n conditions = []\n for replica in replicas:\n conditions.append(models.QuarantinedReplica.path == replica['path'])\n\n if conditions:\n session.query(models.QuarantinedReplica).\\\n filter(models.QuarantinedReplica.rse_id == rse_id).\\\n filter(or_(*conditions)).\\\n delete(synchronize_session=False)\n\n session.\\\n bulk_insert_mappings(models.QuarantinedReplica.__history_mapper__.class_,\n [{'rse_id': rse_id, 'path': replica['path'],\n 'bytes': replica.get('bytes'),\n 'created_at': replica.get('created_at'),\n 'deleted_at': datetime.datetime.utcnow()}\n for replica in replicas])\n\n\n@read_session\ndef list_quarantined_replicas(rse_id, limit, worker_number=None, total_workers=None, session=None):\n \"\"\"\n List RSE Quarantined File replicas.\n\n :param rse_id: the rse id.\n :param limit: The maximum number of replicas returned.\n :param worker_number: id of the executing worker.\n :param total_workers: Number of total workers.\n :param session: The database session in use.\n\n :returns: a list of dictionary replica.\n \"\"\"\n\n query = session.query(models.QuarantinedReplica.path,\n models.QuarantinedReplica.bytes,\n models.QuarantinedReplica.scope,\n models.QuarantinedReplica.name,\n models.QuarantinedReplica.created_at).\\\n filter(models.QuarantinedReplica.rse_id == rse_id)\n\n # do no delete valid replicas\n stmt = exists(select([1]).prefix_with(\"/*+ index(REPLICAS REPLICAS_PK) */\", dialect='oracle')).\\\n where(and_(models.RSEFileAssociation.scope == models.QuarantinedReplica.scope,\n models.RSEFileAssociation.name == models.QuarantinedReplica.name,\n models.RSEFileAssociation.rse_id == models.QuarantinedReplica.rse_id))\n query = query.filter(not_(stmt))\n\n if worker_number and total_workers and total_workers - 1 > 0:\n if session.bind.dialect.name == 'oracle':\n bindparams = [bindparam('worker_number', worker_number - 1), bindparam('total_workers', total_workers - 1)]\n query = query.filter(text('ORA_HASH(path, :total_workers) = :worker_number', bindparams=bindparams))\n elif session.bind.dialect.name == 'mysql':\n query = query.filter('mod(md5(path), %s) = %s' % (total_workers - 1, worker_number - 1))\n elif session.bind.dialect.name == 'postgresql':\n query = query.filter('mod(abs((\\'x\\'||md5(path))::bit(32)::int), %s) = %s' % (total_workers - 1, worker_number - 1))\n\n return [{'path': path,\n 'rse_id': rse_id,\n 'created_at': created_at,\n 'scope': scope,\n 'name': name,\n 'bytes': bytes}\n for path, bytes, scope, name, created_at in query.limit(limit)]\n\n\n@read_session\ndef list_rses(session=None):\n \"\"\"\n List RSEs in the Quarantined Queues.\n\n :param session: The database session in use.\n\n :returns: a list of RSEs.\n \"\"\"\n query = session.query(models.RSE.id).distinct(models.RSE.id).\\\n filter(models.QuarantinedReplica.rse_id == models.RSE.id).\\\n filter(models.RSE.deleted == false())\n return [rse for (rse,) in query]\n", "path": "lib/rucio/core/quarantined_replica.py"}]}
| 2,444 | 169 |
gh_patches_debug_16292
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-286
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove / deprecate expected_return_code
Since we rely heavily on xargs, this feature doesn't really work as expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import logging
5 import os
6 import sys
7
8 from pre_commit import color
9 from pre_commit import git
10 from pre_commit.logging_handler import LoggingHandler
11 from pre_commit.output import get_hook_message
12 from pre_commit.output import sys_stdout_write_wrapper
13 from pre_commit.staged_files_only import staged_files_only
14 from pre_commit.util import cmd_output
15 from pre_commit.util import noop_context
16
17
18 logger = logging.getLogger('pre_commit')
19
20
21 def _get_skips(environ):
22 skips = environ.get('SKIP', '')
23 return set(skip.strip() for skip in skips.split(',') if skip.strip())
24
25
26 def _hook_msg_start(hook, verbose):
27 return '{0}{1}'.format(
28 '[{0}] '.format(hook['id']) if verbose else '',
29 hook['name'],
30 )
31
32
33 def _print_no_files_skipped(hook, write, args):
34 write(get_hook_message(
35 _hook_msg_start(hook, args.verbose),
36 postfix='(no files to check) ',
37 end_msg='Skipped',
38 end_color=color.TURQUOISE,
39 use_color=args.color,
40 ))
41
42
43 def _print_user_skipped(hook, write, args):
44 write(get_hook_message(
45 _hook_msg_start(hook, args.verbose),
46 end_msg='Skipped',
47 end_color=color.YELLOW,
48 use_color=args.color,
49 ))
50
51
52 def get_changed_files(new, old):
53 return cmd_output(
54 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),
55 )[1].splitlines()
56
57
58 def get_filenames(args, include_expr, exclude_expr):
59 if args.origin and args.source:
60 getter = git.get_files_matching(
61 lambda: get_changed_files(args.origin, args.source),
62 )
63 elif args.files:
64 getter = git.get_files_matching(lambda: args.files)
65 elif args.all_files:
66 getter = git.get_all_files_matching
67 elif git.is_in_merge_conflict():
68 getter = git.get_conflicted_files_matching
69 else:
70 getter = git.get_staged_files_matching
71 return getter(include_expr, exclude_expr)
72
73
74 def _run_single_hook(hook, repo, args, write, skips=frozenset()):
75 filenames = get_filenames(args, hook['files'], hook['exclude'])
76 if hook['id'] in skips:
77 _print_user_skipped(hook, write, args)
78 return 0
79 elif not filenames:
80 _print_no_files_skipped(hook, write, args)
81 return 0
82
83 # Print the hook and the dots first in case the hook takes hella long to
84 # run.
85 write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
86 sys.stdout.flush()
87
88 retcode, stdout, stderr = repo.run_hook(hook, filenames)
89
90 if retcode != hook['expected_return_value']:
91 retcode = 1
92 print_color = color.RED
93 pass_fail = 'Failed'
94 else:
95 retcode = 0
96 print_color = color.GREEN
97 pass_fail = 'Passed'
98
99 write(color.format_color(pass_fail, print_color, args.color) + '\n')
100
101 if (stdout or stderr) and (retcode or args.verbose):
102 write('hookid: {0}\n'.format(hook['id']))
103 write('\n')
104 for output in (stdout, stderr):
105 assert type(output) is bytes, type(output)
106 if output.strip():
107 write(output.strip() + b'\n')
108 write('\n')
109
110 return retcode
111
112
113 def _run_hooks(repo_hooks, args, write, environ):
114 """Actually run the hooks."""
115 skips = _get_skips(environ)
116 retval = 0
117 for repo, hook in repo_hooks:
118 retval |= _run_single_hook(hook, repo, args, write, skips)
119 return retval
120
121
122 def get_repo_hooks(runner):
123 for repo in runner.repositories:
124 for _, hook in repo.hooks:
125 yield (repo, hook)
126
127
128 def _has_unmerged_paths(runner):
129 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
130 return bool(stdout.strip())
131
132
133 def _has_unstaged_config(runner):
134 retcode, _, _ = runner.cmd_runner.run(
135 ('git', 'diff', '--exit-code', runner.config_file_path),
136 retcode=None,
137 )
138 # be explicit, other git errors don't mean it has an unstaged config.
139 return retcode == 1
140
141
142 def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
143 no_stash = args.no_stash or args.all_files or bool(args.files)
144 # Set up our logging handler
145 logger.addHandler(LoggingHandler(args.color, write=write))
146 logger.setLevel(logging.INFO)
147
148 # Check if we have unresolved merge conflict files and fail fast.
149 if _has_unmerged_paths(runner):
150 logger.error('Unmerged files. Resolve before committing.')
151 return 1
152 if bool(args.source) != bool(args.origin):
153 logger.error('Specify both --origin and --source.')
154 return 1
155 if _has_unstaged_config(runner) and not no_stash:
156 if args.allow_unstaged_config:
157 logger.warn(
158 'You have an unstaged config file and have specified the '
159 '--allow-unstaged-config option.\n'
160 'Note that your config will be stashed before the config is '
161 'parsed unless --no-stash is specified.',
162 )
163 else:
164 logger.error(
165 'Your .pre-commit-config.yaml is unstaged.\n'
166 '`git add .pre-commit-config.yaml` to fix this.\n'
167 'Run pre-commit with --allow-unstaged-config to silence this.'
168 )
169 return 1
170
171 if no_stash:
172 ctx = noop_context()
173 else:
174 ctx = staged_files_only(runner.cmd_runner)
175
176 with ctx:
177 repo_hooks = list(get_repo_hooks(runner))
178
179 if args.hook:
180 repo_hooks = [
181 (repo, hook) for repo, hook in repo_hooks
182 if hook['id'] == args.hook
183 ]
184 if not repo_hooks:
185 write('No hook with id `{0}`\n'.format(args.hook))
186 return 1
187
188 # Filter hooks for stages
189 repo_hooks = [
190 (repo, hook) for repo, hook in repo_hooks
191 if not hook['stages'] or args.hook_stage in hook['stages']
192 ]
193
194 return _run_hooks(repo_hooks, args, write, environ)
195
```
Path: `pre_commit/clientlib/validate_manifest.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from pre_commit.clientlib.validate_base import get_run_function
4 from pre_commit.clientlib.validate_base import get_validator
5 from pre_commit.clientlib.validate_base import is_regex_valid
6 from pre_commit.languages.all import all_languages
7
8
9 class InvalidManifestError(ValueError):
10 pass
11
12
13 MANIFEST_JSON_SCHEMA = {
14 'type': 'array',
15 'minItems': 1,
16 'items': {
17 'type': 'object',
18 'properties': {
19 'id': {'type': 'string'},
20 'name': {'type': 'string'},
21 'description': {'type': 'string', 'default': ''},
22 'entry': {'type': 'string'},
23 'exclude': {'type': 'string', 'default': '^$'},
24 'language': {'type': 'string'},
25 'language_version': {'type': 'string', 'default': 'default'},
26 'files': {'type': 'string'},
27 'expected_return_value': {'type': 'number', 'default': 0},
28 'stages': {
29 'type': 'array',
30 'default': [],
31 'items': {
32 'type': 'string',
33 },
34 },
35 'args': {
36 'type': 'array',
37 'default': [],
38 'items': {
39 'type': 'string',
40 },
41 },
42 },
43 'required': ['id', 'name', 'entry', 'language', 'files'],
44 },
45 }
46
47
48 def validate_languages(hook_config):
49 if hook_config['language'] not in all_languages:
50 raise InvalidManifestError(
51 'Expected language {0} for {1} to be one of {2!r}'.format(
52 hook_config['id'],
53 hook_config['language'],
54 all_languages,
55 )
56 )
57
58
59 def validate_files(hook_config):
60 if not is_regex_valid(hook_config['files']):
61 raise InvalidManifestError(
62 'Invalid files regex at {0}: {1}'.format(
63 hook_config['id'], hook_config['files'],
64 )
65 )
66
67 if not is_regex_valid(hook_config.get('exclude', '')):
68 raise InvalidManifestError(
69 'Invalid exclude regex at {0}: {1}'.format(
70 hook_config['id'], hook_config['exclude'],
71 )
72 )
73
74
75 def additional_manifest_check(obj):
76 for hook_config in obj:
77 validate_languages(hook_config)
78 validate_files(hook_config)
79
80
81 load_manifest = get_validator(
82 MANIFEST_JSON_SCHEMA,
83 InvalidManifestError,
84 additional_manifest_check,
85 )
86
87
88 run = get_run_function(
89 'Manifest filenames.',
90 load_manifest,
91 InvalidManifestError,
92 )
93
94
95 if __name__ == '__main__':
96 exit(run())
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py
--- a/pre_commit/clientlib/validate_manifest.py
+++ b/pre_commit/clientlib/validate_manifest.py
@@ -24,7 +24,6 @@
'language': {'type': 'string'},
'language_version': {'type': 'string', 'default': 'default'},
'files': {'type': 'string'},
- 'expected_return_value': {'type': 'number', 'default': 0},
'stages': {
'type': 'array',
'default': [],
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -87,7 +87,7 @@
retcode, stdout, stderr = repo.run_hook(hook, filenames)
- if retcode != hook['expected_return_value']:
+ if retcode:
retcode = 1
print_color = color.RED
pass_fail = 'Failed'
|
{"golden_diff": "diff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py\n--- a/pre_commit/clientlib/validate_manifest.py\n+++ b/pre_commit/clientlib/validate_manifest.py\n@@ -24,7 +24,6 @@\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n- 'expected_return_value': {'type': 'number', 'default': 0},\n 'stages': {\n 'type': 'array',\n 'default': [],\ndiff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -87,7 +87,7 @@\n \n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n \n- if retcode != hook['expected_return_value']:\n+ if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n", "issue": "Remove / deprecate expected_return_code\nSince we rely heavily on xargs, this feature doesn't really work as expected.\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode != hook['expected_return_value']:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n assert type(output) is bytes, type(output)\n if output.strip():\n write(output.strip() + b'\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}, {"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.languages.all import all_languages\n\n\nclass InvalidManifestError(ValueError):\n pass\n\n\nMANIFEST_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n 'exclude': {'type': 'string', 'default': '^$'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n 'expected_return_value': {'type': 'number', 'default': 0},\n 'stages': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n 'args': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n },\n 'required': ['id', 'name', 'entry', 'language', 'files'],\n },\n}\n\n\ndef validate_languages(hook_config):\n if hook_config['language'] not in all_languages:\n raise InvalidManifestError(\n 'Expected language {0} for {1} to be one of {2!r}'.format(\n hook_config['id'],\n hook_config['language'],\n all_languages,\n )\n )\n\n\ndef validate_files(hook_config):\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n hook_config['id'], hook_config['files'],\n )\n )\n\n if not is_regex_valid(hook_config.get('exclude', '')):\n raise InvalidManifestError(\n 'Invalid exclude regex at {0}: {1}'.format(\n hook_config['id'], hook_config['exclude'],\n )\n )\n\n\ndef additional_manifest_check(obj):\n for hook_config in obj:\n validate_languages(hook_config)\n validate_files(hook_config)\n\n\nload_manifest = get_validator(\n MANIFEST_JSON_SCHEMA,\n InvalidManifestError,\n additional_manifest_check,\n)\n\n\nrun = get_run_function(\n 'Manifest filenames.',\n load_manifest,\n InvalidManifestError,\n)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_manifest.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return set(skip.strip() for skip in skips.split(',') if skip.strip())\n\n\ndef _hook_msg_start(hook, verbose):\n return '{0}{1}'.format(\n '[{0}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef _print_no_files_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix='(no files to check) ',\n end_msg='Skipped',\n end_color=color.TURQUOISE,\n use_color=args.color,\n ))\n\n\ndef _print_user_skipped(hook, write, args):\n write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg='Skipped',\n end_color=color.YELLOW,\n use_color=args.color,\n ))\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{0}..{1}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\ndef _run_single_hook(hook, repo, args, write, skips=frozenset()):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n _print_user_skipped(hook, write, args)\n return 0\n elif not filenames:\n _print_no_files_skipped(hook, write, args)\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))\n sys.stdout.flush()\n\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n write(color.format_color(pass_fail, print_color, args.color) + '\\n')\n\n if (stdout or stderr) and (retcode or args.verbose):\n write('hookid: {0}\\n'.format(hook['id']))\n write('\\n')\n for output in (stdout, stderr):\n assert type(output) is bytes, type(output)\n if output.strip():\n write(output.strip() + b'\\n')\n write('\\n')\n\n return retcode\n\n\ndef _run_hooks(repo_hooks, args, write, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, write, skips)\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n # Set up our logging handler\n logger.addHandler(LoggingHandler(args.color, write=write))\n logger.setLevel(logging.INFO)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n write('No hook with id `{0}`\\n'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, write, environ)\n", "path": "pre_commit/commands/run.py"}, {"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.languages.all import all_languages\n\n\nclass InvalidManifestError(ValueError):\n pass\n\n\nMANIFEST_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n 'exclude': {'type': 'string', 'default': '^$'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n 'stages': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n 'args': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n },\n 'required': ['id', 'name', 'entry', 'language', 'files'],\n },\n}\n\n\ndef validate_languages(hook_config):\n if hook_config['language'] not in all_languages:\n raise InvalidManifestError(\n 'Expected language {0} for {1} to be one of {2!r}'.format(\n hook_config['id'],\n hook_config['language'],\n all_languages,\n )\n )\n\n\ndef validate_files(hook_config):\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n hook_config['id'], hook_config['files'],\n )\n )\n\n if not is_regex_valid(hook_config.get('exclude', '')):\n raise InvalidManifestError(\n 'Invalid exclude regex at {0}: {1}'.format(\n hook_config['id'], hook_config['exclude'],\n )\n )\n\n\ndef additional_manifest_check(obj):\n for hook_config in obj:\n validate_languages(hook_config)\n validate_files(hook_config)\n\n\nload_manifest = get_validator(\n MANIFEST_JSON_SCHEMA,\n InvalidManifestError,\n additional_manifest_check,\n)\n\n\nrun = get_run_function(\n 'Manifest filenames.',\n load_manifest,\n InvalidManifestError,\n)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_manifest.py"}]}
| 2,989 | 235 |
gh_patches_debug_6956
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-5484
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replication assumes every plugin supports Publications
The ```Replicator``` class, which is being subclassed inside plugins to support the replication feature, assumes that every plugin supports publications as it tries to access ```upstream_distribution["publication"]``` inside one of its methods (for some plugins, the dictionary simply doesn't contain the "publication" key so an exception gets raised). This forces certain subclasses of ```Replicator``` to create workarounds or rewrite the given method.
I propose making the method more general, removing such assumptions.
Relevant code: https://github.com/pulp/pulpcore/blob/c31a39c186fd734e9e0409321735d7d6509a12ee/pulpcore/app/replica.py#L97C9-L97C96
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/replica.py`
Content:
```
1 from django.conf import settings
2 from django.db.models import Model
3 import logging
4
5 from pulp_glue.common.context import PulpContext
6 from pulpcore.tasking.tasks import dispatch
7 from pulpcore.app.tasks.base import (
8 general_update,
9 general_create,
10 general_multi_delete,
11 )
12 from pulpcore.plugin.util import get_url, get_domain
13
14 _logger = logging.getLogger(__name__)
15
16
17 class ReplicaContext(PulpContext):
18 def __init__(self, **kwargs):
19 super().__init__(**kwargs)
20 self.out_buf = ""
21 self.err_buf = ""
22
23 def echo(self, message: str, nl: bool = True, err: bool = False) -> None:
24 if err:
25 self.err_buf += message
26 if nl:
27 _logger.warn("{}", self.err_buf)
28 self.err_buf = ""
29 else:
30 self.out_buf += message
31 if nl:
32 _logger.info("{}", self.out_buf)
33 self.out_buf = ""
34
35
36 class Replicator:
37 distribution_ctx_cls = None
38 repository_ctx_cls = None
39 publication_ctx_cls = None
40 remote_model_cls = None
41 repository_model_cls = None
42 distribution_model_cls = None
43 distribution_serializer_name = None
44 repository_serializer_name = None
45 remote_serializer_name = None
46 app_label = None
47 sync_task = None
48
49 def __init__(self, pulp_ctx, task_group, tls_settings):
50 """
51 :param pulp_ctx: PulpReplicaContext
52 :param task_group: TaskGroup
53 :param ca_cert: str
54 """
55 self.pulp_ctx = pulp_ctx
56 self.task_group = task_group
57 self.tls_settings = tls_settings
58 self.domain = get_domain()
59 uri = "/api/v3/distributions/"
60 # TODO check and compare this to distribution locking on the distribution viewset.
61 if settings.DOMAIN_ENABLED:
62 uri = f"/{self.domain.name}{uri}"
63 self.distros_uris = [uri, f"pdrn:{self.domain.pulp_id}:distributions"]
64
65 @staticmethod
66 def needs_update(fields_dict, model_instance):
67 """
68 Compares a Model instance's attributes against a dictionary where keys are attribute
69 names and values are expected values.
70 """
71 needs_update = False
72 for field_name, value in fields_dict.items():
73 if isinstance(getattr(model_instance, field_name), Model):
74 if get_url(getattr(model_instance, field_name)) != value:
75 needs_update = True
76 elif getattr(model_instance, field_name) != value:
77 needs_update = True
78 return needs_update
79
80 def upstream_distributions(self, labels=None):
81 if labels:
82 params = {"pulp_label_select": labels}
83 else:
84 params = {}
85 offset = 0
86 list_size = 100
87 while True:
88 distributions = self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params)
89 for distro in distributions:
90 yield distro
91 if len(distributions) < list_size:
92 break
93 offset += list_size
94
95 def url(self, upstream_distribution):
96 return upstream_distribution["base_url"]
97
98 def remote_extra_fields(self, upstream_distribution):
99 return {}
100
101 def create_or_update_remote(self, upstream_distribution):
102 if not upstream_distribution["repository"] and not upstream_distribution["publication"]:
103 return None
104 url = self.url(upstream_distribution)
105 remote_fields_dict = {"url": url}
106 remote_fields_dict.update(self.tls_settings)
107 remote_fields_dict.update(self.remote_extra_fields(upstream_distribution))
108
109 # Check if there is a remote pointing to this distribution
110 try:
111 remote = self.remote_model_cls.objects.get(
112 name=upstream_distribution["name"], pulp_domain=self.domain
113 )
114 needs_update = self.needs_update(remote_fields_dict, remote)
115 if needs_update:
116 dispatch(
117 general_update,
118 task_group=self.task_group,
119 exclusive_resources=[remote],
120 args=(remote.pk, self.app_label, self.remote_serializer_name),
121 kwargs={"data": remote_fields_dict, "partial": True},
122 )
123 except self.remote_model_cls.DoesNotExist:
124 # Create the remote
125 remote = self.remote_model_cls(name=upstream_distribution["name"], **remote_fields_dict)
126 remote.save()
127
128 return remote
129
130 def repository_extra_fields(self, remote):
131 return {}
132
133 def create_or_update_repository(self, remote):
134 try:
135 repository = self.repository_model_cls.objects.get(
136 name=remote.name, pulp_domain=self.domain
137 )
138 repo_fields_dict = self.repository_extra_fields(remote)
139 needs_update = self.needs_update(repo_fields_dict, repository)
140 if needs_update:
141 dispatch(
142 general_update,
143 task_group=self.task_group,
144 exclusive_resources=[repository],
145 args=(repository.pk, self.app_label, self.repository_serializer_name),
146 kwargs={"data": repo_fields_dict, "partial": True},
147 )
148 except self.repository_model_cls.DoesNotExist:
149 repository = self.repository_model_cls(
150 name=remote.name, **self.repository_extra_fields(remote)
151 )
152 repository.save()
153 return repository
154
155 def distribution_data(self, repository, upstream_distribution):
156 """
157 Return the fields that need to be updated/cleared on distributions for idempotence.
158 """
159 return {
160 "repository": get_url(repository),
161 "publication": None,
162 "base_path": upstream_distribution["base_path"],
163 }
164
165 def create_or_update_distribution(self, repository, upstream_distribution):
166 distribution_data = self.distribution_data(repository, upstream_distribution)
167 try:
168 distro = self.distribution_model_cls.objects.get(
169 name=upstream_distribution["name"], pulp_domain=self.domain
170 )
171 # Check that the distribution has the right repository associated
172 needs_update = self.needs_update(distribution_data, distro)
173 if needs_update:
174 # Update the distribution
175 dispatch(
176 general_update,
177 task_group=self.task_group,
178 shared_resources=[repository],
179 exclusive_resources=self.distros_uris,
180 args=(distro.pk, self.app_label, self.distribution_serializer_name),
181 kwargs={
182 "data": distribution_data,
183 "partial": True,
184 },
185 )
186 except self.distribution_model_cls.DoesNotExist:
187 # Dispatch a task to create the distribution
188 distribution_data["name"] = upstream_distribution["name"]
189 dispatch(
190 general_create,
191 task_group=self.task_group,
192 shared_resources=[repository],
193 exclusive_resources=self.distros_uris,
194 args=(self.app_label, self.distribution_serializer_name),
195 kwargs={"data": distribution_data},
196 )
197
198 def sync_params(self, repository, remote):
199 """This method returns a dict that will be passed as kwargs to the sync task."""
200 raise NotImplementedError("Each replicator must supply its own sync params.")
201
202 def sync(self, repository, remote):
203 dispatch(
204 self.sync_task,
205 task_group=self.task_group,
206 shared_resources=[remote],
207 exclusive_resources=[repository],
208 kwargs=self.sync_params(repository, remote),
209 )
210
211 def remove_missing(self, names):
212 # Remove all distributions with names not present in the list of names
213 # Perform this in an extra task, because we hold a big lock here.
214 distribution_ids = [
215 (distribution.pk, self.app_label, self.distribution_serializer_name)
216 for distribution in self.distribution_model_cls.objects.filter(
217 pulp_domain=self.domain
218 ).exclude(name__in=names)
219 ]
220 if distribution_ids:
221 dispatch(
222 general_multi_delete,
223 task_group=self.task_group,
224 exclusive_resources=self.distros_uris,
225 args=(distribution_ids,),
226 )
227
228 # Remove all the repositories and remotes of the missing distributions
229 repositories = list(
230 self.repository_model_cls.objects.filter(
231 pulp_domain=self.domain, user_hidden=False
232 ).exclude(name__in=names)
233 )
234 repository_ids = [
235 (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories
236 ]
237
238 remotes = list(
239 self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names)
240 )
241 remote_ids = [
242 (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes
243 ]
244
245 if repository_ids or remote_ids:
246 dispatch(
247 general_multi_delete,
248 task_group=self.task_group,
249 exclusive_resources=repositories + remotes,
250 args=(repository_ids + remote_ids,),
251 )
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/replica.py b/pulpcore/app/replica.py
--- a/pulpcore/app/replica.py
+++ b/pulpcore/app/replica.py
@@ -99,7 +99,9 @@
return {}
def create_or_update_remote(self, upstream_distribution):
- if not upstream_distribution["repository"] and not upstream_distribution["publication"]:
+ if not upstream_distribution.get("repository") and not upstream_distribution.get(
+ "publication"
+ ):
return None
url = self.url(upstream_distribution)
remote_fields_dict = {"url": url}
|
{"golden_diff": "diff --git a/pulpcore/app/replica.py b/pulpcore/app/replica.py\n--- a/pulpcore/app/replica.py\n+++ b/pulpcore/app/replica.py\n@@ -99,7 +99,9 @@\n return {}\n \n def create_or_update_remote(self, upstream_distribution):\n- if not upstream_distribution[\"repository\"] and not upstream_distribution[\"publication\"]:\n+ if not upstream_distribution.get(\"repository\") and not upstream_distribution.get(\n+ \"publication\"\n+ ):\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n", "issue": "Replication assumes every plugin supports Publications\nThe ```Replicator``` class, which is being subclassed inside plugins to support the replication feature, assumes that every plugin supports publications as it tries to access ```upstream_distribution[\"publication\"]``` inside one of its methods (for some plugins, the dictionary simply doesn't contain the \"publication\" key so an exception gets raised). This forces certain subclasses of ```Replicator``` to create workarounds or rewrite the given method.\r\n\r\nI propose making the method more general, removing such assumptions.\r\n\r\nRelevant code: https://github.com/pulp/pulpcore/blob/c31a39c186fd734e9e0409321735d7d6509a12ee/pulpcore/app/replica.py#L97C9-L97C96\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db.models import Model\nimport logging\n\nfrom pulp_glue.common.context import PulpContext\nfrom pulpcore.tasking.tasks import dispatch\nfrom pulpcore.app.tasks.base import (\n general_update,\n general_create,\n general_multi_delete,\n)\nfrom pulpcore.plugin.util import get_url, get_domain\n\n_logger = logging.getLogger(__name__)\n\n\nclass ReplicaContext(PulpContext):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.out_buf = \"\"\n self.err_buf = \"\"\n\n def echo(self, message: str, nl: bool = True, err: bool = False) -> None:\n if err:\n self.err_buf += message\n if nl:\n _logger.warn(\"{}\", self.err_buf)\n self.err_buf = \"\"\n else:\n self.out_buf += message\n if nl:\n _logger.info(\"{}\", self.out_buf)\n self.out_buf = \"\"\n\n\nclass Replicator:\n distribution_ctx_cls = None\n repository_ctx_cls = None\n publication_ctx_cls = None\n remote_model_cls = None\n repository_model_cls = None\n distribution_model_cls = None\n distribution_serializer_name = None\n repository_serializer_name = None\n remote_serializer_name = None\n app_label = None\n sync_task = None\n\n def __init__(self, pulp_ctx, task_group, tls_settings):\n \"\"\"\n :param pulp_ctx: PulpReplicaContext\n :param task_group: TaskGroup\n :param ca_cert: str\n \"\"\"\n self.pulp_ctx = pulp_ctx\n self.task_group = task_group\n self.tls_settings = tls_settings\n self.domain = get_domain()\n uri = \"/api/v3/distributions/\"\n # TODO check and compare this to distribution locking on the distribution viewset.\n if settings.DOMAIN_ENABLED:\n uri = f\"/{self.domain.name}{uri}\"\n self.distros_uris = [uri, f\"pdrn:{self.domain.pulp_id}:distributions\"]\n\n @staticmethod\n def needs_update(fields_dict, model_instance):\n \"\"\"\n Compares a Model instance's attributes against a dictionary where keys are attribute\n names and values are expected values.\n \"\"\"\n needs_update = False\n for field_name, value in fields_dict.items():\n if isinstance(getattr(model_instance, field_name), Model):\n if get_url(getattr(model_instance, field_name)) != value:\n needs_update = True\n elif getattr(model_instance, field_name) != value:\n needs_update = True\n return needs_update\n\n def upstream_distributions(self, labels=None):\n if labels:\n params = {\"pulp_label_select\": labels}\n else:\n params = {}\n offset = 0\n list_size = 100\n while True:\n distributions = self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params)\n for distro in distributions:\n yield distro\n if len(distributions) < list_size:\n break\n offset += list_size\n\n def url(self, upstream_distribution):\n return upstream_distribution[\"base_url\"]\n\n def remote_extra_fields(self, upstream_distribution):\n return {}\n\n def create_or_update_remote(self, upstream_distribution):\n if not upstream_distribution[\"repository\"] and not upstream_distribution[\"publication\"]:\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n remote_fields_dict.update(self.tls_settings)\n remote_fields_dict.update(self.remote_extra_fields(upstream_distribution))\n\n # Check if there is a remote pointing to this distribution\n try:\n remote = self.remote_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n needs_update = self.needs_update(remote_fields_dict, remote)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[remote],\n args=(remote.pk, self.app_label, self.remote_serializer_name),\n kwargs={\"data\": remote_fields_dict, \"partial\": True},\n )\n except self.remote_model_cls.DoesNotExist:\n # Create the remote\n remote = self.remote_model_cls(name=upstream_distribution[\"name\"], **remote_fields_dict)\n remote.save()\n\n return remote\n\n def repository_extra_fields(self, remote):\n return {}\n\n def create_or_update_repository(self, remote):\n try:\n repository = self.repository_model_cls.objects.get(\n name=remote.name, pulp_domain=self.domain\n )\n repo_fields_dict = self.repository_extra_fields(remote)\n needs_update = self.needs_update(repo_fields_dict, repository)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[repository],\n args=(repository.pk, self.app_label, self.repository_serializer_name),\n kwargs={\"data\": repo_fields_dict, \"partial\": True},\n )\n except self.repository_model_cls.DoesNotExist:\n repository = self.repository_model_cls(\n name=remote.name, **self.repository_extra_fields(remote)\n )\n repository.save()\n return repository\n\n def distribution_data(self, repository, upstream_distribution):\n \"\"\"\n Return the fields that need to be updated/cleared on distributions for idempotence.\n \"\"\"\n return {\n \"repository\": get_url(repository),\n \"publication\": None,\n \"base_path\": upstream_distribution[\"base_path\"],\n }\n\n def create_or_update_distribution(self, repository, upstream_distribution):\n distribution_data = self.distribution_data(repository, upstream_distribution)\n try:\n distro = self.distribution_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n # Check that the distribution has the right repository associated\n needs_update = self.needs_update(distribution_data, distro)\n if needs_update:\n # Update the distribution\n dispatch(\n general_update,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(distro.pk, self.app_label, self.distribution_serializer_name),\n kwargs={\n \"data\": distribution_data,\n \"partial\": True,\n },\n )\n except self.distribution_model_cls.DoesNotExist:\n # Dispatch a task to create the distribution\n distribution_data[\"name\"] = upstream_distribution[\"name\"]\n dispatch(\n general_create,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(self.app_label, self.distribution_serializer_name),\n kwargs={\"data\": distribution_data},\n )\n\n def sync_params(self, repository, remote):\n \"\"\"This method returns a dict that will be passed as kwargs to the sync task.\"\"\"\n raise NotImplementedError(\"Each replicator must supply its own sync params.\")\n\n def sync(self, repository, remote):\n dispatch(\n self.sync_task,\n task_group=self.task_group,\n shared_resources=[remote],\n exclusive_resources=[repository],\n kwargs=self.sync_params(repository, remote),\n )\n\n def remove_missing(self, names):\n # Remove all distributions with names not present in the list of names\n # Perform this in an extra task, because we hold a big lock here.\n distribution_ids = [\n (distribution.pk, self.app_label, self.distribution_serializer_name)\n for distribution in self.distribution_model_cls.objects.filter(\n pulp_domain=self.domain\n ).exclude(name__in=names)\n ]\n if distribution_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=self.distros_uris,\n args=(distribution_ids,),\n )\n\n # Remove all the repositories and remotes of the missing distributions\n repositories = list(\n self.repository_model_cls.objects.filter(\n pulp_domain=self.domain, user_hidden=False\n ).exclude(name__in=names)\n )\n repository_ids = [\n (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories\n ]\n\n remotes = list(\n self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names)\n )\n remote_ids = [\n (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes\n ]\n\n if repository_ids or remote_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=repositories + remotes,\n args=(repository_ids + remote_ids,),\n )\n", "path": "pulpcore/app/replica.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db.models import Model\nimport logging\n\nfrom pulp_glue.common.context import PulpContext\nfrom pulpcore.tasking.tasks import dispatch\nfrom pulpcore.app.tasks.base import (\n general_update,\n general_create,\n general_multi_delete,\n)\nfrom pulpcore.plugin.util import get_url, get_domain\n\n_logger = logging.getLogger(__name__)\n\n\nclass ReplicaContext(PulpContext):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.out_buf = \"\"\n self.err_buf = \"\"\n\n def echo(self, message: str, nl: bool = True, err: bool = False) -> None:\n if err:\n self.err_buf += message\n if nl:\n _logger.warn(\"{}\", self.err_buf)\n self.err_buf = \"\"\n else:\n self.out_buf += message\n if nl:\n _logger.info(\"{}\", self.out_buf)\n self.out_buf = \"\"\n\n\nclass Replicator:\n distribution_ctx_cls = None\n repository_ctx_cls = None\n publication_ctx_cls = None\n remote_model_cls = None\n repository_model_cls = None\n distribution_model_cls = None\n distribution_serializer_name = None\n repository_serializer_name = None\n remote_serializer_name = None\n app_label = None\n sync_task = None\n\n def __init__(self, pulp_ctx, task_group, tls_settings):\n \"\"\"\n :param pulp_ctx: PulpReplicaContext\n :param task_group: TaskGroup\n :param ca_cert: str\n \"\"\"\n self.pulp_ctx = pulp_ctx\n self.task_group = task_group\n self.tls_settings = tls_settings\n self.domain = get_domain()\n uri = \"/api/v3/distributions/\"\n # TODO check and compare this to distribution locking on the distribution viewset.\n if settings.DOMAIN_ENABLED:\n uri = f\"/{self.domain.name}{uri}\"\n self.distros_uris = [uri, f\"pdrn:{self.domain.pulp_id}:distributions\"]\n\n @staticmethod\n def needs_update(fields_dict, model_instance):\n \"\"\"\n Compares a Model instance's attributes against a dictionary where keys are attribute\n names and values are expected values.\n \"\"\"\n needs_update = False\n for field_name, value in fields_dict.items():\n if isinstance(getattr(model_instance, field_name), Model):\n if get_url(getattr(model_instance, field_name)) != value:\n needs_update = True\n elif getattr(model_instance, field_name) != value:\n needs_update = True\n return needs_update\n\n def upstream_distributions(self, labels=None):\n if labels:\n params = {\"pulp_label_select\": labels}\n else:\n params = {}\n offset = 0\n list_size = 100\n while True:\n distributions = self.distribution_ctx_cls(self.pulp_ctx).list(list_size, offset, params)\n for distro in distributions:\n yield distro\n if len(distributions) < list_size:\n break\n offset += list_size\n\n def url(self, upstream_distribution):\n return upstream_distribution[\"base_url\"]\n\n def remote_extra_fields(self, upstream_distribution):\n return {}\n\n def create_or_update_remote(self, upstream_distribution):\n if not upstream_distribution.get(\"repository\") and not upstream_distribution.get(\n \"publication\"\n ):\n return None\n url = self.url(upstream_distribution)\n remote_fields_dict = {\"url\": url}\n remote_fields_dict.update(self.tls_settings)\n remote_fields_dict.update(self.remote_extra_fields(upstream_distribution))\n\n # Check if there is a remote pointing to this distribution\n try:\n remote = self.remote_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n needs_update = self.needs_update(remote_fields_dict, remote)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[remote],\n args=(remote.pk, self.app_label, self.remote_serializer_name),\n kwargs={\"data\": remote_fields_dict, \"partial\": True},\n )\n except self.remote_model_cls.DoesNotExist:\n # Create the remote\n remote = self.remote_model_cls(name=upstream_distribution[\"name\"], **remote_fields_dict)\n remote.save()\n\n return remote\n\n def repository_extra_fields(self, remote):\n return {}\n\n def create_or_update_repository(self, remote):\n try:\n repository = self.repository_model_cls.objects.get(\n name=remote.name, pulp_domain=self.domain\n )\n repo_fields_dict = self.repository_extra_fields(remote)\n needs_update = self.needs_update(repo_fields_dict, repository)\n if needs_update:\n dispatch(\n general_update,\n task_group=self.task_group,\n exclusive_resources=[repository],\n args=(repository.pk, self.app_label, self.repository_serializer_name),\n kwargs={\"data\": repo_fields_dict, \"partial\": True},\n )\n except self.repository_model_cls.DoesNotExist:\n repository = self.repository_model_cls(\n name=remote.name, **self.repository_extra_fields(remote)\n )\n repository.save()\n return repository\n\n def distribution_data(self, repository, upstream_distribution):\n \"\"\"\n Return the fields that need to be updated/cleared on distributions for idempotence.\n \"\"\"\n return {\n \"repository\": get_url(repository),\n \"publication\": None,\n \"base_path\": upstream_distribution[\"base_path\"],\n }\n\n def create_or_update_distribution(self, repository, upstream_distribution):\n distribution_data = self.distribution_data(repository, upstream_distribution)\n try:\n distro = self.distribution_model_cls.objects.get(\n name=upstream_distribution[\"name\"], pulp_domain=self.domain\n )\n # Check that the distribution has the right repository associated\n needs_update = self.needs_update(distribution_data, distro)\n if needs_update:\n # Update the distribution\n dispatch(\n general_update,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(distro.pk, self.app_label, self.distribution_serializer_name),\n kwargs={\n \"data\": distribution_data,\n \"partial\": True,\n },\n )\n except self.distribution_model_cls.DoesNotExist:\n # Dispatch a task to create the distribution\n distribution_data[\"name\"] = upstream_distribution[\"name\"]\n dispatch(\n general_create,\n task_group=self.task_group,\n shared_resources=[repository],\n exclusive_resources=self.distros_uris,\n args=(self.app_label, self.distribution_serializer_name),\n kwargs={\"data\": distribution_data},\n )\n\n def sync_params(self, repository, remote):\n \"\"\"This method returns a dict that will be passed as kwargs to the sync task.\"\"\"\n raise NotImplementedError(\"Each replicator must supply its own sync params.\")\n\n def sync(self, repository, remote):\n dispatch(\n self.sync_task,\n task_group=self.task_group,\n shared_resources=[remote],\n exclusive_resources=[repository],\n kwargs=self.sync_params(repository, remote),\n )\n\n def remove_missing(self, names):\n # Remove all distributions with names not present in the list of names\n # Perform this in an extra task, because we hold a big lock here.\n distribution_ids = [\n (distribution.pk, self.app_label, self.distribution_serializer_name)\n for distribution in self.distribution_model_cls.objects.filter(\n pulp_domain=self.domain\n ).exclude(name__in=names)\n ]\n if distribution_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=self.distros_uris,\n args=(distribution_ids,),\n )\n\n # Remove all the repositories and remotes of the missing distributions\n repositories = list(\n self.repository_model_cls.objects.filter(\n pulp_domain=self.domain, user_hidden=False\n ).exclude(name__in=names)\n )\n repository_ids = [\n (repo.pk, self.app_label, self.repository_serializer_name) for repo in repositories\n ]\n\n remotes = list(\n self.remote_model_cls.objects.filter(pulp_domain=self.domain).exclude(name__in=names)\n )\n remote_ids = [\n (remote.pk, self.app_label, self.remote_serializer_name) for remote in remotes\n ]\n\n if repository_ids or remote_ids:\n dispatch(\n general_multi_delete,\n task_group=self.task_group,\n exclusive_resources=repositories + remotes,\n args=(repository_ids + remote_ids,),\n )\n", "path": "pulpcore/app/replica.py"}]}
| 2,885 | 134 |
gh_patches_debug_10371
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-783
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Executor idle timer not reset
TLDR; I think there's a bug in assessing timeout for idle executors.
Parsl log: https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60
### Setup
Running sequence of 5 tasks that each last about 3 hours on NSCC Aspire1, on a single executor.
### Problem
The first task completes after about 3 hours but then the executor is killed and the parsl head errors out.
See this line for where things go awry: https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60#file-parsl-log-L5197
### Proposed bug
You can see in [this line](https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60#file-parsl-log-L88) that the `idle_time` is set early in the process. There's no logging that the kill timer is being reset/updated later on either. Once the first task finishes, it meets the conditions to enter a potential timeout state (`active_tasks == 0 && active_blocks > min_blocks`). It looks at the very old `idle_time` set earlier and kills the executor immediately.
The solution is to reset the timer once it's no longer in the possible timeout state. Here's the problematic area of code: https://github.com/Parsl/parsl/blob/b4c55567b468278e15516e89373bc0a72d445317/parsl/dataflow/strategy.py#L209-L228
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/dataflow/strategy.py`
Content:
```
1 import logging
2 import time
3 import math
4
5 from parsl.executors import IPyParallelExecutor, HighThroughputExecutor, ExtremeScaleExecutor
6
7
8 logger = logging.getLogger(__name__)
9
10
11 class Strategy(object):
12 """FlowControl strategy.
13
14 As a workflow dag is processed by Parsl, new tasks are added and completed
15 asynchronously. Parsl interfaces executors with execution providers to construct
16 scalable executors to handle the variable work-load generated by the
17 workflow. This component is responsible for periodically checking outstanding
18 tasks and available compute capacity and trigger scaling events to match
19 workflow needs.
20
21 Here's a diagram of an executor. An executor consists of blocks, which are usually
22 created by single requests to a Local Resource Manager (LRM) such as slurm,
23 condor, torque, or even AWS API. The blocks could contain several task blocks
24 which are separate instances on workers.
25
26
27 .. code:: python
28
29 |<--min_blocks |<-init_blocks max_blocks-->|
30 +----------------------------------------------------------+
31 | +--------block----------+ +--------block--------+ |
32 executor = | | task task | ... | task task | |
33 | +-----------------------+ +---------------------+ |
34 +----------------------------------------------------------+
35
36 The relevant specification options are:
37 1. min_blocks: Minimum number of blocks to maintain
38 2. init_blocks: number of blocks to provision at initialization of workflow
39 3. max_blocks: Maximum number of blocks that can be active due to one workflow
40
41
42 .. code:: python
43
44 slots = current_capacity * tasks_per_node * nodes_per_block
45
46 active_tasks = pending_tasks + running_tasks
47
48 Parallelism = slots / tasks
49 = [0, 1] (i.e, 0 <= p <= 1)
50
51 For example:
52
53 When p = 0,
54 => compute with the least resources possible.
55 infinite tasks are stacked per slot.
56
57 .. code:: python
58
59 blocks = min_blocks { if active_tasks = 0
60 max(min_blocks, 1) { else
61
62 When p = 1,
63 => compute with the most resources.
64 one task is stacked per slot.
65
66 .. code:: python
67
68 blocks = min ( max_blocks,
69 ceil( active_tasks / slots ) )
70
71
72 When p = 1/2,
73 => We stack upto 2 tasks per slot before we overflow
74 and request a new block
75
76
77 let's say min:init:max = 0:0:4 and task_blocks=2
78 Consider the following example:
79 min_blocks = 0
80 init_blocks = 0
81 max_blocks = 4
82 tasks_per_node = 2
83 nodes_per_block = 1
84
85 In the diagram, X <- task
86
87 at 2 tasks:
88
89 .. code:: python
90
91 +---Block---|
92 | |
93 | X X |
94 |slot slot|
95 +-----------+
96
97 at 5 tasks, we overflow as the capacity of a single block is fully used.
98
99 .. code:: python
100
101 +---Block---| +---Block---|
102 | X X | ----> | |
103 | X X | | X |
104 |slot slot| |slot slot|
105 +-----------+ +-----------+
106
107 """
108
109 def __init__(self, dfk):
110 """Initialize strategy."""
111 self.dfk = dfk
112 self.config = dfk.config
113 self.executors = {}
114 self.max_idletime = 60 * 2 # 2 minutes
115
116 for e in self.dfk.config.executors:
117 self.executors[e.label] = {'idle_since': None, 'config': e.label}
118
119 self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}
120
121 self.strategize = self.strategies[self.config.strategy]
122 self.logger_flag = False
123 self.prior_loghandlers = set(logging.getLogger().handlers)
124
125 logger.debug("Scaling strategy: {0}".format(self.config.strategy))
126
127 def _strategy_noop(self, tasks, *args, kind=None, **kwargs):
128 """Do nothing.
129
130 Args:
131 - tasks (task_ids): Not used here.
132
133 KWargs:
134 - kind (Not used)
135 """
136
137 def unset_logging(self):
138 """ Mute newly added handlers to the root level, right after calling executor.status
139 """
140 if self.logger_flag is True:
141 return
142
143 root_logger = logging.getLogger()
144
145 for hndlr in root_logger.handlers:
146 if hndlr not in self.prior_loghandlers:
147 hndlr.setLevel(logging.ERROR)
148
149 self.logger_flag = True
150
151 def _strategy_simple(self, tasks, *args, kind=None, **kwargs):
152 """Peek at the DFK and the executors specified.
153
154 We assume here that tasks are not held in a runnable
155 state, and that all tasks from an app would be sent to
156 a single specific executor, i.e tasks cannot be specified
157 to go to one of more executors.
158
159 Args:
160 - tasks (task_ids): Not used here.
161
162 KWargs:
163 - kind (Not used)
164 """
165
166 for label, executor in self.dfk.executors.items():
167 if not executor.scaling_enabled:
168 continue
169
170 # Tasks that are either pending completion
171 active_tasks = executor.outstanding
172
173 status = executor.status()
174 self.unset_logging()
175
176 # FIXME we need to handle case where provider does not define these
177 # FIXME probably more of this logic should be moved to the provider
178 min_blocks = executor.provider.min_blocks
179 max_blocks = executor.provider.max_blocks
180 if isinstance(executor, IPyParallelExecutor):
181 tasks_per_node = executor.workers_per_node
182 elif isinstance(executor, HighThroughputExecutor):
183 # This is probably wrong calculation, we need this to come from the executor
184 # since we can't know slots ahead of time.
185 tasks_per_node = 1
186 elif isinstance(executor, ExtremeScaleExecutor):
187 tasks_per_node = executor.ranks_per_node
188
189 nodes_per_block = executor.provider.nodes_per_block
190 parallelism = executor.provider.parallelism
191
192 running = sum([1 for x in status if x == 'RUNNING'])
193 submitting = sum([1 for x in status if x == 'SUBMITTING'])
194 pending = sum([1 for x in status if x == 'PENDING'])
195 active_blocks = running + submitting + pending
196 active_slots = active_blocks * tasks_per_node * nodes_per_block
197
198 if (isinstance(executor, IPyParallelExecutor) or
199 isinstance(executor, HighThroughputExecutor) or
200 isinstance(executor, ExtremeScaleExecutor)):
201 logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format(
202 label, active_tasks, running, submitting, pending, len(executor.connected_workers)))
203 else:
204 logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(
205 label, active_tasks, running, submitting, pending))
206
207 # Case 1
208 # No tasks.
209 if active_tasks == 0:
210 # Case 1a
211 # Fewer blocks that min_blocks
212 if active_blocks <= min_blocks:
213 # Ignore
214 # logger.debug("Strategy: Case.1a")
215 pass
216
217 # Case 1b
218 # More blocks than min_blocks. Scale down
219 else:
220 # We want to make sure that max_idletime is reached
221 # before killing off resources
222 if not self.executors[executor.label]['idle_since']:
223 logger.debug("Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
224 label, self.max_idletime)
225 )
226 self.executors[executor.label]['idle_since'] = time.time()
227
228 idle_since = self.executors[executor.label]['idle_since']
229 if (time.time() - idle_since) > self.max_idletime:
230 # We have resources idle for the max duration,
231 # we have to scale_in now.
232 logger.debug("Idle time has reached {}s for executor {}; removing resources".format(
233 self.max_idletime, label)
234 )
235 executor.scale_in(active_blocks - min_blocks)
236
237 else:
238 pass
239 # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))
240
241 # Case 2
242 # More tasks than the available slots.
243 elif (float(active_slots) / active_tasks) < parallelism:
244 # Case 2a
245 # We have the max blocks possible
246 if active_blocks >= max_blocks:
247 # Ignore since we already have the max nodes
248 # logger.debug("Strategy: Case.2a")
249 pass
250
251 # Case 2b
252 else:
253 # logger.debug("Strategy: Case.2b")
254 excess = math.ceil((active_tasks * parallelism) - active_slots)
255 excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))
256 logger.debug("Requesting {} more blocks".format(excess_blocks))
257 executor.scale_out(excess_blocks)
258
259 elif active_slots == 0 and active_tasks > 0:
260 # Case 4
261 # Check if slots are being lost quickly ?
262 logger.debug("Requesting single slot")
263 executor.scale_out(1)
264 # Case 3
265 # tasks ~ slots
266 else:
267 # logger.debug("Strategy: Case 3")
268 pass
269
270
271 if __name__ == '__main__':
272
273 pass
274
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py
--- a/parsl/dataflow/strategy.py
+++ b/parsl/dataflow/strategy.py
@@ -204,6 +204,10 @@
logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(
label, active_tasks, running, submitting, pending))
+ # reset kill timer if executor has active tasks
+ if active_tasks > 0 and self.executors[executor.label]['idle_since']:
+ self.executors[executor.label]['idle_since'] = None
+
# Case 1
# No tasks.
if active_tasks == 0:
|
{"golden_diff": "diff --git a/parsl/dataflow/strategy.py b/parsl/dataflow/strategy.py\n--- a/parsl/dataflow/strategy.py\n+++ b/parsl/dataflow/strategy.py\n@@ -204,6 +204,10 @@\n logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(\n label, active_tasks, running, submitting, pending))\n \n+ # reset kill timer if executor has active tasks\n+ if active_tasks > 0 and self.executors[executor.label]['idle_since']:\n+ self.executors[executor.label]['idle_since'] = None\n+\n # Case 1\n # No tasks.\n if active_tasks == 0:\n", "issue": "Executor idle timer not reset\nTLDR; I think there's a bug in assessing timeout for idle executors.\r\n\r\nParsl log: https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60\r\n\r\n### Setup\r\nRunning sequence of 5 tasks that each last about 3 hours on NSCC Aspire1, on a single executor.\r\n\r\n### Problem\r\nThe first task completes after about 3 hours but then the executor is killed and the parsl head errors out.\r\nSee this line for where things go awry: https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60#file-parsl-log-L5197\r\n\r\n### Proposed bug\r\nYou can see in [this line](https://gist.github.com/macintoshpie/1ae87cd93e45e532513ea2833a3f8e60#file-parsl-log-L88) that the `idle_time` is set early in the process. There's no logging that the kill timer is being reset/updated later on either. Once the first task finishes, it meets the conditions to enter a potential timeout state (`active_tasks == 0 && active_blocks > min_blocks`). It looks at the very old `idle_time` set earlier and kills the executor immediately.\r\n\r\nThe solution is to reset the timer once it's no longer in the possible timeout state. Here's the problematic area of code: https://github.com/Parsl/parsl/blob/b4c55567b468278e15516e89373bc0a72d445317/parsl/dataflow/strategy.py#L209-L228\n", "before_files": [{"content": "import logging\nimport time\nimport math\n\nfrom parsl.executors import IPyParallelExecutor, HighThroughputExecutor, ExtremeScaleExecutor\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n self.logger_flag = False\n self.prior_loghandlers = set(logging.getLogger().handlers)\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def unset_logging(self):\n \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n \"\"\"\n if self.logger_flag is True:\n return\n\n root_logger = logging.getLogger()\n\n for hndlr in root_logger.handlers:\n if hndlr not in self.prior_loghandlers:\n hndlr.setLevel(logging.ERROR)\n\n self.logger_flag = True\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.outstanding\n\n status = executor.status()\n self.unset_logging()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n if isinstance(executor, IPyParallelExecutor):\n tasks_per_node = executor.workers_per_node\n elif isinstance(executor, HighThroughputExecutor):\n # This is probably wrong calculation, we need this to come from the executor\n # since we can't know slots ahead of time.\n tasks_per_node = 1\n elif isinstance(executor, ExtremeScaleExecutor):\n tasks_per_node = executor.ranks_per_node\n\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n running = sum([1 for x in status if x == 'RUNNING'])\n submitting = sum([1 for x in status if x == 'SUBMITTING'])\n pending = sum([1 for x in status if x == 'PENDING'])\n active_blocks = running + submitting + pending\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n if (isinstance(executor, IPyParallelExecutor) or\n isinstance(executor, HighThroughputExecutor) or\n isinstance(executor, ExtremeScaleExecutor)):\n logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format(\n label, active_tasks, running, submitting, pending, len(executor.connected_workers)))\n else:\n logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(\n label, active_tasks, running, submitting, pending))\n\n # Case 1\n # No tasks.\n if active_tasks == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)\".format(\n label, self.max_idletime)\n )\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Idle time has reached {}s for executor {}; removing resources\".format(\n self.max_idletime, label)\n )\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / active_tasks) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((active_tasks * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and active_tasks > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}], "after_files": [{"content": "import logging\nimport time\nimport math\n\nfrom parsl.executors import IPyParallelExecutor, HighThroughputExecutor, ExtremeScaleExecutor\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Strategy(object):\n \"\"\"FlowControl strategy.\n\n As a workflow dag is processed by Parsl, new tasks are added and completed\n asynchronously. Parsl interfaces executors with execution providers to construct\n scalable executors to handle the variable work-load generated by the\n workflow. This component is responsible for periodically checking outstanding\n tasks and available compute capacity and trigger scaling events to match\n workflow needs.\n\n Here's a diagram of an executor. An executor consists of blocks, which are usually\n created by single requests to a Local Resource Manager (LRM) such as slurm,\n condor, torque, or even AWS API. The blocks could contain several task blocks\n which are separate instances on workers.\n\n\n .. code:: python\n\n |<--min_blocks |<-init_blocks max_blocks-->|\n +----------------------------------------------------------+\n | +--------block----------+ +--------block--------+ |\n executor = | | task task | ... | task task | |\n | +-----------------------+ +---------------------+ |\n +----------------------------------------------------------+\n\n The relevant specification options are:\n 1. min_blocks: Minimum number of blocks to maintain\n 2. init_blocks: number of blocks to provision at initialization of workflow\n 3. max_blocks: Maximum number of blocks that can be active due to one workflow\n\n\n .. code:: python\n\n slots = current_capacity * tasks_per_node * nodes_per_block\n\n active_tasks = pending_tasks + running_tasks\n\n Parallelism = slots / tasks\n = [0, 1] (i.e, 0 <= p <= 1)\n\n For example:\n\n When p = 0,\n => compute with the least resources possible.\n infinite tasks are stacked per slot.\n\n .. code:: python\n\n blocks = min_blocks { if active_tasks = 0\n max(min_blocks, 1) { else\n\n When p = 1,\n => compute with the most resources.\n one task is stacked per slot.\n\n .. code:: python\n\n blocks = min ( max_blocks,\n ceil( active_tasks / slots ) )\n\n\n When p = 1/2,\n => We stack upto 2 tasks per slot before we overflow\n and request a new block\n\n\n let's say min:init:max = 0:0:4 and task_blocks=2\n Consider the following example:\n min_blocks = 0\n init_blocks = 0\n max_blocks = 4\n tasks_per_node = 2\n nodes_per_block = 1\n\n In the diagram, X <- task\n\n at 2 tasks:\n\n .. code:: python\n\n +---Block---|\n | |\n | X X |\n |slot slot|\n +-----------+\n\n at 5 tasks, we overflow as the capacity of a single block is fully used.\n\n .. code:: python\n\n +---Block---| +---Block---|\n | X X | ----> | |\n | X X | | X |\n |slot slot| |slot slot|\n +-----------+ +-----------+\n\n \"\"\"\n\n def __init__(self, dfk):\n \"\"\"Initialize strategy.\"\"\"\n self.dfk = dfk\n self.config = dfk.config\n self.executors = {}\n self.max_idletime = 60 * 2 # 2 minutes\n\n for e in self.dfk.config.executors:\n self.executors[e.label] = {'idle_since': None, 'config': e.label}\n\n self.strategies = {None: self._strategy_noop, 'simple': self._strategy_simple}\n\n self.strategize = self.strategies[self.config.strategy]\n self.logger_flag = False\n self.prior_loghandlers = set(logging.getLogger().handlers)\n\n logger.debug(\"Scaling strategy: {0}\".format(self.config.strategy))\n\n def _strategy_noop(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Do nothing.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n def unset_logging(self):\n \"\"\" Mute newly added handlers to the root level, right after calling executor.status\n \"\"\"\n if self.logger_flag is True:\n return\n\n root_logger = logging.getLogger()\n\n for hndlr in root_logger.handlers:\n if hndlr not in self.prior_loghandlers:\n hndlr.setLevel(logging.ERROR)\n\n self.logger_flag = True\n\n def _strategy_simple(self, tasks, *args, kind=None, **kwargs):\n \"\"\"Peek at the DFK and the executors specified.\n\n We assume here that tasks are not held in a runnable\n state, and that all tasks from an app would be sent to\n a single specific executor, i.e tasks cannot be specified\n to go to one of more executors.\n\n Args:\n - tasks (task_ids): Not used here.\n\n KWargs:\n - kind (Not used)\n \"\"\"\n\n for label, executor in self.dfk.executors.items():\n if not executor.scaling_enabled:\n continue\n\n # Tasks that are either pending completion\n active_tasks = executor.outstanding\n\n status = executor.status()\n self.unset_logging()\n\n # FIXME we need to handle case where provider does not define these\n # FIXME probably more of this logic should be moved to the provider\n min_blocks = executor.provider.min_blocks\n max_blocks = executor.provider.max_blocks\n if isinstance(executor, IPyParallelExecutor):\n tasks_per_node = executor.workers_per_node\n elif isinstance(executor, HighThroughputExecutor):\n # This is probably wrong calculation, we need this to come from the executor\n # since we can't know slots ahead of time.\n tasks_per_node = 1\n elif isinstance(executor, ExtremeScaleExecutor):\n tasks_per_node = executor.ranks_per_node\n\n nodes_per_block = executor.provider.nodes_per_block\n parallelism = executor.provider.parallelism\n\n running = sum([1 for x in status if x == 'RUNNING'])\n submitting = sum([1 for x in status if x == 'SUBMITTING'])\n pending = sum([1 for x in status if x == 'PENDING'])\n active_blocks = running + submitting + pending\n active_slots = active_blocks * tasks_per_node * nodes_per_block\n\n if (isinstance(executor, IPyParallelExecutor) or\n isinstance(executor, HighThroughputExecutor) or\n isinstance(executor, ExtremeScaleExecutor)):\n logger.debug('Executor {} has {} active tasks, {}/{}/{} running/submitted/pending blocks, and {} connected engines'.format(\n label, active_tasks, running, submitting, pending, len(executor.connected_workers)))\n else:\n logger.debug('Executor {} has {} active tasks and {}/{}/{} running/submitted/pending blocks'.format(\n label, active_tasks, running, submitting, pending))\n\n # reset kill timer if executor has active tasks\n if active_tasks > 0 and self.executors[executor.label]['idle_since']:\n self.executors[executor.label]['idle_since'] = None\n\n # Case 1\n # No tasks.\n if active_tasks == 0:\n # Case 1a\n # Fewer blocks that min_blocks\n if active_blocks <= min_blocks:\n # Ignore\n # logger.debug(\"Strategy: Case.1a\")\n pass\n\n # Case 1b\n # More blocks than min_blocks. Scale down\n else:\n # We want to make sure that max_idletime is reached\n # before killing off resources\n if not self.executors[executor.label]['idle_since']:\n logger.debug(\"Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)\".format(\n label, self.max_idletime)\n )\n self.executors[executor.label]['idle_since'] = time.time()\n\n idle_since = self.executors[executor.label]['idle_since']\n if (time.time() - idle_since) > self.max_idletime:\n # We have resources idle for the max duration,\n # we have to scale_in now.\n logger.debug(\"Idle time has reached {}s for executor {}; removing resources\".format(\n self.max_idletime, label)\n )\n executor.scale_in(active_blocks - min_blocks)\n\n else:\n pass\n # logger.debug(\"Strategy: Case.1b. Waiting for timer : {0}\".format(idle_since))\n\n # Case 2\n # More tasks than the available slots.\n elif (float(active_slots) / active_tasks) < parallelism:\n # Case 2a\n # We have the max blocks possible\n if active_blocks >= max_blocks:\n # Ignore since we already have the max nodes\n # logger.debug(\"Strategy: Case.2a\")\n pass\n\n # Case 2b\n else:\n # logger.debug(\"Strategy: Case.2b\")\n excess = math.ceil((active_tasks * parallelism) - active_slots)\n excess_blocks = math.ceil(float(excess) / (tasks_per_node * nodes_per_block))\n logger.debug(\"Requesting {} more blocks\".format(excess_blocks))\n executor.scale_out(excess_blocks)\n\n elif active_slots == 0 and active_tasks > 0:\n # Case 4\n # Check if slots are being lost quickly ?\n logger.debug(\"Requesting single slot\")\n executor.scale_out(1)\n # Case 3\n # tasks ~ slots\n else:\n # logger.debug(\"Strategy: Case 3\")\n pass\n\n\nif __name__ == '__main__':\n\n pass\n", "path": "parsl/dataflow/strategy.py"}]}
| 3,523 | 162 |
gh_patches_debug_18445
|
rasdani/github-patches
|
git_diff
|
statsmodels__statsmodels-7921
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong upper limit in Bland-Altman plot
"lower" should be "upper"
https://github.com/statsmodels/statsmodels/blob/6d7d42eae9b261eac8b30dd49336ad9c09a31b45/statsmodels/graphics/agreement.py#L139
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/graphics/agreement.py`
Content:
```
1 '''
2 Bland-Altman mean-difference plots
3
4 Author: Joses Ho
5 License: BSD-3
6 '''
7
8 import numpy as np
9
10 from . import utils
11
12
13 def mean_diff_plot(m1, m2, sd_limit=1.96, ax=None, scatter_kwds=None,
14 mean_line_kwds=None, limit_lines_kwds=None):
15 """
16 Construct a Tukey/Bland-Altman Mean Difference Plot.
17
18 Tukey's Mean Difference Plot (also known as a Bland-Altman plot) is a
19 graphical method to analyze the differences between two methods of
20 measurement. The mean of the measures is plotted against their difference.
21
22 For more information see
23 https://en.wikipedia.org/wiki/Bland-Altman_plot
24
25 Parameters
26 ----------
27 m1 : array_like
28 A 1-d array.
29 m2 : array_like
30 A 1-d array.
31 sd_limit : float
32 The limit of agreements expressed in terms of the standard deviation of
33 the differences. If `md` is the mean of the differences, and `sd` is
34 the standard deviation of those differences, then the limits of
35 agreement that will be plotted are md +/- sd_limit * sd.
36 The default of 1.96 will produce 95% confidence intervals for the means
37 of the differences. If sd_limit = 0, no limits will be plotted, and
38 the ylimit of the plot defaults to 3 standard deviations on either
39 side of the mean.
40 ax : AxesSubplot
41 If `ax` is None, then a figure is created. If an axis instance is
42 given, the mean difference plot is drawn on the axis.
43 scatter_kwds : dict
44 Options to to style the scatter plot. Accepts any keywords for the
45 matplotlib Axes.scatter plotting method
46 mean_line_kwds : dict
47 Options to to style the scatter plot. Accepts any keywords for the
48 matplotlib Axes.axhline plotting method
49 limit_lines_kwds : dict
50 Options to to style the scatter plot. Accepts any keywords for the
51 matplotlib Axes.axhline plotting method
52
53 Returns
54 -------
55 Figure
56 If `ax` is None, the created figure. Otherwise the figure to which
57 `ax` is connected.
58
59 References
60 ----------
61 Bland JM, Altman DG (1986). "Statistical methods for assessing agreement
62 between two methods of clinical measurement"
63
64 Examples
65 --------
66
67 Load relevant libraries.
68
69 >>> import statsmodels.api as sm
70 >>> import numpy as np
71 >>> import matplotlib.pyplot as plt
72
73 Making a mean difference plot.
74
75 >>> # Seed the random number generator.
76 >>> # This ensures that the results below are reproducible.
77 >>> np.random.seed(9999)
78 >>> m1 = np.random.random(20)
79 >>> m2 = np.random.random(20)
80 >>> f, ax = plt.subplots(1, figsize = (8,5))
81 >>> sm.graphics.mean_diff_plot(m1, m2, ax = ax)
82 >>> plt.show()
83
84 .. plot:: plots/graphics-mean_diff_plot.py
85 """
86 fig, ax = utils.create_mpl_ax(ax)
87
88 if len(m1) != len(m2):
89 raise ValueError('m1 does not have the same length as m2.')
90 if sd_limit < 0:
91 raise ValueError('sd_limit ({}) is less than 0.'.format(sd_limit))
92
93 means = np.mean([m1, m2], axis=0)
94 diffs = m1 - m2
95 mean_diff = np.mean(diffs)
96 std_diff = np.std(diffs, axis=0)
97
98 scatter_kwds = scatter_kwds or {}
99 if 's' not in scatter_kwds:
100 scatter_kwds['s'] = 20
101 mean_line_kwds = mean_line_kwds or {}
102 limit_lines_kwds = limit_lines_kwds or {}
103 for kwds in [mean_line_kwds, limit_lines_kwds]:
104 if 'color' not in kwds:
105 kwds['color'] = 'gray'
106 if 'linewidth' not in kwds:
107 kwds['linewidth'] = 1
108 if 'linestyle' not in mean_line_kwds:
109 kwds['linestyle'] = '--'
110 if 'linestyle' not in limit_lines_kwds:
111 kwds['linestyle'] = ':'
112
113 ax.scatter(means, diffs, **scatter_kwds) # Plot the means against the diffs.
114 ax.axhline(mean_diff, **mean_line_kwds) # draw mean line.
115
116 # Annotate mean line with mean difference.
117 ax.annotate('mean diff:\n{}'.format(np.round(mean_diff, 2)),
118 xy=(0.99, 0.5),
119 horizontalalignment='right',
120 verticalalignment='center',
121 fontsize=14,
122 xycoords='axes fraction')
123
124 if sd_limit > 0:
125 half_ylim = (1.5 * sd_limit) * std_diff
126 ax.set_ylim(mean_diff - half_ylim,
127 mean_diff + half_ylim)
128 limit_of_agreement = sd_limit * std_diff
129 lower = mean_diff - limit_of_agreement
130 upper = mean_diff + limit_of_agreement
131 for j, lim in enumerate([lower, upper]):
132 ax.axhline(lim, **limit_lines_kwds)
133 ax.annotate(f'-SD{sd_limit}: {lower:0.2g}',
134 xy=(0.99, 0.07),
135 horizontalalignment='right',
136 verticalalignment='bottom',
137 fontsize=14,
138 xycoords='axes fraction')
139 ax.annotate(f'+SD{sd_limit}: {lower:0.2g}',
140 xy=(0.99, 0.92),
141 horizontalalignment='right',
142 fontsize=14,
143 xycoords='axes fraction')
144
145 elif sd_limit == 0:
146 half_ylim = 3 * std_diff
147 ax.set_ylim(mean_diff - half_ylim,
148 mean_diff + half_ylim)
149
150 ax.set_ylabel('Difference', fontsize=15)
151 ax.set_xlabel('Means', fontsize=15)
152 ax.tick_params(labelsize=13)
153 fig.tight_layout()
154 return fig
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/statsmodels/graphics/agreement.py b/statsmodels/graphics/agreement.py
--- a/statsmodels/graphics/agreement.py
+++ b/statsmodels/graphics/agreement.py
@@ -130,13 +130,13 @@
upper = mean_diff + limit_of_agreement
for j, lim in enumerate([lower, upper]):
ax.axhline(lim, **limit_lines_kwds)
- ax.annotate(f'-SD{sd_limit}: {lower:0.2g}',
+ ax.annotate(f'-{sd_limit} SD: {lower:0.2g}',
xy=(0.99, 0.07),
horizontalalignment='right',
verticalalignment='bottom',
fontsize=14,
xycoords='axes fraction')
- ax.annotate(f'+SD{sd_limit}: {lower:0.2g}',
+ ax.annotate(f'+{sd_limit} SD: {upper:0.2g}',
xy=(0.99, 0.92),
horizontalalignment='right',
fontsize=14,
|
{"golden_diff": "diff --git a/statsmodels/graphics/agreement.py b/statsmodels/graphics/agreement.py\n--- a/statsmodels/graphics/agreement.py\n+++ b/statsmodels/graphics/agreement.py\n@@ -130,13 +130,13 @@\n upper = mean_diff + limit_of_agreement\n for j, lim in enumerate([lower, upper]):\n ax.axhline(lim, **limit_lines_kwds)\n- ax.annotate(f'-SD{sd_limit}: {lower:0.2g}',\n+ ax.annotate(f'-{sd_limit} SD: {lower:0.2g}',\n xy=(0.99, 0.07),\n horizontalalignment='right',\n verticalalignment='bottom',\n fontsize=14,\n xycoords='axes fraction')\n- ax.annotate(f'+SD{sd_limit}: {lower:0.2g}',\n+ ax.annotate(f'+{sd_limit} SD: {upper:0.2g}',\n xy=(0.99, 0.92),\n horizontalalignment='right',\n fontsize=14,\n", "issue": "Wrong upper limit in Bland-Altman plot\n\"lower\" should be \"upper\"\r\n\r\nhttps://github.com/statsmodels/statsmodels/blob/6d7d42eae9b261eac8b30dd49336ad9c09a31b45/statsmodels/graphics/agreement.py#L139\n", "before_files": [{"content": "'''\nBland-Altman mean-difference plots\n\nAuthor: Joses Ho\nLicense: BSD-3\n'''\n\nimport numpy as np\n\nfrom . import utils\n\n\ndef mean_diff_plot(m1, m2, sd_limit=1.96, ax=None, scatter_kwds=None,\n mean_line_kwds=None, limit_lines_kwds=None):\n \"\"\"\n Construct a Tukey/Bland-Altman Mean Difference Plot.\n\n Tukey's Mean Difference Plot (also known as a Bland-Altman plot) is a\n graphical method to analyze the differences between two methods of\n measurement. The mean of the measures is plotted against their difference.\n\n For more information see\n https://en.wikipedia.org/wiki/Bland-Altman_plot\n\n Parameters\n ----------\n m1 : array_like\n A 1-d array.\n m2 : array_like\n A 1-d array.\n sd_limit : float\n The limit of agreements expressed in terms of the standard deviation of\n the differences. If `md` is the mean of the differences, and `sd` is\n the standard deviation of those differences, then the limits of\n agreement that will be plotted are md +/- sd_limit * sd.\n The default of 1.96 will produce 95% confidence intervals for the means\n of the differences. If sd_limit = 0, no limits will be plotted, and\n the ylimit of the plot defaults to 3 standard deviations on either\n side of the mean.\n ax : AxesSubplot\n If `ax` is None, then a figure is created. If an axis instance is\n given, the mean difference plot is drawn on the axis.\n scatter_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.scatter plotting method\n mean_line_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.axhline plotting method\n limit_lines_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.axhline plotting method\n\n Returns\n -------\n Figure\n If `ax` is None, the created figure. Otherwise the figure to which\n `ax` is connected.\n\n References\n ----------\n Bland JM, Altman DG (1986). \"Statistical methods for assessing agreement\n between two methods of clinical measurement\"\n\n Examples\n --------\n\n Load relevant libraries.\n\n >>> import statsmodels.api as sm\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n\n Making a mean difference plot.\n\n >>> # Seed the random number generator.\n >>> # This ensures that the results below are reproducible.\n >>> np.random.seed(9999)\n >>> m1 = np.random.random(20)\n >>> m2 = np.random.random(20)\n >>> f, ax = plt.subplots(1, figsize = (8,5))\n >>> sm.graphics.mean_diff_plot(m1, m2, ax = ax)\n >>> plt.show()\n\n .. plot:: plots/graphics-mean_diff_plot.py\n \"\"\"\n fig, ax = utils.create_mpl_ax(ax)\n\n if len(m1) != len(m2):\n raise ValueError('m1 does not have the same length as m2.')\n if sd_limit < 0:\n raise ValueError('sd_limit ({}) is less than 0.'.format(sd_limit))\n\n means = np.mean([m1, m2], axis=0)\n diffs = m1 - m2\n mean_diff = np.mean(diffs)\n std_diff = np.std(diffs, axis=0)\n\n scatter_kwds = scatter_kwds or {}\n if 's' not in scatter_kwds:\n scatter_kwds['s'] = 20\n mean_line_kwds = mean_line_kwds or {}\n limit_lines_kwds = limit_lines_kwds or {}\n for kwds in [mean_line_kwds, limit_lines_kwds]:\n if 'color' not in kwds:\n kwds['color'] = 'gray'\n if 'linewidth' not in kwds:\n kwds['linewidth'] = 1\n if 'linestyle' not in mean_line_kwds:\n kwds['linestyle'] = '--'\n if 'linestyle' not in limit_lines_kwds:\n kwds['linestyle'] = ':'\n\n ax.scatter(means, diffs, **scatter_kwds) # Plot the means against the diffs.\n ax.axhline(mean_diff, **mean_line_kwds) # draw mean line.\n\n # Annotate mean line with mean difference.\n ax.annotate('mean diff:\\n{}'.format(np.round(mean_diff, 2)),\n xy=(0.99, 0.5),\n horizontalalignment='right',\n verticalalignment='center',\n fontsize=14,\n xycoords='axes fraction')\n\n if sd_limit > 0:\n half_ylim = (1.5 * sd_limit) * std_diff\n ax.set_ylim(mean_diff - half_ylim,\n mean_diff + half_ylim)\n limit_of_agreement = sd_limit * std_diff\n lower = mean_diff - limit_of_agreement\n upper = mean_diff + limit_of_agreement\n for j, lim in enumerate([lower, upper]):\n ax.axhline(lim, **limit_lines_kwds)\n ax.annotate(f'-SD{sd_limit}: {lower:0.2g}',\n xy=(0.99, 0.07),\n horizontalalignment='right',\n verticalalignment='bottom',\n fontsize=14,\n xycoords='axes fraction')\n ax.annotate(f'+SD{sd_limit}: {lower:0.2g}',\n xy=(0.99, 0.92),\n horizontalalignment='right',\n fontsize=14,\n xycoords='axes fraction')\n\n elif sd_limit == 0:\n half_ylim = 3 * std_diff\n ax.set_ylim(mean_diff - half_ylim,\n mean_diff + half_ylim)\n\n ax.set_ylabel('Difference', fontsize=15)\n ax.set_xlabel('Means', fontsize=15)\n ax.tick_params(labelsize=13)\n fig.tight_layout()\n return fig\n", "path": "statsmodels/graphics/agreement.py"}], "after_files": [{"content": "'''\nBland-Altman mean-difference plots\n\nAuthor: Joses Ho\nLicense: BSD-3\n'''\n\nimport numpy as np\n\nfrom . import utils\n\n\ndef mean_diff_plot(m1, m2, sd_limit=1.96, ax=None, scatter_kwds=None,\n mean_line_kwds=None, limit_lines_kwds=None):\n \"\"\"\n Construct a Tukey/Bland-Altman Mean Difference Plot.\n\n Tukey's Mean Difference Plot (also known as a Bland-Altman plot) is a\n graphical method to analyze the differences between two methods of\n measurement. The mean of the measures is plotted against their difference.\n\n For more information see\n https://en.wikipedia.org/wiki/Bland-Altman_plot\n\n Parameters\n ----------\n m1 : array_like\n A 1-d array.\n m2 : array_like\n A 1-d array.\n sd_limit : float\n The limit of agreements expressed in terms of the standard deviation of\n the differences. If `md` is the mean of the differences, and `sd` is\n the standard deviation of those differences, then the limits of\n agreement that will be plotted are md +/- sd_limit * sd.\n The default of 1.96 will produce 95% confidence intervals for the means\n of the differences. If sd_limit = 0, no limits will be plotted, and\n the ylimit of the plot defaults to 3 standard deviations on either\n side of the mean.\n ax : AxesSubplot\n If `ax` is None, then a figure is created. If an axis instance is\n given, the mean difference plot is drawn on the axis.\n scatter_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.scatter plotting method\n mean_line_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.axhline plotting method\n limit_lines_kwds : dict\n Options to to style the scatter plot. Accepts any keywords for the\n matplotlib Axes.axhline plotting method\n\n Returns\n -------\n Figure\n If `ax` is None, the created figure. Otherwise the figure to which\n `ax` is connected.\n\n References\n ----------\n Bland JM, Altman DG (1986). \"Statistical methods for assessing agreement\n between two methods of clinical measurement\"\n\n Examples\n --------\n\n Load relevant libraries.\n\n >>> import statsmodels.api as sm\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n\n Making a mean difference plot.\n\n >>> # Seed the random number generator.\n >>> # This ensures that the results below are reproducible.\n >>> np.random.seed(9999)\n >>> m1 = np.random.random(20)\n >>> m2 = np.random.random(20)\n >>> f, ax = plt.subplots(1, figsize = (8,5))\n >>> sm.graphics.mean_diff_plot(m1, m2, ax = ax)\n >>> plt.show()\n\n .. plot:: plots/graphics-mean_diff_plot.py\n \"\"\"\n fig, ax = utils.create_mpl_ax(ax)\n\n if len(m1) != len(m2):\n raise ValueError('m1 does not have the same length as m2.')\n if sd_limit < 0:\n raise ValueError('sd_limit ({}) is less than 0.'.format(sd_limit))\n\n means = np.mean([m1, m2], axis=0)\n diffs = m1 - m2\n mean_diff = np.mean(diffs)\n std_diff = np.std(diffs, axis=0)\n\n scatter_kwds = scatter_kwds or {}\n if 's' not in scatter_kwds:\n scatter_kwds['s'] = 20\n mean_line_kwds = mean_line_kwds or {}\n limit_lines_kwds = limit_lines_kwds or {}\n for kwds in [mean_line_kwds, limit_lines_kwds]:\n if 'color' not in kwds:\n kwds['color'] = 'gray'\n if 'linewidth' not in kwds:\n kwds['linewidth'] = 1\n if 'linestyle' not in mean_line_kwds:\n kwds['linestyle'] = '--'\n if 'linestyle' not in limit_lines_kwds:\n kwds['linestyle'] = ':'\n\n ax.scatter(means, diffs, **scatter_kwds) # Plot the means against the diffs.\n ax.axhline(mean_diff, **mean_line_kwds) # draw mean line.\n\n # Annotate mean line with mean difference.\n ax.annotate('mean diff:\\n{}'.format(np.round(mean_diff, 2)),\n xy=(0.99, 0.5),\n horizontalalignment='right',\n verticalalignment='center',\n fontsize=14,\n xycoords='axes fraction')\n\n if sd_limit > 0:\n half_ylim = (1.5 * sd_limit) * std_diff\n ax.set_ylim(mean_diff - half_ylim,\n mean_diff + half_ylim)\n limit_of_agreement = sd_limit * std_diff\n lower = mean_diff - limit_of_agreement\n upper = mean_diff + limit_of_agreement\n for j, lim in enumerate([lower, upper]):\n ax.axhline(lim, **limit_lines_kwds)\n ax.annotate(f'-{sd_limit} SD: {lower:0.2g}',\n xy=(0.99, 0.07),\n horizontalalignment='right',\n verticalalignment='bottom',\n fontsize=14,\n xycoords='axes fraction')\n ax.annotate(f'+{sd_limit} SD: {upper:0.2g}',\n xy=(0.99, 0.92),\n horizontalalignment='right',\n fontsize=14,\n xycoords='axes fraction')\n\n elif sd_limit == 0:\n half_ylim = 3 * std_diff\n ax.set_ylim(mean_diff - half_ylim,\n mean_diff + half_ylim)\n\n ax.set_ylabel('Difference', fontsize=15)\n ax.set_xlabel('Means', fontsize=15)\n ax.tick_params(labelsize=13)\n fig.tight_layout()\n return fig\n", "path": "statsmodels/graphics/agreement.py"}]}
| 2,064 | 236 |
gh_patches_debug_41108
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1070
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ios cannot add widgets to main_window.content
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Try to run this app in ios:
```
import toga
class MyApp(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
self.state = GameState()
self.main_window = toga.MainWindow(title=self.formal_name)
box = toga.Box()
label = toga.Label("Test1")
box.add(label)
self.main_window.content = box
label2 = toga.Label("Test2")
box.add(label2)
self.main_window.show()
MyApp().main_loop()
```
**Expected behavior**
The application should open and shows the labels "Test1" and "Test2". Instead, you get a black screen. In the xcode debugger there is a log message on the "box.add(label2)" line about the label (test2) not having the attribute "viewport".
If you comment out box.add(label2), you correctly get a window that has "Test1".
On targets macos and android, the above test will correctly show "Test1 Test2"
**Environment:**
- Operating System: macos
- Python version: 3.8
- Software versions:
- Briefcase: 0.3.3
- Toga: 0.3.0.dev23
- IOS: iphone 11
This may be related to #225, as you will get a similar error if you try to rebind main_window.content to a different widget.
ios cannot add widgets to main_window.content
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Try to run this app in ios:
```
import toga
class MyApp(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
self.state = GameState()
self.main_window = toga.MainWindow(title=self.formal_name)
box = toga.Box()
label = toga.Label("Test1")
box.add(label)
self.main_window.content = box
label2 = toga.Label("Test2")
box.add(label2)
self.main_window.show()
MyApp().main_loop()
```
**Expected behavior**
The application should open and shows the labels "Test1" and "Test2". Instead, you get a black screen. In the xcode debugger there is a log message on the "box.add(label2)" line about the label (test2) not having the attribute "viewport".
If you comment out box.add(label2), you correctly get a window that has "Test1".
On targets macos and android, the above test will correctly show "Test1 Test2"
**Environment:**
- Operating System: macos
- Python version: 3.8
- Software versions:
- Briefcase: 0.3.3
- Toga: 0.3.0.dev23
- IOS: iphone 11
This may be related to #225, as you will get a similar error if you try to rebind main_window.content to a different widget.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/iOS/toga_iOS/widgets/base.py`
Content:
```
1 from toga_iOS.constraints import Constraints
2
3
4 class Widget:
5 def __init__(self, interface):
6 self.interface = interface
7 self.interface._impl = self
8 self._container = None
9 self.constraints = None
10 self.native = None
11 self.create()
12 self.interface.style.reapply()
13
14 def set_app(self, app):
15 pass
16
17 def set_window(self, window):
18 pass
19
20 @property
21 def container(self):
22 return self._container
23
24 @container.setter
25 def container(self, container):
26 self._container = container
27 if self.constraints:
28 self._container.native.addSubview(self.native)
29 self.constraints.container = container
30
31 for child in self.interface.children:
32 child._impl.container = container
33 self.rehint()
34
35 def set_enabled(self, value):
36 self.native.enabled = self.interface.enabled
37
38 def focus(self):
39 self.interface.factory.not_implemented("Widget.focus()")
40
41 # APPLICATOR
42
43 def set_bounds(self, x, y, width, height):
44 if self.container:
45 viewport = self.container.viewport
46 else:
47 viewport = self.viewport
48
49 self.constraints.update(
50 x, y + viewport.statusbar_height,
51 width, height
52 )
53
54 def set_alignment(self, alignment):
55 pass
56
57 def set_hidden(self, hidden):
58 if self._container:
59 for view in self._container._impl.subviews:
60 if view._impl:
61 view.setHidden(hidden)
62
63 def set_font(self, font):
64 # By default, font can't be changed
65 pass
66
67 def set_color(self, color):
68 # By default, color can't be changed
69 pass
70
71 def set_background_color(self, color):
72 # By default, background color can't be changed
73 pass
74
75 # INTERFACE
76
77 def add_child(self, child):
78 if self.container:
79 child.viewport = self.root.viewport
80 child.container = self.container
81
82 def add_constraints(self):
83 self.native.translatesAutoresizingMaskIntoConstraints = False
84 self.constraints = Constraints(self)
85
86 def rehint(self):
87 pass
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/iOS/toga_iOS/widgets/base.py b/src/iOS/toga_iOS/widgets/base.py
--- a/src/iOS/toga_iOS/widgets/base.py
+++ b/src/iOS/toga_iOS/widgets/base.py
@@ -6,10 +6,12 @@
self.interface = interface
self.interface._impl = self
self._container = None
+ self._viewport = None
self.constraints = None
self.native = None
self.create()
self.interface.style.reapply()
+ self.set_enabled(self.interface.enabled)
def set_app(self, app):
pass
@@ -23,15 +25,35 @@
@container.setter
def container(self, container):
- self._container = container
- if self.constraints:
+ if self.container:
+ if container:
+ raise RuntimeError('Already have a container')
+ else:
+ # existing container should be removed
+ self.constraints = None
+ self._container = None
+ self.native.removeFromSuperview()
+ elif container:
+ # setting container
+ self._container = container
self._container.native.addSubview(self.native)
+ if not self.constraints:
+ self.add_constraints()
self.constraints.container = container
for child in self.interface.children:
child._impl.container = container
+
self.rehint()
+ @property
+ def viewport(self):
+ return self._viewport
+
+ @viewport.setter
+ def viewport(self, viewport):
+ self._viewport = viewport
+
def set_enabled(self, value):
self.native.enabled = self.interface.enabled
@@ -41,13 +63,13 @@
# APPLICATOR
def set_bounds(self, x, y, width, height):
+ offset_y = 0
if self.container:
- viewport = self.container.viewport
- else:
- viewport = self.viewport
-
+ offset_y = self.container.viewport.statusbar_height
+ elif self.viewport:
+ offset_y = self.viewport.statusbar_height
self.constraints.update(
- x, y + viewport.statusbar_height,
+ x, y + offset_y,
width, height
)
@@ -55,8 +77,8 @@
pass
def set_hidden(self, hidden):
- if self._container:
- for view in self._container._impl.subviews:
+ if self.container:
+ for view in self.container._impl.subviews:
if view._impl:
view.setHidden(hidden)
@@ -75,10 +97,16 @@
# INTERFACE
def add_child(self, child):
- if self.container:
- child.viewport = self.root.viewport
+
+ if self.viewport:
+ # we are the the top level UIView
+ child.container = self
+ else:
child.container = self.container
+ def remove_child(self, child):
+ child.container = None
+
def add_constraints(self):
self.native.translatesAutoresizingMaskIntoConstraints = False
self.constraints = Constraints(self)
|
{"golden_diff": "diff --git a/src/iOS/toga_iOS/widgets/base.py b/src/iOS/toga_iOS/widgets/base.py\n--- a/src/iOS/toga_iOS/widgets/base.py\n+++ b/src/iOS/toga_iOS/widgets/base.py\n@@ -6,10 +6,12 @@\n self.interface = interface\n self.interface._impl = self\n self._container = None\n+ self._viewport = None\n self.constraints = None\n self.native = None\n self.create()\n self.interface.style.reapply()\n+ self.set_enabled(self.interface.enabled)\n \n def set_app(self, app):\n pass\n@@ -23,15 +25,35 @@\n \n @container.setter\n def container(self, container):\n- self._container = container\n- if self.constraints:\n+ if self.container:\n+ if container:\n+ raise RuntimeError('Already have a container')\n+ else:\n+ # existing container should be removed\n+ self.constraints = None\n+ self._container = None\n+ self.native.removeFromSuperview()\n+ elif container:\n+ # setting container\n+ self._container = container\n self._container.native.addSubview(self.native)\n+ if not self.constraints:\n+ self.add_constraints()\n self.constraints.container = container\n \n for child in self.interface.children:\n child._impl.container = container\n+\n self.rehint()\n \n+ @property\n+ def viewport(self):\n+ return self._viewport\n+\n+ @viewport.setter\n+ def viewport(self, viewport):\n+ self._viewport = viewport\n+\n def set_enabled(self, value):\n self.native.enabled = self.interface.enabled\n \n@@ -41,13 +63,13 @@\n # APPLICATOR\n \n def set_bounds(self, x, y, width, height):\n+ offset_y = 0\n if self.container:\n- viewport = self.container.viewport\n- else:\n- viewport = self.viewport\n-\n+ offset_y = self.container.viewport.statusbar_height\n+ elif self.viewport:\n+ offset_y = self.viewport.statusbar_height\n self.constraints.update(\n- x, y + viewport.statusbar_height,\n+ x, y + offset_y,\n width, height\n )\n \n@@ -55,8 +77,8 @@\n pass\n \n def set_hidden(self, hidden):\n- if self._container:\n- for view in self._container._impl.subviews:\n+ if self.container:\n+ for view in self.container._impl.subviews:\n if view._impl:\n view.setHidden(hidden)\n \n@@ -75,10 +97,16 @@\n # INTERFACE\n \n def add_child(self, child):\n- if self.container:\n- child.viewport = self.root.viewport\n+\n+ if self.viewport:\n+ # we are the the top level UIView\n+ child.container = self\n+ else:\n child.container = self.container\n \n+ def remove_child(self, child):\n+ child.container = None\n+\n def add_constraints(self):\n self.native.translatesAutoresizingMaskIntoConstraints = False\n self.constraints = Constraints(self)\n", "issue": "ios cannot add widgets to main_window.content \n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nTry to run this app in ios:\r\n```\r\nimport toga\r\nclass MyApp(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n self.state = GameState()\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n box = toga.Box()\r\n label = toga.Label(\"Test1\")\r\n box.add(label)\r\n self.main_window.content = box\r\n label2 = toga.Label(\"Test2\")\r\n box.add(label2)\r\n self.main_window.show()\r\nMyApp().main_loop()\r\n```\r\n\r\n**Expected behavior**\r\nThe application should open and shows the labels \"Test1\" and \"Test2\". Instead, you get a black screen. In the xcode debugger there is a log message on the \"box.add(label2)\" line about the label (test2) not having the attribute \"viewport\".\r\n\r\nIf you comment out box.add(label2), you correctly get a window that has \"Test1\".\r\n\r\nOn targets macos and android, the above test will correctly show \"Test1 Test2\"\r\n\r\n**Environment:**\r\n - Operating System: macos\r\n - Python version: 3.8\r\n - Software versions:\r\n - Briefcase: 0.3.3\r\n - Toga: 0.3.0.dev23\r\n - IOS: iphone 11\r\n\r\nThis may be related to #225, as you will get a similar error if you try to rebind main_window.content to a different widget.\nios cannot add widgets to main_window.content \n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nTry to run this app in ios:\r\n```\r\nimport toga\r\nclass MyApp(toga.App):\r\n\r\n def startup(self):\r\n \"\"\"\r\n Construct and show the Toga application.\r\n\r\n Usually, you would add your application to a main content box.\r\n We then create a main window (with a name matching the app), and\r\n show the main window.\r\n \"\"\"\r\n self.state = GameState()\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n box = toga.Box()\r\n label = toga.Label(\"Test1\")\r\n box.add(label)\r\n self.main_window.content = box\r\n label2 = toga.Label(\"Test2\")\r\n box.add(label2)\r\n self.main_window.show()\r\nMyApp().main_loop()\r\n```\r\n\r\n**Expected behavior**\r\nThe application should open and shows the labels \"Test1\" and \"Test2\". Instead, you get a black screen. In the xcode debugger there is a log message on the \"box.add(label2)\" line about the label (test2) not having the attribute \"viewport\".\r\n\r\nIf you comment out box.add(label2), you correctly get a window that has \"Test1\".\r\n\r\nOn targets macos and android, the above test will correctly show \"Test1 Test2\"\r\n\r\n**Environment:**\r\n - Operating System: macos\r\n - Python version: 3.8\r\n - Software versions:\r\n - Briefcase: 0.3.3\r\n - Toga: 0.3.0.dev23\r\n - IOS: iphone 11\r\n\r\nThis may be related to #225, as you will get a similar error if you try to rebind main_window.content to a different widget.\n", "before_files": [{"content": "from toga_iOS.constraints import Constraints\n\n\nclass Widget:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self._container = None\n self.constraints = None\n self.native = None\n self.create()\n self.interface.style.reapply()\n\n def set_app(self, app):\n pass\n\n def set_window(self, window):\n pass\n\n @property\n def container(self):\n return self._container\n\n @container.setter\n def container(self, container):\n self._container = container\n if self.constraints:\n self._container.native.addSubview(self.native)\n self.constraints.container = container\n\n for child in self.interface.children:\n child._impl.container = container\n self.rehint()\n\n def set_enabled(self, value):\n self.native.enabled = self.interface.enabled\n\n def focus(self):\n self.interface.factory.not_implemented(\"Widget.focus()\")\n\n # APPLICATOR\n\n def set_bounds(self, x, y, width, height):\n if self.container:\n viewport = self.container.viewport\n else:\n viewport = self.viewport\n\n self.constraints.update(\n x, y + viewport.statusbar_height,\n width, height\n )\n\n def set_alignment(self, alignment):\n pass\n\n def set_hidden(self, hidden):\n if self._container:\n for view in self._container._impl.subviews:\n if view._impl:\n view.setHidden(hidden)\n\n def set_font(self, font):\n # By default, font can't be changed\n pass\n\n def set_color(self, color):\n # By default, color can't be changed\n pass\n\n def set_background_color(self, color):\n # By default, background color can't be changed\n pass\n\n # INTERFACE\n\n def add_child(self, child):\n if self.container:\n child.viewport = self.root.viewport\n child.container = self.container\n\n def add_constraints(self):\n self.native.translatesAutoresizingMaskIntoConstraints = False\n self.constraints = Constraints(self)\n\n def rehint(self):\n pass\n", "path": "src/iOS/toga_iOS/widgets/base.py"}], "after_files": [{"content": "from toga_iOS.constraints import Constraints\n\n\nclass Widget:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self._container = None\n self._viewport = None\n self.constraints = None\n self.native = None\n self.create()\n self.interface.style.reapply()\n self.set_enabled(self.interface.enabled)\n\n def set_app(self, app):\n pass\n\n def set_window(self, window):\n pass\n\n @property\n def container(self):\n return self._container\n\n @container.setter\n def container(self, container):\n if self.container:\n if container:\n raise RuntimeError('Already have a container')\n else:\n # existing container should be removed\n self.constraints = None\n self._container = None\n self.native.removeFromSuperview()\n elif container:\n # setting container\n self._container = container\n self._container.native.addSubview(self.native)\n if not self.constraints:\n self.add_constraints()\n self.constraints.container = container\n\n for child in self.interface.children:\n child._impl.container = container\n\n self.rehint()\n\n @property\n def viewport(self):\n return self._viewport\n\n @viewport.setter\n def viewport(self, viewport):\n self._viewport = viewport\n\n def set_enabled(self, value):\n self.native.enabled = self.interface.enabled\n\n def focus(self):\n self.interface.factory.not_implemented(\"Widget.focus()\")\n\n # APPLICATOR\n\n def set_bounds(self, x, y, width, height):\n offset_y = 0\n if self.container:\n offset_y = self.container.viewport.statusbar_height\n elif self.viewport:\n offset_y = self.viewport.statusbar_height\n self.constraints.update(\n x, y + offset_y,\n width, height\n )\n\n def set_alignment(self, alignment):\n pass\n\n def set_hidden(self, hidden):\n if self.container:\n for view in self.container._impl.subviews:\n if view._impl:\n view.setHidden(hidden)\n\n def set_font(self, font):\n # By default, font can't be changed\n pass\n\n def set_color(self, color):\n # By default, color can't be changed\n pass\n\n def set_background_color(self, color):\n # By default, background color can't be changed\n pass\n\n # INTERFACE\n\n def add_child(self, child):\n\n if self.viewport:\n # we are the the top level UIView\n child.container = self\n else:\n child.container = self.container\n\n def remove_child(self, child):\n child.container = None\n\n def add_constraints(self):\n self.native.translatesAutoresizingMaskIntoConstraints = False\n self.constraints = Constraints(self)\n\n def rehint(self):\n pass\n", "path": "src/iOS/toga_iOS/widgets/base.py"}]}
| 1,651 | 681 |
gh_patches_debug_3372
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-6638
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SBU download is failing
This is happening since roughly a month (first detection in #6274), but was masked by #6268.
http://www.cs.virginia.edu/~vicente/sbucaptions/
The server is still up, but the path is no longer valid. I'll reach out to the author to see if this can be fixed. In the meantime we should probably disable the test.
cc @pmeier @YosuaMichael
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/sbu.py`
Content:
```
1 import os
2 from typing import Any, Callable, Optional, Tuple
3
4 from PIL import Image
5
6 from .utils import check_integrity, download_url
7 from .vision import VisionDataset
8
9
10 class SBU(VisionDataset):
11 """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.
12
13 Args:
14 root (string): Root directory of dataset where tarball
15 ``SBUCaptionedPhotoDataset.tar.gz`` exists.
16 transform (callable, optional): A function/transform that takes in a PIL image
17 and returns a transformed version. E.g, ``transforms.RandomCrop``
18 target_transform (callable, optional): A function/transform that takes in the
19 target and transforms it.
20 download (bool, optional): If True, downloads the dataset from the internet and
21 puts it in root directory. If dataset is already downloaded, it is not
22 downloaded again.
23 """
24
25 url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
26 filename = "SBUCaptionedPhotoDataset.tar.gz"
27 md5_checksum = "9aec147b3488753cf758b4d493422285"
28
29 def __init__(
30 self,
31 root: str,
32 transform: Optional[Callable] = None,
33 target_transform: Optional[Callable] = None,
34 download: bool = True,
35 ) -> None:
36 super().__init__(root, transform=transform, target_transform=target_transform)
37
38 if download:
39 self.download()
40
41 if not self._check_integrity():
42 raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
43
44 # Read the caption for each photo
45 self.photos = []
46 self.captions = []
47
48 file1 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")
49 file2 = os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_captions.txt")
50
51 for line1, line2 in zip(open(file1), open(file2)):
52 url = line1.rstrip()
53 photo = os.path.basename(url)
54 filename = os.path.join(self.root, "dataset", photo)
55 if os.path.exists(filename):
56 caption = line2.rstrip()
57 self.photos.append(photo)
58 self.captions.append(caption)
59
60 def __getitem__(self, index: int) -> Tuple[Any, Any]:
61 """
62 Args:
63 index (int): Index
64
65 Returns:
66 tuple: (image, target) where target is a caption for the photo.
67 """
68 filename = os.path.join(self.root, "dataset", self.photos[index])
69 img = Image.open(filename).convert("RGB")
70 if self.transform is not None:
71 img = self.transform(img)
72
73 target = self.captions[index]
74 if self.target_transform is not None:
75 target = self.target_transform(target)
76
77 return img, target
78
79 def __len__(self) -> int:
80 """The number of photos in the dataset."""
81 return len(self.photos)
82
83 def _check_integrity(self) -> bool:
84 """Check the md5 checksum of the downloaded tarball."""
85 root = self.root
86 fpath = os.path.join(root, self.filename)
87 if not check_integrity(fpath, self.md5_checksum):
88 return False
89 return True
90
91 def download(self) -> None:
92 """Download and extract the tarball, and download each individual photo."""
93 import tarfile
94
95 if self._check_integrity():
96 print("Files already downloaded and verified")
97 return
98
99 download_url(self.url, self.root, self.filename, self.md5_checksum)
100
101 # Extract file
102 with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
103 tar.extractall(path=self.root)
104
105 # Download individual photos
106 with open(os.path.join(self.root, "dataset", "SBU_captioned_photo_dataset_urls.txt")) as fh:
107 for line in fh:
108 url = line.rstrip()
109 try:
110 download_url(url, os.path.join(self.root, "dataset"))
111 except OSError:
112 # The images point to public images on Flickr.
113 # Note: Images might be removed by users at anytime.
114 pass
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/datasets/sbu.py b/torchvision/datasets/sbu.py
--- a/torchvision/datasets/sbu.py
+++ b/torchvision/datasets/sbu.py
@@ -22,7 +22,7 @@
downloaded again.
"""
- url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
+ url = "https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz"
filename = "SBUCaptionedPhotoDataset.tar.gz"
md5_checksum = "9aec147b3488753cf758b4d493422285"
|
{"golden_diff": "diff --git a/torchvision/datasets/sbu.py b/torchvision/datasets/sbu.py\n--- a/torchvision/datasets/sbu.py\n+++ b/torchvision/datasets/sbu.py\n@@ -22,7 +22,7 @@\n downloaded again.\n \"\"\"\n \n- url = \"http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz\"\n+ url = \"https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz\"\n filename = \"SBUCaptionedPhotoDataset.tar.gz\"\n md5_checksum = \"9aec147b3488753cf758b4d493422285\"\n", "issue": "SBU download is failing\nThis is happening since roughly a month (first detection in #6274), but was masked by #6268. \r\n\r\nhttp://www.cs.virginia.edu/~vicente/sbucaptions/\r\n\r\nThe server is still up, but the path is no longer valid. I'll reach out to the author to see if this can be fixed. In the meantime we should probably disable the test.\r\n\r\ncc @pmeier @YosuaMichael\n", "before_files": [{"content": "import os\nfrom typing import Any, Callable, Optional, Tuple\n\nfrom PIL import Image\n\nfrom .utils import check_integrity, download_url\nfrom .vision import VisionDataset\n\n\nclass SBU(VisionDataset):\n \"\"\"`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where tarball\n ``SBUCaptionedPhotoDataset.tar.gz`` exists.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If True, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n url = \"http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz\"\n filename = \"SBUCaptionedPhotoDataset.tar.gz\"\n md5_checksum = \"9aec147b3488753cf758b4d493422285\"\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = True,\n ) -> None:\n super().__init__(root, transform=transform, target_transform=target_transform)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted. You can use download=True to download it\")\n\n # Read the caption for each photo\n self.photos = []\n self.captions = []\n\n file1 = os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_urls.txt\")\n file2 = os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_captions.txt\")\n\n for line1, line2 in zip(open(file1), open(file2)):\n url = line1.rstrip()\n photo = os.path.basename(url)\n filename = os.path.join(self.root, \"dataset\", photo)\n if os.path.exists(filename):\n caption = line2.rstrip()\n self.photos.append(photo)\n self.captions.append(caption)\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is a caption for the photo.\n \"\"\"\n filename = os.path.join(self.root, \"dataset\", self.photos[index])\n img = Image.open(filename).convert(\"RGB\")\n if self.transform is not None:\n img = self.transform(img)\n\n target = self.captions[index]\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self) -> int:\n \"\"\"The number of photos in the dataset.\"\"\"\n return len(self.photos)\n\n def _check_integrity(self) -> bool:\n \"\"\"Check the md5 checksum of the downloaded tarball.\"\"\"\n root = self.root\n fpath = os.path.join(root, self.filename)\n if not check_integrity(fpath, self.md5_checksum):\n return False\n return True\n\n def download(self) -> None:\n \"\"\"Download and extract the tarball, and download each individual photo.\"\"\"\n import tarfile\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_url(self.url, self.root, self.filename, self.md5_checksum)\n\n # Extract file\n with tarfile.open(os.path.join(self.root, self.filename), \"r:gz\") as tar:\n tar.extractall(path=self.root)\n\n # Download individual photos\n with open(os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_urls.txt\")) as fh:\n for line in fh:\n url = line.rstrip()\n try:\n download_url(url, os.path.join(self.root, \"dataset\"))\n except OSError:\n # The images point to public images on Flickr.\n # Note: Images might be removed by users at anytime.\n pass\n", "path": "torchvision/datasets/sbu.py"}], "after_files": [{"content": "import os\nfrom typing import Any, Callable, Optional, Tuple\n\nfrom PIL import Image\n\nfrom .utils import check_integrity, download_url\nfrom .vision import VisionDataset\n\n\nclass SBU(VisionDataset):\n \"\"\"`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where tarball\n ``SBUCaptionedPhotoDataset.tar.gz`` exists.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If True, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n \"\"\"\n\n url = \"https://www.cs.rice.edu/~vo9/sbucaptions/SBUCaptionedPhotoDataset.tar.gz\"\n filename = \"SBUCaptionedPhotoDataset.tar.gz\"\n md5_checksum = \"9aec147b3488753cf758b4d493422285\"\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = True,\n ) -> None:\n super().__init__(root, transform=transform, target_transform=target_transform)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\"Dataset not found or corrupted. You can use download=True to download it\")\n\n # Read the caption for each photo\n self.photos = []\n self.captions = []\n\n file1 = os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_urls.txt\")\n file2 = os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_captions.txt\")\n\n for line1, line2 in zip(open(file1), open(file2)):\n url = line1.rstrip()\n photo = os.path.basename(url)\n filename = os.path.join(self.root, \"dataset\", photo)\n if os.path.exists(filename):\n caption = line2.rstrip()\n self.photos.append(photo)\n self.captions.append(caption)\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is a caption for the photo.\n \"\"\"\n filename = os.path.join(self.root, \"dataset\", self.photos[index])\n img = Image.open(filename).convert(\"RGB\")\n if self.transform is not None:\n img = self.transform(img)\n\n target = self.captions[index]\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self) -> int:\n \"\"\"The number of photos in the dataset.\"\"\"\n return len(self.photos)\n\n def _check_integrity(self) -> bool:\n \"\"\"Check the md5 checksum of the downloaded tarball.\"\"\"\n root = self.root\n fpath = os.path.join(root, self.filename)\n if not check_integrity(fpath, self.md5_checksum):\n return False\n return True\n\n def download(self) -> None:\n \"\"\"Download and extract the tarball, and download each individual photo.\"\"\"\n import tarfile\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_url(self.url, self.root, self.filename, self.md5_checksum)\n\n # Extract file\n with tarfile.open(os.path.join(self.root, self.filename), \"r:gz\") as tar:\n tar.extractall(path=self.root)\n\n # Download individual photos\n with open(os.path.join(self.root, \"dataset\", \"SBU_captioned_photo_dataset_urls.txt\")) as fh:\n for line in fh:\n url = line.rstrip()\n try:\n download_url(url, os.path.join(self.root, \"dataset\"))\n except OSError:\n # The images point to public images on Flickr.\n # Note: Images might be removed by users at anytime.\n pass\n", "path": "torchvision/datasets/sbu.py"}]}
| 1,552 | 174 |
gh_patches_debug_30593
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-2733
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow for np.integer as pulse duration
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
`duration` of pulse command should be python integer, but this limitation might degrade usability. It is better to allow for `np.integers` - e.g. when we prepare series of pulse commands with many durations with `np.arange`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/pulse/commands/pulse_decorators.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 # pylint: disable=missing-return-doc, missing-return-type-doc
16
17 """
18 Pulse decorators.
19 """
20
21 import functools
22 from typing import Callable
23
24 import numpy as np
25
26 from qiskit.pulse.exceptions import PulseError
27
28 from .sample_pulse import SamplePulse
29
30
31 def functional_pulse(func: Callable):
32 """A decorator for generating SamplePulse from python callable.
33 Args:
34 func: A function describing pulse envelope.
35 Raises:
36 PulseError: when invalid function is specified.
37 """
38 @functools.wraps(func)
39 def to_pulse(duration, *args, name=None, **kwargs):
40 """Return SamplePulse."""
41 if isinstance(duration, int) and duration > 0:
42 samples = func(duration, *args, **kwargs)
43 samples = np.asarray(samples, dtype=np.complex128)
44 return SamplePulse(samples=samples, name=name)
45 raise PulseError('The first argument must be an integer value representing duration.')
46
47 return to_pulse
48
```
Path: `qiskit/pulse/commands/command.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 Base command.
17 """
18 import re
19
20 from abc import ABCMeta, abstractmethod
21 from typing import List, Optional
22
23 from qiskit.pulse.exceptions import PulseError
24 from qiskit.pulse.channels import Channel
25 from qiskit.pulse.timeslots import TimeslotCollection
26
27 from .instruction import Instruction
28
29
30 class MetaCount(ABCMeta):
31 """Meta class to count class instances."""
32 def __new__(mcs, name, bases, namespace):
33 new_cls = super(MetaCount, mcs).__new__(mcs, name, bases, namespace)
34 new_cls.instances_counter = 0
35 return new_cls
36
37
38 class Command(metaclass=MetaCount):
39 """Abstract command class."""
40
41 # Counter for the number of instances in this class
42 prefix = 'c'
43
44 @abstractmethod
45 def __init__(self, duration: int = None):
46 """Create a new command.
47
48 Args:
49 duration: Duration of this command.
50 Raises:
51 PulseError: when duration is not number of points
52 """
53 if isinstance(duration, int):
54 self._duration = duration
55 else:
56 raise PulseError('Pulse duration should be integer.')
57
58 self._name = Command.create_name()
59
60 @classmethod
61 def create_name(cls, name: str = None) -> str:
62 """Autogenerate names for pulse commands."""
63 if name is None:
64 try:
65 name = '%s%i' % (cls.prefix, cls.instances_counter) # pylint: disable=E1101
66 except TypeError:
67 raise PulseError("prefix and counter must be non-None when name is None.")
68 else:
69 try:
70 name = str(name)
71 except Exception:
72 raise PulseError("The pulse command name should be castable to a string "
73 "(or None for autogenerate a name).")
74 name_format = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
75 if name_format.match(name) is None:
76 raise PulseError("%s is an invalid OpenPulse command name." % name)
77
78 cls.instances_counter += 1 # pylint: disable=E1101
79
80 return name
81
82 @property
83 def duration(self) -> int:
84 """Duration of this command."""
85 return self._duration
86
87 @property
88 def name(self) -> str:
89 """Name of this command."""
90 return self._name
91
92 @abstractmethod
93 def to_instruction(self, command, *channels: List[Channel],
94 timeslots: Optional[TimeslotCollection] = None,
95 name: Optional[str] = None) -> Instruction:
96 """Create an instruction from command."""
97 pass
98
99 def __call__(self, *args, **kwargs):
100 """Creates an Instruction obtained from call to `to_instruction` wrapped in a Schedule."""
101 return self.to_instruction(*args, **kwargs)
102
103 def __eq__(self, other: 'Command'):
104 """Two Commands are the same if they are of the same type
105 and have the same duration and name.
106
107 Args:
108 other: other Command
109
110 Returns:
111 bool: are self and other equal
112 """
113 if type(self) is type(other) and \
114 self._duration == other._duration and \
115 self._name == other._name:
116 return True
117 return False
118
119 def __hash__(self):
120 return hash((type(self), self._duration, self._name))
121
122 def __repr__(self):
123 return '%s(name=%s, duration=%d)' % (self.__class__.__name__,
124 self._name, self._duration)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/pulse/commands/command.py b/qiskit/pulse/commands/command.py
--- a/qiskit/pulse/commands/command.py
+++ b/qiskit/pulse/commands/command.py
@@ -18,7 +18,8 @@
import re
from abc import ABCMeta, abstractmethod
-from typing import List, Optional
+from typing import List, Optional, Union
+import numpy as np
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.channels import Channel
@@ -42,7 +43,7 @@
prefix = 'c'
@abstractmethod
- def __init__(self, duration: int = None):
+ def __init__(self, duration: Union[int, np.integer] = None):
"""Create a new command.
Args:
@@ -50,8 +51,8 @@
Raises:
PulseError: when duration is not number of points
"""
- if isinstance(duration, int):
- self._duration = duration
+ if isinstance(duration, (int, np.integer)):
+ self._duration = int(duration)
else:
raise PulseError('Pulse duration should be integer.')
diff --git a/qiskit/pulse/commands/pulse_decorators.py b/qiskit/pulse/commands/pulse_decorators.py
--- a/qiskit/pulse/commands/pulse_decorators.py
+++ b/qiskit/pulse/commands/pulse_decorators.py
@@ -38,7 +38,7 @@
@functools.wraps(func)
def to_pulse(duration, *args, name=None, **kwargs):
"""Return SamplePulse."""
- if isinstance(duration, int) and duration > 0:
+ if isinstance(duration, (int, np.integer)) and duration > 0:
samples = func(duration, *args, **kwargs)
samples = np.asarray(samples, dtype=np.complex128)
return SamplePulse(samples=samples, name=name)
|
{"golden_diff": "diff --git a/qiskit/pulse/commands/command.py b/qiskit/pulse/commands/command.py\n--- a/qiskit/pulse/commands/command.py\n+++ b/qiskit/pulse/commands/command.py\n@@ -18,7 +18,8 @@\n import re\n \n from abc import ABCMeta, abstractmethod\n-from typing import List, Optional\n+from typing import List, Optional, Union\n+import numpy as np\n \n from qiskit.pulse.exceptions import PulseError\n from qiskit.pulse.channels import Channel\n@@ -42,7 +43,7 @@\n prefix = 'c'\n \n @abstractmethod\n- def __init__(self, duration: int = None):\n+ def __init__(self, duration: Union[int, np.integer] = None):\n \"\"\"Create a new command.\n \n Args:\n@@ -50,8 +51,8 @@\n Raises:\n PulseError: when duration is not number of points\n \"\"\"\n- if isinstance(duration, int):\n- self._duration = duration\n+ if isinstance(duration, (int, np.integer)):\n+ self._duration = int(duration)\n else:\n raise PulseError('Pulse duration should be integer.')\n \ndiff --git a/qiskit/pulse/commands/pulse_decorators.py b/qiskit/pulse/commands/pulse_decorators.py\n--- a/qiskit/pulse/commands/pulse_decorators.py\n+++ b/qiskit/pulse/commands/pulse_decorators.py\n@@ -38,7 +38,7 @@\n @functools.wraps(func)\n def to_pulse(duration, *args, name=None, **kwargs):\n \"\"\"Return SamplePulse.\"\"\"\n- if isinstance(duration, int) and duration > 0:\n+ if isinstance(duration, (int, np.integer)) and duration > 0:\n samples = func(duration, *args, **kwargs)\n samples = np.asarray(samples, dtype=np.complex128)\n return SamplePulse(samples=samples, name=name)\n", "issue": "Allow for np.integer as pulse duration\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n`duration` of pulse command should be python integer, but this limitation might degrade usability. It is better to allow for `np.integers` - e.g. when we prepare series of pulse commands with many durations with `np.arange`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc, missing-return-type-doc\n\n\"\"\"\nPulse decorators.\n\"\"\"\n\nimport functools\nfrom typing import Callable\n\nimport numpy as np\n\nfrom qiskit.pulse.exceptions import PulseError\n\nfrom .sample_pulse import SamplePulse\n\n\ndef functional_pulse(func: Callable):\n \"\"\"A decorator for generating SamplePulse from python callable.\n Args:\n func: A function describing pulse envelope.\n Raises:\n PulseError: when invalid function is specified.\n \"\"\"\n @functools.wraps(func)\n def to_pulse(duration, *args, name=None, **kwargs):\n \"\"\"Return SamplePulse.\"\"\"\n if isinstance(duration, int) and duration > 0:\n samples = func(duration, *args, **kwargs)\n samples = np.asarray(samples, dtype=np.complex128)\n return SamplePulse(samples=samples, name=name)\n raise PulseError('The first argument must be an integer value representing duration.')\n\n return to_pulse\n", "path": "qiskit/pulse/commands/pulse_decorators.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nBase command.\n\"\"\"\nimport re\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List, Optional\n\nfrom qiskit.pulse.exceptions import PulseError\nfrom qiskit.pulse.channels import Channel\nfrom qiskit.pulse.timeslots import TimeslotCollection\n\nfrom .instruction import Instruction\n\n\nclass MetaCount(ABCMeta):\n \"\"\"Meta class to count class instances.\"\"\"\n def __new__(mcs, name, bases, namespace):\n new_cls = super(MetaCount, mcs).__new__(mcs, name, bases, namespace)\n new_cls.instances_counter = 0\n return new_cls\n\n\nclass Command(metaclass=MetaCount):\n \"\"\"Abstract command class.\"\"\"\n\n # Counter for the number of instances in this class\n prefix = 'c'\n\n @abstractmethod\n def __init__(self, duration: int = None):\n \"\"\"Create a new command.\n\n Args:\n duration: Duration of this command.\n Raises:\n PulseError: when duration is not number of points\n \"\"\"\n if isinstance(duration, int):\n self._duration = duration\n else:\n raise PulseError('Pulse duration should be integer.')\n\n self._name = Command.create_name()\n\n @classmethod\n def create_name(cls, name: str = None) -> str:\n \"\"\"Autogenerate names for pulse commands.\"\"\"\n if name is None:\n try:\n name = '%s%i' % (cls.prefix, cls.instances_counter) # pylint: disable=E1101\n except TypeError:\n raise PulseError(\"prefix and counter must be non-None when name is None.\")\n else:\n try:\n name = str(name)\n except Exception:\n raise PulseError(\"The pulse command name should be castable to a string \"\n \"(or None for autogenerate a name).\")\n name_format = re.compile('[a-zA-Z][a-zA-Z0-9_]*')\n if name_format.match(name) is None:\n raise PulseError(\"%s is an invalid OpenPulse command name.\" % name)\n\n cls.instances_counter += 1 # pylint: disable=E1101\n\n return name\n\n @property\n def duration(self) -> int:\n \"\"\"Duration of this command.\"\"\"\n return self._duration\n\n @property\n def name(self) -> str:\n \"\"\"Name of this command.\"\"\"\n return self._name\n\n @abstractmethod\n def to_instruction(self, command, *channels: List[Channel],\n timeslots: Optional[TimeslotCollection] = None,\n name: Optional[str] = None) -> Instruction:\n \"\"\"Create an instruction from command.\"\"\"\n pass\n\n def __call__(self, *args, **kwargs):\n \"\"\"Creates an Instruction obtained from call to `to_instruction` wrapped in a Schedule.\"\"\"\n return self.to_instruction(*args, **kwargs)\n\n def __eq__(self, other: 'Command'):\n \"\"\"Two Commands are the same if they are of the same type\n and have the same duration and name.\n\n Args:\n other: other Command\n\n Returns:\n bool: are self and other equal\n \"\"\"\n if type(self) is type(other) and \\\n self._duration == other._duration and \\\n self._name == other._name:\n return True\n return False\n\n def __hash__(self):\n return hash((type(self), self._duration, self._name))\n\n def __repr__(self):\n return '%s(name=%s, duration=%d)' % (self.__class__.__name__,\n self._name, self._duration)\n", "path": "qiskit/pulse/commands/command.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=missing-return-doc, missing-return-type-doc\n\n\"\"\"\nPulse decorators.\n\"\"\"\n\nimport functools\nfrom typing import Callable\n\nimport numpy as np\n\nfrom qiskit.pulse.exceptions import PulseError\n\nfrom .sample_pulse import SamplePulse\n\n\ndef functional_pulse(func: Callable):\n \"\"\"A decorator for generating SamplePulse from python callable.\n Args:\n func: A function describing pulse envelope.\n Raises:\n PulseError: when invalid function is specified.\n \"\"\"\n @functools.wraps(func)\n def to_pulse(duration, *args, name=None, **kwargs):\n \"\"\"Return SamplePulse.\"\"\"\n if isinstance(duration, (int, np.integer)) and duration > 0:\n samples = func(duration, *args, **kwargs)\n samples = np.asarray(samples, dtype=np.complex128)\n return SamplePulse(samples=samples, name=name)\n raise PulseError('The first argument must be an integer value representing duration.')\n\n return to_pulse\n", "path": "qiskit/pulse/commands/pulse_decorators.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nBase command.\n\"\"\"\nimport re\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import List, Optional, Union\nimport numpy as np\n\nfrom qiskit.pulse.exceptions import PulseError\nfrom qiskit.pulse.channels import Channel\nfrom qiskit.pulse.timeslots import TimeslotCollection\n\nfrom .instruction import Instruction\n\n\nclass MetaCount(ABCMeta):\n \"\"\"Meta class to count class instances.\"\"\"\n def __new__(mcs, name, bases, namespace):\n new_cls = super(MetaCount, mcs).__new__(mcs, name, bases, namespace)\n new_cls.instances_counter = 0\n return new_cls\n\n\nclass Command(metaclass=MetaCount):\n \"\"\"Abstract command class.\"\"\"\n\n # Counter for the number of instances in this class\n prefix = 'c'\n\n @abstractmethod\n def __init__(self, duration: Union[int, np.integer] = None):\n \"\"\"Create a new command.\n\n Args:\n duration: Duration of this command.\n Raises:\n PulseError: when duration is not number of points\n \"\"\"\n if isinstance(duration, (int, np.integer)):\n self._duration = int(duration)\n else:\n raise PulseError('Pulse duration should be integer.')\n\n self._name = Command.create_name()\n\n @classmethod\n def create_name(cls, name: str = None) -> str:\n \"\"\"Autogenerate names for pulse commands.\"\"\"\n if name is None:\n try:\n name = '%s%i' % (cls.prefix, cls.instances_counter) # pylint: disable=E1101\n except TypeError:\n raise PulseError(\"prefix and counter must be non-None when name is None.\")\n else:\n try:\n name = str(name)\n except Exception:\n raise PulseError(\"The pulse command name should be castable to a string \"\n \"(or None for autogenerate a name).\")\n name_format = re.compile('[a-zA-Z][a-zA-Z0-9_]*')\n if name_format.match(name) is None:\n raise PulseError(\"%s is an invalid OpenPulse command name.\" % name)\n\n cls.instances_counter += 1 # pylint: disable=E1101\n\n return name\n\n @property\n def duration(self) -> int:\n \"\"\"Duration of this command.\"\"\"\n return self._duration\n\n @property\n def name(self) -> str:\n \"\"\"Name of this command.\"\"\"\n return self._name\n\n @abstractmethod\n def to_instruction(self, command, *channels: List[Channel],\n timeslots: Optional[TimeslotCollection] = None,\n name: Optional[str] = None) -> Instruction:\n \"\"\"Create an instruction from command.\"\"\"\n pass\n\n def __call__(self, *args, **kwargs):\n \"\"\"Creates an Instruction obtained from call to `to_instruction` wrapped in a Schedule.\"\"\"\n return self.to_instruction(*args, **kwargs)\n\n def __eq__(self, other: 'Command'):\n \"\"\"Two Commands are the same if they are of the same type\n and have the same duration and name.\n\n Args:\n other: other Command\n\n Returns:\n bool: are self and other equal\n \"\"\"\n if type(self) is type(other) and \\\n self._duration == other._duration and \\\n self._name == other._name:\n return True\n return False\n\n def __hash__(self):\n return hash((type(self), self._duration, self._name))\n\n def __repr__(self):\n return '%s(name=%s, duration=%d)' % (self.__class__.__name__,\n self._name, self._duration)\n", "path": "qiskit/pulse/commands/command.py"}]}
| 2,004 | 439 |
gh_patches_debug_25013
|
rasdani/github-patches
|
git_diff
|
pandas-dev__pandas-24034
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add columns-parameter like in feather.read_dataframe
Wes suggested to raise this feature request here again, after I wondered why `pandas.read_feather` is lacking a `columns`-parameter like the [`feather-format`](https://github.com/wesm/feather) package has it.
You can read in only a subset of columns from a feather-file with e.g.:
`df = feather.read_dataframe('df_test.feather', columns='b')`
It would be really nice if `pandas.read_feather` also had this built in, so you don't have to install and import the feather-package just for that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/io/feather_format.py`
Content:
```
1 """ feather-format compat """
2
3 from distutils.version import LooseVersion
4
5 from pandas.compat import range
6 from pandas.util._decorators import deprecate_kwarg
7
8 from pandas import DataFrame, Int64Index, RangeIndex
9
10 from pandas.io.common import _stringify_path
11
12
13 def _try_import():
14 # since pandas is a dependency of pyarrow
15 # we need to import on first use
16 try:
17 import pyarrow
18 from pyarrow import feather
19 except ImportError:
20 # give a nice error message
21 raise ImportError("pyarrow is not installed\n\n"
22 "you can install via conda\n"
23 "conda install pyarrow -c conda-forge\n"
24 "or via pip\n"
25 "pip install -U pyarrow\n")
26
27 if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'):
28 raise ImportError("pyarrow >= 0.4.1 required for feather support\n\n"
29 "you can install via conda\n"
30 "conda install pyarrow -c conda-forge"
31 "or via pip\n"
32 "pip install -U pyarrow\n")
33
34 return feather, pyarrow
35
36
37 def to_feather(df, path):
38 """
39 Write a DataFrame to the feather-format
40
41 Parameters
42 ----------
43 df : DataFrame
44 path : string file path, or file-like object
45
46 """
47 path = _stringify_path(path)
48 if not isinstance(df, DataFrame):
49 raise ValueError("feather only support IO with DataFrames")
50
51 feather = _try_import()[0]
52 valid_types = {'string', 'unicode'}
53
54 # validate index
55 # --------------
56
57 # validate that we have only a default index
58 # raise on anything else as we don't serialize the index
59
60 if not isinstance(df.index, Int64Index):
61 raise ValueError("feather does not support serializing {} "
62 "for the index; you can .reset_index()"
63 "to make the index into column(s)".format(
64 type(df.index)))
65
66 if not df.index.equals(RangeIndex.from_range(range(len(df)))):
67 raise ValueError("feather does not support serializing a "
68 "non-default index for the index; you "
69 "can .reset_index() to make the index "
70 "into column(s)")
71
72 if df.index.name is not None:
73 raise ValueError("feather does not serialize index meta-data on a "
74 "default index")
75
76 # validate columns
77 # ----------------
78
79 # must have value column names (strings only)
80 if df.columns.inferred_type not in valid_types:
81 raise ValueError("feather must have string column names")
82
83 feather.write_feather(df, path)
84
85
86 @deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads')
87 def read_feather(path, use_threads=True):
88 """
89 Load a feather-format object from the file path
90
91 .. versionadded 0.20.0
92
93 Parameters
94 ----------
95 path : string file path, or file-like object
96 nthreads : int, default 1
97 Number of CPU threads to use when reading to pandas.DataFrame
98
99 .. versionadded 0.21.0
100 .. deprecated 0.24.0
101 use_threads : bool, default True
102 Whether to parallelize reading using multiple threads
103
104 .. versionadded 0.24.0
105
106 Returns
107 -------
108 type of object stored in file
109
110 """
111
112 feather, pyarrow = _try_import()
113 path = _stringify_path(path)
114
115 if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
116 int_use_threads = int(use_threads)
117 if int_use_threads < 1:
118 int_use_threads = 1
119 return feather.read_feather(path, nthreads=int_use_threads)
120
121 return feather.read_feather(path, use_threads=bool(use_threads))
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -84,7 +84,7 @@
@deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads')
-def read_feather(path, use_threads=True):
+def read_feather(path, columns=None, use_threads=True):
"""
Load a feather-format object from the file path
@@ -93,6 +93,10 @@
Parameters
----------
path : string file path, or file-like object
+ columns : sequence, default None
+ If not provided, all columns are read
+
+ .. versionadded 0.24.0
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
@@ -116,6 +120,8 @@
int_use_threads = int(use_threads)
if int_use_threads < 1:
int_use_threads = 1
- return feather.read_feather(path, nthreads=int_use_threads)
+ return feather.read_feather(path, columns=columns,
+ nthreads=int_use_threads)
- return feather.read_feather(path, use_threads=bool(use_threads))
+ return feather.read_feather(path, columns=columns,
+ use_threads=bool(use_threads))
|
{"golden_diff": "diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py\n--- a/pandas/io/feather_format.py\n+++ b/pandas/io/feather_format.py\n@@ -84,7 +84,7 @@\n \n \n @deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads')\n-def read_feather(path, use_threads=True):\n+def read_feather(path, columns=None, use_threads=True):\n \"\"\"\n Load a feather-format object from the file path\n \n@@ -93,6 +93,10 @@\n Parameters\n ----------\n path : string file path, or file-like object\n+ columns : sequence, default None\n+ If not provided, all columns are read\n+\n+ .. versionadded 0.24.0\n nthreads : int, default 1\n Number of CPU threads to use when reading to pandas.DataFrame\n \n@@ -116,6 +120,8 @@\n int_use_threads = int(use_threads)\n if int_use_threads < 1:\n int_use_threads = 1\n- return feather.read_feather(path, nthreads=int_use_threads)\n+ return feather.read_feather(path, columns=columns,\n+ nthreads=int_use_threads)\n \n- return feather.read_feather(path, use_threads=bool(use_threads))\n+ return feather.read_feather(path, columns=columns,\n+ use_threads=bool(use_threads))\n", "issue": "Add columns-parameter like in feather.read_dataframe\nWes suggested to raise this feature request here again, after I wondered why `pandas.read_feather` is lacking a `columns`-parameter like the [`feather-format`](https://github.com/wesm/feather) package has it.\r\n\r\nYou can read in only a subset of columns from a feather-file with e.g.: \r\n`df = feather.read_dataframe('df_test.feather', columns='b')`\r\n\r\nIt would be really nice if `pandas.read_feather` also had this built in, so you don't have to install and import the feather-package just for that.\n", "before_files": [{"content": "\"\"\" feather-format compat \"\"\"\n\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import range\nfrom pandas.util._decorators import deprecate_kwarg\n\nfrom pandas import DataFrame, Int64Index, RangeIndex\n\nfrom pandas.io.common import _stringify_path\n\n\ndef _try_import():\n # since pandas is a dependency of pyarrow\n # we need to import on first use\n try:\n import pyarrow\n from pyarrow import feather\n except ImportError:\n # give a nice error message\n raise ImportError(\"pyarrow is not installed\\n\\n\"\n \"you can install via conda\\n\"\n \"conda install pyarrow -c conda-forge\\n\"\n \"or via pip\\n\"\n \"pip install -U pyarrow\\n\")\n\n if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'):\n raise ImportError(\"pyarrow >= 0.4.1 required for feather support\\n\\n\"\n \"you can install via conda\\n\"\n \"conda install pyarrow -c conda-forge\"\n \"or via pip\\n\"\n \"pip install -U pyarrow\\n\")\n\n return feather, pyarrow\n\n\ndef to_feather(df, path):\n \"\"\"\n Write a DataFrame to the feather-format\n\n Parameters\n ----------\n df : DataFrame\n path : string file path, or file-like object\n\n \"\"\"\n path = _stringify_path(path)\n if not isinstance(df, DataFrame):\n raise ValueError(\"feather only support IO with DataFrames\")\n\n feather = _try_import()[0]\n valid_types = {'string', 'unicode'}\n\n # validate index\n # --------------\n\n # validate that we have only a default index\n # raise on anything else as we don't serialize the index\n\n if not isinstance(df.index, Int64Index):\n raise ValueError(\"feather does not support serializing {} \"\n \"for the index; you can .reset_index()\"\n \"to make the index into column(s)\".format(\n type(df.index)))\n\n if not df.index.equals(RangeIndex.from_range(range(len(df)))):\n raise ValueError(\"feather does not support serializing a \"\n \"non-default index for the index; you \"\n \"can .reset_index() to make the index \"\n \"into column(s)\")\n\n if df.index.name is not None:\n raise ValueError(\"feather does not serialize index meta-data on a \"\n \"default index\")\n\n # validate columns\n # ----------------\n\n # must have value column names (strings only)\n if df.columns.inferred_type not in valid_types:\n raise ValueError(\"feather must have string column names\")\n\n feather.write_feather(df, path)\n\n\n@deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads')\ndef read_feather(path, use_threads=True):\n \"\"\"\n Load a feather-format object from the file path\n\n .. versionadded 0.20.0\n\n Parameters\n ----------\n path : string file path, or file-like object\n nthreads : int, default 1\n Number of CPU threads to use when reading to pandas.DataFrame\n\n .. versionadded 0.21.0\n .. deprecated 0.24.0\n use_threads : bool, default True\n Whether to parallelize reading using multiple threads\n\n .. versionadded 0.24.0\n\n Returns\n -------\n type of object stored in file\n\n \"\"\"\n\n feather, pyarrow = _try_import()\n path = _stringify_path(path)\n\n if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):\n int_use_threads = int(use_threads)\n if int_use_threads < 1:\n int_use_threads = 1\n return feather.read_feather(path, nthreads=int_use_threads)\n\n return feather.read_feather(path, use_threads=bool(use_threads))\n", "path": "pandas/io/feather_format.py"}], "after_files": [{"content": "\"\"\" feather-format compat \"\"\"\n\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import range\nfrom pandas.util._decorators import deprecate_kwarg\n\nfrom pandas import DataFrame, Int64Index, RangeIndex\n\nfrom pandas.io.common import _stringify_path\n\n\ndef _try_import():\n # since pandas is a dependency of pyarrow\n # we need to import on first use\n try:\n import pyarrow\n from pyarrow import feather\n except ImportError:\n # give a nice error message\n raise ImportError(\"pyarrow is not installed\\n\\n\"\n \"you can install via conda\\n\"\n \"conda install pyarrow -c conda-forge\\n\"\n \"or via pip\\n\"\n \"pip install -U pyarrow\\n\")\n\n if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'):\n raise ImportError(\"pyarrow >= 0.4.1 required for feather support\\n\\n\"\n \"you can install via conda\\n\"\n \"conda install pyarrow -c conda-forge\"\n \"or via pip\\n\"\n \"pip install -U pyarrow\\n\")\n\n return feather, pyarrow\n\n\ndef to_feather(df, path):\n \"\"\"\n Write a DataFrame to the feather-format\n\n Parameters\n ----------\n df : DataFrame\n path : string file path, or file-like object\n\n \"\"\"\n path = _stringify_path(path)\n if not isinstance(df, DataFrame):\n raise ValueError(\"feather only support IO with DataFrames\")\n\n feather = _try_import()[0]\n valid_types = {'string', 'unicode'}\n\n # validate index\n # --------------\n\n # validate that we have only a default index\n # raise on anything else as we don't serialize the index\n\n if not isinstance(df.index, Int64Index):\n raise ValueError(\"feather does not support serializing {} \"\n \"for the index; you can .reset_index()\"\n \"to make the index into column(s)\".format(\n type(df.index)))\n\n if not df.index.equals(RangeIndex.from_range(range(len(df)))):\n raise ValueError(\"feather does not support serializing a \"\n \"non-default index for the index; you \"\n \"can .reset_index() to make the index \"\n \"into column(s)\")\n\n if df.index.name is not None:\n raise ValueError(\"feather does not serialize index meta-data on a \"\n \"default index\")\n\n # validate columns\n # ----------------\n\n # must have value column names (strings only)\n if df.columns.inferred_type not in valid_types:\n raise ValueError(\"feather must have string column names\")\n\n feather.write_feather(df, path)\n\n\n@deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads')\ndef read_feather(path, columns=None, use_threads=True):\n \"\"\"\n Load a feather-format object from the file path\n\n .. versionadded 0.20.0\n\n Parameters\n ----------\n path : string file path, or file-like object\n columns : sequence, default None\n If not provided, all columns are read\n\n .. versionadded 0.24.0\n nthreads : int, default 1\n Number of CPU threads to use when reading to pandas.DataFrame\n\n .. versionadded 0.21.0\n .. deprecated 0.24.0\n use_threads : bool, default True\n Whether to parallelize reading using multiple threads\n\n .. versionadded 0.24.0\n\n Returns\n -------\n type of object stored in file\n\n \"\"\"\n\n feather, pyarrow = _try_import()\n path = _stringify_path(path)\n\n if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):\n int_use_threads = int(use_threads)\n if int_use_threads < 1:\n int_use_threads = 1\n return feather.read_feather(path, columns=columns,\n nthreads=int_use_threads)\n\n return feather.read_feather(path, columns=columns,\n use_threads=bool(use_threads))\n", "path": "pandas/io/feather_format.py"}]}
| 1,507 | 314 |
gh_patches_debug_2620
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-4189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SyntaxWarning for loading indicator widget
I receive this warning after upgrading to `0.52.0`:
```
/Users/cthompson/Library/Caches/pypoetry/virtualenvs/dolphie-z84eXs3q-py3.11/lib/python3.11/site-packages/textual/widgets/_loading_indicator.py:57: SyntaxWarning: "is" with a literal. Did you mean "=="?
if self.app.animation_level is "none":
```
https://github.com/Textualize/textual/blob/main/src/textual/widgets/_loading_indicator.py#L57
Seems we just need to change `is "none"` to `== "none"`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_loading_indicator.py`
Content:
```
1 from __future__ import annotations
2
3 from time import time
4
5 from rich.console import RenderableType
6 from rich.style import Style
7 from rich.text import Text
8
9 from ..color import Gradient
10 from ..events import Mount
11 from ..widget import Widget
12
13
14 class LoadingIndicator(Widget):
15 """Display an animated loading indicator."""
16
17 DEFAULT_CSS = """
18 LoadingIndicator {
19 width: 100%;
20 height: 100%;
21 min-height: 1;
22 content-align: center middle;
23 color: $accent;
24 }
25 LoadingIndicator.-textual-loading-indicator {
26 layer: _loading;
27 background: $boost;
28 dock: top;
29 }
30 """
31
32 def __init__(
33 self,
34 name: str | None = None,
35 id: str | None = None,
36 classes: str | None = None,
37 disabled: bool = False,
38 ):
39 """Initialize a loading indicator.
40
41 Args:
42 name: The name of the widget.
43 id: The ID of the widget in the DOM.
44 classes: The CSS classes for the widget.
45 disabled: Whether the widget is disabled or not.
46 """
47 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
48
49 self._start_time: float = 0.0
50 """The time the loading indicator was mounted (a Unix timestamp)."""
51
52 def _on_mount(self, _: Mount) -> None:
53 self._start_time = time()
54 self.auto_refresh = 1 / 16
55
56 def render(self) -> RenderableType:
57 if self.app.animation_level is "none":
58 return Text("Loading...")
59
60 elapsed = time() - self._start_time
61 speed = 0.8
62 dot = "\u25cf"
63 _, _, background, color = self.colors
64
65 gradient = Gradient(
66 (0.0, background.blend(color, 0.1)),
67 (0.7, color),
68 (1.0, color.lighten(0.1)),
69 )
70
71 blends = [(elapsed * speed - dot_number / 8) % 1 for dot_number in range(5)]
72
73 dots = [
74 (
75 f"{dot} ",
76 Style.from_color(gradient.get_color((1 - blend) ** 2).rich_color),
77 )
78 for blend in blends
79 ]
80 indicator = Text.assemble(*dots)
81 indicator.rstrip()
82 return indicator
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/widgets/_loading_indicator.py b/src/textual/widgets/_loading_indicator.py
--- a/src/textual/widgets/_loading_indicator.py
+++ b/src/textual/widgets/_loading_indicator.py
@@ -54,7 +54,7 @@
self.auto_refresh = 1 / 16
def render(self) -> RenderableType:
- if self.app.animation_level is "none":
+ if self.app.animation_level == "none":
return Text("Loading...")
elapsed = time() - self._start_time
|
{"golden_diff": "diff --git a/src/textual/widgets/_loading_indicator.py b/src/textual/widgets/_loading_indicator.py\n--- a/src/textual/widgets/_loading_indicator.py\n+++ b/src/textual/widgets/_loading_indicator.py\n@@ -54,7 +54,7 @@\n self.auto_refresh = 1 / 16\n \n def render(self) -> RenderableType:\n- if self.app.animation_level is \"none\":\n+ if self.app.animation_level == \"none\":\n return Text(\"Loading...\")\n \n elapsed = time() - self._start_time\n", "issue": "SyntaxWarning for loading indicator widget\nI receive this warning after upgrading to `0.52.0`:\r\n\r\n```\r\n/Users/cthompson/Library/Caches/pypoetry/virtualenvs/dolphie-z84eXs3q-py3.11/lib/python3.11/site-packages/textual/widgets/_loading_indicator.py:57: SyntaxWarning: \"is\" with a literal. Did you mean \"==\"?\r\n if self.app.animation_level is \"none\":\r\n```\r\n\r\nhttps://github.com/Textualize/textual/blob/main/src/textual/widgets/_loading_indicator.py#L57\r\n\r\nSeems we just need to change `is \"none\"` to `== \"none\"`\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom time import time\n\nfrom rich.console import RenderableType\nfrom rich.style import Style\nfrom rich.text import Text\n\nfrom ..color import Gradient\nfrom ..events import Mount\nfrom ..widget import Widget\n\n\nclass LoadingIndicator(Widget):\n \"\"\"Display an animated loading indicator.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n LoadingIndicator {\n width: 100%;\n height: 100%;\n min-height: 1;\n content-align: center middle;\n color: $accent;\n }\n LoadingIndicator.-textual-loading-indicator {\n layer: _loading;\n background: $boost;\n dock: top;\n }\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialize a loading indicator.\n\n Args:\n name: The name of the widget.\n id: The ID of the widget in the DOM.\n classes: The CSS classes for the widget.\n disabled: Whether the widget is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n\n self._start_time: float = 0.0\n \"\"\"The time the loading indicator was mounted (a Unix timestamp).\"\"\"\n\n def _on_mount(self, _: Mount) -> None:\n self._start_time = time()\n self.auto_refresh = 1 / 16\n\n def render(self) -> RenderableType:\n if self.app.animation_level is \"none\":\n return Text(\"Loading...\")\n\n elapsed = time() - self._start_time\n speed = 0.8\n dot = \"\\u25cf\"\n _, _, background, color = self.colors\n\n gradient = Gradient(\n (0.0, background.blend(color, 0.1)),\n (0.7, color),\n (1.0, color.lighten(0.1)),\n )\n\n blends = [(elapsed * speed - dot_number / 8) % 1 for dot_number in range(5)]\n\n dots = [\n (\n f\"{dot} \",\n Style.from_color(gradient.get_color((1 - blend) ** 2).rich_color),\n )\n for blend in blends\n ]\n indicator = Text.assemble(*dots)\n indicator.rstrip()\n return indicator\n", "path": "src/textual/widgets/_loading_indicator.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom time import time\n\nfrom rich.console import RenderableType\nfrom rich.style import Style\nfrom rich.text import Text\n\nfrom ..color import Gradient\nfrom ..events import Mount\nfrom ..widget import Widget\n\n\nclass LoadingIndicator(Widget):\n \"\"\"Display an animated loading indicator.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n LoadingIndicator {\n width: 100%;\n height: 100%;\n min-height: 1;\n content-align: center middle;\n color: $accent;\n }\n LoadingIndicator.-textual-loading-indicator {\n layer: _loading;\n background: $boost;\n dock: top;\n }\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialize a loading indicator.\n\n Args:\n name: The name of the widget.\n id: The ID of the widget in the DOM.\n classes: The CSS classes for the widget.\n disabled: Whether the widget is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n\n self._start_time: float = 0.0\n \"\"\"The time the loading indicator was mounted (a Unix timestamp).\"\"\"\n\n def _on_mount(self, _: Mount) -> None:\n self._start_time = time()\n self.auto_refresh = 1 / 16\n\n def render(self) -> RenderableType:\n if self.app.animation_level == \"none\":\n return Text(\"Loading...\")\n\n elapsed = time() - self._start_time\n speed = 0.8\n dot = \"\\u25cf\"\n _, _, background, color = self.colors\n\n gradient = Gradient(\n (0.0, background.blend(color, 0.1)),\n (0.7, color),\n (1.0, color.lighten(0.1)),\n )\n\n blends = [(elapsed * speed - dot_number / 8) % 1 for dot_number in range(5)]\n\n dots = [\n (\n f\"{dot} \",\n Style.from_color(gradient.get_color((1 - blend) ** 2).rich_color),\n )\n for blend in blends\n ]\n indicator = Text.assemble(*dots)\n indicator.rstrip()\n return indicator\n", "path": "src/textual/widgets/_loading_indicator.py"}]}
| 1,093 | 118 |
gh_patches_debug_21749
|
rasdani/github-patches
|
git_diff
|
searx__searx-3444
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Brainstorming: on_result plugin / https_rewrite plugin
Different plugin ideas require to filter / modify the results according to the host of the URL.
Usage examples:
* the https_rewrite plugin, and the current implementation fails to do so: https://github.com/asciimoo/searx/tree/master/searx/plugins/https_rules are outdated, and the number of rules is a fraction of the actual list to avoid an overhead. Here there is one lookup per .xml files: https://github.com/asciimoo/searx/blob/6e37255e59693d7a3768491b2b8d06441051131f/searx/plugins/https_rewrite.py#L140
* [Add support for Youtube and Twitter links rewrite PR #1726](https://github.com/asciimoo/searx/pull/1726) rewrite some URL too.
* [feature request: private user blocklists/blacklists #2001](https://github.com/asciimoo/searx/issues/2001) asks to remove some domain names from the results.
Currently, ```on_result``` plugin scan for each URLs and process them.
What if we provide provide each plugin register for a list of domain.
Some technical solutions to create a static list of domains:
* a [trie](https://en.wikipedia.org/wiki/Trie). (This issue superseed this one https://github.com/asciimoo/searx/issues/936 )
* one hash table per label in the domain name: https://github.com/sethmlarson/hstspreload
According to https://www.eff.org/https-everywhere/rulesets#wildcard-targets the wildcard in HTTPS everywhere can be either:
* *.eff.org
* www.eff.*
A solution: two lookups in two different structures, one in reversed order (org -> eff -> *), one in normal order (www -> eff -> *).
Prototype: https://gist.github.com/dalf/26462c1627c8b00fdfacdd55525b08b6
The idea is provide:
* a format to store the rules (either a binary or plain text format), with a module to create this output format.
* a plugin can register this structure: the searx core calls the plugin where there is a match.
* it is difficult to merge all trie without a lot of repetition, some most probably one "double trie" per plugin; but in simple cases I guess it can be done.
Note about the issue #2001:
* the choice of domains to remove is a preference, then the issue is not solved.
* if we can make some maintained list, then the issue is solved.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/duckduckgo.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """DuckDuckGo Lite
4 """
5
6 from json import loads
7
8 from lxml.html import fromstring
9
10 from searx.utils import (
11 dict_subset,
12 eval_xpath,
13 eval_xpath_getindex,
14 extract_text,
15 match_language,
16 )
17 from searx.poolrequests import get
18
19 # about
20 about = {
21 "website": 'https://lite.duckduckgo.com/lite',
22 "wikidata_id": 'Q12805',
23 "official_api_documentation": 'https://duckduckgo.com/api',
24 "use_official_api": False,
25 "require_api_key": False,
26 "results": 'HTML',
27 }
28
29 # engine dependent config
30 categories = ['general', 'web']
31 paging = True
32 supported_languages_url = 'https://duckduckgo.com/util/u588.js'
33 time_range_support = True
34
35 language_aliases = {
36 'ar-SA': 'ar-XA',
37 'es-419': 'es-XL',
38 'ja': 'jp-JP',
39 'ko': 'kr-KR',
40 'sl-SI': 'sl-SL',
41 'zh-TW': 'tzh-TW',
42 'zh-HK': 'tzh-HK',
43 }
44
45 time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
46
47 # search-url
48 url = 'https://lite.duckduckgo.com/lite'
49 url_ping = 'https://duckduckgo.com/t/sl_l'
50
51
52 # match query's language to a region code that duckduckgo will accept
53 def get_region_code(lang, lang_list=None):
54 if lang == 'all':
55 return None
56
57 lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')
58 lang_parts = lang_code.split('-')
59
60 # country code goes first
61 return lang_parts[1].lower() + '-' + lang_parts[0].lower()
62
63
64 def request(query, params):
65
66 params['url'] = url
67 params['method'] = 'POST'
68
69 params['data']['q'] = query
70
71 # The API is not documented, so we do some reverse engineering and emulate
72 # what https://lite.duckduckgo.com/lite/ does when you press "next Page"
73 # link again and again ..
74
75 params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'
76
77 # initial page does not have an offset
78 if params['pageno'] == 2:
79 # second page does have an offset of 30
80 offset = (params['pageno'] - 1) * 30
81 params['data']['s'] = offset
82 params['data']['dc'] = offset + 1
83
84 elif params['pageno'] > 2:
85 # third and following pages do have an offset of 30 + n*50
86 offset = 30 + (params['pageno'] - 2) * 50
87 params['data']['s'] = offset
88 params['data']['dc'] = offset + 1
89
90 # initial page does not have additional data in the input form
91 if params['pageno'] > 1:
92 # request the second page (and more pages) needs 'o' and 'api' arguments
93 params['data']['o'] = 'json'
94 params['data']['api'] = 'd.js'
95
96 # initial page does not have additional data in the input form
97 if params['pageno'] > 2:
98 # request the third page (and more pages) some more arguments
99 params['data']['nextParams'] = ''
100 params['data']['v'] = ''
101 params['data']['vqd'] = ''
102
103 region_code = get_region_code(params['language'], supported_languages)
104 if region_code:
105 params['data']['kl'] = region_code
106 params['cookies']['kl'] = region_code
107
108 params['data']['df'] = ''
109 if params['time_range'] in time_range_dict:
110 params['data']['df'] = time_range_dict[params['time_range']]
111 params['cookies']['df'] = time_range_dict[params['time_range']]
112
113 return params
114
115
116 # get response from search-request
117 def response(resp):
118
119 headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
120 get(url_ping, headers=headers_ping)
121
122 if resp.status_code == 303:
123 return []
124
125 results = []
126 doc = fromstring(resp.text)
127
128 result_table = eval_xpath(doc, '//html/body/form/div[@class="filters"]/table')
129 if not len(result_table) >= 3:
130 # no more results
131 return []
132 result_table = result_table[2]
133
134 tr_rows = eval_xpath(result_table, './/tr')
135
136 # In the last <tr> is the form of the 'previous/next page' links
137 tr_rows = tr_rows[:-1]
138
139 len_tr_rows = len(tr_rows)
140 offset = 0
141
142 while len_tr_rows >= offset + 4:
143
144 # assemble table rows we need to scrap
145 tr_title = tr_rows[offset]
146 tr_content = tr_rows[offset + 1]
147 offset += 4
148
149 # ignore sponsored Adds <tr class="result-sponsored">
150 if tr_content.get('class') == 'result-sponsored':
151 continue
152
153 a_tag = eval_xpath_getindex(tr_title, './/td//a[@class="result-link"]', 0, None)
154 if a_tag is None:
155 continue
156
157 td_content = eval_xpath_getindex(tr_content, './/td[@class="result-snippet"]', 0, None)
158 if td_content is None:
159 continue
160
161 results.append(
162 {
163 'title': a_tag.text_content(),
164 'content': extract_text(td_content),
165 'url': a_tag.get('href'),
166 }
167 )
168
169 return results
170
171
172 # get supported languages from their site
173 def _fetch_supported_languages(resp):
174
175 # response is a js file with regions as an embedded object
176 response_page = resp.text
177 response_page = response_page[response_page.find('regions:{') + 8:]
178 response_page = response_page[: response_page.find('}') + 1]
179
180 regions_json = loads(response_page)
181 supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())
182
183 return list(supported_languages)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -18,7 +18,7 @@
# about
about = {
- "website": 'https://lite.duckduckgo.com/lite',
+ "website": 'https://lite.duckduckgo.com/lite/',
"wikidata_id": 'Q12805',
"official_api_documentation": 'https://duckduckgo.com/api',
"use_official_api": False,
@@ -45,7 +45,7 @@
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
# search-url
-url = 'https://lite.duckduckgo.com/lite'
+url = 'https://lite.duckduckgo.com/lite/'
url_ping = 'https://duckduckgo.com/t/sl_l'
@@ -73,6 +73,7 @@
# link again and again ..
params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'
+ params['headers']['Referer'] = 'https://lite.duckduckgo.com/'
# initial page does not have an offset
if params['pageno'] == 2:
|
{"golden_diff": "diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py\n--- a/searx/engines/duckduckgo.py\n+++ b/searx/engines/duckduckgo.py\n@@ -18,7 +18,7 @@\n \n # about\n about = {\n- \"website\": 'https://lite.duckduckgo.com/lite',\n+ \"website\": 'https://lite.duckduckgo.com/lite/',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": 'https://duckduckgo.com/api',\n \"use_official_api\": False,\n@@ -45,7 +45,7 @@\n time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}\n \n # search-url\n-url = 'https://lite.duckduckgo.com/lite'\n+url = 'https://lite.duckduckgo.com/lite/'\n url_ping = 'https://duckduckgo.com/t/sl_l'\n \n \n@@ -73,6 +73,7 @@\n # link again and again ..\n \n params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'\n+ params['headers']['Referer'] = 'https://lite.duckduckgo.com/'\n \n # initial page does not have an offset\n if params['pageno'] == 2:\n", "issue": "Brainstorming: on_result plugin / https_rewrite plugin\nDifferent plugin ideas require to filter / modify the results according to the host of the URL. \r\n\r\nUsage examples:\r\n* the https_rewrite plugin, and the current implementation fails to do so: https://github.com/asciimoo/searx/tree/master/searx/plugins/https_rules are outdated, and the number of rules is a fraction of the actual list to avoid an overhead. Here there is one lookup per .xml files: https://github.com/asciimoo/searx/blob/6e37255e59693d7a3768491b2b8d06441051131f/searx/plugins/https_rewrite.py#L140\r\n* [Add support for Youtube and Twitter links rewrite PR #1726](https://github.com/asciimoo/searx/pull/1726) rewrite some URL too.\r\n* [feature request: private user blocklists/blacklists #2001](https://github.com/asciimoo/searx/issues/2001) asks to remove some domain names from the results.\r\n\r\nCurrently, ```on_result``` plugin scan for each URLs and process them. \r\nWhat if we provide provide each plugin register for a list of domain.\r\n\r\nSome technical solutions to create a static list of domains:\r\n* a [trie](https://en.wikipedia.org/wiki/Trie). (This issue superseed this one https://github.com/asciimoo/searx/issues/936 )\r\n* one hash table per label in the domain name: https://github.com/sethmlarson/hstspreload\r\n\r\nAccording to https://www.eff.org/https-everywhere/rulesets#wildcard-targets the wildcard in HTTPS everywhere can be either:\r\n* *.eff.org\r\n* www.eff.*\r\n\r\nA solution: two lookups in two different structures, one in reversed order (org -> eff -> *), one in normal order (www -> eff -> *).\r\n\r\nPrototype: https://gist.github.com/dalf/26462c1627c8b00fdfacdd55525b08b6\r\n\r\n\r\n\r\nThe idea is provide: \r\n* a format to store the rules (either a binary or plain text format), with a module to create this output format.\r\n* a plugin can register this structure: the searx core calls the plugin where there is a match.\r\n* it is difficult to merge all trie without a lot of repetition, some most probably one \"double trie\" per plugin; but in simple cases I guess it can be done.\r\n\r\nNote about the issue #2001:\r\n* the choice of domains to remove is a preference, then the issue is not solved.\r\n* if we can make some maintained list, then the issue is solved.\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"DuckDuckGo Lite\n\"\"\"\n\nfrom json import loads\n\nfrom lxml.html import fromstring\n\nfrom searx.utils import (\n dict_subset,\n eval_xpath,\n eval_xpath_getindex,\n extract_text,\n match_language,\n)\nfrom searx.poolrequests import get\n\n# about\nabout = {\n \"website\": 'https://lite.duckduckgo.com/lite',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": 'https://duckduckgo.com/api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\nsupported_languages_url = 'https://duckduckgo.com/util/u588.js'\ntime_range_support = True\n\nlanguage_aliases = {\n 'ar-SA': 'ar-XA',\n 'es-419': 'es-XL',\n 'ja': 'jp-JP',\n 'ko': 'kr-KR',\n 'sl-SI': 'sl-SL',\n 'zh-TW': 'tzh-TW',\n 'zh-HK': 'tzh-HK',\n}\n\ntime_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}\n\n# search-url\nurl = 'https://lite.duckduckgo.com/lite'\nurl_ping = 'https://duckduckgo.com/t/sl_l'\n\n\n# match query's language to a region code that duckduckgo will accept\ndef get_region_code(lang, lang_list=None):\n if lang == 'all':\n return None\n\n lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')\n lang_parts = lang_code.split('-')\n\n # country code goes first\n return lang_parts[1].lower() + '-' + lang_parts[0].lower()\n\n\ndef request(query, params):\n\n params['url'] = url\n params['method'] = 'POST'\n\n params['data']['q'] = query\n\n # The API is not documented, so we do some reverse engineering and emulate\n # what https://lite.duckduckgo.com/lite/ does when you press \"next Page\"\n # link again and again ..\n\n params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'\n\n # initial page does not have an offset\n if params['pageno'] == 2:\n # second page does have an offset of 30\n offset = (params['pageno'] - 1) * 30\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n elif params['pageno'] > 2:\n # third and following pages do have an offset of 30 + n*50\n offset = 30 + (params['pageno'] - 2) * 50\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 1:\n # request the second page (and more pages) needs 'o' and 'api' arguments\n params['data']['o'] = 'json'\n params['data']['api'] = 'd.js'\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 2:\n # request the third page (and more pages) some more arguments\n params['data']['nextParams'] = ''\n params['data']['v'] = ''\n params['data']['vqd'] = ''\n\n region_code = get_region_code(params['language'], supported_languages)\n if region_code:\n params['data']['kl'] = region_code\n params['cookies']['kl'] = region_code\n\n params['data']['df'] = ''\n if params['time_range'] in time_range_dict:\n params['data']['df'] = time_range_dict[params['time_range']]\n params['cookies']['df'] = time_range_dict[params['time_range']]\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n\n headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])\n get(url_ping, headers=headers_ping)\n\n if resp.status_code == 303:\n return []\n\n results = []\n doc = fromstring(resp.text)\n\n result_table = eval_xpath(doc, '//html/body/form/div[@class=\"filters\"]/table')\n if not len(result_table) >= 3:\n # no more results\n return []\n result_table = result_table[2]\n\n tr_rows = eval_xpath(result_table, './/tr')\n\n # In the last <tr> is the form of the 'previous/next page' links\n tr_rows = tr_rows[:-1]\n\n len_tr_rows = len(tr_rows)\n offset = 0\n\n while len_tr_rows >= offset + 4:\n\n # assemble table rows we need to scrap\n tr_title = tr_rows[offset]\n tr_content = tr_rows[offset + 1]\n offset += 4\n\n # ignore sponsored Adds <tr class=\"result-sponsored\">\n if tr_content.get('class') == 'result-sponsored':\n continue\n\n a_tag = eval_xpath_getindex(tr_title, './/td//a[@class=\"result-link\"]', 0, None)\n if a_tag is None:\n continue\n\n td_content = eval_xpath_getindex(tr_content, './/td[@class=\"result-snippet\"]', 0, None)\n if td_content is None:\n continue\n\n results.append(\n {\n 'title': a_tag.text_content(),\n 'content': extract_text(td_content),\n 'url': a_tag.get('href'),\n }\n )\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n # response is a js file with regions as an embedded object\n response_page = resp.text\n response_page = response_page[response_page.find('regions:{') + 8:]\n response_page = response_page[: response_page.find('}') + 1]\n\n regions_json = loads(response_page)\n supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())\n\n return list(supported_languages)\n", "path": "searx/engines/duckduckgo.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"DuckDuckGo Lite\n\"\"\"\n\nfrom json import loads\n\nfrom lxml.html import fromstring\n\nfrom searx.utils import (\n dict_subset,\n eval_xpath,\n eval_xpath_getindex,\n extract_text,\n match_language,\n)\nfrom searx.poolrequests import get\n\n# about\nabout = {\n \"website\": 'https://lite.duckduckgo.com/lite/',\n \"wikidata_id\": 'Q12805',\n \"official_api_documentation\": 'https://duckduckgo.com/api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\nsupported_languages_url = 'https://duckduckgo.com/util/u588.js'\ntime_range_support = True\n\nlanguage_aliases = {\n 'ar-SA': 'ar-XA',\n 'es-419': 'es-XL',\n 'ja': 'jp-JP',\n 'ko': 'kr-KR',\n 'sl-SI': 'sl-SL',\n 'zh-TW': 'tzh-TW',\n 'zh-HK': 'tzh-HK',\n}\n\ntime_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}\n\n# search-url\nurl = 'https://lite.duckduckgo.com/lite/'\nurl_ping = 'https://duckduckgo.com/t/sl_l'\n\n\n# match query's language to a region code that duckduckgo will accept\ndef get_region_code(lang, lang_list=None):\n if lang == 'all':\n return None\n\n lang_code = match_language(lang, lang_list or [], language_aliases, 'wt-WT')\n lang_parts = lang_code.split('-')\n\n # country code goes first\n return lang_parts[1].lower() + '-' + lang_parts[0].lower()\n\n\ndef request(query, params):\n\n params['url'] = url\n params['method'] = 'POST'\n\n params['data']['q'] = query\n\n # The API is not documented, so we do some reverse engineering and emulate\n # what https://lite.duckduckgo.com/lite/ does when you press \"next Page\"\n # link again and again ..\n\n params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'\n params['headers']['Referer'] = 'https://lite.duckduckgo.com/'\n\n # initial page does not have an offset\n if params['pageno'] == 2:\n # second page does have an offset of 30\n offset = (params['pageno'] - 1) * 30\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n elif params['pageno'] > 2:\n # third and following pages do have an offset of 30 + n*50\n offset = 30 + (params['pageno'] - 2) * 50\n params['data']['s'] = offset\n params['data']['dc'] = offset + 1\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 1:\n # request the second page (and more pages) needs 'o' and 'api' arguments\n params['data']['o'] = 'json'\n params['data']['api'] = 'd.js'\n\n # initial page does not have additional data in the input form\n if params['pageno'] > 2:\n # request the third page (and more pages) some more arguments\n params['data']['nextParams'] = ''\n params['data']['v'] = ''\n params['data']['vqd'] = ''\n\n region_code = get_region_code(params['language'], supported_languages)\n if region_code:\n params['data']['kl'] = region_code\n params['cookies']['kl'] = region_code\n\n params['data']['df'] = ''\n if params['time_range'] in time_range_dict:\n params['data']['df'] = time_range_dict[params['time_range']]\n params['cookies']['df'] = time_range_dict[params['time_range']]\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n\n headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])\n get(url_ping, headers=headers_ping)\n\n if resp.status_code == 303:\n return []\n\n results = []\n doc = fromstring(resp.text)\n\n result_table = eval_xpath(doc, '//html/body/form/div[@class=\"filters\"]/table')\n if not len(result_table) >= 3:\n # no more results\n return []\n result_table = result_table[2]\n\n tr_rows = eval_xpath(result_table, './/tr')\n\n # In the last <tr> is the form of the 'previous/next page' links\n tr_rows = tr_rows[:-1]\n\n len_tr_rows = len(tr_rows)\n offset = 0\n\n while len_tr_rows >= offset + 4:\n\n # assemble table rows we need to scrap\n tr_title = tr_rows[offset]\n tr_content = tr_rows[offset + 1]\n offset += 4\n\n # ignore sponsored Adds <tr class=\"result-sponsored\">\n if tr_content.get('class') == 'result-sponsored':\n continue\n\n a_tag = eval_xpath_getindex(tr_title, './/td//a[@class=\"result-link\"]', 0, None)\n if a_tag is None:\n continue\n\n td_content = eval_xpath_getindex(tr_content, './/td[@class=\"result-snippet\"]', 0, None)\n if td_content is None:\n continue\n\n results.append(\n {\n 'title': a_tag.text_content(),\n 'content': extract_text(td_content),\n 'url': a_tag.get('href'),\n }\n )\n\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n # response is a js file with regions as an embedded object\n response_page = resp.text\n response_page = response_page[response_page.find('regions:{') + 8:]\n response_page = response_page[: response_page.find('}') + 1]\n\n regions_json = loads(response_page)\n supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())\n\n return list(supported_languages)\n", "path": "searx/engines/duckduckgo.py"}]}
| 2,756 | 315 |
gh_patches_debug_8803
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-244
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Relative path for `static` django resources
@Uxio0 addressed this in this PR: https://github.com/gnosis/safe-transaction-service/pull/400
This would facilitate having a custom mounting point for the services in nginx, making `static`'s location relative.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/config/settings.py`
Content:
```
1 """
2 Django settings for safe_client_config_service project.
3
4 Generated by 'django-admin startproject' using Django 3.2.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.2/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.2/ref/settings/
11 """
12 import os
13 from distutils.util import strtobool
14 from pathlib import Path
15
16 # Build paths inside the project like this: BASE_DIR / 'subdir'.
17
18 BASE_DIR = Path(__file__).resolve().parent.parent
19
20 # Quick-start development settings - unsuitable for production
21 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
22
23 # SECURITY WARNING: keep the secret key used in production secret!
24 SECRET_KEY = os.getenv("SECRET_KEY", None)
25
26 # SECURITY WARNING: don't run with debug turned on in production!
27 DEBUG = bool(strtobool(os.getenv("DEBUG", "false")))
28
29 # https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS
30 allowed_hosts = os.getenv("DJANGO_ALLOWED_HOSTS", ".localhost,127.0.0.1,[::1]")
31 ALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(",")]
32
33 # Application definition
34
35 REST_FRAMEWORK = {
36 # https://www.django-rest-framework.org/api-guide/renderers/
37 "DEFAULT_RENDERER_CLASSES": [
38 "djangorestframework_camel_case.render.CamelCaseJSONRenderer",
39 ],
40 "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning",
41 }
42
43 INSTALLED_APPS = [
44 "corsheaders",
45 "about.apps.AboutAppConfig",
46 "chains.apps.AppsConfig",
47 "safe_apps.apps.AppsConfig",
48 "django.contrib.admin",
49 "django.contrib.auth",
50 "django.contrib.contenttypes",
51 "django.contrib.sessions",
52 "django.contrib.messages",
53 "django.contrib.staticfiles",
54 "rest_framework",
55 "drf_yasg",
56 ]
57
58 MIDDLEWARE = [
59 "config.middleware.LoggingMiddleware",
60 "corsheaders.middleware.CorsMiddleware",
61 "django.middleware.security.SecurityMiddleware",
62 "django.contrib.sessions.middleware.SessionMiddleware",
63 "django.middleware.common.CommonMiddleware",
64 "django.middleware.csrf.CsrfViewMiddleware",
65 "django.contrib.auth.middleware.AuthenticationMiddleware",
66 "django.contrib.messages.middleware.MessageMiddleware",
67 "django.middleware.clickjacking.XFrameOptionsMiddleware",
68 ]
69
70 CACHES = {
71 "default": {
72 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
73 },
74 "safe-apps": {
75 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
76 },
77 }
78
79 LOGGING = {
80 "version": 1,
81 "disable_existing_loggers": False,
82 "formatters": {
83 "short": {"format": "%(asctime)s %(message)s"},
84 "verbose": {
85 "format": "%(asctime)s [%(levelname)s] [%(processName)s] %(message)s"
86 },
87 },
88 "handlers": {
89 "console": {
90 "class": "logging.StreamHandler",
91 "formatter": "verbose",
92 },
93 "console_short": {
94 "class": "logging.StreamHandler",
95 "formatter": "short",
96 },
97 },
98 "root": {
99 "handlers": ["console"],
100 "level": os.getenv("ROOT_LOG_LEVEL", "INFO"),
101 },
102 "loggers": {
103 "LoggingMiddleware": {
104 "handlers": ["console_short"],
105 "level": "INFO",
106 "propagate": False,
107 },
108 },
109 }
110
111 ROOT_URLCONF = "config.urls"
112
113 TEMPLATES = [
114 {
115 "BACKEND": "django.template.backends.django.DjangoTemplates",
116 "DIRS": [],
117 "APP_DIRS": True,
118 "OPTIONS": {
119 "context_processors": [
120 "django.template.context_processors.debug",
121 "django.template.context_processors.request",
122 "django.contrib.auth.context_processors.auth",
123 "django.contrib.messages.context_processors.messages",
124 ],
125 },
126 },
127 ]
128
129 WSGI_APPLICATION = "config.wsgi.application"
130
131 # Database
132 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
133
134 DATABASES = {
135 "default": {
136 "ENGINE": "django.db.backends.postgresql",
137 "NAME": os.getenv("POSTGRES_NAME", "postgres"),
138 "USER": os.getenv("POSTGRES_USER", "postgres"),
139 "PASSWORD": os.getenv("POSTGRES_PASSWORD", "postgres"),
140 "HOST": os.getenv("POSTGRES_HOST", "db"),
141 "PORT": os.getenv("POSTGRES_PORT", "5432"),
142 }
143 }
144
145 # Password validation
146 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
147
148 AUTH_PASSWORD_VALIDATORS = [
149 {
150 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
151 },
152 {
153 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
154 },
155 {
156 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
157 },
158 {
159 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
160 },
161 ]
162
163 # Internationalization
164 # https://docs.djangoproject.com/en/3.2/topics/i18n/
165
166 LANGUAGE_CODE = "en-us"
167
168 TIME_ZONE = "UTC"
169
170 USE_I18N = True
171
172 USE_L10N = True
173
174 USE_TZ = True
175
176 # Static files (CSS, JavaScript, Images)
177 # https://docs.djangoproject.com/en/3.2/howto/static-files/
178
179 STATIC_URL = "/static/"
180
181 STATIC_ROOT = "staticfiles"
182
183 # Default primary key field type
184 # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
185
186 DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
187
188 SWAGGER_SETTINGS = {
189 "DEFAULT_INFO": "config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO"
190 }
191
192 CORS_ALLOW_ALL_ORIGINS = True
193 CORS_URLS_REGEX = r"^/api/.*$"
194
195 CGW_URL = os.environ.get("CGW_URL")
196 CGW_FLUSH_TOKEN = os.environ.get("CGW_FLUSH_TOKEN")
197
198 # By default, Django stores files locally, using the MEDIA_ROOT and MEDIA_URL settings.
199 # (using the default the default FileSystemStorage)
200 # https://docs.djangoproject.com/en/dev/ref/settings/#media-root
201 MEDIA_ROOT = f"{BASE_DIR}/media/"
202 # https://docs.djangoproject.com/en/dev/ref/settings/#media-url
203 MEDIA_URL = "/media/"
204
205 AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
206 AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
207 AWS_STORAGE_BUCKET_NAME = os.getenv("AWS_STORAGE_BUCKET_NAME")
208 AWS_S3_CUSTOM_DOMAIN = os.getenv("AWS_S3_CUSTOM_DOMAIN")
209 # By default files with the same name will overwrite each other. Set this to False to have extra characters appended.
210 AWS_S3_FILE_OVERWRITE = True
211 # Setting AWS_QUERYSTRING_AUTH to False to remove query parameter authentication from generated URLs.
212 # This can be useful if your S3 buckets are public.
213 AWS_QUERYSTRING_AUTH = False
214 DEFAULT_FILE_STORAGE = os.getenv(
215 "DEFAULT_FILE_STORAGE", "storages.backends.s3boto3.S3Boto3Storage"
216 )
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/config/settings.py b/src/config/settings.py
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -109,6 +109,7 @@
}
ROOT_URLCONF = "config.urls"
+FORCE_SCRIPT_NAME = os.getenv("FORCE_SCRIPT_NAME", default=None)
TEMPLATES = [
{
@@ -176,8 +177,7 @@
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
-STATIC_URL = "/static/"
-
+STATIC_URL = "static/"
STATIC_ROOT = "staticfiles"
# Default primary key field type
|
{"golden_diff": "diff --git a/src/config/settings.py b/src/config/settings.py\n--- a/src/config/settings.py\n+++ b/src/config/settings.py\n@@ -109,6 +109,7 @@\n }\n \n ROOT_URLCONF = \"config.urls\"\n+FORCE_SCRIPT_NAME = os.getenv(\"FORCE_SCRIPT_NAME\", default=None)\n \n TEMPLATES = [\n {\n@@ -176,8 +177,7 @@\n # Static files (CSS, JavaScript, Images)\n # https://docs.djangoproject.com/en/3.2/howto/static-files/\n \n-STATIC_URL = \"/static/\"\n-\n+STATIC_URL = \"static/\"\n STATIC_ROOT = \"staticfiles\"\n \n # Default primary key field type\n", "issue": "Relative path for `static` django resources\n@Uxio0 addressed this in this PR: https://github.com/gnosis/safe-transaction-service/pull/400\r\n\r\nThis would facilitate having a custom mounting point for the services in nginx, making `static`'s location relative.\n", "before_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ],\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.NamespaceVersioning\",\n}\n\nINSTALLED_APPS = [\n \"corsheaders\",\n \"about.apps.AboutAppConfig\",\n \"chains.apps.AppsConfig\",\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"drf_yasg\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nSWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO\"\n}\n\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r\"^/api/.*$\"\n\nCGW_URL = os.environ.get(\"CGW_URL\")\nCGW_FLUSH_TOKEN = os.environ.get(\"CGW_FLUSH_TOKEN\")\n\n# By default, Django stores files locally, using the MEDIA_ROOT and MEDIA_URL settings.\n# (using the default the default FileSystemStorage)\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = f\"{BASE_DIR}/media/\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\nAWS_ACCESS_KEY_ID = os.getenv(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\nAWS_STORAGE_BUCKET_NAME = os.getenv(\"AWS_STORAGE_BUCKET_NAME\")\nAWS_S3_CUSTOM_DOMAIN = os.getenv(\"AWS_S3_CUSTOM_DOMAIN\")\n# By default files with the same name will overwrite each other. Set this to False to have extra characters appended.\nAWS_S3_FILE_OVERWRITE = True\n# Setting AWS_QUERYSTRING_AUTH to False to remove query parameter authentication from generated URLs.\n# This can be useful if your S3 buckets are public.\nAWS_QUERYSTRING_AUTH = False\nDEFAULT_FILE_STORAGE = os.getenv(\n \"DEFAULT_FILE_STORAGE\", \"storages.backends.s3boto3.S3Boto3Storage\"\n)\n", "path": "src/config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ],\n \"DEFAULT_VERSIONING_CLASS\": \"rest_framework.versioning.NamespaceVersioning\",\n}\n\nINSTALLED_APPS = [\n \"corsheaders\",\n \"about.apps.AboutAppConfig\",\n \"chains.apps.AppsConfig\",\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"drf_yasg\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\nFORCE_SCRIPT_NAME = os.getenv(\"FORCE_SCRIPT_NAME\", default=None)\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"static/\"\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nSWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO\"\n}\n\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r\"^/api/.*$\"\n\nCGW_URL = os.environ.get(\"CGW_URL\")\nCGW_FLUSH_TOKEN = os.environ.get(\"CGW_FLUSH_TOKEN\")\n\n# By default, Django stores files locally, using the MEDIA_ROOT and MEDIA_URL settings.\n# (using the default the default FileSystemStorage)\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-root\nMEDIA_ROOT = f\"{BASE_DIR}/media/\"\n# https://docs.djangoproject.com/en/dev/ref/settings/#media-url\nMEDIA_URL = \"/media/\"\n\nAWS_ACCESS_KEY_ID = os.getenv(\"AWS_ACCESS_KEY_ID\")\nAWS_SECRET_ACCESS_KEY = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\nAWS_STORAGE_BUCKET_NAME = os.getenv(\"AWS_STORAGE_BUCKET_NAME\")\nAWS_S3_CUSTOM_DOMAIN = os.getenv(\"AWS_S3_CUSTOM_DOMAIN\")\n# By default files with the same name will overwrite each other. Set this to False to have extra characters appended.\nAWS_S3_FILE_OVERWRITE = True\n# Setting AWS_QUERYSTRING_AUTH to False to remove query parameter authentication from generated URLs.\n# This can be useful if your S3 buckets are public.\nAWS_QUERYSTRING_AUTH = False\nDEFAULT_FILE_STORAGE = os.getenv(\n \"DEFAULT_FILE_STORAGE\", \"storages.backends.s3boto3.S3Boto3Storage\"\n)\n", "path": "src/config/settings.py"}]}
| 2,420 | 146 |
gh_patches_debug_17695
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1359
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DOC: update read_postgis docstring for CRS changes
The read_postgis docstring still mentions to use a dict:
https://github.com/geopandas/geopandas/blob/078062d303e401aaa5e37e04c0e7c3ce188920fe/geopandas/io/sql.py#L33-L36
we should update this to follow how CRS is explained in other places now we use pyproj.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/sql.py`
Content:
```
1 import sys
2
3 import pandas as pd
4
5 import shapely.wkb
6
7 from geopandas import GeoDataFrame
8
9
10 def read_postgis(
11 sql,
12 con,
13 geom_col="geom",
14 crs=None,
15 index_col=None,
16 coerce_float=True,
17 parse_dates=None,
18 params=None,
19 ):
20 """
21 Returns a GeoDataFrame corresponding to the result of the query
22 string, which must contain a geometry column in WKB representation.
23
24 Parameters
25 ----------
26 sql : string
27 SQL query to execute in selecting entries from database, or name
28 of the table to read from the database.
29 con : DB connection object or SQLAlchemy engine
30 Active connection to the database to query.
31 geom_col : string, default 'geom'
32 column name to convert to shapely geometries
33 crs : dict or str, optional
34 CRS to use for the returned GeoDataFrame; if not set, tries to
35 determine CRS from the SRID associated with the first geometry in
36 the database, and assigns that to all geometries.
37
38 See the documentation for pandas.read_sql for further explanation
39 of the following parameters:
40 index_col, coerce_float, parse_dates, params
41
42 Returns
43 -------
44 GeoDataFrame
45
46 Example
47 -------
48 PostGIS
49 >>> sql = "SELECT geom, kind FROM polygons"
50 SpatiaLite
51 >>> sql = "SELECT ST_AsBinary(geom) AS geom, kind FROM polygons"
52 >>> df = geopandas.read_postgis(sql, con)
53 """
54
55 df = pd.read_sql(
56 sql,
57 con,
58 index_col=index_col,
59 coerce_float=coerce_float,
60 parse_dates=parse_dates,
61 params=params,
62 )
63
64 if geom_col not in df:
65 raise ValueError("Query missing geometry column '{}'".format(geom_col))
66
67 geoms = df[geom_col].dropna()
68
69 if not geoms.empty:
70 load_geom_bytes = shapely.wkb.loads
71 """Load from Python 3 binary."""
72
73 def load_geom_buffer(x):
74 """Load from Python 2 binary."""
75 return shapely.wkb.loads(str(x))
76
77 def load_geom_text(x):
78 """Load from binary encoded as text."""
79 return shapely.wkb.loads(str(x), hex=True)
80
81 if sys.version_info.major < 3:
82 if isinstance(geoms.iat[0], buffer):
83 load_geom = load_geom_buffer
84 else:
85 load_geom = load_geom_text
86 elif isinstance(geoms.iat[0], bytes):
87 load_geom = load_geom_bytes
88 else:
89 load_geom = load_geom_text
90
91 df[geom_col] = geoms = geoms.apply(load_geom)
92 if crs is None:
93 srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)
94 # if no defined SRID in geodatabase, returns SRID of 0
95 if srid != 0:
96 crs = "epsg:{}".format(srid)
97
98 return GeoDataFrame(df, crs=crs, geometry=geom_col)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/io/sql.py b/geopandas/io/sql.py
--- a/geopandas/io/sql.py
+++ b/geopandas/io/sql.py
@@ -30,10 +30,12 @@
Active connection to the database to query.
geom_col : string, default 'geom'
column name to convert to shapely geometries
- crs : dict or str, optional
- CRS to use for the returned GeoDataFrame; if not set, tries to
- determine CRS from the SRID associated with the first geometry in
- the database, and assigns that to all geometries.
+ crs : pyproj.CRS, optional
+ CRS to use for the returned GeoDataFrame. The value can be anything accepted
+ by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
+ such as an authority string (eg "EPSG:4326") or a WKT string.
+ If not set, tries to determine CRS from the SRID associated with the
+ first geometry in the database, and assigns that to all geometries.
See the documentation for pandas.read_sql for further explanation
of the following parameters:
|
{"golden_diff": "diff --git a/geopandas/io/sql.py b/geopandas/io/sql.py\n--- a/geopandas/io/sql.py\n+++ b/geopandas/io/sql.py\n@@ -30,10 +30,12 @@\n Active connection to the database to query.\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n- crs : dict or str, optional\n- CRS to use for the returned GeoDataFrame; if not set, tries to\n- determine CRS from the SRID associated with the first geometry in\n- the database, and assigns that to all geometries.\n+ crs : pyproj.CRS, optional\n+ CRS to use for the returned GeoDataFrame. The value can be anything accepted\n+ by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n+ such as an authority string (eg \"EPSG:4326\") or a WKT string.\n+ If not set, tries to determine CRS from the SRID associated with the\n+ first geometry in the database, and assigns that to all geometries.\n \n See the documentation for pandas.read_sql for further explanation\n of the following parameters:\n", "issue": "DOC: update read_postgis docstring for CRS changes\nThe read_postgis docstring still mentions to use a dict:\r\n\r\nhttps://github.com/geopandas/geopandas/blob/078062d303e401aaa5e37e04c0e7c3ce188920fe/geopandas/io/sql.py#L33-L36\r\n\r\nwe should update this to follow how CRS is explained in other places now we use pyproj.\n", "before_files": [{"content": "import sys\n\nimport pandas as pd\n\nimport shapely.wkb\n\nfrom geopandas import GeoDataFrame\n\n\ndef read_postgis(\n sql,\n con,\n geom_col=\"geom\",\n crs=None,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n params=None,\n):\n \"\"\"\n Returns a GeoDataFrame corresponding to the result of the query\n string, which must contain a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n SQL query to execute in selecting entries from database, or name\n of the table to read from the database.\n con : DB connection object or SQLAlchemy engine\n Active connection to the database to query.\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : dict or str, optional\n CRS to use for the returned GeoDataFrame; if not set, tries to\n determine CRS from the SRID associated with the first geometry in\n the database, and assigns that to all geometries.\n\n See the documentation for pandas.read_sql for further explanation\n of the following parameters:\n index_col, coerce_float, parse_dates, params\n\n Returns\n -------\n GeoDataFrame\n\n Example\n -------\n PostGIS\n >>> sql = \"SELECT geom, kind FROM polygons\"\n SpatiaLite\n >>> sql = \"SELECT ST_AsBinary(geom) AS geom, kind FROM polygons\"\n >>> df = geopandas.read_postgis(sql, con)\n \"\"\"\n\n df = pd.read_sql(\n sql,\n con,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n params=params,\n )\n\n if geom_col not in df:\n raise ValueError(\"Query missing geometry column '{}'\".format(geom_col))\n\n geoms = df[geom_col].dropna()\n\n if not geoms.empty:\n load_geom_bytes = shapely.wkb.loads\n \"\"\"Load from Python 3 binary.\"\"\"\n\n def load_geom_buffer(x):\n \"\"\"Load from Python 2 binary.\"\"\"\n return shapely.wkb.loads(str(x))\n\n def load_geom_text(x):\n \"\"\"Load from binary encoded as text.\"\"\"\n return shapely.wkb.loads(str(x), hex=True)\n\n if sys.version_info.major < 3:\n if isinstance(geoms.iat[0], buffer):\n load_geom = load_geom_buffer\n else:\n load_geom = load_geom_text\n elif isinstance(geoms.iat[0], bytes):\n load_geom = load_geom_bytes\n else:\n load_geom = load_geom_text\n\n df[geom_col] = geoms = geoms.apply(load_geom)\n if crs is None:\n srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)\n # if no defined SRID in geodatabase, returns SRID of 0\n if srid != 0:\n crs = \"epsg:{}\".format(srid)\n\n return GeoDataFrame(df, crs=crs, geometry=geom_col)\n", "path": "geopandas/io/sql.py"}], "after_files": [{"content": "import sys\n\nimport pandas as pd\n\nimport shapely.wkb\n\nfrom geopandas import GeoDataFrame\n\n\ndef read_postgis(\n sql,\n con,\n geom_col=\"geom\",\n crs=None,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n params=None,\n):\n \"\"\"\n Returns a GeoDataFrame corresponding to the result of the query\n string, which must contain a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n SQL query to execute in selecting entries from database, or name\n of the table to read from the database.\n con : DB connection object or SQLAlchemy engine\n Active connection to the database to query.\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : pyproj.CRS, optional\n CRS to use for the returned GeoDataFrame. The value can be anything accepted\n by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n If not set, tries to determine CRS from the SRID associated with the\n first geometry in the database, and assigns that to all geometries.\n\n See the documentation for pandas.read_sql for further explanation\n of the following parameters:\n index_col, coerce_float, parse_dates, params\n\n Returns\n -------\n GeoDataFrame\n\n Example\n -------\n PostGIS\n >>> sql = \"SELECT geom, kind FROM polygons\"\n SpatiaLite\n >>> sql = \"SELECT ST_AsBinary(geom) AS geom, kind FROM polygons\"\n >>> df = geopandas.read_postgis(sql, con)\n \"\"\"\n\n df = pd.read_sql(\n sql,\n con,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n params=params,\n )\n\n if geom_col not in df:\n raise ValueError(\"Query missing geometry column '{}'\".format(geom_col))\n\n geoms = df[geom_col].dropna()\n\n if not geoms.empty:\n load_geom_bytes = shapely.wkb.loads\n \"\"\"Load from Python 3 binary.\"\"\"\n\n def load_geom_buffer(x):\n \"\"\"Load from Python 2 binary.\"\"\"\n return shapely.wkb.loads(str(x))\n\n def load_geom_text(x):\n \"\"\"Load from binary encoded as text.\"\"\"\n return shapely.wkb.loads(str(x), hex=True)\n\n if sys.version_info.major < 3:\n if isinstance(geoms.iat[0], buffer):\n load_geom = load_geom_buffer\n else:\n load_geom = load_geom_text\n elif isinstance(geoms.iat[0], bytes):\n load_geom = load_geom_bytes\n else:\n load_geom = load_geom_text\n\n df[geom_col] = geoms = geoms.apply(load_geom)\n if crs is None:\n srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)\n # if no defined SRID in geodatabase, returns SRID of 0\n if srid != 0:\n crs = \"epsg:{}\".format(srid)\n\n return GeoDataFrame(df, crs=crs, geometry=geom_col)\n", "path": "geopandas/io/sql.py"}]}
| 1,237 | 271 |
gh_patches_debug_4913
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-14664
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strange behavior in dictionaries if replacement contains space
<!-- Please read the text in this edit field before filling it in.
Please thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.
Issues may be closed if the required information is not present.
https://github.com/nvaccess/nvda/blob/master/devDocs/githubIssueTemplateExplanationAndExamples.md
Please also note that the NVDA project has a Citizen and Contributor Code of Conduct which can be found at https://github.com/nvaccess/nvda/blob/master/CODE_OF_CONDUCT.md. NV Access expects that all contributors and other community members read and abide by the rules set out in this document while participating or contributing to this project. This includes creating or commenting on issues and pull requests.
Each of the questions and sections below start with multiple hash symbols (#). Place your answers and information on the blank line below each question.
-->
### Steps to reproduce:
1. First, set symbol level of "\\" to have it reported (i.e., None);
2. open a dictionary and create something like "foo" replaced by "foo bar" (note the space in replacement);
3. read text: "test of foo".
### Actual behavior:
Voice says:
"test of foo\\bar"
### Expected behavior:
<!--
Use "Speak command keys" (NVDA+4) and speech viewer to copy and paste here.
Use braille viewer to copy and paste here.
You may additionally include an explanation.
-->
Voice should say:
"test of foo bar"
without any "\\".
### System configuration
#### NVDA installed/portable/running from source:
Installed and running from source.
#### NVDA version:
NVDA alpha-27727,86e93666 (installed).
#### Windows version:
Windows 10 22H2 Build 19045.2604 64-bit.
#### Other information about your system:
Tested with Eloquence and Microsoft OneCore (Elsa) synthesizers.
### Other questions
#### Does the issue still occur after restarting your computer?
Not tested.
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
2023.1beta1 seems to not have this issue.
#### If NVDA add-ons are disabled, is your problem still occurring?
Yes.
#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?
Not tested.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/speechDictHandler/__init__.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2023 NVDA Contributors <http://www.nvda-project.org/>
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 import re
7 import globalVars
8 from logHandler import log
9 import os
10 import codecs
11 import api
12 import config
13 from . import dictFormatUpgrade
14 from .speechDictVars import speechDictsPath
15
16 dictionaries = {}
17 dictTypes = ("temp", "voice", "default", "builtin") # ordered by their priority E.G. voice specific speech dictionary is processed before the default
18
19 # Types of speech dictionary entries:
20 ENTRY_TYPE_ANYWHERE = 0 # String can match anywhere
21 ENTRY_TYPE_WORD = 2 # String must have word boundaries on both sides to match
22 ENTRY_TYPE_REGEXP = 1 # Regular expression
23
24 class SpeechDictEntry:
25
26 def __init__(self, pattern, replacement,comment,caseSensitive=True,type=ENTRY_TYPE_ANYWHERE):
27 self.pattern = pattern
28 flags = re.U
29 if not caseSensitive: flags|=re.IGNORECASE
30 if type == ENTRY_TYPE_REGEXP:
31 tempPattern = pattern
32 elif type == ENTRY_TYPE_WORD:
33 tempPattern = r"\b" + re.escape(pattern) + r"\b"
34 else:
35 tempPattern= re.escape(pattern)
36 type = ENTRY_TYPE_ANYWHERE # Insure sane values.
37 self.compiled = re.compile(tempPattern,flags)
38 self.replacement = replacement
39 self.comment=comment
40 self.caseSensitive=caseSensitive
41 self.type=type
42
43 def sub(self, text: str) -> str:
44 if self.type == ENTRY_TYPE_REGEXP:
45 replacement = self.replacement
46 else:
47 replacement = re.escape(self.replacement)
48 return self.compiled.sub(replacement, text)
49
50 class SpeechDict(list):
51
52 fileName = None
53
54 def load(self, fileName):
55 self.fileName=fileName
56 comment=""
57 del self[:]
58 log.debug("Loading speech dictionary '%s'..." % fileName)
59 if not os.path.isfile(fileName):
60 log.debug("file '%s' not found." % fileName)
61 return
62 file = codecs.open(fileName,"r","utf_8_sig",errors="replace")
63 for line in file:
64 if line.isspace():
65 comment=""
66 continue
67 line=line.rstrip('\r\n')
68 if line.startswith('#'):
69 if comment:
70 comment+=" "
71 comment+=line[1:]
72 else:
73 temp=line.split("\t")
74 if len(temp) ==4:
75 pattern = temp[0].replace(r'\#','#')
76 replace = temp[1].replace(r'\#','#')
77 try:
78 dictionaryEntry=SpeechDictEntry(pattern, replace, comment, caseSensitive=bool(int(temp[2])), type=int(temp[3]))
79 self.append(dictionaryEntry)
80 except Exception as e:
81 log.exception("Dictionary (\"%s\") entry invalid for \"%s\" error raised: \"%s\"" % (fileName, line, e))
82 comment=""
83 else:
84 log.warning("can't parse line '%s'" % line)
85 log.debug("%d loaded records." % len(self))
86 file.close()
87 return
88
89 def save(self,fileName=None):
90 if not fileName:
91 fileName=getattr(self,'fileName',None)
92 if not fileName:
93 return
94 dirName=os.path.dirname(fileName)
95 if not os.path.isdir(dirName):
96 os.makedirs(dirName)
97 file = codecs.open(fileName,"w","utf_8_sig",errors="replace")
98 for entry in self:
99 if entry.comment:
100 file.write("#%s\r\n"%entry.comment)
101 file.write("%s\t%s\t%s\t%s\r\n"%(entry.pattern.replace('#',r'\#'),entry.replacement.replace('#',r'\#'),int(entry.caseSensitive),entry.type))
102 file.close()
103
104 def sub(self, text):
105 invalidEntries = []
106 for index, entry in enumerate(self):
107 try:
108 text = entry.sub(text)
109 except re.error as exc:
110 dictName = self.fileName or "temporary dictionary"
111 log.error(f"Invalid dictionary entry {index+1} in {dictName}: \"{entry.pattern}\", {exc}")
112 invalidEntries.append(index)
113 for index in reversed(invalidEntries):
114 del self[index]
115 return text
116
117 def processText(text):
118 if not globalVars.speechDictionaryProcessing:
119 return text
120 for type in dictTypes:
121 text=dictionaries[type].sub(text)
122 return text
123
124 def initialize():
125 for type in dictTypes:
126 dictionaries[type]=SpeechDict()
127 dictionaries["default"].load(os.path.join(speechDictsPath, "default.dic"))
128 dictionaries["builtin"].load(os.path.join(globalVars.appDir, "builtin.dic"))
129
130 def loadVoiceDict(synth):
131 """Loads appropriate dictionary for the given synthesizer.
132 It handles case when the synthesizer doesn't support voice setting.
133 """
134 try:
135 dictFormatUpgrade.doAnyUpgrades(synth)
136 except:
137 log.error("error trying to upgrade dictionaries", exc_info=True)
138 pass
139 if synth.isSupported("voice"):
140 voice = synth.availableVoices[synth.voice].displayName
141 baseName = dictFormatUpgrade.createVoiceDictFileName(synth.name, voice)
142 else:
143 baseName=r"{synth}.dic".format(synth=synth.name)
144 voiceDictsPath = dictFormatUpgrade.voiceDictsPath
145 fileName= os.path.join(voiceDictsPath, synth.name, baseName)
146 dictionaries["voice"].load(fileName)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/speechDictHandler/__init__.py b/source/speechDictHandler/__init__.py
--- a/source/speechDictHandler/__init__.py
+++ b/source/speechDictHandler/__init__.py
@@ -44,7 +44,8 @@
if self.type == ENTRY_TYPE_REGEXP:
replacement = self.replacement
else:
- replacement = re.escape(self.replacement)
+ # Escape the backslashes for non-regexp replacements
+ replacement = self.replacement.replace('\\', '\\\\')
return self.compiled.sub(replacement, text)
class SpeechDict(list):
|
{"golden_diff": "diff --git a/source/speechDictHandler/__init__.py b/source/speechDictHandler/__init__.py\n--- a/source/speechDictHandler/__init__.py\n+++ b/source/speechDictHandler/__init__.py\n@@ -44,7 +44,8 @@\n \t\tif self.type == ENTRY_TYPE_REGEXP:\r\n \t\t\treplacement = self.replacement\r\n \t\telse:\r\n-\t\t\treplacement = re.escape(self.replacement)\r\n+\t\t\t# Escape the backslashes for non-regexp replacements\r\n+\t\t\treplacement = self.replacement.replace('\\\\', '\\\\\\\\')\r\n \t\treturn self.compiled.sub(replacement, text)\r\n \r\n class SpeechDict(list):\n", "issue": "Strange behavior in dictionaries if replacement contains space\n<!-- Please read the text in this edit field before filling it in.\r\nPlease thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.\r\nIssues may be closed if the required information is not present.\r\nhttps://github.com/nvaccess/nvda/blob/master/devDocs/githubIssueTemplateExplanationAndExamples.md\r\nPlease also note that the NVDA project has a Citizen and Contributor Code of Conduct which can be found at https://github.com/nvaccess/nvda/blob/master/CODE_OF_CONDUCT.md. NV Access expects that all contributors and other community members read and abide by the rules set out in this document while participating or contributing to this project. This includes creating or commenting on issues and pull requests. \r\n\r\nEach of the questions and sections below start with multiple hash symbols (#). Place your answers and information on the blank line below each question.\r\n-->\r\n\r\n### Steps to reproduce:\r\n\r\n1. First, set symbol level of \"\\\\\" to have it reported (i.e., None);\r\n2. open a dictionary and create something like \"foo\" replaced by \"foo bar\" (note the space in replacement);\r\n3. read text: \"test of foo\".\r\n\r\n### Actual behavior:\r\n\r\nVoice says:\r\n\"test of foo\\\\bar\"\r\n\r\n### Expected behavior:\r\n<!--\r\nUse \"Speak command keys\" (NVDA+4) and speech viewer to copy and paste here. \r\nUse braille viewer to copy and paste here.\r\nYou may additionally include an explanation.\r\n-->\r\n\r\nVoice should say:\r\n\"test of foo bar\"\r\nwithout any \"\\\\\".\r\n\r\n### System configuration\r\n\r\n#### NVDA installed/portable/running from source:\r\n\r\nInstalled and running from source.\r\n\r\n#### NVDA version:\r\n\r\nNVDA alpha-27727,86e93666 (installed).\r\n\r\n#### Windows version:\r\n\r\nWindows 10 22H2 Build 19045.2604 64-bit.\r\n\r\n#### Other information about your system:\r\n\r\nTested with Eloquence and Microsoft OneCore (Elsa) synthesizers.\r\n\r\n### Other questions\r\n\r\n#### Does the issue still occur after restarting your computer?\r\n\r\nNot tested.\r\n\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\n\r\n2023.1beta1 seems to not have this issue.\r\n\r\n#### If NVDA add-ons are disabled, is your problem still occurring?\r\n\r\nYes.\r\n\r\n#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?\r\n\r\nNot tested.\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2023 NVDA Contributors <http://www.nvda-project.org/>\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport re\r\nimport globalVars\r\nfrom logHandler import log\r\nimport os\r\nimport codecs\r\nimport api\r\nimport config\r\nfrom . import dictFormatUpgrade\r\nfrom .speechDictVars import speechDictsPath\r\n\r\ndictionaries = {}\r\ndictTypes = (\"temp\", \"voice\", \"default\", \"builtin\") # ordered by their priority E.G. voice specific speech dictionary is processed before the default\r\n\r\n# Types of speech dictionary entries:\r\nENTRY_TYPE_ANYWHERE = 0 # String can match anywhere\r\nENTRY_TYPE_WORD = 2 # String must have word boundaries on both sides to match\r\nENTRY_TYPE_REGEXP = 1 # Regular expression\r\n\r\nclass SpeechDictEntry:\r\n\r\n\tdef __init__(self, pattern, replacement,comment,caseSensitive=True,type=ENTRY_TYPE_ANYWHERE):\r\n\t\tself.pattern = pattern\r\n\t\tflags = re.U\r\n\t\tif not caseSensitive: flags|=re.IGNORECASE\r\n\t\tif type == ENTRY_TYPE_REGEXP:\r\n\t\t\ttempPattern = pattern\r\n\t\telif type == ENTRY_TYPE_WORD:\r\n\t\t\ttempPattern = r\"\\b\" + re.escape(pattern) + r\"\\b\"\r\n\t\telse:\r\n\t\t\ttempPattern= re.escape(pattern)\r\n\t\t\ttype = ENTRY_TYPE_ANYWHERE # Insure sane values.\r\n\t\tself.compiled = re.compile(tempPattern,flags)\r\n\t\tself.replacement = replacement\r\n\t\tself.comment=comment\r\n\t\tself.caseSensitive=caseSensitive\r\n\t\tself.type=type\r\n\r\n\tdef sub(self, text: str) -> str:\r\n\t\tif self.type == ENTRY_TYPE_REGEXP:\r\n\t\t\treplacement = self.replacement\r\n\t\telse:\r\n\t\t\treplacement = re.escape(self.replacement)\r\n\t\treturn self.compiled.sub(replacement, text)\r\n\r\nclass SpeechDict(list):\r\n\r\n\tfileName = None\r\n\r\n\tdef load(self, fileName):\r\n\t\tself.fileName=fileName\r\n\t\tcomment=\"\"\r\n\t\tdel self[:]\r\n\t\tlog.debug(\"Loading speech dictionary '%s'...\" % fileName)\r\n\t\tif not os.path.isfile(fileName): \r\n\t\t\tlog.debug(\"file '%s' not found.\" % fileName)\r\n\t\t\treturn\r\n\t\tfile = codecs.open(fileName,\"r\",\"utf_8_sig\",errors=\"replace\")\r\n\t\tfor line in file:\r\n\t\t\tif line.isspace():\r\n\t\t\t\tcomment=\"\"\r\n\t\t\t\tcontinue\r\n\t\t\tline=line.rstrip('\\r\\n')\r\n\t\t\tif line.startswith('#'):\r\n\t\t\t\tif comment:\r\n\t\t\t\t\tcomment+=\" \"\r\n\t\t\t\tcomment+=line[1:]\r\n\t\t\telse:\r\n\t\t\t\ttemp=line.split(\"\\t\")\r\n\t\t\t\tif len(temp) ==4:\r\n\t\t\t\t\tpattern = temp[0].replace(r'\\#','#')\r\n\t\t\t\t\treplace = temp[1].replace(r'\\#','#')\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tdictionaryEntry=SpeechDictEntry(pattern, replace, comment, caseSensitive=bool(int(temp[2])), type=int(temp[3]))\r\n\t\t\t\t\t\tself.append(dictionaryEntry)\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tlog.exception(\"Dictionary (\\\"%s\\\") entry invalid for \\\"%s\\\" error raised: \\\"%s\\\"\" % (fileName, line, e))\r\n\t\t\t\t\tcomment=\"\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tlog.warning(\"can't parse line '%s'\" % line)\r\n\t\tlog.debug(\"%d loaded records.\" % len(self))\r\n\t\tfile.close()\r\n\t\treturn\r\n\r\n\tdef save(self,fileName=None):\r\n\t\tif not fileName:\r\n\t\t\tfileName=getattr(self,'fileName',None)\r\n\t\tif not fileName:\r\n\t\t\treturn\r\n\t\tdirName=os.path.dirname(fileName)\r\n\t\tif not os.path.isdir(dirName):\r\n\t\t\tos.makedirs(dirName)\r\n\t\tfile = codecs.open(fileName,\"w\",\"utf_8_sig\",errors=\"replace\")\r\n\t\tfor entry in self:\r\n\t\t\tif entry.comment:\r\n\t\t\t\tfile.write(\"#%s\\r\\n\"%entry.comment)\r\n\t\t\tfile.write(\"%s\\t%s\\t%s\\t%s\\r\\n\"%(entry.pattern.replace('#',r'\\#'),entry.replacement.replace('#',r'\\#'),int(entry.caseSensitive),entry.type))\r\n\t\tfile.close()\r\n\r\n\tdef sub(self, text):\r\n\t\tinvalidEntries = []\r\n\t\tfor index, entry in enumerate(self):\r\n\t\t\ttry:\r\n\t\t\t\ttext = entry.sub(text)\r\n\t\t\texcept re.error as exc:\r\n\t\t\t\tdictName = self.fileName or \"temporary dictionary\"\r\n\t\t\t\tlog.error(f\"Invalid dictionary entry {index+1} in {dictName}: \\\"{entry.pattern}\\\", {exc}\")\r\n\t\t\t\tinvalidEntries.append(index)\r\n\t\t\tfor index in reversed(invalidEntries):\r\n\t\t\t\tdel self[index]\r\n\t\treturn text\r\n\r\ndef processText(text):\r\n\tif not globalVars.speechDictionaryProcessing:\r\n\t\treturn text\r\n\tfor type in dictTypes:\r\n\t\ttext=dictionaries[type].sub(text)\r\n\treturn text\r\n\r\ndef initialize():\r\n\tfor type in dictTypes:\r\n\t\tdictionaries[type]=SpeechDict()\r\n\tdictionaries[\"default\"].load(os.path.join(speechDictsPath, \"default.dic\"))\r\n\tdictionaries[\"builtin\"].load(os.path.join(globalVars.appDir, \"builtin.dic\"))\r\n\r\ndef loadVoiceDict(synth):\r\n\t\"\"\"Loads appropriate dictionary for the given synthesizer.\r\nIt handles case when the synthesizer doesn't support voice setting.\r\n\"\"\"\r\n\ttry:\r\n\t\tdictFormatUpgrade.doAnyUpgrades(synth)\r\n\texcept:\r\n\t\tlog.error(\"error trying to upgrade dictionaries\", exc_info=True)\r\n\t\tpass\r\n\tif synth.isSupported(\"voice\"):\r\n\t\tvoice = synth.availableVoices[synth.voice].displayName\r\n\t\tbaseName = dictFormatUpgrade.createVoiceDictFileName(synth.name, voice)\r\n\telse:\r\n\t\tbaseName=r\"{synth}.dic\".format(synth=synth.name)\r\n\tvoiceDictsPath = dictFormatUpgrade.voiceDictsPath\r\n\tfileName= os.path.join(voiceDictsPath, synth.name, baseName)\r\n\tdictionaries[\"voice\"].load(fileName)\r\n", "path": "source/speechDictHandler/__init__.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2023 NVDA Contributors <http://www.nvda-project.org/>\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport re\r\nimport globalVars\r\nfrom logHandler import log\r\nimport os\r\nimport codecs\r\nimport api\r\nimport config\r\nfrom . import dictFormatUpgrade\r\nfrom .speechDictVars import speechDictsPath\r\n\r\ndictionaries = {}\r\ndictTypes = (\"temp\", \"voice\", \"default\", \"builtin\") # ordered by their priority E.G. voice specific speech dictionary is processed before the default\r\n\r\n# Types of speech dictionary entries:\r\nENTRY_TYPE_ANYWHERE = 0 # String can match anywhere\r\nENTRY_TYPE_WORD = 2 # String must have word boundaries on both sides to match\r\nENTRY_TYPE_REGEXP = 1 # Regular expression\r\n\r\nclass SpeechDictEntry:\r\n\r\n\tdef __init__(self, pattern, replacement,comment,caseSensitive=True,type=ENTRY_TYPE_ANYWHERE):\r\n\t\tself.pattern = pattern\r\n\t\tflags = re.U\r\n\t\tif not caseSensitive: flags|=re.IGNORECASE\r\n\t\tif type == ENTRY_TYPE_REGEXP:\r\n\t\t\ttempPattern = pattern\r\n\t\telif type == ENTRY_TYPE_WORD:\r\n\t\t\ttempPattern = r\"\\b\" + re.escape(pattern) + r\"\\b\"\r\n\t\telse:\r\n\t\t\ttempPattern= re.escape(pattern)\r\n\t\t\ttype = ENTRY_TYPE_ANYWHERE # Insure sane values.\r\n\t\tself.compiled = re.compile(tempPattern,flags)\r\n\t\tself.replacement = replacement\r\n\t\tself.comment=comment\r\n\t\tself.caseSensitive=caseSensitive\r\n\t\tself.type=type\r\n\r\n\tdef sub(self, text: str) -> str:\r\n\t\tif self.type == ENTRY_TYPE_REGEXP:\r\n\t\t\treplacement = self.replacement\r\n\t\telse:\r\n\t\t\t# Escape the backslashes for non-regexp replacements\r\n\t\t\treplacement = self.replacement.replace('\\\\', '\\\\\\\\')\r\n\t\treturn self.compiled.sub(replacement, text)\r\n\r\nclass SpeechDict(list):\r\n\r\n\tfileName = None\r\n\r\n\tdef load(self, fileName):\r\n\t\tself.fileName=fileName\r\n\t\tcomment=\"\"\r\n\t\tdel self[:]\r\n\t\tlog.debug(\"Loading speech dictionary '%s'...\" % fileName)\r\n\t\tif not os.path.isfile(fileName): \r\n\t\t\tlog.debug(\"file '%s' not found.\" % fileName)\r\n\t\t\treturn\r\n\t\tfile = codecs.open(fileName,\"r\",\"utf_8_sig\",errors=\"replace\")\r\n\t\tfor line in file:\r\n\t\t\tif line.isspace():\r\n\t\t\t\tcomment=\"\"\r\n\t\t\t\tcontinue\r\n\t\t\tline=line.rstrip('\\r\\n')\r\n\t\t\tif line.startswith('#'):\r\n\t\t\t\tif comment:\r\n\t\t\t\t\tcomment+=\" \"\r\n\t\t\t\tcomment+=line[1:]\r\n\t\t\telse:\r\n\t\t\t\ttemp=line.split(\"\\t\")\r\n\t\t\t\tif len(temp) ==4:\r\n\t\t\t\t\tpattern = temp[0].replace(r'\\#','#')\r\n\t\t\t\t\treplace = temp[1].replace(r'\\#','#')\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tdictionaryEntry=SpeechDictEntry(pattern, replace, comment, caseSensitive=bool(int(temp[2])), type=int(temp[3]))\r\n\t\t\t\t\t\tself.append(dictionaryEntry)\r\n\t\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tlog.exception(\"Dictionary (\\\"%s\\\") entry invalid for \\\"%s\\\" error raised: \\\"%s\\\"\" % (fileName, line, e))\r\n\t\t\t\t\tcomment=\"\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tlog.warning(\"can't parse line '%s'\" % line)\r\n\t\tlog.debug(\"%d loaded records.\" % len(self))\r\n\t\tfile.close()\r\n\t\treturn\r\n\r\n\tdef save(self,fileName=None):\r\n\t\tif not fileName:\r\n\t\t\tfileName=getattr(self,'fileName',None)\r\n\t\tif not fileName:\r\n\t\t\treturn\r\n\t\tdirName=os.path.dirname(fileName)\r\n\t\tif not os.path.isdir(dirName):\r\n\t\t\tos.makedirs(dirName)\r\n\t\tfile = codecs.open(fileName,\"w\",\"utf_8_sig\",errors=\"replace\")\r\n\t\tfor entry in self:\r\n\t\t\tif entry.comment:\r\n\t\t\t\tfile.write(\"#%s\\r\\n\"%entry.comment)\r\n\t\t\tfile.write(\"%s\\t%s\\t%s\\t%s\\r\\n\"%(entry.pattern.replace('#',r'\\#'),entry.replacement.replace('#',r'\\#'),int(entry.caseSensitive),entry.type))\r\n\t\tfile.close()\r\n\r\n\tdef sub(self, text):\r\n\t\tinvalidEntries = []\r\n\t\tfor index, entry in enumerate(self):\r\n\t\t\ttry:\r\n\t\t\t\ttext = entry.sub(text)\r\n\t\t\texcept re.error as exc:\r\n\t\t\t\tdictName = self.fileName or \"temporary dictionary\"\r\n\t\t\t\tlog.error(f\"Invalid dictionary entry {index+1} in {dictName}: \\\"{entry.pattern}\\\", {exc}\")\r\n\t\t\t\tinvalidEntries.append(index)\r\n\t\t\tfor index in reversed(invalidEntries):\r\n\t\t\t\tdel self[index]\r\n\t\treturn text\r\n\r\ndef processText(text):\r\n\tif not globalVars.speechDictionaryProcessing:\r\n\t\treturn text\r\n\tfor type in dictTypes:\r\n\t\ttext=dictionaries[type].sub(text)\r\n\treturn text\r\n\r\ndef initialize():\r\n\tfor type in dictTypes:\r\n\t\tdictionaries[type]=SpeechDict()\r\n\tdictionaries[\"default\"].load(os.path.join(speechDictsPath, \"default.dic\"))\r\n\tdictionaries[\"builtin\"].load(os.path.join(globalVars.appDir, \"builtin.dic\"))\r\n\r\ndef loadVoiceDict(synth):\r\n\t\"\"\"Loads appropriate dictionary for the given synthesizer.\r\nIt handles case when the synthesizer doesn't support voice setting.\r\n\"\"\"\r\n\ttry:\r\n\t\tdictFormatUpgrade.doAnyUpgrades(synth)\r\n\texcept:\r\n\t\tlog.error(\"error trying to upgrade dictionaries\", exc_info=True)\r\n\t\tpass\r\n\tif synth.isSupported(\"voice\"):\r\n\t\tvoice = synth.availableVoices[synth.voice].displayName\r\n\t\tbaseName = dictFormatUpgrade.createVoiceDictFileName(synth.name, voice)\r\n\telse:\r\n\t\tbaseName=r\"{synth}.dic\".format(synth=synth.name)\r\n\tvoiceDictsPath = dictFormatUpgrade.voiceDictsPath\r\n\tfileName= os.path.join(voiceDictsPath, synth.name, baseName)\r\n\tdictionaries[\"voice\"].load(fileName)\r\n", "path": "source/speechDictHandler/__init__.py"}]}
| 2,345 | 136 |
gh_patches_debug_787
|
rasdani/github-patches
|
git_diff
|
translate__pootle-5820
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding a new languages forces you to specify special characters
If you add a new language you are blocked with the UI insisting that you add special characters.

The part in red is Afrikaans and say "This field is required"
Not all languages require special characters.
Also the model allows `blank=True`
So somewhere we're blocking.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_app/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import re
10 import urlparse
11 from collections import OrderedDict
12
13 from django import forms
14 from django.contrib.auth import get_user_model
15
16 from pootle.i18n.gettext import ugettext_lazy as _
17 from pootle_language.models import Language
18 from pootle_project.models import Project
19 from pootle_store.models import Store
20
21
22 LANGCODE_RE = re.compile("^[a-z]{2,}([_-]([a-z]{2,}|[0-9]{3}))*(@[a-z0-9]+)?$",
23 re.IGNORECASE)
24
25
26 class LanguageForm(forms.ModelForm):
27
28 specialchars = forms.CharField(strip=False)
29
30 class Meta(object):
31 model = Language
32 fields = ('id', 'code', 'fullname', 'specialchars', 'nplurals',
33 'pluralequation',)
34
35 def clean_code(self):
36 if (not self.cleaned_data['code'] == 'templates' and
37 not LANGCODE_RE.match(self.cleaned_data['code'])):
38 raise forms.ValidationError(
39 _('Language code does not follow the ISO convention')
40 )
41
42 return self.cleaned_data["code"]
43
44 def clean_specialchars(self):
45 """Ensures inputted characters are unique."""
46 chars = self.cleaned_data['specialchars']
47 return u''.join(
48 OrderedDict((char, None) for char in list(chars)).keys()
49 )
50
51
52 class ProjectForm(forms.ModelForm):
53
54 source_language = forms.ModelChoiceField(label=_('Source Language'),
55 queryset=Language.objects.none())
56
57 class Meta(object):
58 model = Project
59 fields = ('id', 'code', 'fullname', 'checkstyle',
60 'filetypes', 'treestyle', 'source_language', 'ignoredfiles',
61 'report_email', 'screenshot_search_prefix', 'disabled',)
62
63 def __init__(self, *args, **kwargs):
64 super(ProjectForm, self).__init__(*args, **kwargs)
65
66 queryset = Language.objects.exclude(code='templates')
67 self.fields['source_language'].queryset = queryset
68
69 self.fields["filetypes"].initial = [
70 self.fields["filetypes"].queryset.get(name="po")]
71
72 if self.instance.id:
73 if (self.instance.treestyle != 'auto' and
74 self.instance.translationproject_set.count() and
75 self.instance.treestyle == self.instance._detect_treestyle()):
76 self.fields['treestyle'].required = False
77
78 def clean_filetypes(self):
79 value = self.cleaned_data.get('filetypes', [])
80 if not self.instance.pk:
81 return value
82 for filetype in self.instance.filetypes.all():
83 if filetype not in value:
84 has_stores = Store.objects.filter(
85 translation_project__project=self.instance, filetype=filetype)
86 if has_stores.exists():
87 raise forms.ValidationError(
88 _("You cannot remove a file type from a Project, "
89 "if there are Stores of that file type ('%s')"
90 % filetype))
91 return value
92
93 def clean_fullname(self):
94 return self.cleaned_data['fullname'].strip()
95
96 def clean_treestyle(self):
97 value = self.cleaned_data.get('treestyle', None)
98 if not value:
99 value = self.instance.treestyle
100 return value
101
102 def clean_code(self):
103 return self.cleaned_data['code'].strip()
104
105
106 class UserForm(forms.ModelForm):
107
108 password = forms.CharField(label=_('Password'), required=False,
109 widget=forms.PasswordInput)
110
111 class Meta(object):
112 model = get_user_model()
113 fields = ('id', 'username', 'is_active', 'full_name', 'email',
114 'is_superuser', 'twitter', 'linkedin', 'website', 'bio')
115
116 def __init__(self, *args, **kwargs):
117 super(UserForm, self).__init__(*args, **kwargs)
118
119 # Require setting the password for new users
120 if self.instance.pk is None:
121 self.fields['password'].required = True
122
123 def save(self, commit=True):
124 password = self.cleaned_data['password']
125
126 if password != '':
127 user = super(UserForm, self).save(commit=False)
128 user.set_password(password)
129
130 if commit:
131 user.save()
132 else:
133 user = super(UserForm, self).save(commit=commit)
134
135 return user
136
137 def clean_linkedin(self):
138 url = self.cleaned_data['linkedin']
139 if url != '':
140 parsed = urlparse.urlparse(url)
141 if 'linkedin.com' not in parsed.netloc or parsed.path == '/':
142 raise forms.ValidationError(
143 _('Please enter a valid LinkedIn user profile URL.')
144 )
145
146 return url
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/pootle_app/forms.py b/pootle/apps/pootle_app/forms.py
--- a/pootle/apps/pootle_app/forms.py
+++ b/pootle/apps/pootle_app/forms.py
@@ -25,7 +25,7 @@
class LanguageForm(forms.ModelForm):
- specialchars = forms.CharField(strip=False)
+ specialchars = forms.CharField(strip=False, required=False)
class Meta(object):
model = Language
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/forms.py b/pootle/apps/pootle_app/forms.py\n--- a/pootle/apps/pootle_app/forms.py\n+++ b/pootle/apps/pootle_app/forms.py\n@@ -25,7 +25,7 @@\n \n class LanguageForm(forms.ModelForm):\n \n- specialchars = forms.CharField(strip=False)\n+ specialchars = forms.CharField(strip=False, required=False)\n \n class Meta(object):\n model = Language\n", "issue": "Adding a new languages forces you to specify special characters\nIf you add a new language you are blocked with the UI insisting that you add special characters.\r\n\r\n\r\n\r\nThe part in red is Afrikaans and say \"This field is required\"\r\n\r\nNot all languages require special characters.\r\n\r\nAlso the model allows `blank=True`\r\n\r\nSo somewhere we're blocking.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimport urlparse\nfrom collections import OrderedDict\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nfrom pootle.i18n.gettext import ugettext_lazy as _\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_store.models import Store\n\n\nLANGCODE_RE = re.compile(\"^[a-z]{2,}([_-]([a-z]{2,}|[0-9]{3}))*(@[a-z0-9]+)?$\",\n re.IGNORECASE)\n\n\nclass LanguageForm(forms.ModelForm):\n\n specialchars = forms.CharField(strip=False)\n\n class Meta(object):\n model = Language\n fields = ('id', 'code', 'fullname', 'specialchars', 'nplurals',\n 'pluralequation',)\n\n def clean_code(self):\n if (not self.cleaned_data['code'] == 'templates' and\n not LANGCODE_RE.match(self.cleaned_data['code'])):\n raise forms.ValidationError(\n _('Language code does not follow the ISO convention')\n )\n\n return self.cleaned_data[\"code\"]\n\n def clean_specialchars(self):\n \"\"\"Ensures inputted characters are unique.\"\"\"\n chars = self.cleaned_data['specialchars']\n return u''.join(\n OrderedDict((char, None) for char in list(chars)).keys()\n )\n\n\nclass ProjectForm(forms.ModelForm):\n\n source_language = forms.ModelChoiceField(label=_('Source Language'),\n queryset=Language.objects.none())\n\n class Meta(object):\n model = Project\n fields = ('id', 'code', 'fullname', 'checkstyle',\n 'filetypes', 'treestyle', 'source_language', 'ignoredfiles',\n 'report_email', 'screenshot_search_prefix', 'disabled',)\n\n def __init__(self, *args, **kwargs):\n super(ProjectForm, self).__init__(*args, **kwargs)\n\n queryset = Language.objects.exclude(code='templates')\n self.fields['source_language'].queryset = queryset\n\n self.fields[\"filetypes\"].initial = [\n self.fields[\"filetypes\"].queryset.get(name=\"po\")]\n\n if self.instance.id:\n if (self.instance.treestyle != 'auto' and\n self.instance.translationproject_set.count() and\n self.instance.treestyle == self.instance._detect_treestyle()):\n self.fields['treestyle'].required = False\n\n def clean_filetypes(self):\n value = self.cleaned_data.get('filetypes', [])\n if not self.instance.pk:\n return value\n for filetype in self.instance.filetypes.all():\n if filetype not in value:\n has_stores = Store.objects.filter(\n translation_project__project=self.instance, filetype=filetype)\n if has_stores.exists():\n raise forms.ValidationError(\n _(\"You cannot remove a file type from a Project, \"\n \"if there are Stores of that file type ('%s')\"\n % filetype))\n return value\n\n def clean_fullname(self):\n return self.cleaned_data['fullname'].strip()\n\n def clean_treestyle(self):\n value = self.cleaned_data.get('treestyle', None)\n if not value:\n value = self.instance.treestyle\n return value\n\n def clean_code(self):\n return self.cleaned_data['code'].strip()\n\n\nclass UserForm(forms.ModelForm):\n\n password = forms.CharField(label=_('Password'), required=False,\n widget=forms.PasswordInput)\n\n class Meta(object):\n model = get_user_model()\n fields = ('id', 'username', 'is_active', 'full_name', 'email',\n 'is_superuser', 'twitter', 'linkedin', 'website', 'bio')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n # Require setting the password for new users\n if self.instance.pk is None:\n self.fields['password'].required = True\n\n def save(self, commit=True):\n password = self.cleaned_data['password']\n\n if password != '':\n user = super(UserForm, self).save(commit=False)\n user.set_password(password)\n\n if commit:\n user.save()\n else:\n user = super(UserForm, self).save(commit=commit)\n\n return user\n\n def clean_linkedin(self):\n url = self.cleaned_data['linkedin']\n if url != '':\n parsed = urlparse.urlparse(url)\n if 'linkedin.com' not in parsed.netloc or parsed.path == '/':\n raise forms.ValidationError(\n _('Please enter a valid LinkedIn user profile URL.')\n )\n\n return url\n", "path": "pootle/apps/pootle_app/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\nimport urlparse\nfrom collections import OrderedDict\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\n\nfrom pootle.i18n.gettext import ugettext_lazy as _\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_store.models import Store\n\n\nLANGCODE_RE = re.compile(\"^[a-z]{2,}([_-]([a-z]{2,}|[0-9]{3}))*(@[a-z0-9]+)?$\",\n re.IGNORECASE)\n\n\nclass LanguageForm(forms.ModelForm):\n\n specialchars = forms.CharField(strip=False, required=False)\n\n class Meta(object):\n model = Language\n fields = ('id', 'code', 'fullname', 'specialchars', 'nplurals',\n 'pluralequation',)\n\n def clean_code(self):\n if (not self.cleaned_data['code'] == 'templates' and\n not LANGCODE_RE.match(self.cleaned_data['code'])):\n raise forms.ValidationError(\n _('Language code does not follow the ISO convention')\n )\n\n return self.cleaned_data[\"code\"]\n\n def clean_specialchars(self):\n \"\"\"Ensures inputted characters are unique.\"\"\"\n chars = self.cleaned_data['specialchars']\n return u''.join(\n OrderedDict((char, None) for char in list(chars)).keys()\n )\n\n\nclass ProjectForm(forms.ModelForm):\n\n source_language = forms.ModelChoiceField(label=_('Source Language'),\n queryset=Language.objects.none())\n\n class Meta(object):\n model = Project\n fields = ('id', 'code', 'fullname', 'checkstyle',\n 'filetypes', 'treestyle', 'source_language', 'ignoredfiles',\n 'report_email', 'screenshot_search_prefix', 'disabled',)\n\n def __init__(self, *args, **kwargs):\n super(ProjectForm, self).__init__(*args, **kwargs)\n\n queryset = Language.objects.exclude(code='templates')\n self.fields['source_language'].queryset = queryset\n\n self.fields[\"filetypes\"].initial = [\n self.fields[\"filetypes\"].queryset.get(name=\"po\")]\n\n if self.instance.id:\n if (self.instance.treestyle != 'auto' and\n self.instance.translationproject_set.count() and\n self.instance.treestyle == self.instance._detect_treestyle()):\n self.fields['treestyle'].required = False\n\n def clean_filetypes(self):\n value = self.cleaned_data.get('filetypes', [])\n if not self.instance.pk:\n return value\n for filetype in self.instance.filetypes.all():\n if filetype not in value:\n has_stores = Store.objects.filter(\n translation_project__project=self.instance, filetype=filetype)\n if has_stores.exists():\n raise forms.ValidationError(\n _(\"You cannot remove a file type from a Project, \"\n \"if there are Stores of that file type ('%s')\"\n % filetype))\n return value\n\n def clean_fullname(self):\n return self.cleaned_data['fullname'].strip()\n\n def clean_treestyle(self):\n value = self.cleaned_data.get('treestyle', None)\n if not value:\n value = self.instance.treestyle\n return value\n\n def clean_code(self):\n return self.cleaned_data['code'].strip()\n\n\nclass UserForm(forms.ModelForm):\n\n password = forms.CharField(label=_('Password'), required=False,\n widget=forms.PasswordInput)\n\n class Meta(object):\n model = get_user_model()\n fields = ('id', 'username', 'is_active', 'full_name', 'email',\n 'is_superuser', 'twitter', 'linkedin', 'website', 'bio')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n\n # Require setting the password for new users\n if self.instance.pk is None:\n self.fields['password'].required = True\n\n def save(self, commit=True):\n password = self.cleaned_data['password']\n\n if password != '':\n user = super(UserForm, self).save(commit=False)\n user.set_password(password)\n\n if commit:\n user.save()\n else:\n user = super(UserForm, self).save(commit=commit)\n\n return user\n\n def clean_linkedin(self):\n url = self.cleaned_data['linkedin']\n if url != '':\n parsed = urlparse.urlparse(url)\n if 'linkedin.com' not in parsed.netloc or parsed.path == '/':\n raise forms.ValidationError(\n _('Please enter a valid LinkedIn user profile URL.')\n )\n\n return url\n", "path": "pootle/apps/pootle_app/forms.py"}]}
| 1,797 | 106 |
gh_patches_debug_62906
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-21581
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hardcoded MAX_RETRIES = 1
https://github.com/getsentry/sentry/blob/master/src/sentry/tasks/deletion.py#L18
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/tasks/deletion.py`
Content:
```
1 from __future__ import absolute_import
2
3 from uuid import uuid4
4
5 from django.apps import apps
6 from django.conf import settings
7 from django.db import transaction
8 from django.utils import timezone
9
10 from sentry.constants import ObjectStatus
11 from sentry.exceptions import DeleteAborted
12 from sentry.signals import pending_delete
13 from sentry.tasks.base import instrumented_task, retry, track_group_async_operation
14
15 # in prod we run with infinite retries to recover from errors
16 # in debug/development, we assume these tasks generally shouldn't fail
17 MAX_RETRIES = 1 if settings.DEBUG else None
18 MAX_RETRIES = 1
19
20
21 @instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
22 def run_scheduled_deletions():
23 from sentry.models import ScheduledDeletion
24
25 queryset = ScheduledDeletion.objects.filter(
26 in_progress=False, aborted=False, date_scheduled__lte=timezone.now()
27 )
28 for item in queryset:
29 with transaction.atomic():
30 affected = ScheduledDeletion.objects.filter(
31 id=item.id, in_progress=False, aborted=False
32 ).update(in_progress=True)
33 if not affected:
34 continue
35
36 run_deletion.delay(deletion_id=item.id)
37
38
39 @instrumented_task(
40 name="sentry.tasks.deletion.run_deletion",
41 queue="cleanup",
42 default_retry_delay=60 * 5,
43 max_retries=MAX_RETRIES,
44 )
45 @retry(exclude=(DeleteAborted,))
46 def run_deletion(deletion_id):
47 from sentry import deletions
48 from sentry.models import ScheduledDeletion
49
50 try:
51 deletion = ScheduledDeletion.objects.get(id=deletion_id)
52 except ScheduledDeletion.DoesNotExist:
53 return
54
55 if deletion.aborted:
56 raise DeleteAborted
57
58 if not deletion.in_progress:
59 actor = deletion.get_actor()
60 instance = deletion.get_instance()
61 with transaction.atomic():
62 deletion.update(in_progress=True)
63 pending_delete.send(sender=type(instance), instance=instance, actor=actor)
64
65 task = deletions.get(
66 model=deletion.get_model(),
67 query={"id": deletion.object_id},
68 transaction_id=deletion.guid,
69 actor_id=deletion.actor_id,
70 )
71 has_more = task.chunk()
72 if has_more:
73 run_deletion.apply_async(kwargs={"deletion_id": deletion_id}, countdown=15)
74 deletion.delete()
75
76
77 @instrumented_task(
78 name="sentry.tasks.deletion.revoke_api_tokens",
79 queue="cleanup",
80 default_retry_delay=60 * 5,
81 max_retries=MAX_RETRIES,
82 )
83 @retry(exclude=(DeleteAborted,))
84 def revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):
85 from sentry.models import ApiToken
86
87 queryset = ApiToken.objects.filter(application=object_id)
88 if timestamp:
89 queryset = queryset.filter(date_added__lte=timestamp)
90
91 # we're using a slow deletion strategy to avoid a lot of custom code for
92 # postgres
93 has_more = False
94 for obj in queryset[:1000]:
95 obj.delete()
96 has_more = True
97
98 if has_more:
99 revoke_api_tokens.apply_async(
100 kwargs={
101 "object_id": object_id,
102 "transaction_id": transaction_id,
103 "timestamp": timestamp,
104 },
105 countdown=15,
106 )
107 return has_more
108
109
110 @instrumented_task(
111 name="sentry.tasks.deletion.delete_organization",
112 queue="cleanup",
113 default_retry_delay=60 * 5,
114 max_retries=MAX_RETRIES,
115 )
116 @retry(exclude=(DeleteAborted,))
117 def delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):
118 from sentry import deletions
119 from sentry.models import Organization, OrganizationStatus
120
121 try:
122 instance = Organization.objects.get(id=object_id)
123 except Organization.DoesNotExist:
124 return
125
126 if instance.status == OrganizationStatus.VISIBLE:
127 raise DeleteAborted
128
129 # compat: can be removed after we switch to scheduled deletions
130 if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:
131 pending_delete.send(sender=type(instance), instance=instance)
132
133 task = deletions.get(
134 model=Organization,
135 query={"id": object_id},
136 transaction_id=transaction_id or uuid4().hex,
137 actor_id=actor_id,
138 )
139 has_more = task.chunk()
140 if has_more:
141 delete_organization.apply_async(
142 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
143 countdown=15,
144 )
145
146
147 @instrumented_task(
148 name="sentry.tasks.deletion.delete_team",
149 queue="cleanup",
150 default_retry_delay=60 * 5,
151 max_retries=MAX_RETRIES,
152 )
153 @retry(exclude=(DeleteAborted,))
154 def delete_team(object_id, transaction_id=None, **kwargs):
155 from sentry import deletions
156 from sentry.models import Team, TeamStatus
157
158 try:
159 instance = Team.objects.get(id=object_id)
160 except Team.DoesNotExist:
161 return
162
163 if instance.status == TeamStatus.VISIBLE:
164 raise DeleteAborted
165
166 task = deletions.get(
167 model=Team, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
168 )
169 has_more = task.chunk()
170 if has_more:
171 delete_team.apply_async(
172 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
173 )
174
175
176 @instrumented_task(
177 name="sentry.tasks.deletion.delete_project",
178 queue="cleanup",
179 default_retry_delay=60 * 5,
180 max_retries=MAX_RETRIES,
181 )
182 @retry(exclude=(DeleteAborted,))
183 def delete_project(object_id, transaction_id=None, **kwargs):
184 from sentry import deletions
185 from sentry.models import Project, ProjectStatus
186
187 try:
188 instance = Project.objects.get(id=object_id)
189 except Project.DoesNotExist:
190 return
191
192 if instance.status == ProjectStatus.VISIBLE:
193 raise DeleteAborted
194
195 task = deletions.get(
196 model=Project, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
197 )
198 has_more = task.chunk()
199 if has_more:
200 delete_project.apply_async(
201 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
202 )
203
204
205 @instrumented_task(
206 name="sentry.tasks.deletion.delete_groups",
207 queue="cleanup",
208 default_retry_delay=60 * 5,
209 max_retries=MAX_RETRIES,
210 )
211 @retry(exclude=(DeleteAborted,))
212 @track_group_async_operation
213 def delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):
214 from sentry import deletions, eventstream
215 from sentry.models import Group
216
217 transaction_id = transaction_id or uuid4().hex
218
219 max_batch_size = 100
220 current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]
221
222 task = deletions.get(
223 model=Group, query={"id__in": current_batch}, transaction_id=transaction_id
224 )
225 has_more = task.chunk()
226 if has_more or rest:
227 delete_groups.apply_async(
228 kwargs={
229 "object_ids": object_ids if has_more else rest,
230 "transaction_id": transaction_id,
231 "eventstream_state": eventstream_state,
232 },
233 countdown=15,
234 )
235 else:
236 # all groups have been deleted
237 if eventstream_state:
238 eventstream.end_delete_groups(eventstream_state)
239
240
241 @instrumented_task(
242 name="sentry.tasks.deletion.delete_api_application",
243 queue="cleanup",
244 default_retry_delay=60 * 5,
245 max_retries=MAX_RETRIES,
246 )
247 @retry(exclude=(DeleteAborted,))
248 def delete_api_application(object_id, transaction_id=None, **kwargs):
249 from sentry import deletions
250 from sentry.models import ApiApplication, ApiApplicationStatus
251
252 try:
253 instance = ApiApplication.objects.get(id=object_id)
254 except ApiApplication.DoesNotExist:
255 return
256
257 if instance.status == ApiApplicationStatus.active:
258 raise DeleteAborted
259
260 task = deletions.get(
261 model=ApiApplication, query={"id": object_id}, transaction_id=transaction_id or uuid4().hex
262 )
263 has_more = task.chunk()
264 if has_more:
265 delete_api_application.apply_async(
266 kwargs={"object_id": object_id, "transaction_id": transaction_id}, countdown=15
267 )
268
269
270 @instrumented_task(
271 name="sentry.tasks.deletion.generic_delete",
272 queue="cleanup",
273 default_retry_delay=60 * 5,
274 max_retries=MAX_RETRIES,
275 )
276 @retry(exclude=(DeleteAborted,))
277 def generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):
278 from sentry import deletions
279 from sentry.models import User
280
281 model = apps.get_model(app_label, model_name)
282
283 try:
284 instance = model.objects.get(id=object_id)
285 except model.DoesNotExist:
286 return
287
288 if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
289 pending_delete.send(
290 sender=type(instance),
291 instance=instance,
292 actor=User.objects.get(id=actor_id) if actor_id else None,
293 )
294
295 if instance.status == ObjectStatus.VISIBLE:
296 raise DeleteAborted
297
298 task = deletions.get(
299 model=model,
300 actor_id=actor_id,
301 query={"id": object_id},
302 transaction_id=transaction_id or uuid4().hex,
303 )
304 has_more = task.chunk()
305 if has_more:
306 generic_delete.apply_async(
307 kwargs={
308 "app_label": app_label,
309 "model_name": model_name,
310 "object_id": object_id,
311 "transaction_id": transaction_id,
312 "actor_id": actor_id,
313 },
314 countdown=15,
315 )
316
317
318 @instrumented_task(
319 name="sentry.tasks.deletion.delete_repository",
320 queue="cleanup",
321 default_retry_delay=60 * 5,
322 max_retries=MAX_RETRIES,
323 )
324 @retry(exclude=(DeleteAborted,))
325 def delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):
326 from sentry import deletions
327 from sentry.models import Repository, User
328
329 try:
330 instance = Repository.objects.get(id=object_id)
331 except Repository.DoesNotExist:
332 return
333
334 if instance.status == ObjectStatus.VISIBLE:
335 raise DeleteAborted
336
337 # compat: can be removed after we switch to scheduled deletions
338 if instance.status != ObjectStatus.DELETION_IN_PROGRESS:
339 pending_delete.send(
340 sender=type(instance),
341 instance=instance,
342 actor=User.objects.get(id=actor_id) if actor_id else None,
343 )
344
345 task = deletions.get(
346 model=Repository,
347 actor_id=actor_id,
348 query={"id": object_id},
349 transaction_id=transaction_id or uuid4().hex,
350 )
351 has_more = task.chunk()
352 if has_more:
353 delete_repository.apply_async(
354 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
355 countdown=15,
356 )
357
358
359 @instrumented_task(
360 name="sentry.tasks.deletion.delete_organization_integration",
361 queue="cleanup",
362 default_retry_delay=60 * 5,
363 max_retries=MAX_RETRIES,
364 )
365 @retry(exclude=(DeleteAborted,))
366 def delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):
367 from sentry import deletions
368 from sentry.models import OrganizationIntegration, Repository
369
370 try:
371 instance = OrganizationIntegration.objects.get(id=object_id)
372 except OrganizationIntegration.DoesNotExist:
373 return
374
375 if instance.status == ObjectStatus.VISIBLE:
376 raise DeleteAborted
377
378 # dissociate repos from that integration
379 Repository.objects.filter(
380 organization_id=instance.organization_id, integration_id=instance.integration_id
381 ).update(integration_id=None)
382
383 task = deletions.get(
384 model=OrganizationIntegration,
385 actor_id=actor_id,
386 query={"id": object_id},
387 transaction_id=transaction_id or uuid4().hex,
388 )
389 has_more = task.chunk()
390 if has_more:
391 delete_organization_integration.apply_async(
392 kwargs={"object_id": object_id, "transaction_id": transaction_id, "actor_id": actor_id},
393 countdown=15,
394 )
395
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/tasks/deletion.py b/src/sentry/tasks/deletion.py
--- a/src/sentry/tasks/deletion.py
+++ b/src/sentry/tasks/deletion.py
@@ -14,8 +14,7 @@
# in prod we run with infinite retries to recover from errors
# in debug/development, we assume these tasks generally shouldn't fail
-MAX_RETRIES = 1 if settings.DEBUG else None
-MAX_RETRIES = 1
+MAX_RETRIES = 1 if settings.DEBUG else 5
@instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
|
{"golden_diff": "diff --git a/src/sentry/tasks/deletion.py b/src/sentry/tasks/deletion.py\n--- a/src/sentry/tasks/deletion.py\n+++ b/src/sentry/tasks/deletion.py\n@@ -14,8 +14,7 @@\n \n # in prod we run with infinite retries to recover from errors\n # in debug/development, we assume these tasks generally shouldn't fail\n-MAX_RETRIES = 1 if settings.DEBUG else None\n-MAX_RETRIES = 1\n+MAX_RETRIES = 1 if settings.DEBUG else 5\n \n \n @instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\n", "issue": "Hardcoded MAX_RETRIES = 1\nhttps://github.com/getsentry/sentry/blob/master/src/sentry/tasks/deletion.py#L18\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else None\nMAX_RETRIES = 1\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n \"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except ApiApplication.DoesNotExist:\n return\n\n if instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n", "path": "src/sentry/tasks/deletion.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else 5\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n \"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except ApiApplication.DoesNotExist:\n return\n\n if instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n", "path": "src/sentry/tasks/deletion.py"}]}
| 4,068 | 139 |
gh_patches_debug_1334
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-11317
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve formatting for "arguments" sections with long examples.
The line-wrapping for this endpoint's API documentation looks really ugly:

We should either remove the maximum width on "description", or figure out a way to use more than one line for a given endpoint that doesn't look so bad (e.g. having the example be on the next line after the rest of the endpoint description).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/bugdown/api_arguments_table_generator.py`
Content:
```
1 import re
2 import os
3 import ujson
4
5 from django.utils.html import escape as escape_html
6 from markdown.extensions import Extension
7 from markdown.preprocessors import Preprocessor
8 from zerver.lib.openapi import get_openapi_parameters
9 from typing import Any, Dict, Optional, List
10 import markdown
11
12 REGEXP = re.compile(r'\{generate_api_arguments_table\|\s*(.+?)\s*\|\s*(.+)\s*\}')
13
14
15 class MarkdownArgumentsTableGenerator(Extension):
16 def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:
17 if configs is None:
18 configs = {}
19 self.config = {
20 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],
21 }
22 for key, value in configs.items():
23 self.setConfig(key, value)
24
25 def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
26 md.preprocessors.add(
27 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'
28 )
29
30
31 class APIArgumentsTablePreprocessor(Preprocessor):
32 def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
33 super(APIArgumentsTablePreprocessor, self).__init__(md)
34 self.base_path = config['base_path']
35
36 def run(self, lines: List[str]) -> List[str]:
37 done = False
38 while not done:
39 for line in lines:
40 loc = lines.index(line)
41 match = REGEXP.search(line)
42
43 if not match:
44 continue
45
46 filename = match.group(1)
47 doc_name = match.group(2)
48 filename = os.path.expanduser(filename)
49
50 is_openapi_format = filename.endswith('.yaml')
51
52 if not os.path.isabs(filename):
53 parent_dir = self.base_path
54 filename = os.path.normpath(os.path.join(parent_dir, filename))
55
56 if is_openapi_format:
57 endpoint, method = doc_name.rsplit(':', 1)
58 arguments = [] # type: List[Dict[str, Any]]
59
60 try:
61 arguments = get_openapi_parameters(endpoint, method)
62 except KeyError as e:
63 # Don't raise an exception if the "parameters"
64 # field is missing; we assume that's because the
65 # endpoint doesn't accept any parameters
66 if e.args != ('parameters',):
67 raise e
68 else:
69 with open(filename, 'r') as fp:
70 json_obj = ujson.load(fp)
71 arguments = json_obj[doc_name]
72
73 if arguments:
74 text = self.render_table(arguments)
75 else:
76 text = ['This endpoint does not consume any arguments.']
77 # The line that contains the directive to include the macro
78 # may be preceded or followed by text or tags, in that case
79 # we need to make sure that any preceding or following text
80 # stays the same.
81 line_split = REGEXP.split(line, maxsplit=0)
82 preceding = line_split[0]
83 following = line_split[-1]
84 text = [preceding] + text + [following]
85 lines = lines[:loc] + text + lines[loc+1:]
86 break
87 else:
88 done = True
89 return lines
90
91 def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:
92 table = []
93 beginning = """
94 <table class="table">
95 <thead>
96 <tr>
97 <th>Argument</th>
98 <th>Example</th>
99 <th>Required</th>
100 <th>Description</th>
101 </tr>
102 </thead>
103 <tbody>
104 """
105 tr = """
106 <tr>
107 <td><code>{argument}</code></td>
108 <td><code>{example}</code></td>
109 <td>{required}</td>
110 <td>{description}</td>
111 </tr>
112 """
113
114 table.append(beginning)
115
116 md_engine = markdown.Markdown(extensions=[])
117
118 for argument in arguments:
119 description = argument['description']
120
121 oneof = ['`' + item + '`'
122 for item in argument.get('schema', {}).get('enum', [])]
123 if oneof:
124 description += '\nMust be one of: {}.'.format(', '.join(oneof))
125
126 default = argument.get('schema', {}).get('default')
127 if default is not None:
128 description += '\nDefaults to `{}`.'.format(ujson.dumps(default))
129
130 # TODO: Swagger allows indicating where the argument goes
131 # (path, querystring, form data...). A column in the table should
132 # be added for this.
133 table.append(tr.format(
134 argument=argument.get('argument') or argument.get('name'),
135 # Show this as JSON to avoid changing the quoting style, which
136 # may cause problems with JSON encoding.
137 example=escape_html(ujson.dumps(argument['example'])),
138 required='Yes' if argument.get('required') else 'No',
139 description=md_engine.convert(description),
140 ))
141
142 table.append("</tbody>")
143 table.append("</table>")
144
145 return table
146
147 def makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:
148 return MarkdownArgumentsTableGenerator(kwargs)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/bugdown/api_arguments_table_generator.py b/zerver/lib/bugdown/api_arguments_table_generator.py
--- a/zerver/lib/bugdown/api_arguments_table_generator.py
+++ b/zerver/lib/bugdown/api_arguments_table_generator.py
@@ -105,7 +105,7 @@
tr = """
<tr>
<td><code>{argument}</code></td>
- <td><code>{example}</code></td>
+ <td class="json-api-example"><code>{example}</code></td>
<td>{required}</td>
<td>{description}</td>
</tr>
|
{"golden_diff": "diff --git a/zerver/lib/bugdown/api_arguments_table_generator.py b/zerver/lib/bugdown/api_arguments_table_generator.py\n--- a/zerver/lib/bugdown/api_arguments_table_generator.py\n+++ b/zerver/lib/bugdown/api_arguments_table_generator.py\n@@ -105,7 +105,7 @@\n tr = \"\"\"\n <tr>\n <td><code>{argument}</code></td>\n- <td><code>{example}</code></td>\n+ <td class=\"json-api-example\"><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n </tr>\n", "issue": "Improve formatting for \"arguments\" sections with long examples.\nThe line-wrapping for this endpoint's API documentation looks really ugly:\r\n\r\n\r\n\r\nWe should either remove the maximum width on \"description\", or figure out a way to use more than one line for a given endpoint that doesn't look so bad (e.g. having the example be on the next line after the rest of the endpoint description).\n", "before_files": [{"content": "import re\nimport os\nimport ujson\n\nfrom django.utils.html import escape as escape_html\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom zerver.lib.openapi import get_openapi_parameters\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nREGEXP = re.compile(r'\\{generate_api_arguments_table\\|\\s*(.+?)\\s*\\|\\s*(.+)\\s*\\}')\n\n\nclass MarkdownArgumentsTableGenerator(Extension):\n def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:\n if configs is None:\n configs = {}\n self.config = {\n 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],\n }\n for key, value in configs.items():\n self.setConfig(key, value)\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APIArgumentsTablePreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APIArgumentsTablePreprocessor, self).__init__(md)\n self.base_path = config['base_path']\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if not match:\n continue\n\n filename = match.group(1)\n doc_name = match.group(2)\n filename = os.path.expanduser(filename)\n\n is_openapi_format = filename.endswith('.yaml')\n\n if not os.path.isabs(filename):\n parent_dir = self.base_path\n filename = os.path.normpath(os.path.join(parent_dir, filename))\n\n if is_openapi_format:\n endpoint, method = doc_name.rsplit(':', 1)\n arguments = [] # type: List[Dict[str, Any]]\n\n try:\n arguments = get_openapi_parameters(endpoint, method)\n except KeyError as e:\n # Don't raise an exception if the \"parameters\"\n # field is missing; we assume that's because the\n # endpoint doesn't accept any parameters\n if e.args != ('parameters',):\n raise e\n else:\n with open(filename, 'r') as fp:\n json_obj = ujson.load(fp)\n arguments = json_obj[doc_name]\n\n if arguments:\n text = self.render_table(arguments)\n else:\n text = ['This endpoint does not consume any arguments.']\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:\n table = []\n beginning = \"\"\"\n<table class=\"table\">\n <thead>\n <tr>\n <th>Argument</th>\n <th>Example</th>\n <th>Required</th>\n <th>Description</th>\n </tr>\n </thead>\n<tbody>\n\"\"\"\n tr = \"\"\"\n<tr>\n <td><code>{argument}</code></td>\n <td><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n</tr>\n\"\"\"\n\n table.append(beginning)\n\n md_engine = markdown.Markdown(extensions=[])\n\n for argument in arguments:\n description = argument['description']\n\n oneof = ['`' + item + '`'\n for item in argument.get('schema', {}).get('enum', [])]\n if oneof:\n description += '\\nMust be one of: {}.'.format(', '.join(oneof))\n\n default = argument.get('schema', {}).get('default')\n if default is not None:\n description += '\\nDefaults to `{}`.'.format(ujson.dumps(default))\n\n # TODO: Swagger allows indicating where the argument goes\n # (path, querystring, form data...). A column in the table should\n # be added for this.\n table.append(tr.format(\n argument=argument.get('argument') or argument.get('name'),\n # Show this as JSON to avoid changing the quoting style, which\n # may cause problems with JSON encoding.\n example=escape_html(ujson.dumps(argument['example'])),\n required='Yes' if argument.get('required') else 'No',\n description=md_engine.convert(description),\n ))\n\n table.append(\"</tbody>\")\n table.append(\"</table>\")\n\n return table\n\ndef makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:\n return MarkdownArgumentsTableGenerator(kwargs)\n", "path": "zerver/lib/bugdown/api_arguments_table_generator.py"}], "after_files": [{"content": "import re\nimport os\nimport ujson\n\nfrom django.utils.html import escape as escape_html\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom zerver.lib.openapi import get_openapi_parameters\nfrom typing import Any, Dict, Optional, List\nimport markdown\n\nREGEXP = re.compile(r'\\{generate_api_arguments_table\\|\\s*(.+?)\\s*\\|\\s*(.+)\\s*\\}')\n\n\nclass MarkdownArgumentsTableGenerator(Extension):\n def __init__(self, configs: Optional[Dict[str, Any]]=None) -> None:\n if configs is None:\n configs = {}\n self.config = {\n 'base_path': ['.', 'Default location from which to evaluate relative paths for the JSON files.'],\n }\n for key, value in configs.items():\n self.setConfig(key, value)\n\n def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:\n md.preprocessors.add(\n 'generate_api_arguments', APIArgumentsTablePreprocessor(md, self.getConfigs()), '_begin'\n )\n\n\nclass APIArgumentsTablePreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:\n super(APIArgumentsTablePreprocessor, self).__init__(md)\n self.base_path = config['base_path']\n\n def run(self, lines: List[str]) -> List[str]:\n done = False\n while not done:\n for line in lines:\n loc = lines.index(line)\n match = REGEXP.search(line)\n\n if not match:\n continue\n\n filename = match.group(1)\n doc_name = match.group(2)\n filename = os.path.expanduser(filename)\n\n is_openapi_format = filename.endswith('.yaml')\n\n if not os.path.isabs(filename):\n parent_dir = self.base_path\n filename = os.path.normpath(os.path.join(parent_dir, filename))\n\n if is_openapi_format:\n endpoint, method = doc_name.rsplit(':', 1)\n arguments = [] # type: List[Dict[str, Any]]\n\n try:\n arguments = get_openapi_parameters(endpoint, method)\n except KeyError as e:\n # Don't raise an exception if the \"parameters\"\n # field is missing; we assume that's because the\n # endpoint doesn't accept any parameters\n if e.args != ('parameters',):\n raise e\n else:\n with open(filename, 'r') as fp:\n json_obj = ujson.load(fp)\n arguments = json_obj[doc_name]\n\n if arguments:\n text = self.render_table(arguments)\n else:\n text = ['This endpoint does not consume any arguments.']\n # The line that contains the directive to include the macro\n # may be preceded or followed by text or tags, in that case\n # we need to make sure that any preceding or following text\n # stays the same.\n line_split = REGEXP.split(line, maxsplit=0)\n preceding = line_split[0]\n following = line_split[-1]\n text = [preceding] + text + [following]\n lines = lines[:loc] + text + lines[loc+1:]\n break\n else:\n done = True\n return lines\n\n def render_table(self, arguments: List[Dict[str, Any]]) -> List[str]:\n table = []\n beginning = \"\"\"\n<table class=\"table\">\n <thead>\n <tr>\n <th>Argument</th>\n <th>Example</th>\n <th>Required</th>\n <th>Description</th>\n </tr>\n </thead>\n<tbody>\n\"\"\"\n tr = \"\"\"\n<tr>\n <td><code>{argument}</code></td>\n <td class=\"json-api-example\"><code>{example}</code></td>\n <td>{required}</td>\n <td>{description}</td>\n</tr>\n\"\"\"\n\n table.append(beginning)\n\n md_engine = markdown.Markdown(extensions=[])\n\n for argument in arguments:\n description = argument['description']\n\n oneof = ['`' + item + '`'\n for item in argument.get('schema', {}).get('enum', [])]\n if oneof:\n description += '\\nMust be one of: {}.'.format(', '.join(oneof))\n\n default = argument.get('schema', {}).get('default')\n if default is not None:\n description += '\\nDefaults to `{}`.'.format(ujson.dumps(default))\n\n # TODO: Swagger allows indicating where the argument goes\n # (path, querystring, form data...). A column in the table should\n # be added for this.\n table.append(tr.format(\n argument=argument.get('argument') or argument.get('name'),\n # Show this as JSON to avoid changing the quoting style, which\n # may cause problems with JSON encoding.\n example=escape_html(ujson.dumps(argument['example'])),\n required='Yes' if argument.get('required') else 'No',\n description=md_engine.convert(description),\n ))\n\n table.append(\"</tbody>\")\n table.append(\"</table>\")\n\n return table\n\ndef makeExtension(*args: Any, **kwargs: str) -> MarkdownArgumentsTableGenerator:\n return MarkdownArgumentsTableGenerator(kwargs)\n", "path": "zerver/lib/bugdown/api_arguments_table_generator.py"}]}
| 1,871 | 139 |
gh_patches_debug_5532
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-3216
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HKDF key-length inconsistency
For too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:
Program:
```python
#!/usr/bin/env python3.5
import cryptography
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
print("cryptography.io:{}".format(cryptography.__version__))
hkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b"salt",
info=b"some-test", backend=default_backend())
key = hkdf.derive(b"my secret passphrase")
print("Derived key: {}".format(key))
```
Output:
```
cryptography.io:1.5.2
Derived key: b''
```
Suggested fix:
I am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.
```python
# class HKDFExpand
def _expand(self, key_material):
output = [b""]
counter = 1
while (self._algorithm.digest_size // 8) * len(output) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
h.update(six.int2byte(counter))
output.append(h.finalize())
counter += 1
return b"".join(output)[:self._length]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/kdf/hkdf.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.exceptions import (
11 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
12 )
13 from cryptography.hazmat.backends.interfaces import HMACBackend
14 from cryptography.hazmat.primitives import constant_time, hmac
15 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
16
17
18 @utils.register_interface(KeyDerivationFunction)
19 class HKDF(object):
20 def __init__(self, algorithm, length, salt, info, backend):
21 if not isinstance(backend, HMACBackend):
22 raise UnsupportedAlgorithm(
23 "Backend object does not implement HMACBackend.",
24 _Reasons.BACKEND_MISSING_INTERFACE
25 )
26
27 self._algorithm = algorithm
28
29 if not (salt is None or isinstance(salt, bytes)):
30 raise TypeError("salt must be bytes.")
31
32 if salt is None:
33 salt = b"\x00" * (self._algorithm.digest_size // 8)
34
35 self._salt = salt
36
37 self._backend = backend
38
39 self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)
40
41 def _extract(self, key_material):
42 h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)
43 h.update(key_material)
44 return h.finalize()
45
46 def derive(self, key_material):
47 if not isinstance(key_material, bytes):
48 raise TypeError("key_material must be bytes.")
49
50 return self._hkdf_expand.derive(self._extract(key_material))
51
52 def verify(self, key_material, expected_key):
53 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
54 raise InvalidKey
55
56
57 @utils.register_interface(KeyDerivationFunction)
58 class HKDFExpand(object):
59 def __init__(self, algorithm, length, info, backend):
60 if not isinstance(backend, HMACBackend):
61 raise UnsupportedAlgorithm(
62 "Backend object does not implement HMACBackend.",
63 _Reasons.BACKEND_MISSING_INTERFACE
64 )
65
66 self._algorithm = algorithm
67
68 self._backend = backend
69
70 max_length = 255 * (algorithm.digest_size // 8)
71
72 if length > max_length:
73 raise ValueError(
74 "Can not derive keys larger than {0} octets.".format(
75 max_length
76 ))
77
78 self._length = length
79
80 if not (info is None or isinstance(info, bytes)):
81 raise TypeError("info must be bytes.")
82
83 if info is None:
84 info = b""
85
86 self._info = info
87
88 self._used = False
89
90 def _expand(self, key_material):
91 output = [b""]
92 counter = 1
93
94 while (self._algorithm.digest_size // 8) * len(output) < self._length:
95 h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
96 h.update(output[-1])
97 h.update(self._info)
98 h.update(six.int2byte(counter))
99 output.append(h.finalize())
100 counter += 1
101
102 return b"".join(output)[:self._length]
103
104 def derive(self, key_material):
105 if not isinstance(key_material, bytes):
106 raise TypeError("key_material must be bytes.")
107
108 if self._used:
109 raise AlreadyFinalized
110
111 self._used = True
112 return self._expand(key_material)
113
114 def verify(self, key_material, expected_key):
115 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
116 raise InvalidKey
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py
--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py
+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py
@@ -91,7 +91,7 @@
output = [b""]
counter = 1
- while (self._algorithm.digest_size // 8) * len(output) < self._length:
+ while self._algorithm.digest_size * (len(output) - 1) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py\n+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n@@ -91,7 +91,7 @@\n output = [b\"\"]\n counter = 1\n \n- while (self._algorithm.digest_size // 8) * len(output) < self._length:\n+ while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n", "issue": "HKDF key-length inconsistency\nFor too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:\r\n\r\nProgram:\r\n```python\r\n#!/usr/bin/env python3.5\r\nimport cryptography\r\nfrom cryptography.hazmat.primitives import hashes\r\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\nprint(\"cryptography.io:{}\".format(cryptography.__version__))\r\n\r\nhkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b\"salt\",\r\n info=b\"some-test\", backend=default_backend())\r\n\r\nkey = hkdf.derive(b\"my secret passphrase\")\r\nprint(\"Derived key: {}\".format(key))\r\n```\r\n\r\nOutput:\r\n```\r\ncryptography.io:1.5.2\r\nDerived key: b''\r\n```\r\n\r\nSuggested fix:\r\n\r\nI am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.\r\n\r\n```python\r\n# class HKDFExpand\r\ndef _expand(self, key_material):\r\n output = [b\"\"]\r\n counter = 1\r\n\r\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\r\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\r\n h.update(output[-1])\r\n h.update(self._info)\r\n h.update(six.int2byte(counter))\r\n output.append(h.finalize())\r\n counter += 1\r\n\r\n return b\"\".join(output)[:self._length]\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * (self._algorithm.digest_size // 8)\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * (self._algorithm.digest_size // 8)\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}]}
| 1,713 | 169 |
gh_patches_debug_19553
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1971
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: fetch failed for source Recycle!
### I Have A Problem With:
A specific source
### What's Your Problem
For (I unfortunately don't know how long, only recently moved and started HA again) the the Waste collection integration doesn't work anymore for the source Recycle!.
Navigating towards the API URL (https://api.fostplus.be/recycle-public/app/v1/streets) actually gives a 404.
Thank you for your support ;)
### Source (if relevant)
recycleapp_be
### Logs
```Shell
This error originated from a custom integration.
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
integration: waste_collection_schedule (documentation)
First occurred: April 7, 2024 at 19:45:49 (2 occurrences)
Last logged: 01:21:00
fetch failed for source Recycle!: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py", line 64, in fetch r.raise_for_status() File "/usr/local/lib/python3.12/site-packages/requests/models.py", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.fostplus.be/recycle-public/app/v1/streets?qxxxxxxx&zipcodes=3200-24001
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: recycleapp_be
args:
postcode: 3200
street: xxxxxxx
house_number: 1
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py`
Content:
```
1 import logging
2 from datetime import datetime, timedelta
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Recycle!"
8 DESCRIPTION = "Source for RecycleApp.be"
9 URL = "https://www.recycleapp.be"
10 TEST_CASES = {
11 "1140 Evere, Bazellaan 1": {
12 "postcode": 1140,
13 "street": "Bazellaan",
14 "house_number": 1,
15 },
16 "3001, Waversebaan 276 with events": {
17 "postcode": 3001,
18 "street": "Waversebaan",
19 "house_number": 276,
20 },
21 "3001, Waversebaan 276 without events": {
22 "postcode": 3001,
23 "street": "Waversebaan",
24 "house_number": 276,
25 "add_events": False,
26 },
27 "1400, Rue de namur 1 with events": {
28 "postcode": 1400,
29 "street": "Rue de namur",
30 "house_number": 1,
31 "add_events": True,
32 },
33 }
34
35 _LOGGER = logging.getLogger(__name__)
36
37
38 class Source:
39 def __init__(self, postcode, street, house_number, add_events=True):
40 self._postcode = postcode
41 self._street = street
42 self._house_number = house_number
43 self._add_events = add_events
44
45 def fetch(self):
46 url = "https://api.fostplus.be/recycle-public/app/v1"
47 headers = {
48 "x-secret": "Op2tDi2pBmh1wzeC5TaN2U3knZan7ATcfOQgxh4vqC0mDKmnPP2qzoQusmInpglfIkxx8SZrasBqi5zgMSvyHggK9j6xCQNQ8xwPFY2o03GCcQfcXVOyKsvGWLze7iwcfcgk2Ujpl0dmrt3hSJMCDqzAlvTrsvAEiaSzC9hKRwhijQAFHuFIhJssnHtDSB76vnFQeTCCvwVB27DjSVpDmq8fWQKEmjEncdLqIsRnfxLcOjGIVwX5V0LBntVbeiBvcjyKF2nQ08rIxqHHGXNJ6SbnAmTgsPTg7k6Ejqa7dVfTmGtEPdftezDbuEc8DdK66KDecqnxwOOPSJIN0zaJ6k2Ye2tgMSxxf16gxAmaOUqHS0i7dtG5PgPSINti3qlDdw6DTKEPni7X0rxM",
49 "x-consumer": "recycleapp.be",
50 "User-Agent": "",
51 "Authorization": "",
52 }
53 r = requests.get(f"{url}/access-token", headers=headers)
54 r.raise_for_status()
55 headers["Authorization"] = r.json()["accessToken"]
56
57 params = {"q": self._postcode}
58 r = requests.get(f"{url}/zipcodes", params=params, headers=headers)
59 r.raise_for_status()
60 zipcodeId = r.json()["items"][0]["id"]
61
62 params = {"q": self._street, "zipcodes": zipcodeId}
63 r = requests.post(f"{url}/streets", params=params, headers=headers)
64 r.raise_for_status()
65
66 streetId = None
67 for item in r.json()["items"]:
68 if item["name"] == self._street:
69 streetId = item["id"]
70 if streetId is None:
71 streetId = r.json()["items"][0]["id"]
72
73 now = datetime.now()
74 fromDate = now.strftime("%Y-%m-%d")
75 untilDate = (now + timedelta(days=365)).strftime("%Y-%m-%d")
76 params = {
77 "zipcodeId": zipcodeId,
78 "streetId": streetId,
79 "houseNumber": self._house_number,
80 "fromDate": fromDate,
81 "untilDate": untilDate,
82 # "size":100,
83 }
84 r = requests.get(f"{url}/collections", params=params, headers=headers)
85 r.raise_for_status()
86
87 entries = []
88 for item in r.json()["items"]:
89 if "exception" in item and "replacedBy" in item["exception"]:
90 continue
91
92 date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date()
93 if item["type"] == "collection":
94 entries.append(Collection(date, item["fraction"]["name"]["en"]))
95 elif item["type"] == "event" and self._add_events:
96 entries.append(Collection(date, item["event"]["title"]["en"]))
97
98 return entries
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
@@ -43,7 +43,7 @@
self._add_events = add_events
def fetch(self):
- url = "https://api.fostplus.be/recycle-public/app/v1"
+ url = "https://api.fostplus.be/recyclecms/app/v1"
headers = {
"x-secret": "Op2tDi2pBmh1wzeC5TaN2U3knZan7ATcfOQgxh4vqC0mDKmnPP2qzoQusmInpglfIkxx8SZrasBqi5zgMSvyHggK9j6xCQNQ8xwPFY2o03GCcQfcXVOyKsvGWLze7iwcfcgk2Ujpl0dmrt3hSJMCDqzAlvTrsvAEiaSzC9hKRwhijQAFHuFIhJssnHtDSB76vnFQeTCCvwVB27DjSVpDmq8fWQKEmjEncdLqIsRnfxLcOjGIVwX5V0LBntVbeiBvcjyKF2nQ08rIxqHHGXNJ6SbnAmTgsPTg7k6Ejqa7dVfTmGtEPdftezDbuEc8DdK66KDecqnxwOOPSJIN0zaJ6k2Ye2tgMSxxf16gxAmaOUqHS0i7dtG5PgPSINti3qlDdw6DTKEPni7X0rxM",
"x-consumer": "recycleapp.be",
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n@@ -43,7 +43,7 @@\n self._add_events = add_events\n \n def fetch(self):\n- url = \"https://api.fostplus.be/recycle-public/app/v1\"\n+ url = \"https://api.fostplus.be/recyclecms/app/v1\"\n headers = {\n \"x-secret\": \"Op2tDi2pBmh1wzeC5TaN2U3knZan7ATcfOQgxh4vqC0mDKmnPP2qzoQusmInpglfIkxx8SZrasBqi5zgMSvyHggK9j6xCQNQ8xwPFY2o03GCcQfcXVOyKsvGWLze7iwcfcgk2Ujpl0dmrt3hSJMCDqzAlvTrsvAEiaSzC9hKRwhijQAFHuFIhJssnHtDSB76vnFQeTCCvwVB27DjSVpDmq8fWQKEmjEncdLqIsRnfxLcOjGIVwX5V0LBntVbeiBvcjyKF2nQ08rIxqHHGXNJ6SbnAmTgsPTg7k6Ejqa7dVfTmGtEPdftezDbuEc8DdK66KDecqnxwOOPSJIN0zaJ6k2Ye2tgMSxxf16gxAmaOUqHS0i7dtG5PgPSINti3qlDdw6DTKEPni7X0rxM\",\n \"x-consumer\": \"recycleapp.be\",\n", "issue": "[Bug]: fetch failed for source Recycle!\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nFor (I unfortunately don't know how long, only recently moved and started HA again) the the Waste collection integration doesn't work anymore for the source Recycle!.\r\nNavigating towards the API URL (https://api.fostplus.be/recycle-public/app/v1/streets) actually gives a 404.\r\n\r\nThank you for your support ;) \n\n### Source (if relevant)\n\nrecycleapp_be\n\n### Logs\n\n```Shell\nThis error originated from a custom integration.\r\n\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nintegration: waste_collection_schedule (documentation)\r\nFirst occurred: April 7, 2024 at 19:45:49 (2 occurrences)\r\nLast logged: 01:21:00\r\n\r\nfetch failed for source Recycle!: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\", line 64, in fetch r.raise_for_status() File \"/usr/local/lib/python3.12/site-packages/requests/models.py\", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://api.fostplus.be/recycle-public/app/v1/streets?qxxxxxxx&zipcodes=3200-24001\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: recycleapp_be\r\n args:\r\n postcode: 3200\r\n street: xxxxxxx\r\n house_number: 1\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n \"1400, Rue de namur 1 with events\": {\n \"postcode\": 1400,\n \"street\": \"Rue de namur\",\n \"house_number\": 1,\n \"add_events\": True,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://api.fostplus.be/recycle-public/app/v1\"\n headers = {\n \"x-secret\": \"Op2tDi2pBmh1wzeC5TaN2U3knZan7ATcfOQgxh4vqC0mDKmnPP2qzoQusmInpglfIkxx8SZrasBqi5zgMSvyHggK9j6xCQNQ8xwPFY2o03GCcQfcXVOyKsvGWLze7iwcfcgk2Ujpl0dmrt3hSJMCDqzAlvTrsvAEiaSzC9hKRwhijQAFHuFIhJssnHtDSB76vnFQeTCCvwVB27DjSVpDmq8fWQKEmjEncdLqIsRnfxLcOjGIVwX5V0LBntVbeiBvcjyKF2nQ08rIxqHHGXNJ6SbnAmTgsPTg7k6Ejqa7dVfTmGtEPdftezDbuEc8DdK66KDecqnxwOOPSJIN0zaJ6k2Ye2tgMSxxf16gxAmaOUqHS0i7dtG5PgPSINti3qlDdw6DTKEPni7X0rxM\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n r.raise_for_status()\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n r.raise_for_status()\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.post(f\"{url}/streets\", params=params, headers=headers)\n r.raise_for_status()\n\n streetId = None\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n r.raise_for_status()\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n \"1400, Rue de namur 1 with events\": {\n \"postcode\": 1400,\n \"street\": \"Rue de namur\",\n \"house_number\": 1,\n \"add_events\": True,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://api.fostplus.be/recyclecms/app/v1\"\n headers = {\n \"x-secret\": \"Op2tDi2pBmh1wzeC5TaN2U3knZan7ATcfOQgxh4vqC0mDKmnPP2qzoQusmInpglfIkxx8SZrasBqi5zgMSvyHggK9j6xCQNQ8xwPFY2o03GCcQfcXVOyKsvGWLze7iwcfcgk2Ujpl0dmrt3hSJMCDqzAlvTrsvAEiaSzC9hKRwhijQAFHuFIhJssnHtDSB76vnFQeTCCvwVB27DjSVpDmq8fWQKEmjEncdLqIsRnfxLcOjGIVwX5V0LBntVbeiBvcjyKF2nQ08rIxqHHGXNJ6SbnAmTgsPTg7k6Ejqa7dVfTmGtEPdftezDbuEc8DdK66KDecqnxwOOPSJIN0zaJ6k2Ye2tgMSxxf16gxAmaOUqHS0i7dtG5PgPSINti3qlDdw6DTKEPni7X0rxM\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n r.raise_for_status()\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n r.raise_for_status()\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.post(f\"{url}/streets\", params=params, headers=headers)\n r.raise_for_status()\n\n streetId = None\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n r.raise_for_status()\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}]}
| 2,196 | 448 |
gh_patches_debug_20557
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-5408
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc exp gc: can't run experiments after garbage collection
# Bug Report
## Description
Experiments fail after garbage collection via `dvc exp gc`.
### Reproduce
1. dvc init
2. dvc stage add -n foo -M foo.yaml 'echo "score: 1" > foo.yaml'
3. git add .
4. git commit -m "add stage"
5. dvc exp run
6. dvc exp gc -fT
7. dvc exp run
```console
Stage 'foo' didn't change, skipping
ERROR: Failed to set 'refs/exps/d1/5cee9fd7a4fc20c5d7167671df4ed78b3cfb7a/exp-985c5'
ERROR: Failed to set 'refs/exps/d1/5cee9fd7a4fc20c5d7167671df4ed78b3cfb7a/exp-985c5'
```
### Expected
`dvc exp run` to successfully run experiment.
### Environment information
**Output of `dvc version`:**
```console
$ dvc version
DVC version: 2.0.0a0+adedd1
---------------------------------
Platform: Python 3.9.1 on Linux-5.8.0-38-generic-x86_64-with-glibc2.31
Supports: http, https
Cache types: <https://error.dvc.org/no-dvc-cache>
Caches: local
Remotes: None
Workspace directory: ext4 on /dev/mapper/vgubuntu-root
Repo: dvc, git
```
**Additional Information (if any):**
<!--
If applicable, please also provide a `--verbose` output of the command, eg: `dvc add --verbose`.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/experiments/gc.py`
Content:
```
1 import logging
2 from typing import Optional
3
4 from dvc.repo import locked
5
6 from .utils import exp_refs
7
8 logger = logging.getLogger(__name__)
9
10
11 @locked
12 def gc(
13 repo,
14 all_branches: Optional[bool] = False,
15 all_tags: Optional[bool] = False,
16 all_commits: Optional[bool] = False,
17 workspace: Optional[bool] = False,
18 queued: Optional[bool] = False,
19 ):
20 keep_revs = set(
21 repo.brancher(
22 all_branches=all_branches,
23 all_tags=all_tags,
24 all_commits=all_commits,
25 sha_only=True,
26 )
27 )
28 if workspace:
29 keep_revs.add(repo.scm.get_rev())
30
31 if not keep_revs:
32 return 0
33
34 removed = 0
35 for ref_info in exp_refs(repo.scm):
36 if ref_info.baseline_sha not in keep_revs:
37 repo.scm.remove_ref(str(ref_info))
38 removed += 1
39
40 delete_stashes = []
41 for _, entry in repo.experiments.stash_revs.items():
42 if not queued or entry.baseline_rev not in keep_revs:
43 delete_stashes.append(entry.index)
44 for index in sorted(delete_stashes, reverse=True):
45 repo.experiments.stash.drop(index)
46 removed += len(delete_stashes)
47
48 return removed
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/repo/experiments/gc.py b/dvc/repo/experiments/gc.py
--- a/dvc/repo/experiments/gc.py
+++ b/dvc/repo/experiments/gc.py
@@ -3,6 +3,7 @@
from dvc.repo import locked
+from .base import EXEC_APPLY, EXEC_BRANCH, EXEC_CHECKPOINT
from .utils import exp_refs
logger = logging.getLogger(__name__)
@@ -31,9 +32,20 @@
if not keep_revs:
return 0
+ exec_branch = repo.scm.get_ref(EXEC_BRANCH, follow=False)
+ exec_apply = repo.scm.get_ref(EXEC_APPLY)
+ exec_checkpoint = repo.scm.get_ref(EXEC_CHECKPOINT)
+
removed = 0
for ref_info in exp_refs(repo.scm):
if ref_info.baseline_sha not in keep_revs:
+ ref = repo.scm.get_ref(str(ref_info))
+ if exec_branch and str(ref_info):
+ repo.scm.remove_ref(EXEC_BRANCH)
+ if exec_apply and exec_apply == ref:
+ repo.scm.remove_ref(EXEC_APPLY)
+ if exec_checkpoint and exec_checkpoint == ref:
+ repo.scm.remove_ref(EXEC_CHECKPOINT)
repo.scm.remove_ref(str(ref_info))
removed += 1
|
{"golden_diff": "diff --git a/dvc/repo/experiments/gc.py b/dvc/repo/experiments/gc.py\n--- a/dvc/repo/experiments/gc.py\n+++ b/dvc/repo/experiments/gc.py\n@@ -3,6 +3,7 @@\n \n from dvc.repo import locked\n \n+from .base import EXEC_APPLY, EXEC_BRANCH, EXEC_CHECKPOINT\n from .utils import exp_refs\n \n logger = logging.getLogger(__name__)\n@@ -31,9 +32,20 @@\n if not keep_revs:\n return 0\n \n+ exec_branch = repo.scm.get_ref(EXEC_BRANCH, follow=False)\n+ exec_apply = repo.scm.get_ref(EXEC_APPLY)\n+ exec_checkpoint = repo.scm.get_ref(EXEC_CHECKPOINT)\n+\n removed = 0\n for ref_info in exp_refs(repo.scm):\n if ref_info.baseline_sha not in keep_revs:\n+ ref = repo.scm.get_ref(str(ref_info))\n+ if exec_branch and str(ref_info):\n+ repo.scm.remove_ref(EXEC_BRANCH)\n+ if exec_apply and exec_apply == ref:\n+ repo.scm.remove_ref(EXEC_APPLY)\n+ if exec_checkpoint and exec_checkpoint == ref:\n+ repo.scm.remove_ref(EXEC_CHECKPOINT)\n repo.scm.remove_ref(str(ref_info))\n removed += 1\n", "issue": "dvc exp gc: can't run experiments after garbage collection\n# Bug Report\r\n\r\n## Description\r\n\r\nExperiments fail after garbage collection via `dvc exp gc`.\r\n\r\n### Reproduce\r\n\r\n1. dvc init\r\n2. dvc stage add -n foo -M foo.yaml 'echo \"score: 1\" > foo.yaml'\r\n3. git add .\r\n4. git commit -m \"add stage\"\r\n5. dvc exp run\r\n6. dvc exp gc -fT\r\n7. dvc exp run\r\n\r\n```console\r\nStage 'foo' didn't change, skipping\r\nERROR: Failed to set 'refs/exps/d1/5cee9fd7a4fc20c5d7167671df4ed78b3cfb7a/exp-985c5'\r\nERROR: Failed to set 'refs/exps/d1/5cee9fd7a4fc20c5d7167671df4ed78b3cfb7a/exp-985c5'\r\n```\r\n\r\n### Expected\r\n\r\n`dvc exp run` to successfully run experiment.\r\n\r\n### Environment information\r\n\r\n**Output of `dvc version`:**\r\n\r\n```console\r\n$ dvc version\r\nDVC version: 2.0.0a0+adedd1\r\n---------------------------------\r\nPlatform: Python 3.9.1 on Linux-5.8.0-38-generic-x86_64-with-glibc2.31\r\nSupports: http, https\r\nCache types: <https://error.dvc.org/no-dvc-cache>\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: ext4 on /dev/mapper/vgubuntu-root\r\nRepo: dvc, git\r\n```\r\n\r\n**Additional Information (if any):**\r\n\r\n<!--\r\nIf applicable, please also provide a `--verbose` output of the command, eg: `dvc add --verbose`.\r\n-->\n", "before_files": [{"content": "import logging\nfrom typing import Optional\n\nfrom dvc.repo import locked\n\nfrom .utils import exp_refs\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef gc(\n repo,\n all_branches: Optional[bool] = False,\n all_tags: Optional[bool] = False,\n all_commits: Optional[bool] = False,\n workspace: Optional[bool] = False,\n queued: Optional[bool] = False,\n):\n keep_revs = set(\n repo.brancher(\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n sha_only=True,\n )\n )\n if workspace:\n keep_revs.add(repo.scm.get_rev())\n\n if not keep_revs:\n return 0\n\n removed = 0\n for ref_info in exp_refs(repo.scm):\n if ref_info.baseline_sha not in keep_revs:\n repo.scm.remove_ref(str(ref_info))\n removed += 1\n\n delete_stashes = []\n for _, entry in repo.experiments.stash_revs.items():\n if not queued or entry.baseline_rev not in keep_revs:\n delete_stashes.append(entry.index)\n for index in sorted(delete_stashes, reverse=True):\n repo.experiments.stash.drop(index)\n removed += len(delete_stashes)\n\n return removed\n", "path": "dvc/repo/experiments/gc.py"}], "after_files": [{"content": "import logging\nfrom typing import Optional\n\nfrom dvc.repo import locked\n\nfrom .base import EXEC_APPLY, EXEC_BRANCH, EXEC_CHECKPOINT\nfrom .utils import exp_refs\n\nlogger = logging.getLogger(__name__)\n\n\n@locked\ndef gc(\n repo,\n all_branches: Optional[bool] = False,\n all_tags: Optional[bool] = False,\n all_commits: Optional[bool] = False,\n workspace: Optional[bool] = False,\n queued: Optional[bool] = False,\n):\n keep_revs = set(\n repo.brancher(\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n sha_only=True,\n )\n )\n if workspace:\n keep_revs.add(repo.scm.get_rev())\n\n if not keep_revs:\n return 0\n\n exec_branch = repo.scm.get_ref(EXEC_BRANCH, follow=False)\n exec_apply = repo.scm.get_ref(EXEC_APPLY)\n exec_checkpoint = repo.scm.get_ref(EXEC_CHECKPOINT)\n\n removed = 0\n for ref_info in exp_refs(repo.scm):\n if ref_info.baseline_sha not in keep_revs:\n ref = repo.scm.get_ref(str(ref_info))\n if exec_branch and str(ref_info):\n repo.scm.remove_ref(EXEC_BRANCH)\n if exec_apply and exec_apply == ref:\n repo.scm.remove_ref(EXEC_APPLY)\n if exec_checkpoint and exec_checkpoint == ref:\n repo.scm.remove_ref(EXEC_CHECKPOINT)\n repo.scm.remove_ref(str(ref_info))\n removed += 1\n\n delete_stashes = []\n for _, entry in repo.experiments.stash_revs.items():\n if not queued or entry.baseline_rev not in keep_revs:\n delete_stashes.append(entry.index)\n for index in sorted(delete_stashes, reverse=True):\n repo.experiments.stash.drop(index)\n removed += len(delete_stashes)\n\n return removed\n", "path": "dvc/repo/experiments/gc.py"}]}
| 1,056 | 295 |
gh_patches_debug_28813
|
rasdani/github-patches
|
git_diff
|
web2py__web2py-1375
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RedisCache fails to delete cached objects when f=None
For background, see https://groups.google.com/d/msg/web2py/1LoXXrPlIGE/bX2u3CJ2BQAJ.
According to the book, calling `cache.model(key, f=None)` should cause `key` to be deleted from the cache. However, in `gluon.contrib.redis_cache.RedisClient.__call__`, setting `f=None` only deletes the cached object if it has expired.
This causes a problem with `@cache.action` because it relies on the `f=None` deletion method to delete pages that were cached with an invalid HTTP status.
We should change the behavior of the Redis cache to match that of the other cache models (i.e., setting `f=None` should _always_ delete the key from the cache).
Also, for extra safety, perhaps `@cache.action` should delete pages with an invalid status via `cache_model.clear(r'^%s$' % cache_key)` rather than `cache_model(cache_key, None)`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/contrib/redis_cache.py`
Content:
```
1 """
2 Developed by [email protected]
3 Released under web2py license because includes gluon/cache.py source code
4 """
5
6 try:
7 import cPickle as pickle
8 except:
9 import pickle
10 import time
11 import re
12 import logging
13 import thread
14 import random
15 from gluon import current
16 from gluon.cache import CacheAbstract
17 from gluon.contrib.redis_utils import acquire_lock, release_lock
18 from gluon.contrib.redis_utils import register_release_lock, RConnectionError
19
20 logger = logging.getLogger("web2py.cache.redis")
21
22 locker = thread.allocate_lock()
23
24
25 def RedisCache(redis_conn=None, debug=False, with_lock=False, fail_gracefully=False, db=None):
26 """
27 Usage example: put in models::
28
29 First of all install Redis
30 Ubuntu :
31 sudo apt-get install redis-server
32 sudo pip install redis
33
34 Then
35
36 from gluon.contrib.redis_utils import RConn
37 rconn = RConn()
38 from gluon.contrib.redis_cache import RedisCache
39 cache.redis = RedisCache(redis_conn=rconn, debug=True, with_lock=True)
40
41 Args:
42 redis_conn: a redis-like connection object
43 debug: if True adds to stats() the total_hits and misses
44 with_lock: sets the default locking mode for creating new keys.
45 By default is False (usualy when you choose Redis you do it
46 for performances reason)
47 When True, only one thread/process can set a value concurrently
48 fail_gracefully: if redis is unavailable, returns the value computing it
49 instead of raising an exception
50
51 It can be used pretty much the same as cache.ram()
52 When you use cache.redis directly you can use :
53
54 redis_key_and_var_name = cache.redis('redis_key_and_var_name', lambda or function,
55 time_expire=time.time(), with_lock=True)
56
57 to enforce locking. The with_lock parameter overrides the one set in the
58 cache.redis instance creation
59
60 cache.redis.stats()
61 returns a dictionary with statistics of Redis server
62 with one additional key ('w2p_keys') showing all keys currently set
63 from web2py with their TTL
64
65 A little wording on how keys are stored (and why the cache_it() function
66 and the clear() one look a little bit convoluted): there are a lot of
67 libraries that just store values and then use the KEYS command to delete it.
68 Until recent releases of this module, that technique was used here too.
69 In the need of deleting specific keys in a database with zillions keys in it
70 (other web2py apps, other applications in the need of a Redis stack) the
71 KEYS command is slow (it needs to scan every key in the database).
72 So, we use Redis 'sets' to store keys in "buckets"...
73 - every key created gets "indexed" in a bucket
74 - all buckets are indexed in a fixed key that never expires
75 - all keys generated within the same minute go in the same bucket
76 - every bucket is then set to expire when every key within it is expired
77 When we need to clear() cached keys:
78 - we tell Redis to SUNION all buckets
79 - gives us just the keys that are not expired yet
80 - buckets that are expired are removed from the fixed set
81 - we scan the keys and then delete them
82 """
83
84 locker.acquire()
85 try:
86 instance_name = 'redis_instance_' + current.request.application
87 if not hasattr(RedisCache, instance_name):
88 setattr(RedisCache, instance_name,
89 RedisClient(redis_conn=redis_conn, debug=debug,
90 with_lock=with_lock, fail_gracefully=fail_gracefully))
91 return getattr(RedisCache, instance_name)
92 finally:
93 locker.release()
94
95
96 class RedisClient(object):
97
98 meta_storage = {}
99 MAX_RETRIES = 5
100 RETRIES = 0
101
102 def __init__(self, redis_conn=None, debug=False,
103 with_lock=False, fail_gracefully=False):
104 self.request = current.request
105 self.debug = debug
106 self.with_lock = with_lock
107 self.fail_gracefully = fail_gracefully
108 self.prefix = "w2p:cache:%s:" % self.request.application
109 if self.request:
110 app = self.request.application
111 else:
112 app = ''
113
114 if app not in self.meta_storage:
115 self.storage = self.meta_storage[app] = {
116 CacheAbstract.cache_stats_name: {
117 'hit_total': 0,
118 'misses': 0,
119 }}
120 else:
121 self.storage = self.meta_storage[app]
122
123 self.cache_set_key = 'w2p:%s:___cache_set' % self.request.application
124
125 self.r_server = redis_conn
126 self._release_script = register_release_lock(self.r_server)
127
128 def initialize(self):
129 pass
130
131 def __call__(self, key, f, time_expire=300, with_lock=None):
132 if with_lock is None:
133 with_lock = self.with_lock
134 if time_expire is None:
135 time_expire = 24 * 60 * 60
136 newKey = self.__keyFormat__(key)
137 value = None
138 ttl = 0
139 try:
140 # is there a value
141 obj = self.r_server.get(newKey)
142 # what's its ttl
143 if obj:
144 ttl = self.r_server.ttl(newKey)
145 if ttl > time_expire:
146 obj = None
147 if obj:
148 # was cached
149 if self.debug:
150 self.r_server.incr('web2py_cache_statistics:hit_total')
151 value = pickle.loads(obj)
152 elif f is None:
153 # delete and never look back
154 self.r_server.delete(newKey)
155 else:
156 # naive distributed locking
157 if with_lock:
158 lock_key = '%s:__lock' % newKey
159 randomvalue = time.time()
160 al = acquire_lock(self.r_server, lock_key, randomvalue)
161 # someone may have computed it
162 obj = self.r_server.get(newKey)
163 if obj is None:
164 value = self.cache_it(newKey, f, time_expire)
165 else:
166 value = pickle.loads(obj)
167 release_lock(self, lock_key, al)
168 else:
169 # without distributed locking
170 value = self.cache_it(newKey, f, time_expire)
171 return value
172 except RConnectionError:
173 return self.retry_call(key, f, time_expire, with_lock)
174
175 def cache_it(self, key, f, time_expire):
176 if self.debug:
177 self.r_server.incr('web2py_cache_statistics:misses')
178 cache_set_key = self.cache_set_key
179 expire_at = int(time.time() + time_expire) + 120
180 bucket_key = "%s:%s" % (cache_set_key, expire_at / 60)
181 value = f()
182 value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
183 if time_expire == 0:
184 time_expire = 1
185 self.r_server.setex(key, time_expire, value_)
186 # print '%s will expire on %s: it goes in bucket %s' % (key, time.ctime(expire_at))
187 # print 'that will expire on %s' % (bucket_key, time.ctime(((expire_at / 60) + 1) * 60))
188 p = self.r_server.pipeline()
189 # add bucket to the fixed set
190 p.sadd(cache_set_key, bucket_key)
191 # sets the key
192 p.setex(key, time_expire, value_)
193 # add the key to the bucket
194 p.sadd(bucket_key, key)
195 # expire the bucket properly
196 p.expireat(bucket_key, ((expire_at / 60) + 1) * 60)
197 p.execute()
198 return value
199
200 def retry_call(self, key, f, time_expire, with_lock):
201 self.RETRIES += 1
202 if self.RETRIES <= self.MAX_RETRIES:
203 logger.error("sleeping %s seconds before reconnecting" % (2 * self.RETRIES))
204 time.sleep(2 * self.RETRIES)
205 if self.fail_gracefully:
206 self.RETRIES = 0
207 return f()
208 return self.__call__(key, f, time_expire, with_lock)
209 else:
210 self.RETRIES = 0
211 if self.fail_gracefully:
212 return f
213 raise RConnectionError('Redis instance is unavailable')
214
215 def increment(self, key, value=1):
216 try:
217 newKey = self.__keyFormat__(key)
218 return self.r_server.incr(newKey, value)
219 except RConnectionError:
220 return self.retry_increment(key, value)
221
222 def retry_increment(self, key, value):
223 self.RETRIES += 1
224 if self.RETRIES <= self.MAX_RETRIES:
225 logger.error("sleeping some seconds before reconnecting")
226 time.sleep(2 * self.RETRIES)
227 return self.increment(key, value)
228 else:
229 self.RETRIES = 0
230 raise RConnectionError('Redis instance is unavailable')
231
232 def clear(self, regex):
233 """
234 Auxiliary function called by `clear` to search and
235 clear cache entries
236 """
237 r = re.compile(regex)
238 # get all buckets
239 buckets = self.r_server.smembers(self.cache_set_key)
240 # get all keys in buckets
241 if buckets:
242 keys = self.r_server.sunion(buckets)
243 else:
244 return
245 prefix = self.prefix
246 pipe = self.r_server.pipeline()
247 for a in keys:
248 if r.match(str(a).replace(prefix, '', 1)):
249 pipe.delete(a)
250 if random.randrange(0, 100) < 10:
251 # do this just once in a while (10% chance)
252 self.clear_buckets(buckets)
253 pipe.execute()
254
255 def clear_buckets(self, buckets):
256 p = self.r_server.pipeline()
257 for b in buckets:
258 if not self.r_server.exists(b):
259 p.srem(self.cache_set_key, b)
260 p.execute()
261
262 def delete(self, key):
263 newKey = self.__keyFormat__(key)
264 return self.r_server.delete(newKey)
265
266 def stats(self):
267 stats_collector = self.r_server.info()
268 if self.debug:
269 stats_collector['w2p_stats'] = dict(
270 hit_total=self.r_server.get(
271 'web2py_cache_statistics:hit_total'),
272 misses=self.r_server.get('web2py_cache_statistics:misses')
273 )
274 stats_collector['w2p_keys'] = dict()
275
276 for a in self.r_server.keys("w2p:%s:*" % (
277 self.request.application)):
278 stats_collector['w2p_keys']["%s_expire_in_sec" % a] = self.r_server.ttl(a)
279 return stats_collector
280
281 def __keyFormat__(self, key):
282 return '%s%s' % (self.prefix, key.replace(' ', '_'))
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gluon/contrib/redis_cache.py b/gluon/contrib/redis_cache.py
--- a/gluon/contrib/redis_cache.py
+++ b/gluon/contrib/redis_cache.py
@@ -85,7 +85,7 @@
try:
instance_name = 'redis_instance_' + current.request.application
if not hasattr(RedisCache, instance_name):
- setattr(RedisCache, instance_name,
+ setattr(RedisCache, instance_name,
RedisClient(redis_conn=redis_conn, debug=debug,
with_lock=with_lock, fail_gracefully=fail_gracefully))
return getattr(RedisCache, instance_name)
@@ -137,6 +137,10 @@
value = None
ttl = 0
try:
+ if f is None:
+ # delete and never look back
+ self.r_server.delete(newKey)
+ return None
# is there a value
obj = self.r_server.get(newKey)
# what's its ttl
@@ -149,9 +153,6 @@
if self.debug:
self.r_server.incr('web2py_cache_statistics:hit_total')
value = pickle.loads(obj)
- elif f is None:
- # delete and never look back
- self.r_server.delete(newKey)
else:
# naive distributed locking
if with_lock:
|
{"golden_diff": "diff --git a/gluon/contrib/redis_cache.py b/gluon/contrib/redis_cache.py\n--- a/gluon/contrib/redis_cache.py\n+++ b/gluon/contrib/redis_cache.py\n@@ -85,7 +85,7 @@\n try:\n instance_name = 'redis_instance_' + current.request.application\n if not hasattr(RedisCache, instance_name):\n- setattr(RedisCache, instance_name, \n+ setattr(RedisCache, instance_name,\n RedisClient(redis_conn=redis_conn, debug=debug,\n with_lock=with_lock, fail_gracefully=fail_gracefully))\n return getattr(RedisCache, instance_name)\n@@ -137,6 +137,10 @@\n value = None\n ttl = 0\n try:\n+ if f is None:\n+ # delete and never look back\n+ self.r_server.delete(newKey)\n+ return None\n # is there a value\n obj = self.r_server.get(newKey)\n # what's its ttl\n@@ -149,9 +153,6 @@\n if self.debug:\n self.r_server.incr('web2py_cache_statistics:hit_total')\n value = pickle.loads(obj)\n- elif f is None:\n- # delete and never look back\n- self.r_server.delete(newKey)\n else:\n # naive distributed locking\n if with_lock:\n", "issue": "RedisCache fails to delete cached objects when f=None\nFor background, see https://groups.google.com/d/msg/web2py/1LoXXrPlIGE/bX2u3CJ2BQAJ.\n\nAccording to the book, calling `cache.model(key, f=None)` should cause `key` to be deleted from the cache. However, in `gluon.contrib.redis_cache.RedisClient.__call__`, setting `f=None` only deletes the cached object if it has expired.\n\nThis causes a problem with `@cache.action` because it relies on the `f=None` deletion method to delete pages that were cached with an invalid HTTP status.\n\nWe should change the behavior of the Redis cache to match that of the other cache models (i.e., setting `f=None` should _always_ delete the key from the cache).\n\nAlso, for extra safety, perhaps `@cache.action` should delete pages with an invalid status via `cache_model.clear(r'^%s$' % cache_key)` rather than `cache_model(cache_key, None)`.\n\n", "before_files": [{"content": "\"\"\"\nDeveloped by [email protected]\nReleased under web2py license because includes gluon/cache.py source code\n\"\"\"\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport time\nimport re\nimport logging\nimport thread\nimport random\nfrom gluon import current\nfrom gluon.cache import CacheAbstract\nfrom gluon.contrib.redis_utils import acquire_lock, release_lock\nfrom gluon.contrib.redis_utils import register_release_lock, RConnectionError\n\nlogger = logging.getLogger(\"web2py.cache.redis\")\n\nlocker = thread.allocate_lock()\n\n\ndef RedisCache(redis_conn=None, debug=False, with_lock=False, fail_gracefully=False, db=None):\n \"\"\"\n Usage example: put in models::\n\n First of all install Redis\n Ubuntu :\n sudo apt-get install redis-server\n sudo pip install redis\n\n Then\n\n from gluon.contrib.redis_utils import RConn\n rconn = RConn()\n from gluon.contrib.redis_cache import RedisCache\n cache.redis = RedisCache(redis_conn=rconn, debug=True, with_lock=True)\n\n Args:\n redis_conn: a redis-like connection object\n debug: if True adds to stats() the total_hits and misses\n with_lock: sets the default locking mode for creating new keys.\n By default is False (usualy when you choose Redis you do it\n for performances reason)\n When True, only one thread/process can set a value concurrently\n fail_gracefully: if redis is unavailable, returns the value computing it\n instead of raising an exception\n\n It can be used pretty much the same as cache.ram()\n When you use cache.redis directly you can use :\n\n redis_key_and_var_name = cache.redis('redis_key_and_var_name', lambda or function,\n time_expire=time.time(), with_lock=True)\n\n to enforce locking. The with_lock parameter overrides the one set in the\n cache.redis instance creation\n\n cache.redis.stats()\n returns a dictionary with statistics of Redis server\n with one additional key ('w2p_keys') showing all keys currently set\n from web2py with their TTL\n\n A little wording on how keys are stored (and why the cache_it() function\n and the clear() one look a little bit convoluted): there are a lot of\n libraries that just store values and then use the KEYS command to delete it.\n Until recent releases of this module, that technique was used here too.\n In the need of deleting specific keys in a database with zillions keys in it\n (other web2py apps, other applications in the need of a Redis stack) the\n KEYS command is slow (it needs to scan every key in the database).\n So, we use Redis 'sets' to store keys in \"buckets\"...\n - every key created gets \"indexed\" in a bucket\n - all buckets are indexed in a fixed key that never expires\n - all keys generated within the same minute go in the same bucket\n - every bucket is then set to expire when every key within it is expired\n When we need to clear() cached keys:\n - we tell Redis to SUNION all buckets\n - gives us just the keys that are not expired yet\n - buckets that are expired are removed from the fixed set\n - we scan the keys and then delete them\n \"\"\"\n\n locker.acquire()\n try:\n instance_name = 'redis_instance_' + current.request.application\n if not hasattr(RedisCache, instance_name):\n setattr(RedisCache, instance_name, \n RedisClient(redis_conn=redis_conn, debug=debug,\n with_lock=with_lock, fail_gracefully=fail_gracefully))\n return getattr(RedisCache, instance_name)\n finally:\n locker.release()\n\n\nclass RedisClient(object):\n\n meta_storage = {}\n MAX_RETRIES = 5\n RETRIES = 0\n\n def __init__(self, redis_conn=None, debug=False,\n with_lock=False, fail_gracefully=False):\n self.request = current.request\n self.debug = debug\n self.with_lock = with_lock\n self.fail_gracefully = fail_gracefully\n self.prefix = \"w2p:cache:%s:\" % self.request.application\n if self.request:\n app = self.request.application\n else:\n app = ''\n\n if app not in self.meta_storage:\n self.storage = self.meta_storage[app] = {\n CacheAbstract.cache_stats_name: {\n 'hit_total': 0,\n 'misses': 0,\n }}\n else:\n self.storage = self.meta_storage[app]\n\n self.cache_set_key = 'w2p:%s:___cache_set' % self.request.application\n\n self.r_server = redis_conn\n self._release_script = register_release_lock(self.r_server)\n\n def initialize(self):\n pass\n\n def __call__(self, key, f, time_expire=300, with_lock=None):\n if with_lock is None:\n with_lock = self.with_lock\n if time_expire is None:\n time_expire = 24 * 60 * 60\n newKey = self.__keyFormat__(key)\n value = None\n ttl = 0\n try:\n # is there a value\n obj = self.r_server.get(newKey)\n # what's its ttl\n if obj:\n ttl = self.r_server.ttl(newKey)\n if ttl > time_expire:\n obj = None\n if obj:\n # was cached\n if self.debug:\n self.r_server.incr('web2py_cache_statistics:hit_total')\n value = pickle.loads(obj)\n elif f is None:\n # delete and never look back\n self.r_server.delete(newKey)\n else:\n # naive distributed locking\n if with_lock:\n lock_key = '%s:__lock' % newKey\n randomvalue = time.time()\n al = acquire_lock(self.r_server, lock_key, randomvalue)\n # someone may have computed it\n obj = self.r_server.get(newKey)\n if obj is None:\n value = self.cache_it(newKey, f, time_expire)\n else:\n value = pickle.loads(obj)\n release_lock(self, lock_key, al)\n else:\n # without distributed locking\n value = self.cache_it(newKey, f, time_expire)\n return value\n except RConnectionError:\n return self.retry_call(key, f, time_expire, with_lock)\n\n def cache_it(self, key, f, time_expire):\n if self.debug:\n self.r_server.incr('web2py_cache_statistics:misses')\n cache_set_key = self.cache_set_key\n expire_at = int(time.time() + time_expire) + 120\n bucket_key = \"%s:%s\" % (cache_set_key, expire_at / 60)\n value = f()\n value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)\n if time_expire == 0:\n time_expire = 1\n self.r_server.setex(key, time_expire, value_)\n # print '%s will expire on %s: it goes in bucket %s' % (key, time.ctime(expire_at))\n # print 'that will expire on %s' % (bucket_key, time.ctime(((expire_at / 60) + 1) * 60))\n p = self.r_server.pipeline()\n # add bucket to the fixed set\n p.sadd(cache_set_key, bucket_key)\n # sets the key\n p.setex(key, time_expire, value_)\n # add the key to the bucket\n p.sadd(bucket_key, key)\n # expire the bucket properly\n p.expireat(bucket_key, ((expire_at / 60) + 1) * 60)\n p.execute()\n return value\n\n def retry_call(self, key, f, time_expire, with_lock):\n self.RETRIES += 1\n if self.RETRIES <= self.MAX_RETRIES:\n logger.error(\"sleeping %s seconds before reconnecting\" % (2 * self.RETRIES))\n time.sleep(2 * self.RETRIES)\n if self.fail_gracefully:\n self.RETRIES = 0\n return f()\n return self.__call__(key, f, time_expire, with_lock)\n else:\n self.RETRIES = 0\n if self.fail_gracefully:\n return f\n raise RConnectionError('Redis instance is unavailable')\n\n def increment(self, key, value=1):\n try:\n newKey = self.__keyFormat__(key)\n return self.r_server.incr(newKey, value)\n except RConnectionError:\n return self.retry_increment(key, value)\n\n def retry_increment(self, key, value):\n self.RETRIES += 1\n if self.RETRIES <= self.MAX_RETRIES:\n logger.error(\"sleeping some seconds before reconnecting\")\n time.sleep(2 * self.RETRIES)\n return self.increment(key, value)\n else:\n self.RETRIES = 0\n raise RConnectionError('Redis instance is unavailable')\n\n def clear(self, regex):\n \"\"\"\n Auxiliary function called by `clear` to search and\n clear cache entries\n \"\"\"\n r = re.compile(regex)\n # get all buckets\n buckets = self.r_server.smembers(self.cache_set_key)\n # get all keys in buckets\n if buckets:\n keys = self.r_server.sunion(buckets)\n else:\n return\n prefix = self.prefix\n pipe = self.r_server.pipeline()\n for a in keys:\n if r.match(str(a).replace(prefix, '', 1)):\n pipe.delete(a)\n if random.randrange(0, 100) < 10:\n # do this just once in a while (10% chance)\n self.clear_buckets(buckets)\n pipe.execute()\n\n def clear_buckets(self, buckets):\n p = self.r_server.pipeline()\n for b in buckets:\n if not self.r_server.exists(b):\n p.srem(self.cache_set_key, b)\n p.execute()\n\n def delete(self, key):\n newKey = self.__keyFormat__(key)\n return self.r_server.delete(newKey)\n\n def stats(self):\n stats_collector = self.r_server.info()\n if self.debug:\n stats_collector['w2p_stats'] = dict(\n hit_total=self.r_server.get(\n 'web2py_cache_statistics:hit_total'),\n misses=self.r_server.get('web2py_cache_statistics:misses')\n )\n stats_collector['w2p_keys'] = dict()\n\n for a in self.r_server.keys(\"w2p:%s:*\" % (\n self.request.application)):\n stats_collector['w2p_keys'][\"%s_expire_in_sec\" % a] = self.r_server.ttl(a)\n return stats_collector\n\n def __keyFormat__(self, key):\n return '%s%s' % (self.prefix, key.replace(' ', '_'))\n", "path": "gluon/contrib/redis_cache.py"}], "after_files": [{"content": "\"\"\"\nDeveloped by [email protected]\nReleased under web2py license because includes gluon/cache.py source code\n\"\"\"\n\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport time\nimport re\nimport logging\nimport thread\nimport random\nfrom gluon import current\nfrom gluon.cache import CacheAbstract\nfrom gluon.contrib.redis_utils import acquire_lock, release_lock\nfrom gluon.contrib.redis_utils import register_release_lock, RConnectionError\n\nlogger = logging.getLogger(\"web2py.cache.redis\")\n\nlocker = thread.allocate_lock()\n\n\ndef RedisCache(redis_conn=None, debug=False, with_lock=False, fail_gracefully=False, db=None):\n \"\"\"\n Usage example: put in models::\n\n First of all install Redis\n Ubuntu :\n sudo apt-get install redis-server\n sudo pip install redis\n\n Then\n\n from gluon.contrib.redis_utils import RConn\n rconn = RConn()\n from gluon.contrib.redis_cache import RedisCache\n cache.redis = RedisCache(redis_conn=rconn, debug=True, with_lock=True)\n\n Args:\n redis_conn: a redis-like connection object\n debug: if True adds to stats() the total_hits and misses\n with_lock: sets the default locking mode for creating new keys.\n By default is False (usualy when you choose Redis you do it\n for performances reason)\n When True, only one thread/process can set a value concurrently\n fail_gracefully: if redis is unavailable, returns the value computing it\n instead of raising an exception\n\n It can be used pretty much the same as cache.ram()\n When you use cache.redis directly you can use :\n\n redis_key_and_var_name = cache.redis('redis_key_and_var_name', lambda or function,\n time_expire=time.time(), with_lock=True)\n\n to enforce locking. The with_lock parameter overrides the one set in the\n cache.redis instance creation\n\n cache.redis.stats()\n returns a dictionary with statistics of Redis server\n with one additional key ('w2p_keys') showing all keys currently set\n from web2py with their TTL\n\n A little wording on how keys are stored (and why the cache_it() function\n and the clear() one look a little bit convoluted): there are a lot of\n libraries that just store values and then use the KEYS command to delete it.\n Until recent releases of this module, that technique was used here too.\n In the need of deleting specific keys in a database with zillions keys in it\n (other web2py apps, other applications in the need of a Redis stack) the\n KEYS command is slow (it needs to scan every key in the database).\n So, we use Redis 'sets' to store keys in \"buckets\"...\n - every key created gets \"indexed\" in a bucket\n - all buckets are indexed in a fixed key that never expires\n - all keys generated within the same minute go in the same bucket\n - every bucket is then set to expire when every key within it is expired\n When we need to clear() cached keys:\n - we tell Redis to SUNION all buckets\n - gives us just the keys that are not expired yet\n - buckets that are expired are removed from the fixed set\n - we scan the keys and then delete them\n \"\"\"\n\n locker.acquire()\n try:\n instance_name = 'redis_instance_' + current.request.application\n if not hasattr(RedisCache, instance_name):\n setattr(RedisCache, instance_name,\n RedisClient(redis_conn=redis_conn, debug=debug,\n with_lock=with_lock, fail_gracefully=fail_gracefully))\n return getattr(RedisCache, instance_name)\n finally:\n locker.release()\n\n\nclass RedisClient(object):\n\n meta_storage = {}\n MAX_RETRIES = 5\n RETRIES = 0\n\n def __init__(self, redis_conn=None, debug=False,\n with_lock=False, fail_gracefully=False):\n self.request = current.request\n self.debug = debug\n self.with_lock = with_lock\n self.fail_gracefully = fail_gracefully\n self.prefix = \"w2p:cache:%s:\" % self.request.application\n if self.request:\n app = self.request.application\n else:\n app = ''\n\n if app not in self.meta_storage:\n self.storage = self.meta_storage[app] = {\n CacheAbstract.cache_stats_name: {\n 'hit_total': 0,\n 'misses': 0,\n }}\n else:\n self.storage = self.meta_storage[app]\n\n self.cache_set_key = 'w2p:%s:___cache_set' % self.request.application\n\n self.r_server = redis_conn\n self._release_script = register_release_lock(self.r_server)\n\n def initialize(self):\n pass\n\n def __call__(self, key, f, time_expire=300, with_lock=None):\n if with_lock is None:\n with_lock = self.with_lock\n if time_expire is None:\n time_expire = 24 * 60 * 60\n newKey = self.__keyFormat__(key)\n value = None\n ttl = 0\n try:\n if f is None:\n # delete and never look back\n self.r_server.delete(newKey)\n return None\n # is there a value\n obj = self.r_server.get(newKey)\n # what's its ttl\n if obj:\n ttl = self.r_server.ttl(newKey)\n if ttl > time_expire:\n obj = None\n if obj:\n # was cached\n if self.debug:\n self.r_server.incr('web2py_cache_statistics:hit_total')\n value = pickle.loads(obj)\n else:\n # naive distributed locking\n if with_lock:\n lock_key = '%s:__lock' % newKey\n randomvalue = time.time()\n al = acquire_lock(self.r_server, lock_key, randomvalue)\n # someone may have computed it\n obj = self.r_server.get(newKey)\n if obj is None:\n value = self.cache_it(newKey, f, time_expire)\n else:\n value = pickle.loads(obj)\n release_lock(self, lock_key, al)\n else:\n # without distributed locking\n value = self.cache_it(newKey, f, time_expire)\n return value\n except RConnectionError:\n return self.retry_call(key, f, time_expire, with_lock)\n\n def cache_it(self, key, f, time_expire):\n if self.debug:\n self.r_server.incr('web2py_cache_statistics:misses')\n cache_set_key = self.cache_set_key\n expire_at = int(time.time() + time_expire) + 120\n bucket_key = \"%s:%s\" % (cache_set_key, expire_at / 60)\n value = f()\n value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)\n if time_expire == 0:\n time_expire = 1\n self.r_server.setex(key, time_expire, value_)\n # print '%s will expire on %s: it goes in bucket %s' % (key, time.ctime(expire_at))\n # print 'that will expire on %s' % (bucket_key, time.ctime(((expire_at / 60) + 1) * 60))\n p = self.r_server.pipeline()\n # add bucket to the fixed set\n p.sadd(cache_set_key, bucket_key)\n # sets the key\n p.setex(key, time_expire, value_)\n # add the key to the bucket\n p.sadd(bucket_key, key)\n # expire the bucket properly\n p.expireat(bucket_key, ((expire_at / 60) + 1) * 60)\n p.execute()\n return value\n\n def retry_call(self, key, f, time_expire, with_lock):\n self.RETRIES += 1\n if self.RETRIES <= self.MAX_RETRIES:\n logger.error(\"sleeping %s seconds before reconnecting\" % (2 * self.RETRIES))\n time.sleep(2 * self.RETRIES)\n if self.fail_gracefully:\n self.RETRIES = 0\n return f()\n return self.__call__(key, f, time_expire, with_lock)\n else:\n self.RETRIES = 0\n if self.fail_gracefully:\n return f\n raise RConnectionError('Redis instance is unavailable')\n\n def increment(self, key, value=1):\n try:\n newKey = self.__keyFormat__(key)\n return self.r_server.incr(newKey, value)\n except RConnectionError:\n return self.retry_increment(key, value)\n\n def retry_increment(self, key, value):\n self.RETRIES += 1\n if self.RETRIES <= self.MAX_RETRIES:\n logger.error(\"sleeping some seconds before reconnecting\")\n time.sleep(2 * self.RETRIES)\n return self.increment(key, value)\n else:\n self.RETRIES = 0\n raise RConnectionError('Redis instance is unavailable')\n\n def clear(self, regex):\n \"\"\"\n Auxiliary function called by `clear` to search and\n clear cache entries\n \"\"\"\n r = re.compile(regex)\n # get all buckets\n buckets = self.r_server.smembers(self.cache_set_key)\n # get all keys in buckets\n if buckets:\n keys = self.r_server.sunion(buckets)\n else:\n return\n prefix = self.prefix\n pipe = self.r_server.pipeline()\n for a in keys:\n if r.match(str(a).replace(prefix, '', 1)):\n pipe.delete(a)\n if random.randrange(0, 100) < 10:\n # do this just once in a while (10% chance)\n self.clear_buckets(buckets)\n pipe.execute()\n\n def clear_buckets(self, buckets):\n p = self.r_server.pipeline()\n for b in buckets:\n if not self.r_server.exists(b):\n p.srem(self.cache_set_key, b)\n p.execute()\n\n def delete(self, key):\n newKey = self.__keyFormat__(key)\n return self.r_server.delete(newKey)\n\n def stats(self):\n stats_collector = self.r_server.info()\n if self.debug:\n stats_collector['w2p_stats'] = dict(\n hit_total=self.r_server.get(\n 'web2py_cache_statistics:hit_total'),\n misses=self.r_server.get('web2py_cache_statistics:misses')\n )\n stats_collector['w2p_keys'] = dict()\n\n for a in self.r_server.keys(\"w2p:%s:*\" % (\n self.request.application)):\n stats_collector['w2p_keys'][\"%s_expire_in_sec\" % a] = self.r_server.ttl(a)\n return stats_collector\n\n def __keyFormat__(self, key):\n return '%s%s' % (self.prefix, key.replace(' ', '_'))\n", "path": "gluon/contrib/redis_cache.py"}]}
| 3,600 | 312 |
gh_patches_debug_5350
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-241
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Routes: Uncomment VPC Peering Connection test dependent on Boto > 2.32.1
One of the tests for routes is for a route to VPC Peering Connection, which will be supported in Boto 2.32.2, though that version is not yet packaged and released.
Once a new https://github.com/boto/boto version is released, then `test_routes_vpc_peering_connection` in `tests/test_ec2/test_route_tables.py` should be uncommented, the `@requires_boto_gte("2.32.2")` version number should be confirmed (in case Boto releases as "2.33.0" instead, for instance) and `.travis.yml` should be updated with the new Boto version as well.
It probably makes sense to simply change `BOTO_VERSION=2.32.1` to the new version number, rather than adding a new BOTO_VERSION entry, since this version is already the "VPC peering connection" version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/ec2/responses/route_tables.py`
Content:
```
1 from __future__ import unicode_literals
2 from jinja2 import Template
3
4 from moto.core.responses import BaseResponse
5 from moto.ec2.models import ec2_backend
6 from moto.ec2.utils import route_table_ids_from_querystring, filters_from_querystring, optional_from_querystring
7
8
9 class RouteTables(BaseResponse):
10 def associate_route_table(self):
11 route_table_id = self.querystring.get('RouteTableId')[0]
12 subnet_id = self.querystring.get('SubnetId')[0]
13 association_id = ec2_backend.associate_route_table(route_table_id, subnet_id)
14 template = Template(ASSOCIATE_ROUTE_TABLE_RESPONSE)
15 return template.render(association_id=association_id)
16
17 def create_route(self):
18 route_table_id = self.querystring.get('RouteTableId')[0]
19 destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]
20
21 internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)
22 instance_id = optional_from_querystring('InstanceId', self.querystring)
23 interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)
24 pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)
25
26 route = ec2_backend.create_route(route_table_id, destination_cidr_block,
27 gateway_id=internet_gateway_id,
28 instance_id=instance_id,
29 interface_id=interface_id,
30 vpc_peering_connection_id=pcx_id)
31
32 template = Template(CREATE_ROUTE_RESPONSE)
33 return template.render()
34
35 def create_route_table(self):
36 vpc_id = self.querystring.get('VpcId')[0]
37 route_table = ec2_backend.create_route_table(vpc_id)
38 template = Template(CREATE_ROUTE_TABLE_RESPONSE)
39 return template.render(route_table=route_table)
40
41 def delete_route(self):
42 route_table_id = self.querystring.get('RouteTableId')[0]
43 destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]
44 ec2_backend.delete_route(route_table_id, destination_cidr_block)
45 template = Template(DELETE_ROUTE_RESPONSE)
46 return template.render()
47
48 def delete_route_table(self):
49 route_table_id = self.querystring.get('RouteTableId')[0]
50 ec2_backend.delete_route_table(route_table_id)
51 template = Template(DELETE_ROUTE_TABLE_RESPONSE)
52 return template.render()
53
54 def describe_route_tables(self):
55 route_table_ids = route_table_ids_from_querystring(self.querystring)
56 filters = filters_from_querystring(self.querystring)
57 route_tables = ec2_backend.get_all_route_tables(route_table_ids, filters)
58 template = Template(DESCRIBE_ROUTE_TABLES_RESPONSE)
59 return template.render(route_tables=route_tables)
60
61 def disassociate_route_table(self):
62 association_id = self.querystring.get('AssociationId')[0]
63 ec2_backend.disassociate_route_table(association_id)
64 template = Template(DISASSOCIATE_ROUTE_TABLE_RESPONSE)
65 return template.render()
66
67 def replace_route(self):
68 route_table_id = self.querystring.get('RouteTableId')[0]
69 destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]
70
71 internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)
72 instance_id = optional_from_querystring('InstanceId', self.querystring)
73 interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)
74 pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)
75
76 route = ec2_backend.replace_route(route_table_id, destination_cidr_block,
77 gateway_id=internet_gateway_id,
78 instance_id=instance_id,
79 interface_id=interface_id,
80 vpc_peering_connection_id=pcx_id)
81
82 template = Template(REPLACE_ROUTE_RESPONSE)
83 return template.render()
84
85 def replace_route_table_association(self):
86 route_table_id = self.querystring.get('RouteTableId')[0]
87 association_id = self.querystring.get('AssociationId')[0]
88 new_association_id = ec2_backend.replace_route_table_association(association_id, route_table_id)
89 template = Template(REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE)
90 return template.render(association_id=new_association_id)
91
92
93 CREATE_ROUTE_RESPONSE = """
94 <CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
95 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
96 <return>true</return>
97 </CreateRouteResponse>
98 """
99
100 REPLACE_ROUTE_RESPONSE = """
101 <ReplaceRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
102 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
103 <return>true</return>
104 </ReplaceRouteResponse>
105 """
106
107 CREATE_ROUTE_TABLE_RESPONSE = """
108 <CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
109 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
110 <routeTable>
111 <routeTableId>{{ route_table.id }}</routeTableId>
112 <vpcId>{{ route_table.vpc_id }}</vpcId>
113 <routeSet>
114 {% for route in route_table.routes.values() %}
115 {% if route.local %}
116 <item>
117 <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>
118 <gatewayId>local</gatewayId>
119 <state>active</state>
120 </item>
121 {% endif %}
122 {% endfor %}
123 </routeSet>
124 <associationSet/>
125 <tagSet/>
126 </routeTable>
127 </CreateRouteTableResponse>
128 """
129
130 DESCRIBE_ROUTE_TABLES_RESPONSE = """
131 <DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-08-15/">
132 <requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
133 <routeTableSet>
134 {% for route_table in route_tables %}
135 <item>
136 <routeTableId>{{ route_table.id }}</routeTableId>
137 <vpcId>{{ route_table.vpc_id }}</vpcId>
138 <routeSet>
139 {% for route in route_table.routes.values() %}
140 <item>
141 <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>
142 {% if route.local %}
143 <gatewayId>local</gatewayId>
144 <origin>CreateRouteTable</origin>
145 <state>active</state>
146 {% endif %}
147 {% if route.internet_gateway %}
148 <gatewayId>{{ route.internet_gateway.id }}</gatewayId>
149 <origin>CreateRoute</origin>
150 <state>active</state>
151 {% endif %}
152 {% if route.instance %}
153 <instanceId>{{ route.instance.id }}</instanceId>
154 <origin>CreateRoute</origin>
155 <state>active</state>
156 {% endif %}
157 {% if route.vpc_pcx %}
158 <origin>CreateRoute</origin>
159 <state>blackhole</state>
160 {% endif %}
161 </item>
162 {% endfor %}
163 </routeSet>
164 <associationSet>
165 {% for association_id,subnet_id in route_table.associations.items() %}
166 <item>
167 <routeTableAssociationId>{{ association_id }}</routeTableAssociationId>
168 <routeTableId>{{ route_table.id }}</routeTableId>
169 <main>false</main>
170 <subnetId>{{ subnet_id }}</subnetId>
171 </item>
172 {% endfor %}
173 </associationSet>
174 <tagSet/>
175 </item>
176 {% endfor %}
177 </routeTableSet>
178 </DescribeRouteTablesResponse>
179 """
180
181 DELETE_ROUTE_RESPONSE = """
182 <DeleteRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
183 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
184 <return>true</return>
185 </DeleteRouteResponse>
186 """
187
188 DELETE_ROUTE_TABLE_RESPONSE = """
189 <DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-07-15/">
190 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
191 <return>true</return>
192 </DeleteRouteTableResponse>
193 """
194
195 ASSOCIATE_ROUTE_TABLE_RESPONSE = """
196 <AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
197 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
198 <associationId>{{ association_id }}</associationId>
199 </AssociateRouteTableResponse>
200 """
201
202 DISASSOCIATE_ROUTE_TABLE_RESPONSE = """
203 <DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
204 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
205 <return>true</return>
206 </DisassociateRouteTableResponse>
207 """
208
209 REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE = """
210 <ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2014-06-15/">
211 <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
212 <newAssociationId>{{ association_id }}</newAssociationId>
213 </ReplaceRouteTableAssociationResponse>
214 """
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py
--- a/moto/ec2/responses/route_tables.py
+++ b/moto/ec2/responses/route_tables.py
@@ -144,6 +144,7 @@
<state>active</state>
{% endif %}
{% if route.vpc_pcx %}
+ <vpcPeeringConnectionId>{{ route.vpc_pcx.id }}</vpcPeeringConnectionId>
<origin>CreateRoute</origin>
<state>blackhole</state>
{% endif %}
|
{"golden_diff": "diff --git a/moto/ec2/responses/route_tables.py b/moto/ec2/responses/route_tables.py\n--- a/moto/ec2/responses/route_tables.py\n+++ b/moto/ec2/responses/route_tables.py\n@@ -144,6 +144,7 @@\n <state>active</state>\n {% endif %}\n {% if route.vpc_pcx %}\n+ <vpcPeeringConnectionId>{{ route.vpc_pcx.id }}</vpcPeeringConnectionId>\n <origin>CreateRoute</origin>\n <state>blackhole</state>\n {% endif %}\n", "issue": "Routes: Uncomment VPC Peering Connection test dependent on Boto > 2.32.1\nOne of the tests for routes is for a route to VPC Peering Connection, which will be supported in Boto 2.32.2, though that version is not yet packaged and released.\n\nOnce a new https://github.com/boto/boto version is released, then `test_routes_vpc_peering_connection` in `tests/test_ec2/test_route_tables.py` should be uncommented, the `@requires_boto_gte(\"2.32.2\")` version number should be confirmed (in case Boto releases as \"2.33.0\" instead, for instance) and `.travis.yml` should be updated with the new Boto version as well.\n\nIt probably makes sense to simply change `BOTO_VERSION=2.32.1` to the new version number, rather than adding a new BOTO_VERSION entry, since this version is already the \"VPC peering connection\" version.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom jinja2 import Template\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.models import ec2_backend\nfrom moto.ec2.utils import route_table_ids_from_querystring, filters_from_querystring, optional_from_querystring\n\n\nclass RouteTables(BaseResponse):\n def associate_route_table(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n subnet_id = self.querystring.get('SubnetId')[0]\n association_id = ec2_backend.associate_route_table(route_table_id, subnet_id)\n template = Template(ASSOCIATE_ROUTE_TABLE_RESPONSE)\n return template.render(association_id=association_id)\n\n def create_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n\n internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)\n instance_id = optional_from_querystring('InstanceId', self.querystring)\n interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)\n pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)\n\n route = ec2_backend.create_route(route_table_id, destination_cidr_block,\n gateway_id=internet_gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=pcx_id)\n\n template = Template(CREATE_ROUTE_RESPONSE)\n return template.render()\n\n def create_route_table(self):\n vpc_id = self.querystring.get('VpcId')[0]\n route_table = ec2_backend.create_route_table(vpc_id)\n template = Template(CREATE_ROUTE_TABLE_RESPONSE)\n return template.render(route_table=route_table)\n\n def delete_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n ec2_backend.delete_route(route_table_id, destination_cidr_block)\n template = Template(DELETE_ROUTE_RESPONSE)\n return template.render()\n\n def delete_route_table(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n ec2_backend.delete_route_table(route_table_id)\n template = Template(DELETE_ROUTE_TABLE_RESPONSE)\n return template.render()\n\n def describe_route_tables(self):\n route_table_ids = route_table_ids_from_querystring(self.querystring)\n filters = filters_from_querystring(self.querystring)\n route_tables = ec2_backend.get_all_route_tables(route_table_ids, filters)\n template = Template(DESCRIBE_ROUTE_TABLES_RESPONSE)\n return template.render(route_tables=route_tables)\n\n def disassociate_route_table(self):\n association_id = self.querystring.get('AssociationId')[0]\n ec2_backend.disassociate_route_table(association_id)\n template = Template(DISASSOCIATE_ROUTE_TABLE_RESPONSE)\n return template.render()\n\n def replace_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n\n internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)\n instance_id = optional_from_querystring('InstanceId', self.querystring)\n interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)\n pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)\n\n route = ec2_backend.replace_route(route_table_id, destination_cidr_block,\n gateway_id=internet_gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=pcx_id)\n\n template = Template(REPLACE_ROUTE_RESPONSE)\n return template.render()\n\n def replace_route_table_association(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n association_id = self.querystring.get('AssociationId')[0]\n new_association_id = ec2_backend.replace_route_table_association(association_id, route_table_id)\n template = Template(REPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE)\n return template.render(association_id=new_association_id)\n\n\nCREATE_ROUTE_RESPONSE = \"\"\"\n<CreateRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</CreateRouteResponse>\n\"\"\"\n\nREPLACE_ROUTE_RESPONSE = \"\"\"\n<ReplaceRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</ReplaceRouteResponse>\n\"\"\"\n\nCREATE_ROUTE_TABLE_RESPONSE = \"\"\"\n<CreateRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <routeTable>\n <routeTableId>{{ route_table.id }}</routeTableId>\n <vpcId>{{ route_table.vpc_id }}</vpcId>\n <routeSet>\n {% for route in route_table.routes.values() %}\n {% if route.local %}\n <item>\n <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>\n <gatewayId>local</gatewayId>\n <state>active</state>\n </item>\n {% endif %}\n {% endfor %}\n </routeSet>\n <associationSet/>\n <tagSet/>\n </routeTable>\n</CreateRouteTableResponse>\n\"\"\"\n\nDESCRIBE_ROUTE_TABLES_RESPONSE = \"\"\"\n<DescribeRouteTablesResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-08-15/\">\n <requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>\n <routeTableSet>\n {% for route_table in route_tables %}\n <item>\n <routeTableId>{{ route_table.id }}</routeTableId>\n <vpcId>{{ route_table.vpc_id }}</vpcId>\n <routeSet>\n {% for route in route_table.routes.values() %}\n <item>\n <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>\n {% if route.local %}\n <gatewayId>local</gatewayId>\n <origin>CreateRouteTable</origin>\n <state>active</state>\n {% endif %}\n {% if route.internet_gateway %}\n <gatewayId>{{ route.internet_gateway.id }}</gatewayId>\n <origin>CreateRoute</origin>\n <state>active</state>\n {% endif %}\n {% if route.instance %}\n <instanceId>{{ route.instance.id }}</instanceId>\n <origin>CreateRoute</origin>\n <state>active</state>\n {% endif %}\n {% if route.vpc_pcx %}\n <origin>CreateRoute</origin>\n <state>blackhole</state>\n {% endif %}\n </item>\n {% endfor %}\n </routeSet>\n <associationSet>\n {% for association_id,subnet_id in route_table.associations.items() %}\n <item>\n <routeTableAssociationId>{{ association_id }}</routeTableAssociationId>\n <routeTableId>{{ route_table.id }}</routeTableId>\n <main>false</main>\n <subnetId>{{ subnet_id }}</subnetId>\n </item>\n {% endfor %}\n </associationSet>\n <tagSet/>\n </item>\n {% endfor %}\n </routeTableSet>\n</DescribeRouteTablesResponse>\n\"\"\"\n\nDELETE_ROUTE_RESPONSE = \"\"\"\n<DeleteRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteRouteResponse>\n\"\"\"\n\nDELETE_ROUTE_TABLE_RESPONSE = \"\"\"\n<DeleteRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteRouteTableResponse>\n\"\"\"\n\nASSOCIATE_ROUTE_TABLE_RESPONSE = \"\"\"\n<AssociateRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2014-06-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <associationId>{{ association_id }}</associationId>\n</AssociateRouteTableResponse>\n\"\"\"\n\nDISASSOCIATE_ROUTE_TABLE_RESPONSE = \"\"\"\n<DisassociateRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2014-06-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DisassociateRouteTableResponse>\n\"\"\"\n\nREPLACE_ROUTE_TABLE_ASSOCIATION_RESPONSE = \"\"\"\n<ReplaceRouteTableAssociationResponse xmlns=\"http://ec2.amazonaws.com/doc/2014-06-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <newAssociationId>{{ association_id }}</newAssociationId>\n</ReplaceRouteTableAssociationResponse>\n\"\"\"\n", "path": "moto/ec2/responses/route_tables.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom jinja2 import Template\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.models import ec2_backend\nfrom moto.ec2.utils import route_table_ids_from_querystring, filters_from_querystring, optional_from_querystring\n\n\nclass RouteTables(BaseResponse):\n def associate_route_table(self):\n raise NotImplementedError('RouteTables(AmazonVPC).associate_route_table is not yet implemented')\n\n def create_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n\n internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)\n instance_id = optional_from_querystring('InstanceId', self.querystring)\n interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)\n pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)\n\n route = ec2_backend.create_route(route_table_id, destination_cidr_block,\n gateway_id=internet_gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=pcx_id)\n\n template = Template(CREATE_ROUTE_RESPONSE)\n return template.render()\n\n def create_route_table(self):\n vpc_id = self.querystring.get('VpcId')[0]\n route_table = ec2_backend.create_route_table(vpc_id)\n template = Template(CREATE_ROUTE_TABLE_RESPONSE)\n return template.render(route_table=route_table)\n\n def delete_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n ec2_backend.delete_route(route_table_id, destination_cidr_block)\n template = Template(DELETE_ROUTE_RESPONSE)\n return template.render()\n\n def delete_route_table(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n ec2_backend.delete_route_table(route_table_id)\n template = Template(DELETE_ROUTE_TABLE_RESPONSE)\n return template.render()\n\n def describe_route_tables(self):\n route_table_ids = route_table_ids_from_querystring(self.querystring)\n filters = filters_from_querystring(self.querystring)\n route_tables = ec2_backend.get_all_route_tables(route_table_ids, filters)\n template = Template(DESCRIBE_ROUTE_TABLES_RESPONSE)\n return template.render(route_tables=route_tables)\n\n def disassociate_route_table(self):\n raise NotImplementedError('RouteTables(AmazonVPC).disassociate_route_table is not yet implemented')\n\n def replace_route(self):\n route_table_id = self.querystring.get('RouteTableId')[0]\n destination_cidr_block = self.querystring.get('DestinationCidrBlock')[0]\n\n internet_gateway_id = optional_from_querystring('GatewayId', self.querystring)\n instance_id = optional_from_querystring('InstanceId', self.querystring)\n interface_id = optional_from_querystring('NetworkInterfaceId', self.querystring)\n pcx_id = optional_from_querystring('VpcPeeringConnectionId', self.querystring)\n\n route = ec2_backend.replace_route(route_table_id, destination_cidr_block,\n gateway_id=internet_gateway_id,\n instance_id=instance_id,\n interface_id=interface_id,\n vpc_peering_connection_id=pcx_id)\n\n template = Template(REPLACE_ROUTE_RESPONSE)\n return template.render()\n\n def replace_route_table_association(self):\n raise NotImplementedError('RouteTables(AmazonVPC).replace_route_table_association is not yet implemented')\n\n\nCREATE_ROUTE_RESPONSE = \"\"\"\n<CreateRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</CreateRouteResponse>\n\"\"\"\n\nREPLACE_ROUTE_RESPONSE = \"\"\"\n<ReplaceRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</ReplaceRouteResponse>\n\"\"\"\n\nCREATE_ROUTE_TABLE_RESPONSE = \"\"\"\n<CreateRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <routeTable>\n <routeTableId>{{ route_table.id }}</routeTableId>\n <vpcId>{{ route_table.vpc_id }}</vpcId>\n <routeSet>\n {% for route in route_table.routes.values() %}\n {% if route.local %}\n <item>\n <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>\n <gatewayId>local</gatewayId>\n <state>active</state>\n </item>\n {% endif %}\n {% endfor %}\n </routeSet>\n <associationSet/>\n <tagSet/>\n </routeTable>\n</CreateRouteTableResponse>\n\"\"\"\n\nDESCRIBE_ROUTE_TABLES_RESPONSE = \"\"\"\n<DescribeRouteTablesResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-08-15/\">\n <requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>\n <routeTableSet>\n {% for route_table in route_tables %}\n <item>\n <routeTableId>{{ route_table.id }}</routeTableId>\n <vpcId>{{ route_table.vpc_id }}</vpcId>\n <routeSet>\n {% for route in route_table.routes.values() %}\n <item>\n <destinationCidrBlock>{{ route.destination_cidr_block }}</destinationCidrBlock>\n {% if route.local %}\n <gatewayId>local</gatewayId>\n <origin>CreateRouteTable</origin>\n <state>active</state>\n {% endif %}\n {% if route.internet_gateway %}\n <gatewayId>{{ route.internet_gateway.id }}</gatewayId>\n <origin>CreateRoute</origin>\n <state>active</state>\n {% endif %}\n {% if route.instance %}\n <instanceId>{{ route.instance.id }}</instanceId>\n <origin>CreateRoute</origin>\n <state>active</state>\n {% endif %}\n {% if route.vpc_pcx %}\n <vpcPeeringConnectionId>{{ route.vpc_pcx.id }}</vpcPeeringConnectionId>\n <origin>CreateRoute</origin>\n <state>blackhole</state>\n {% endif %}\n </item>\n {% endfor %}\n </routeSet>\n <associationSet>\n {% if route_table.association_id %}\n <item>\n <routeTableAssociationId>{{ route_table.association_id }}</routeTableAssociationId>\n <routeTableId>{{ route_table.id }}</routeTableId>\n {% if not route_table.subnet_id %}\n <main>true</main>\n {% endif %}\n {% if route_table.subnet_id %}\n <subnetId>{{ route_table.subnet_id }}</subnetId>\n {% endif %}\n </item>\n {% endif %}\n </associationSet>\n <tagSet/>\n </item>\n {% endfor %}\n </routeTableSet>\n</DescribeRouteTablesResponse>\n\"\"\"\n\nDELETE_ROUTE_RESPONSE = \"\"\"\n<DeleteRouteResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteRouteResponse>\n\"\"\"\n\nDELETE_ROUTE_TABLE_RESPONSE = \"\"\"\n<DeleteRouteTableResponse xmlns=\"http://ec2.amazonaws.com/doc/2013-07-15/\">\n <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>\n <return>true</return>\n</DeleteRouteTableResponse>\n\"\"\"\n", "path": "moto/ec2/responses/route_tables.py"}]}
| 3,131 | 132 |
gh_patches_debug_23973
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-579
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto select Transit Agency when only 1 Active
Simplify the flow for a user, and avoid having to deal with the design of a single button on the `core:index` page.
When there is only 1 active transit agency, and user lands on `core:index`, forward to the active agency's `core:agency_index` page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/views.py`
Content:
```
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.template import loader
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.translation import gettext as _
9
10 from . import models, session, viewmodels
11 from .middleware import pageview_decorator
12
13
14 def PageTemplateResponse(request, page_vm):
15 """Helper returns a TemplateResponse using the common page template."""
16 return TemplateResponse(request, "core/page.html", page_vm.context_dict())
17
18
19 def _index_content_title():
20 """Helper returns the content title for the common index page."""
21 return _("core.pages.index.content_title")
22
23
24 def _index_url():
25 """Helper computes the index url path."""
26 return reverse("core:index")
27
28
29 @pageview_decorator
30 def index(request):
31 """View handler for the main entry page."""
32 session.reset(request)
33
34 # generate a button to the landing page for each active agency
35 agencies = models.TransitAgency.all_active()
36 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
37 buttons[0].classes.append("mt-3")
38 buttons[0].label = _("core.pages.index.chooseprovider")
39
40 page = viewmodels.Page(
41 title=_("core.pages.index.title"),
42 content_title=_index_content_title(),
43 buttons=buttons,
44 classes="home",
45 )
46
47 return PageTemplateResponse(request, page)
48
49
50 @pageview_decorator
51 def agency_index(request, agency):
52 """View handler for an agency entry page."""
53 session.reset(request)
54 session.update(request, agency=agency, origin=agency.index_url)
55
56 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
57 button.label = _("core.pages.agency_index.button.label")
58
59 page = viewmodels.Page(
60 title=_("core.pages.agency_index.title"),
61 content_title=_("core.pages.agency_index.content_title"),
62 button=button,
63 classes="home",
64 )
65
66 help_page = reverse("core:help")
67 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
68
69 return TemplateResponse(request, "core/agency_index.html", context_dict)
70
71
72 @pageview_decorator
73 def help(request):
74 """View handler for the help page."""
75 if session.active_agency(request):
76 agency = session.agency(request)
77 buttons = viewmodels.Button.agency_contact_links(agency)
78 else:
79 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
80
81 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
82
83 page = viewmodels.Page(
84 title=_("core.buttons.help"),
85 content_title=_("core.buttons.help"),
86 buttons=buttons,
87 noimage=True,
88 )
89
90 return TemplateResponse(request, "core/help.html", page.context_dict())
91
92
93 @pageview_decorator
94 def bad_request(request, exception, template_name="400.html"):
95 """View handler for HTTP 400 Bad Request responses."""
96 if session.active_agency(request):
97 session.update(request, origin=session.agency(request).index_url)
98 else:
99 session.update(request, origin=_index_url())
100
101 home = viewmodels.Button.home(request)
102 page = viewmodels.ErrorPage.error(button=home)
103 t = loader.get_template(template_name)
104
105 return HttpResponseBadRequest(t.render(page.context_dict()))
106
107
108 @pageview_decorator
109 def csrf_failure(request, reason):
110 """
111 View handler for CSRF_FAILURE_VIEW with custom data.
112 """
113 if session.active_agency(request):
114 session.update(request, origin=session.agency(request).index_url)
115 else:
116 session.update(request, origin=_index_url())
117
118 home = viewmodels.Button.home(request)
119 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
120 t = loader.get_template("400.html")
121
122 return HttpResponseNotFound(t.render(page.context_dict()))
123
124
125 @pageview_decorator
126 def page_not_found(request, exception, template_name="404.html"):
127 """View handler for HTTP 404 Not Found responses."""
128 if session.active_agency(request):
129 session.update(request, origin=session.agency(request).index_url)
130 else:
131 session.update(request, origin=_index_url())
132
133 home = viewmodels.Button.home(request)
134 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
135 t = loader.get_template(template_name)
136
137 return HttpResponseNotFound(t.render(page.context_dict()))
138
139
140 @pageview_decorator
141 def server_error(request, template_name="500.html"):
142 """View handler for HTTP 500 Server Error responses."""
143 if session.active_agency(request):
144 session.update(request, origin=session.agency(request).index_url)
145 else:
146 session.update(request, origin=_index_url())
147
148 home = viewmodels.Button.home(request)
149 page = viewmodels.ErrorPage.error(button=home)
150 t = loader.get_template(template_name)
151
152 return HttpResponseServerError(t.render(page.context_dict()))
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -2,6 +2,7 @@
The core application: view definition for the root of the webapp.
"""
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
+from django.shortcuts import redirect
from django.template import loader
from django.template.response import TemplateResponse
from django.urls import reverse
@@ -31,8 +32,13 @@
"""View handler for the main entry page."""
session.reset(request)
- # generate a button to the landing page for each active agency
agencies = models.TransitAgency.all_active()
+
+ if len(agencies) == 1:
+ agency = agencies[0]
+ return redirect(agency.index_url)
+
+ # generate a button to the landing page for each active agency
buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
buttons[0].classes.append("mt-3")
buttons[0].label = _("core.pages.index.chooseprovider")
|
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -2,6 +2,7 @@\n The core application: view definition for the root of the webapp.\n \"\"\"\n from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\n+from django.shortcuts import redirect\n from django.template import loader\n from django.template.response import TemplateResponse\n from django.urls import reverse\n@@ -31,8 +32,13 @@\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n \n- # generate a button to the landing page for each active agency\n agencies = models.TransitAgency.all_active()\n+\n+ if len(agencies) == 1:\n+ agency = agencies[0]\n+ return redirect(agency.index_url)\n+\n+ # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n", "issue": "Auto select Transit Agency when only 1 Active\nSimplify the flow for a user, and avoid having to deal with the design of a single button on the `core:index` page.\r\n\r\nWhen there is only 1 active transit agency, and user lands on `core:index`, forward to the active agency's `core:agency_index` page.\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n # generate a button to the landing page for each active agency\n agencies = models.TransitAgency.all_active()\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_index_content_title(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_index_content_title(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(\"core:help\")\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
| 1,773 | 251 |
gh_patches_debug_18481
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-4966
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing chair email aliases for research area groups
### Describe the issue
There are two Research Area Groups in the datatracker, anrw and irtfopen. These groups have a chair listed, but the email expansions at https://datatracker.ietf.org/group/irtfopen/email/ appear to be missing the chairs alias ([email protected]) and email sent to that address bounces. Similarly, presumably, for anrw.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/group/management/commands/generate_group_aliases.py`
Content:
```
1 # Copyright The IETF Trust 2012-2021, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4 # This was written as a script by Markus Stenberg <[email protected]>.
5 # It was turned into a management command by Russ Housley <[email protected]>.
6
7 import datetime
8 import io
9 import os
10 import shutil
11 import stat
12 import time
13
14 from tempfile import mkstemp
15
16 from django.conf import settings
17 from django.core.management.base import BaseCommand
18 from django.utils import timezone
19
20 import debug # pyflakes:ignore
21
22 from ietf.group.models import Group
23 from ietf.group.utils import get_group_ad_emails, get_group_role_emails, get_child_group_role_emails
24 from ietf.name.models import GroupTypeName
25 from ietf.utils.aliases import dump_sublist
26
27 DEFAULT_YEARS = 5
28 ACTIVE_STATES=['active', 'bof', 'proposed']
29 GROUP_TYPES=['wg', 'rg', 'dir', 'team', 'review', 'program', 'rfcedtyp']
30 NO_AD_GROUP_TYPES=['rg', 'team', 'program', 'rfcedtyp']
31 IETF_DOMAIN=['ietf.org', ]
32 IRTF_DOMAIN=['irtf.org', ]
33 IAB_DOMAIN=['iab.org', ]
34
35 class Command(BaseCommand):
36 help = ('Generate the group-aliases and group-virtual files for Internet-Draft '
37 'mail aliases, placing them in the file configured in '
38 'settings.GROUP_ALIASES_PATH and settings.GROUP_VIRTUAL_PATH, '
39 'respectively. The generation includes aliases for groups that '
40 'have seen activity in the last %s years.' % (DEFAULT_YEARS))
41
42 def handle(self, *args, **options):
43 show_since = timezone.now() - datetime.timedelta(DEFAULT_YEARS*365)
44
45 date = time.strftime("%Y-%m-%d_%H:%M:%S")
46 signature = '# Generated by %s at %s\n' % (os.path.abspath(__file__), date)
47
48 ahandle, aname = mkstemp()
49 os.close(ahandle)
50 afile = io.open(aname,"w")
51
52 vhandle, vname = mkstemp()
53 os.close(vhandle)
54 vfile = io.open(vname,"w")
55
56 afile.write(signature)
57 vfile.write(signature)
58 vfile.write("%s anything\n" % settings.GROUP_VIRTUAL_DOMAIN)
59
60 # Loop through each group type and build -ads and -chairs entries
61 for g in GROUP_TYPES:
62 domains = []
63 domains += IETF_DOMAIN
64 if g == 'rg':
65 domains += IRTF_DOMAIN
66 if g == 'program':
67 domains += IAB_DOMAIN
68
69 entries = Group.objects.filter(type=g).all()
70 active_entries = entries.filter(state__in=ACTIVE_STATES)
71 inactive_recent_entries = entries.exclude(state__in=ACTIVE_STATES).filter(time__gte=show_since)
72 interesting_entries = active_entries | inactive_recent_entries
73
74 for e in interesting_entries.distinct().iterator():
75 name = e.acronym
76
77 # Research groups, teams, and programs do not have -ads lists
78 if not g in NO_AD_GROUP_TYPES:
79 dump_sublist(afile, vfile, name+'-ads', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_ad_emails(e))
80 # All group types have -chairs lists
81 dump_sublist(afile, vfile, name+'-chairs', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(e, ['chair', 'secr']))
82
83 # The area lists include every chair in active working groups in the area
84 areas = Group.objects.filter(type='area').all()
85 active_areas = areas.filter(state__in=ACTIVE_STATES)
86 for area in active_areas:
87 name = area.acronym
88 area_ad_emails = get_group_role_emails(area, ['pre-ad', 'ad', 'chair'])
89 dump_sublist(afile, vfile, name+'-ads', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, area_ad_emails)
90 dump_sublist(afile, vfile, name+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, (get_child_group_role_emails(area, ['chair', 'secr']) | area_ad_emails))
91
92 # Other groups with chairs that require Internet-Draft submission approval
93 gtypes = GroupTypeName.objects.values_list('slug', flat=True)
94 special_groups = Group.objects.filter(type__features__req_subm_approval=True, acronym__in=gtypes, state='active')
95 for group in special_groups:
96 dump_sublist(afile, vfile, group.acronym+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(group, ['chair', 'delegate']))
97
98 afile.close()
99 vfile.close()
100
101 os.chmod(aname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
102 os.chmod(vname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
103
104 shutil.move(aname, settings.GROUP_ALIASES_PATH)
105 shutil.move(vname, settings.GROUP_VIRTUAL_PATH)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/group/management/commands/generate_group_aliases.py b/ietf/group/management/commands/generate_group_aliases.py
--- a/ietf/group/management/commands/generate_group_aliases.py
+++ b/ietf/group/management/commands/generate_group_aliases.py
@@ -26,8 +26,8 @@
DEFAULT_YEARS = 5
ACTIVE_STATES=['active', 'bof', 'proposed']
-GROUP_TYPES=['wg', 'rg', 'dir', 'team', 'review', 'program', 'rfcedtyp']
-NO_AD_GROUP_TYPES=['rg', 'team', 'program', 'rfcedtyp']
+GROUP_TYPES=['wg', 'rg', 'rag', 'dir', 'team', 'review', 'program', 'rfcedtyp']
+NO_AD_GROUP_TYPES=['rg', 'rag', 'team', 'program', 'rfcedtyp']
IETF_DOMAIN=['ietf.org', ]
IRTF_DOMAIN=['irtf.org', ]
IAB_DOMAIN=['iab.org', ]
@@ -61,7 +61,7 @@
for g in GROUP_TYPES:
domains = []
domains += IETF_DOMAIN
- if g == 'rg':
+ if g in ('rg', 'rag'):
domains += IRTF_DOMAIN
if g == 'program':
domains += IAB_DOMAIN
|
{"golden_diff": "diff --git a/ietf/group/management/commands/generate_group_aliases.py b/ietf/group/management/commands/generate_group_aliases.py\n--- a/ietf/group/management/commands/generate_group_aliases.py\n+++ b/ietf/group/management/commands/generate_group_aliases.py\n@@ -26,8 +26,8 @@\n \n DEFAULT_YEARS = 5\n ACTIVE_STATES=['active', 'bof', 'proposed']\n-GROUP_TYPES=['wg', 'rg', 'dir', 'team', 'review', 'program', 'rfcedtyp']\n-NO_AD_GROUP_TYPES=['rg', 'team', 'program', 'rfcedtyp']\n+GROUP_TYPES=['wg', 'rg', 'rag', 'dir', 'team', 'review', 'program', 'rfcedtyp']\n+NO_AD_GROUP_TYPES=['rg', 'rag', 'team', 'program', 'rfcedtyp']\n IETF_DOMAIN=['ietf.org', ]\n IRTF_DOMAIN=['irtf.org', ]\n IAB_DOMAIN=['iab.org', ]\n@@ -61,7 +61,7 @@\n for g in GROUP_TYPES:\n domains = []\n domains += IETF_DOMAIN\n- if g == 'rg':\n+ if g in ('rg', 'rag'):\n domains += IRTF_DOMAIN\n if g == 'program':\n domains += IAB_DOMAIN\n", "issue": "Missing chair email aliases for research area groups\n### Describe the issue\n\nThere are two Research Area Groups in the datatracker, anrw and irtfopen. These groups have a chair listed, but the email expansions at https://datatracker.ietf.org/group/irtfopen/email/ appear to be missing the chairs alias ([email protected]) and email sent to that address bounces. Similarly, presumably, for anrw.\r\n\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2012-2021, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n# This was written as a script by Markus Stenberg <[email protected]>.\n# It was turned into a management command by Russ Housley <[email protected]>.\n\nimport datetime\nimport io\nimport os\nimport shutil\nimport stat\nimport time\n\nfrom tempfile import mkstemp\n \nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\nfrom ietf.group.utils import get_group_ad_emails, get_group_role_emails, get_child_group_role_emails\nfrom ietf.name.models import GroupTypeName\nfrom ietf.utils.aliases import dump_sublist\n\nDEFAULT_YEARS = 5\nACTIVE_STATES=['active', 'bof', 'proposed']\nGROUP_TYPES=['wg', 'rg', 'dir', 'team', 'review', 'program', 'rfcedtyp']\nNO_AD_GROUP_TYPES=['rg', 'team', 'program', 'rfcedtyp']\nIETF_DOMAIN=['ietf.org', ]\nIRTF_DOMAIN=['irtf.org', ]\nIAB_DOMAIN=['iab.org', ]\n\nclass Command(BaseCommand):\n help = ('Generate the group-aliases and group-virtual files for Internet-Draft '\n 'mail aliases, placing them in the file configured in '\n 'settings.GROUP_ALIASES_PATH and settings.GROUP_VIRTUAL_PATH, '\n 'respectively. The generation includes aliases for groups that '\n 'have seen activity in the last %s years.' % (DEFAULT_YEARS))\n\n def handle(self, *args, **options):\n show_since = timezone.now() - datetime.timedelta(DEFAULT_YEARS*365)\n\n date = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n signature = '# Generated by %s at %s\\n' % (os.path.abspath(__file__), date)\n\n ahandle, aname = mkstemp()\n os.close(ahandle)\n afile = io.open(aname,\"w\")\n\n vhandle, vname = mkstemp()\n os.close(vhandle)\n vfile = io.open(vname,\"w\")\n\n afile.write(signature)\n vfile.write(signature)\n vfile.write(\"%s anything\\n\" % settings.GROUP_VIRTUAL_DOMAIN)\n\n # Loop through each group type and build -ads and -chairs entries\n for g in GROUP_TYPES:\n domains = []\n domains += IETF_DOMAIN\n if g == 'rg':\n domains += IRTF_DOMAIN\n if g == 'program':\n domains += IAB_DOMAIN\n\n entries = Group.objects.filter(type=g).all()\n active_entries = entries.filter(state__in=ACTIVE_STATES)\n inactive_recent_entries = entries.exclude(state__in=ACTIVE_STATES).filter(time__gte=show_since)\n interesting_entries = active_entries | inactive_recent_entries\n\n for e in interesting_entries.distinct().iterator():\n name = e.acronym\n \n # Research groups, teams, and programs do not have -ads lists\n if not g in NO_AD_GROUP_TYPES:\n dump_sublist(afile, vfile, name+'-ads', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_ad_emails(e))\n # All group types have -chairs lists\n dump_sublist(afile, vfile, name+'-chairs', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(e, ['chair', 'secr']))\n\n # The area lists include every chair in active working groups in the area\n areas = Group.objects.filter(type='area').all()\n active_areas = areas.filter(state__in=ACTIVE_STATES)\n for area in active_areas:\n name = area.acronym\n area_ad_emails = get_group_role_emails(area, ['pre-ad', 'ad', 'chair'])\n dump_sublist(afile, vfile, name+'-ads', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, area_ad_emails)\n dump_sublist(afile, vfile, name+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, (get_child_group_role_emails(area, ['chair', 'secr']) | area_ad_emails))\n\n # Other groups with chairs that require Internet-Draft submission approval\n gtypes = GroupTypeName.objects.values_list('slug', flat=True)\n special_groups = Group.objects.filter(type__features__req_subm_approval=True, acronym__in=gtypes, state='active')\n for group in special_groups:\n dump_sublist(afile, vfile, group.acronym+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(group, ['chair', 'delegate']))\n\n afile.close()\n vfile.close()\n\n os.chmod(aname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) \n os.chmod(vname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) \n\n shutil.move(aname, settings.GROUP_ALIASES_PATH)\n shutil.move(vname, settings.GROUP_VIRTUAL_PATH)\n", "path": "ietf/group/management/commands/generate_group_aliases.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2012-2021, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n# This was written as a script by Markus Stenberg <[email protected]>.\n# It was turned into a management command by Russ Housley <[email protected]>.\n\nimport datetime\nimport io\nimport os\nimport shutil\nimport stat\nimport time\n\nfrom tempfile import mkstemp\n \nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport debug # pyflakes:ignore\n\nfrom ietf.group.models import Group\nfrom ietf.group.utils import get_group_ad_emails, get_group_role_emails, get_child_group_role_emails\nfrom ietf.name.models import GroupTypeName\nfrom ietf.utils.aliases import dump_sublist\n\nDEFAULT_YEARS = 5\nACTIVE_STATES=['active', 'bof', 'proposed']\nGROUP_TYPES=['wg', 'rg', 'rag', 'dir', 'team', 'review', 'program', 'rfcedtyp']\nNO_AD_GROUP_TYPES=['rg', 'rag', 'team', 'program', 'rfcedtyp']\nIETF_DOMAIN=['ietf.org', ]\nIRTF_DOMAIN=['irtf.org', ]\nIAB_DOMAIN=['iab.org', ]\n\nclass Command(BaseCommand):\n help = ('Generate the group-aliases and group-virtual files for Internet-Draft '\n 'mail aliases, placing them in the file configured in '\n 'settings.GROUP_ALIASES_PATH and settings.GROUP_VIRTUAL_PATH, '\n 'respectively. The generation includes aliases for groups that '\n 'have seen activity in the last %s years.' % (DEFAULT_YEARS))\n\n def handle(self, *args, **options):\n show_since = timezone.now() - datetime.timedelta(DEFAULT_YEARS*365)\n\n date = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n signature = '# Generated by %s at %s\\n' % (os.path.abspath(__file__), date)\n\n ahandle, aname = mkstemp()\n os.close(ahandle)\n afile = io.open(aname,\"w\")\n\n vhandle, vname = mkstemp()\n os.close(vhandle)\n vfile = io.open(vname,\"w\")\n\n afile.write(signature)\n vfile.write(signature)\n vfile.write(\"%s anything\\n\" % settings.GROUP_VIRTUAL_DOMAIN)\n\n # Loop through each group type and build -ads and -chairs entries\n for g in GROUP_TYPES:\n domains = []\n domains += IETF_DOMAIN\n if g in ('rg', 'rag'):\n domains += IRTF_DOMAIN\n if g == 'program':\n domains += IAB_DOMAIN\n\n entries = Group.objects.filter(type=g).all()\n active_entries = entries.filter(state__in=ACTIVE_STATES)\n inactive_recent_entries = entries.exclude(state__in=ACTIVE_STATES).filter(time__gte=show_since)\n interesting_entries = active_entries | inactive_recent_entries\n\n for e in interesting_entries.distinct().iterator():\n name = e.acronym\n \n # Research groups, teams, and programs do not have -ads lists\n if not g in NO_AD_GROUP_TYPES:\n dump_sublist(afile, vfile, name+'-ads', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_ad_emails(e))\n # All group types have -chairs lists\n dump_sublist(afile, vfile, name+'-chairs', domains, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(e, ['chair', 'secr']))\n\n # The area lists include every chair in active working groups in the area\n areas = Group.objects.filter(type='area').all()\n active_areas = areas.filter(state__in=ACTIVE_STATES)\n for area in active_areas:\n name = area.acronym\n area_ad_emails = get_group_role_emails(area, ['pre-ad', 'ad', 'chair'])\n dump_sublist(afile, vfile, name+'-ads', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, area_ad_emails)\n dump_sublist(afile, vfile, name+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, (get_child_group_role_emails(area, ['chair', 'secr']) | area_ad_emails))\n\n # Other groups with chairs that require Internet-Draft submission approval\n gtypes = GroupTypeName.objects.values_list('slug', flat=True)\n special_groups = Group.objects.filter(type__features__req_subm_approval=True, acronym__in=gtypes, state='active')\n for group in special_groups:\n dump_sublist(afile, vfile, group.acronym+'-chairs', IETF_DOMAIN, settings.GROUP_VIRTUAL_DOMAIN, get_group_role_emails(group, ['chair', 'delegate']))\n\n afile.close()\n vfile.close()\n\n os.chmod(aname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) \n os.chmod(vname, stat.S_IWUSR|stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) \n\n shutil.move(aname, settings.GROUP_ALIASES_PATH)\n shutil.move(vname, settings.GROUP_VIRTUAL_PATH)\n", "path": "ietf/group/management/commands/generate_group_aliases.py"}]}
| 1,742 | 288 |
gh_patches_debug_24050
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-308
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Federation: exception for async reference resolver
Hi all! I'm tinkering with the just-merged Federation support :tada: I'm trying to define a reference resolver that returns a coroutine (`aiodataloader`):
```py
@User.reference_resolver
def resolve_reference(type_object, info, reference):
type, id = from_global_id(reference["id"]) # parses a Relay ID
return info.context.users.get_by_id.load(int(id))
```
But that raises an exception in `add_typename_to_possible_return`:
```
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 343, in execute_operation
self.execute_fields_serially
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 435, in execute_fields
result = self.resolve_field(
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 606, in resolve_field
return self.complete_value_catching_error(
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 683, in complete_value_catching_error
self.handle_field_error(error, field_nodes, path, return_type)
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 698, in handle_field_error
raise error
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 669, in complete_value_catching_error
completed = self.complete_value(
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 735, in complete_value
raise result
File "/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py", line 625, in resolve_field_value_or_error
result = resolve_fn(source, info, **args)
File "/usr/local/lib/python3.8/site-packages/ariadne/contrib/federation/utils.py", line 83, in resolve_entities
add_typename_to_possible_return(
File "/usr/local/lib/python3.8/site-packages/ariadne/contrib/federation/utils.py", line 132, in add_typename_to_possible_return
setattr(obj, f"_{obj.__class__.__name__}__typename", typename)
graphql.error.graphql_error.GraphQLError: 'coroutine' object has no attribute '_coroutine__typename'
```
If I change the reference resolver to return a `dict` with some test data, it works fine, but I need to use that dataloader to do a real lookup.
Any thoughts on where the problem lies?
(also, I'm new to asyncio, and originally had `async def resolve_reference(...):` and `return await ...`, which I realized is sort of pointless. Mentioning it here in case it's relevant.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/contrib/federation/utils.py`
Content:
```
1 # pylint: disable=cell-var-from-loop
2
3 import re
4 from typing import Any, List
5
6 from graphql.language import DirectiveNode
7 from graphql.type import (
8 GraphQLNamedType,
9 GraphQLInputObjectType,
10 GraphQLObjectType,
11 GraphQLResolveInfo,
12 GraphQLSchema,
13 )
14
15
16 _i_token_delimiter = r"(?:^|[\s\r\n]+|$)"
17 _i_token_name = "[_A-Za-z][_0-9A-Za-z]*"
18 _i_token_arguments = r"\([^)]*\)"
19 _i_token_location = "[_A-Za-z][_0-9A-Za-z]*"
20
21 _r_directive_definition = re.compile(
22 "("
23 f"{_i_token_delimiter}directive"
24 f"(?:{_i_token_delimiter})?@({_i_token_name})"
25 f"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?"
26 f"{_i_token_delimiter}on"
27 f"{_i_token_delimiter}(?:[|]{_i_token_delimiter})?{_i_token_location}"
28 f"(?:{_i_token_delimiter}[|]{_i_token_delimiter}{_i_token_location})*"
29 ")"
30 f"(?={_i_token_delimiter})",
31 )
32
33 _r_directive = re.compile(
34 "("
35 f"(?:{_i_token_delimiter})?@({_i_token_name})"
36 f"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?"
37 ")"
38 f"(?={_i_token_delimiter})",
39 )
40
41 _allowed_directives = [
42 "skip", # Default directive as per specs.
43 "include", # Default directive as per specs.
44 "deprecated", # Default directive as per specs.
45 "external", # Federation directive.
46 "requires", # Federation directive.
47 "provides", # Federation directive.
48 "key", # Federation directive.
49 "extends", # Federation directive.
50 ]
51
52
53 def purge_schema_directives(joined_type_defs: str) -> str:
54 """Remove custom schema directives from federation."""
55 joined_type_defs = _r_directive_definition.sub("", joined_type_defs)
56 joined_type_defs = _r_directive.sub(
57 lambda m: m.group(1) if m.group(2) in _allowed_directives else "",
58 joined_type_defs,
59 )
60 return joined_type_defs
61
62
63 def resolve_entities(_: Any, info: GraphQLResolveInfo, **kwargs) -> Any:
64 representations = list(kwargs.get("representations", list()))
65
66 result = []
67 for reference in representations:
68 __typename = reference["__typename"]
69 type_object = info.schema.get_type(__typename)
70
71 if not type_object or not isinstance(type_object, GraphQLObjectType):
72 raise Exception(
73 f"The `_entities` resolver tried to load an entity for"
74 f' type "{__typename}", but no object type of that name'
75 f" was found in the schema",
76 )
77
78 resolve_reference = getattr(
79 type_object, "__resolve_reference__", lambda o, i, r: reference,
80 )
81
82 result.append(
83 add_typename_to_possible_return(
84 resolve_reference(type_object, info, reference), __typename,
85 ),
86 )
87
88 return result
89
90
91 def get_entity_types(schema: GraphQLSchema) -> List[GraphQLNamedType]:
92 """Get all types that include the @key directive."""
93 schema_types = schema.type_map.values()
94
95 def check_type(t):
96 return isinstance(t, GraphQLObjectType) and includes_directive(t, "key")
97
98 return [t for t in schema_types if check_type(t)]
99
100
101 def includes_directive(type_object: GraphQLNamedType, directive_name: str,) -> bool:
102 """Check if specified type includes a directive."""
103 if isinstance(type_object, GraphQLInputObjectType):
104 return False
105
106 directives = gather_directives(type_object)
107 return any([d.name.value == directive_name for d in directives])
108
109
110 def gather_directives(type_object: GraphQLNamedType,) -> List[DirectiveNode]:
111 """Get all directive attached to a type."""
112 directives: List[DirectiveNode] = []
113
114 if hasattr(type_object, "extension_ast_nodes"):
115 if type_object.extension_ast_nodes:
116 for ast_node in type_object.extension_ast_nodes:
117 if ast_node.directives:
118 directives.extend(ast_node.directives)
119
120 if hasattr(type_object, "ast_node"):
121 if type_object.ast_node and type_object.ast_node.directives:
122 directives.extend(type_object.ast_node.directives)
123
124 return directives
125
126
127 def add_typename_to_possible_return(obj: Any, typename: str) -> Any:
128 if obj is not None:
129 if isinstance(obj, dict):
130 obj["__typename"] = typename
131 else:
132 setattr(obj, f"_{obj.__class__.__name__}__typename", typename)
133 return obj
134 return {"__typename": typename}
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ariadne/contrib/federation/utils.py b/ariadne/contrib/federation/utils.py
--- a/ariadne/contrib/federation/utils.py
+++ b/ariadne/contrib/federation/utils.py
@@ -1,6 +1,7 @@
# pylint: disable=cell-var-from-loop
import re
+from inspect import isawaitable
from typing import Any, List
from graphql.language import DirectiveNode
@@ -79,15 +80,20 @@
type_object, "__resolve_reference__", lambda o, i, r: reference,
)
- result.append(
- add_typename_to_possible_return(
- resolve_reference(type_object, info, reference), __typename,
- ),
- )
+ representation = resolve_reference(type_object, info, reference)
+
+ if isawaitable(representation):
+ result.append(add_typename_to_async_return(representation, __typename))
+ else:
+ result.append(add_typename_to_possible_return(representation, __typename))
return result
+async def add_typename_to_async_return(obj: Any, typename: str) -> Any:
+ return add_typename_to_possible_return(await obj, typename)
+
+
def get_entity_types(schema: GraphQLSchema) -> List[GraphQLNamedType]:
"""Get all types that include the @key directive."""
schema_types = schema.type_map.values()
|
{"golden_diff": "diff --git a/ariadne/contrib/federation/utils.py b/ariadne/contrib/federation/utils.py\n--- a/ariadne/contrib/federation/utils.py\n+++ b/ariadne/contrib/federation/utils.py\n@@ -1,6 +1,7 @@\n # pylint: disable=cell-var-from-loop\n \n import re\n+from inspect import isawaitable\n from typing import Any, List\n \n from graphql.language import DirectiveNode\n@@ -79,15 +80,20 @@\n type_object, \"__resolve_reference__\", lambda o, i, r: reference,\n )\n \n- result.append(\n- add_typename_to_possible_return(\n- resolve_reference(type_object, info, reference), __typename,\n- ),\n- )\n+ representation = resolve_reference(type_object, info, reference)\n+\n+ if isawaitable(representation):\n+ result.append(add_typename_to_async_return(representation, __typename))\n+ else:\n+ result.append(add_typename_to_possible_return(representation, __typename))\n \n return result\n \n \n+async def add_typename_to_async_return(obj: Any, typename: str) -> Any:\n+ return add_typename_to_possible_return(await obj, typename)\n+\n+\n def get_entity_types(schema: GraphQLSchema) -> List[GraphQLNamedType]:\n \"\"\"Get all types that include the @key directive.\"\"\"\n schema_types = schema.type_map.values()\n", "issue": "Federation: exception for async reference resolver\nHi all! I'm tinkering with the just-merged Federation support :tada: I'm trying to define a reference resolver that returns a coroutine (`aiodataloader`):\r\n\r\n```py\r\[email protected]_resolver\r\ndef resolve_reference(type_object, info, reference):\r\n type, id = from_global_id(reference[\"id\"]) # parses a Relay ID\r\n return info.context.users.get_by_id.load(int(id))\r\n```\r\n\r\nBut that raises an exception in `add_typename_to_possible_return`:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 343, in execute_operation\r\n self.execute_fields_serially\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 435, in execute_fields\r\n result = self.resolve_field(\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 606, in resolve_field\r\n return self.complete_value_catching_error(\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 683, in complete_value_catching_error\r\n self.handle_field_error(error, field_nodes, path, return_type)\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 698, in handle_field_error\r\n raise error\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 669, in complete_value_catching_error\r\n completed = self.complete_value(\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 735, in complete_value\r\n raise result\r\n File \"/usr/local/lib/python3.8/site-packages/graphql/execution/execute.py\", line 625, in resolve_field_value_or_error\r\n result = resolve_fn(source, info, **args)\r\n File \"/usr/local/lib/python3.8/site-packages/ariadne/contrib/federation/utils.py\", line 83, in resolve_entities\r\n add_typename_to_possible_return(\r\n File \"/usr/local/lib/python3.8/site-packages/ariadne/contrib/federation/utils.py\", line 132, in add_typename_to_possible_return\r\n setattr(obj, f\"_{obj.__class__.__name__}__typename\", typename)\r\ngraphql.error.graphql_error.GraphQLError: 'coroutine' object has no attribute '_coroutine__typename'\r\n```\r\n\r\nIf I change the reference resolver to return a `dict` with some test data, it works fine, but I need to use that dataloader to do a real lookup.\r\n\r\nAny thoughts on where the problem lies?\r\n\r\n(also, I'm new to asyncio, and originally had `async def resolve_reference(...):` and `return await ...`, which I realized is sort of pointless. Mentioning it here in case it's relevant.)\n", "before_files": [{"content": "# pylint: disable=cell-var-from-loop\n\nimport re\nfrom typing import Any, List\n\nfrom graphql.language import DirectiveNode\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLInputObjectType,\n GraphQLObjectType,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\n\n_i_token_delimiter = r\"(?:^|[\\s\\r\\n]+|$)\"\n_i_token_name = \"[_A-Za-z][_0-9A-Za-z]*\"\n_i_token_arguments = r\"\\([^)]*\\)\"\n_i_token_location = \"[_A-Za-z][_0-9A-Za-z]*\"\n\n_r_directive_definition = re.compile(\n \"(\"\n f\"{_i_token_delimiter}directive\"\n f\"(?:{_i_token_delimiter})?@({_i_token_name})\"\n f\"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?\"\n f\"{_i_token_delimiter}on\"\n f\"{_i_token_delimiter}(?:[|]{_i_token_delimiter})?{_i_token_location}\"\n f\"(?:{_i_token_delimiter}[|]{_i_token_delimiter}{_i_token_location})*\"\n \")\"\n f\"(?={_i_token_delimiter})\",\n)\n\n_r_directive = re.compile(\n \"(\"\n f\"(?:{_i_token_delimiter})?@({_i_token_name})\"\n f\"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?\"\n \")\"\n f\"(?={_i_token_delimiter})\",\n)\n\n_allowed_directives = [\n \"skip\", # Default directive as per specs.\n \"include\", # Default directive as per specs.\n \"deprecated\", # Default directive as per specs.\n \"external\", # Federation directive.\n \"requires\", # Federation directive.\n \"provides\", # Federation directive.\n \"key\", # Federation directive.\n \"extends\", # Federation directive.\n]\n\n\ndef purge_schema_directives(joined_type_defs: str) -> str:\n \"\"\"Remove custom schema directives from federation.\"\"\"\n joined_type_defs = _r_directive_definition.sub(\"\", joined_type_defs)\n joined_type_defs = _r_directive.sub(\n lambda m: m.group(1) if m.group(2) in _allowed_directives else \"\",\n joined_type_defs,\n )\n return joined_type_defs\n\n\ndef resolve_entities(_: Any, info: GraphQLResolveInfo, **kwargs) -> Any:\n representations = list(kwargs.get(\"representations\", list()))\n\n result = []\n for reference in representations:\n __typename = reference[\"__typename\"]\n type_object = info.schema.get_type(__typename)\n\n if not type_object or not isinstance(type_object, GraphQLObjectType):\n raise Exception(\n f\"The `_entities` resolver tried to load an entity for\"\n f' type \"{__typename}\", but no object type of that name'\n f\" was found in the schema\",\n )\n\n resolve_reference = getattr(\n type_object, \"__resolve_reference__\", lambda o, i, r: reference,\n )\n\n result.append(\n add_typename_to_possible_return(\n resolve_reference(type_object, info, reference), __typename,\n ),\n )\n\n return result\n\n\ndef get_entity_types(schema: GraphQLSchema) -> List[GraphQLNamedType]:\n \"\"\"Get all types that include the @key directive.\"\"\"\n schema_types = schema.type_map.values()\n\n def check_type(t):\n return isinstance(t, GraphQLObjectType) and includes_directive(t, \"key\")\n\n return [t for t in schema_types if check_type(t)]\n\n\ndef includes_directive(type_object: GraphQLNamedType, directive_name: str,) -> bool:\n \"\"\"Check if specified type includes a directive.\"\"\"\n if isinstance(type_object, GraphQLInputObjectType):\n return False\n\n directives = gather_directives(type_object)\n return any([d.name.value == directive_name for d in directives])\n\n\ndef gather_directives(type_object: GraphQLNamedType,) -> List[DirectiveNode]:\n \"\"\"Get all directive attached to a type.\"\"\"\n directives: List[DirectiveNode] = []\n\n if hasattr(type_object, \"extension_ast_nodes\"):\n if type_object.extension_ast_nodes:\n for ast_node in type_object.extension_ast_nodes:\n if ast_node.directives:\n directives.extend(ast_node.directives)\n\n if hasattr(type_object, \"ast_node\"):\n if type_object.ast_node and type_object.ast_node.directives:\n directives.extend(type_object.ast_node.directives)\n\n return directives\n\n\ndef add_typename_to_possible_return(obj: Any, typename: str) -> Any:\n if obj is not None:\n if isinstance(obj, dict):\n obj[\"__typename\"] = typename\n else:\n setattr(obj, f\"_{obj.__class__.__name__}__typename\", typename)\n return obj\n return {\"__typename\": typename}\n", "path": "ariadne/contrib/federation/utils.py"}], "after_files": [{"content": "# pylint: disable=cell-var-from-loop\n\nimport re\nfrom inspect import isawaitable\nfrom typing import Any, List\n\nfrom graphql.language import DirectiveNode\nfrom graphql.type import (\n GraphQLNamedType,\n GraphQLInputObjectType,\n GraphQLObjectType,\n GraphQLResolveInfo,\n GraphQLSchema,\n)\n\n\n_i_token_delimiter = r\"(?:^|[\\s\\r\\n]+|$)\"\n_i_token_name = \"[_A-Za-z][_0-9A-Za-z]*\"\n_i_token_arguments = r\"\\([^)]*\\)\"\n_i_token_location = \"[_A-Za-z][_0-9A-Za-z]*\"\n\n_r_directive_definition = re.compile(\n \"(\"\n f\"{_i_token_delimiter}directive\"\n f\"(?:{_i_token_delimiter})?@({_i_token_name})\"\n f\"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?\"\n f\"{_i_token_delimiter}on\"\n f\"{_i_token_delimiter}(?:[|]{_i_token_delimiter})?{_i_token_location}\"\n f\"(?:{_i_token_delimiter}[|]{_i_token_delimiter}{_i_token_location})*\"\n \")\"\n f\"(?={_i_token_delimiter})\",\n)\n\n_r_directive = re.compile(\n \"(\"\n f\"(?:{_i_token_delimiter})?@({_i_token_name})\"\n f\"(?:(?:{_i_token_delimiter})?{_i_token_arguments})?\"\n \")\"\n f\"(?={_i_token_delimiter})\",\n)\n\n_allowed_directives = [\n \"skip\", # Default directive as per specs.\n \"include\", # Default directive as per specs.\n \"deprecated\", # Default directive as per specs.\n \"external\", # Federation directive.\n \"requires\", # Federation directive.\n \"provides\", # Federation directive.\n \"key\", # Federation directive.\n \"extends\", # Federation directive.\n]\n\n\ndef purge_schema_directives(joined_type_defs: str) -> str:\n \"\"\"Remove custom schema directives from federation.\"\"\"\n joined_type_defs = _r_directive_definition.sub(\"\", joined_type_defs)\n joined_type_defs = _r_directive.sub(\n lambda m: m.group(1) if m.group(2) in _allowed_directives else \"\",\n joined_type_defs,\n )\n return joined_type_defs\n\n\ndef resolve_entities(_: Any, info: GraphQLResolveInfo, **kwargs) -> Any:\n representations = list(kwargs.get(\"representations\", list()))\n\n result = []\n for reference in representations:\n __typename = reference[\"__typename\"]\n type_object = info.schema.get_type(__typename)\n\n if not type_object or not isinstance(type_object, GraphQLObjectType):\n raise Exception(\n f\"The `_entities` resolver tried to load an entity for\"\n f' type \"{__typename}\", but no object type of that name'\n f\" was found in the schema\",\n )\n\n resolve_reference = getattr(\n type_object, \"__resolve_reference__\", lambda o, i, r: reference,\n )\n\n representation = resolve_reference(type_object, info, reference)\n\n if isawaitable(representation):\n result.append(add_typename_to_async_return(representation, __typename))\n else:\n result.append(add_typename_to_possible_return(representation, __typename))\n\n return result\n\n\nasync def add_typename_to_async_return(obj: Any, typename: str) -> Any:\n return add_typename_to_possible_return(await obj, typename)\n\n\ndef get_entity_types(schema: GraphQLSchema) -> List[GraphQLNamedType]:\n \"\"\"Get all types that include the @key directive.\"\"\"\n schema_types = schema.type_map.values()\n\n def check_type(t):\n return isinstance(t, GraphQLObjectType) and includes_directive(t, \"key\")\n\n return [t for t in schema_types if check_type(t)]\n\n\ndef includes_directive(type_object: GraphQLNamedType, directive_name: str,) -> bool:\n \"\"\"Check if specified type includes a directive.\"\"\"\n if isinstance(type_object, GraphQLInputObjectType):\n return False\n\n directives = gather_directives(type_object)\n return any([d.name.value == directive_name for d in directives])\n\n\ndef gather_directives(type_object: GraphQLNamedType,) -> List[DirectiveNode]:\n \"\"\"Get all directive attached to a type.\"\"\"\n directives: List[DirectiveNode] = []\n\n if hasattr(type_object, \"extension_ast_nodes\"):\n if type_object.extension_ast_nodes:\n for ast_node in type_object.extension_ast_nodes:\n if ast_node.directives:\n directives.extend(ast_node.directives)\n\n if hasattr(type_object, \"ast_node\"):\n if type_object.ast_node and type_object.ast_node.directives:\n directives.extend(type_object.ast_node.directives)\n\n return directives\n\n\ndef add_typename_to_possible_return(obj: Any, typename: str) -> Any:\n if obj is not None:\n if isinstance(obj, dict):\n obj[\"__typename\"] = typename\n else:\n setattr(obj, f\"_{obj.__class__.__name__}__typename\", typename)\n return obj\n return {\"__typename\": typename}\n", "path": "ariadne/contrib/federation/utils.py"}]}
| 2,237 | 304 |
gh_patches_debug_327
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-843
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using sys.exit() with no arguments causes TypeError inside ptvsd
## Environment data
- PTVSD version: 4.1.3
- OS and version: Windows 10
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio: VS
## Actual behavior
```
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\ptvsd_launcher.py", line 119, in <module>
vspd.debug(filename, port_num, debug_id, debug_options, run_as)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\debugger.py", line 37, in debug
run(address, filename, *args, **kwargs)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\_local.py", line 48, in run_file
run(argv, addr, **kwargs)
File "c:\program files (x86)\microsoft visual studio\2017\community\common7\ide\extensions\microsoft\python\core\Packages\ptvsd\_local.py", line 101, in _run
daemon.exitcode = int(ex.code)
TypeError
:
int() argument must be a string, a bytes-like object or a number, not 'NoneType'
```
## Expected behavior
No error
## Steps to reproduce:
1. Debug this code:
```
sys.exit()
```
https://docs.python.org/3/library/sys.html#sys.exit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/_local.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6 import time
7
8 import pydevd
9 from _pydevd_bundle.pydevd_comm import get_global_debugger
10
11 from ptvsd.pydevd_hooks import install
12 from ptvsd.runner import run as no_debug_runner
13 from ptvsd.socket import Address
14 from ptvsd._util import new_hidden_thread
15
16
17 PYDEVD_DEFAULTS = {
18 '--qt-support=auto',
19 }
20
21
22 def _set_pydevd_defaults(pydevd_args):
23 args_to_append = []
24 for arg in PYDEVD_DEFAULTS:
25 if arg not in pydevd_args:
26 args_to_append.append(arg)
27 return pydevd_args + args_to_append
28
29
30 ########################
31 # high-level functions
32
33 def debug_main(address, name, kind, *extra, **kwargs):
34 if not kwargs.pop('wait', False) and address.isserver:
35 def unblock_debugger():
36 debugger = get_global_debugger()
37 while debugger is None:
38 time.sleep(0.1)
39 debugger = get_global_debugger()
40 debugger.ready_to_run = True
41 new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()
42 if kind == 'module':
43 run_module(address, name, *extra, **kwargs)
44 else:
45 run_file(address, name, *extra, **kwargs)
46
47
48 def run_main(address, name, kind, *extra, **kwargs):
49 addr = Address.from_raw(address)
50 sys.argv[:] = _run_main_argv(name, extra)
51 runner = kwargs.pop('_runner', no_debug_runner)
52 runner(addr, name, kind == 'module', *extra, **kwargs)
53
54
55 ########################
56 # low-level functions
57
58 def run_module(address, modname, *extra, **kwargs):
59 """Run pydevd for the given module."""
60 addr = Address.from_raw(address)
61 if not addr.isserver:
62 kwargs['singlesession'] = True
63 run = kwargs.pop('_run', _run)
64 prog = kwargs.pop('_prog', sys.argv[0])
65 filename = modname + ':'
66 argv = _run_argv(addr, filename, extra, _prog=prog)
67 argv.insert(argv.index('--file'), '--module')
68 run(argv, addr, **kwargs)
69
70
71 def run_file(address, filename, *extra, **kwargs):
72 """Run pydevd for the given Python file."""
73 addr = Address.from_raw(address)
74 if not addr.isserver:
75 kwargs['singlesession'] = True
76 run = kwargs.pop('_run', _run)
77 prog = kwargs.pop('_prog', sys.argv[0])
78 argv = _run_argv(addr, filename, extra, _prog=prog)
79 run(argv, addr, **kwargs)
80
81
82 def _run_argv(address, filename, extra, _prog=sys.argv[0]):
83 """Convert the given values to an argv that pydevd.main() supports."""
84 if '--' in extra:
85 pydevd = list(extra[:extra.index('--')])
86 extra = list(extra[len(pydevd) + 1:])
87 else:
88 pydevd = []
89 extra = list(extra)
90
91 pydevd = _set_pydevd_defaults(pydevd)
92 host, port = address
93 argv = [
94 _prog,
95 '--port', str(port),
96 ]
97 if not address.isserver:
98 argv.extend([
99 '--client', host or 'localhost',
100 ])
101 return argv + pydevd + [
102 '--file', filename,
103 ] + extra
104
105
106 def _run_main_argv(filename, extra):
107 if '--' in extra:
108 pydevd = list(extra[:extra.index('--')])
109 extra = list(extra[len(pydevd) + 1:])
110 else:
111 extra = list(extra)
112 return [filename] + extra
113
114
115 def _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):
116 """Start pydevd with the given commandline args."""
117 #print(' '.join(argv))
118
119 # Pydevd assumes that the "__main__" module is the "pydevd" module
120 # and does some tricky stuff under that assumption. For example,
121 # when the debugger starts up it calls save_main_module()
122 # (in pydevd_bundle/pydevd_utils.py). That function explicitly sets
123 # sys.modules["pydevd"] to sys.modules["__main__"] and then sets
124 # the __main__ module to a new one. This makes some sense since
125 # it gives the debugged script a fresh __main__ module.
126 #
127 # This complicates things for us since we are running a different
128 # file (i.e. this one) as the __main__ module. Consequently,
129 # sys.modules["pydevd"] gets set to ptvsd/__main__.py. Subsequent
130 # imports of the "pydevd" module then return the wrong module. We
131 # work around this by avoiding lazy imports of the "pydevd" module.
132 # We also replace the __main__ module with the "pydevd" module here.
133 if sys.modules['__main__'].__file__ != _pydevd.__file__:
134 sys.modules['__main___orig'] = sys.modules['__main__']
135 sys.modules['__main__'] = _pydevd
136
137 daemon = _install(_pydevd, addr, **kwargs)
138 sys.argv[:] = argv
139 try:
140 _pydevd.main()
141 except SystemExit as ex:
142 daemon.exitcode = int(ex.code)
143 raise
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/_local.py b/ptvsd/_local.py
--- a/ptvsd/_local.py
+++ b/ptvsd/_local.py
@@ -139,5 +139,5 @@
try:
_pydevd.main()
except SystemExit as ex:
- daemon.exitcode = int(ex.code)
+ daemon.exitcode = 0 if ex.code is None else int(ex.code)
raise
|
{"golden_diff": "diff --git a/ptvsd/_local.py b/ptvsd/_local.py\n--- a/ptvsd/_local.py\n+++ b/ptvsd/_local.py\n@@ -139,5 +139,5 @@\n try:\n _pydevd.main()\n except SystemExit as ex:\n- daemon.exitcode = int(ex.code)\n+ daemon.exitcode = 0 if ex.code is None else int(ex.code)\n raise\n", "issue": "Using sys.exit() with no arguments causes TypeError inside ptvsd\n## Environment data\r\n\r\n- PTVSD version: 4.1.3\r\n- OS and version: Windows 10\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio: VS\r\n\r\n## Actual behavior\r\n\r\n```\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"c:\\program files (x86)\\microsoft visual studio\\2017\\community\\common7\\ide\\extensions\\microsoft\\python\\core\\ptvsd_launcher.py\", line 119, in <module>\r\n \r\nvspd.debug(filename, port_num, debug_id, debug_options, run_as)\r\n\r\n File \"c:\\program files (x86)\\microsoft visual studio\\2017\\community\\common7\\ide\\extensions\\microsoft\\python\\core\\Packages\\ptvsd\\debugger.py\", line 37, in debug\r\n \r\nrun(address, filename, *args, **kwargs)\r\n\r\n File \"c:\\program files (x86)\\microsoft visual studio\\2017\\community\\common7\\ide\\extensions\\microsoft\\python\\core\\Packages\\ptvsd\\_local.py\", line 48, in run_file\r\n \r\nrun(argv, addr, **kwargs)\r\n\r\n File \"c:\\program files (x86)\\microsoft visual studio\\2017\\community\\common7\\ide\\extensions\\microsoft\\python\\core\\Packages\\ptvsd\\_local.py\", line 101, in _run\r\n \r\ndaemon.exitcode = int(ex.code)\r\n\r\nTypeError\r\n: \r\nint() argument must be a string, a bytes-like object or a number, not 'NoneType'\r\n```\r\n\r\n\r\n## Expected behavior\r\n\r\nNo error\r\n\r\n## Steps to reproduce:\r\n1. Debug this code:\r\n\r\n```\r\nsys.exit()\r\n```\r\n\r\nhttps://docs.python.org/3/library/sys.html#sys.exit\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\nimport time\n\nimport pydevd\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\n\nfrom ptvsd.pydevd_hooks import install\nfrom ptvsd.runner import run as no_debug_runner\nfrom ptvsd.socket import Address\nfrom ptvsd._util import new_hidden_thread\n\n\nPYDEVD_DEFAULTS = {\n '--qt-support=auto',\n}\n\n\ndef _set_pydevd_defaults(pydevd_args):\n args_to_append = []\n for arg in PYDEVD_DEFAULTS:\n if arg not in pydevd_args:\n args_to_append.append(arg)\n return pydevd_args + args_to_append\n\n\n########################\n# high-level functions\n\ndef debug_main(address, name, kind, *extra, **kwargs):\n if not kwargs.pop('wait', False) and address.isserver:\n def unblock_debugger():\n debugger = get_global_debugger()\n while debugger is None:\n time.sleep(0.1)\n debugger = get_global_debugger()\n debugger.ready_to_run = True\n new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()\n if kind == 'module':\n run_module(address, name, *extra, **kwargs)\n else:\n run_file(address, name, *extra, **kwargs)\n\n\ndef run_main(address, name, kind, *extra, **kwargs):\n addr = Address.from_raw(address)\n sys.argv[:] = _run_main_argv(name, extra)\n runner = kwargs.pop('_runner', no_debug_runner)\n runner(addr, name, kind == 'module', *extra, **kwargs)\n\n\n########################\n# low-level functions\n\ndef run_module(address, modname, *extra, **kwargs):\n \"\"\"Run pydevd for the given module.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n filename = modname + ':'\n argv = _run_argv(addr, filename, extra, _prog=prog)\n argv.insert(argv.index('--file'), '--module')\n run(argv, addr, **kwargs)\n\n\ndef run_file(address, filename, *extra, **kwargs):\n \"\"\"Run pydevd for the given Python file.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n argv = _run_argv(addr, filename, extra, _prog=prog)\n run(argv, addr, **kwargs)\n\n\ndef _run_argv(address, filename, extra, _prog=sys.argv[0]):\n \"\"\"Convert the given values to an argv that pydevd.main() supports.\"\"\"\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n pydevd = []\n extra = list(extra)\n\n pydevd = _set_pydevd_defaults(pydevd)\n host, port = address\n argv = [\n _prog,\n '--port', str(port),\n ]\n if not address.isserver:\n argv.extend([\n '--client', host or 'localhost',\n ])\n return argv + pydevd + [\n '--file', filename,\n ] + extra\n\n\ndef _run_main_argv(filename, extra):\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n extra = list(extra)\n return [filename] + extra\n\n\ndef _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):\n \"\"\"Start pydevd with the given commandline args.\"\"\"\n #print(' '.join(argv))\n\n # Pydevd assumes that the \"__main__\" module is the \"pydevd\" module\n # and does some tricky stuff under that assumption. For example,\n # when the debugger starts up it calls save_main_module()\n # (in pydevd_bundle/pydevd_utils.py). That function explicitly sets\n # sys.modules[\"pydevd\"] to sys.modules[\"__main__\"] and then sets\n # the __main__ module to a new one. This makes some sense since\n # it gives the debugged script a fresh __main__ module.\n #\n # This complicates things for us since we are running a different\n # file (i.e. this one) as the __main__ module. Consequently,\n # sys.modules[\"pydevd\"] gets set to ptvsd/__main__.py. Subsequent\n # imports of the \"pydevd\" module then return the wrong module. We\n # work around this by avoiding lazy imports of the \"pydevd\" module.\n # We also replace the __main__ module with the \"pydevd\" module here.\n if sys.modules['__main__'].__file__ != _pydevd.__file__:\n sys.modules['__main___orig'] = sys.modules['__main__']\n sys.modules['__main__'] = _pydevd\n\n daemon = _install(_pydevd, addr, **kwargs)\n sys.argv[:] = argv\n try:\n _pydevd.main()\n except SystemExit as ex:\n daemon.exitcode = int(ex.code)\n raise\n", "path": "ptvsd/_local.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\nimport time\n\nimport pydevd\nfrom _pydevd_bundle.pydevd_comm import get_global_debugger\n\nfrom ptvsd.pydevd_hooks import install\nfrom ptvsd.runner import run as no_debug_runner\nfrom ptvsd.socket import Address\nfrom ptvsd._util import new_hidden_thread\n\n\nPYDEVD_DEFAULTS = {\n '--qt-support=auto',\n}\n\n\ndef _set_pydevd_defaults(pydevd_args):\n args_to_append = []\n for arg in PYDEVD_DEFAULTS:\n if arg not in pydevd_args:\n args_to_append.append(arg)\n return pydevd_args + args_to_append\n\n\n########################\n# high-level functions\n\ndef debug_main(address, name, kind, *extra, **kwargs):\n if not kwargs.pop('wait', False) and address.isserver:\n def unblock_debugger():\n debugger = get_global_debugger()\n while debugger is None:\n time.sleep(0.1)\n debugger = get_global_debugger()\n debugger.ready_to_run = True\n new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()\n if kind == 'module':\n run_module(address, name, *extra, **kwargs)\n else:\n run_file(address, name, *extra, **kwargs)\n\n\ndef run_main(address, name, kind, *extra, **kwargs):\n addr = Address.from_raw(address)\n sys.argv[:] = _run_main_argv(name, extra)\n runner = kwargs.pop('_runner', no_debug_runner)\n runner(addr, name, kind == 'module', *extra, **kwargs)\n\n\n########################\n# low-level functions\n\ndef run_module(address, modname, *extra, **kwargs):\n \"\"\"Run pydevd for the given module.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n filename = modname + ':'\n argv = _run_argv(addr, filename, extra, _prog=prog)\n argv.insert(argv.index('--file'), '--module')\n run(argv, addr, **kwargs)\n\n\ndef run_file(address, filename, *extra, **kwargs):\n \"\"\"Run pydevd for the given Python file.\"\"\"\n addr = Address.from_raw(address)\n if not addr.isserver:\n kwargs['singlesession'] = True\n run = kwargs.pop('_run', _run)\n prog = kwargs.pop('_prog', sys.argv[0])\n argv = _run_argv(addr, filename, extra, _prog=prog)\n run(argv, addr, **kwargs)\n\n\ndef _run_argv(address, filename, extra, _prog=sys.argv[0]):\n \"\"\"Convert the given values to an argv that pydevd.main() supports.\"\"\"\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n pydevd = []\n extra = list(extra)\n\n pydevd = _set_pydevd_defaults(pydevd)\n host, port = address\n argv = [\n _prog,\n '--port', str(port),\n ]\n if not address.isserver:\n argv.extend([\n '--client', host or 'localhost',\n ])\n return argv + pydevd + [\n '--file', filename,\n ] + extra\n\n\ndef _run_main_argv(filename, extra):\n if '--' in extra:\n pydevd = list(extra[:extra.index('--')])\n extra = list(extra[len(pydevd) + 1:])\n else:\n extra = list(extra)\n return [filename] + extra\n\n\ndef _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):\n \"\"\"Start pydevd with the given commandline args.\"\"\"\n #print(' '.join(argv))\n\n # Pydevd assumes that the \"__main__\" module is the \"pydevd\" module\n # and does some tricky stuff under that assumption. For example,\n # when the debugger starts up it calls save_main_module()\n # (in pydevd_bundle/pydevd_utils.py). That function explicitly sets\n # sys.modules[\"pydevd\"] to sys.modules[\"__main__\"] and then sets\n # the __main__ module to a new one. This makes some sense since\n # it gives the debugged script a fresh __main__ module.\n #\n # This complicates things for us since we are running a different\n # file (i.e. this one) as the __main__ module. Consequently,\n # sys.modules[\"pydevd\"] gets set to ptvsd/__main__.py. Subsequent\n # imports of the \"pydevd\" module then return the wrong module. We\n # work around this by avoiding lazy imports of the \"pydevd\" module.\n # We also replace the __main__ module with the \"pydevd\" module here.\n if sys.modules['__main__'].__file__ != _pydevd.__file__:\n sys.modules['__main___orig'] = sys.modules['__main__']\n sys.modules['__main__'] = _pydevd\n\n daemon = _install(_pydevd, addr, **kwargs)\n sys.argv[:] = argv\n try:\n _pydevd.main()\n except SystemExit as ex:\n daemon.exitcode = 0 if ex.code is None else int(ex.code)\n raise\n", "path": "ptvsd/_local.py"}]}
| 2,243 | 102 |
gh_patches_debug_5169
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-4075
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dash plugin: MPDParsingError: root tag did not match the expected tag: MPD
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [ ] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
The following steps worked until yesterday:
1. Connect to a channel livestream of Teleboy (teleboy.ch) or Zattoo (zattoo.com) with my web browser.
2. Get the .mpd URL link of the livestream.
3. Launch the command "streamlink "mpd-url-link best".
4. The livestream opens up in VLC.
Since today, the command fails.
Here is an example of the .mpd file content which streamlink fails to play: https://pastebin.com/jXD1NY1F
### Debug log
```text
streamlink "https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA" best --loglevel debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.9.6
[cli][debug] Streamlink: 2.4.0
[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)
[cli][debug] Arguments:
[cli][debug] url=https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --rtmp-rtmpdump=rtmpdump.exe
[cli][debug] --rtmpdump=C:\Program Files (x86)\Streamlink\rtmpdump\rtmpdump.exe
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files (x86)\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin dash for URL https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA
[plugins.dash][debug] Parsing MPD URL: https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA
Traceback (most recent call last):
File "runpy.py", line 197, in _run_module_as_main
File "runpy.py", line 87, in _run_code
File "C:\Program Files (x86)\Streamlink\bin\streamlink.exe\__main__.py", line 18, in <module>
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 1082, in main
handle_url()
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 576, in handle_url
streams = fetch_streams(plugin)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 470, in fetch_streams
return plugin.streams(stream_types=args.stream_types,
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", line 335, in streams
ostreams = self._get_streams()
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugins\dash.py", line 34, in _get_streams
return DASHStream.parse_manifest(self.session, url)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\stream\dash.py", line 185, in parse_manifest
mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\stream\dash_manifest.py", line 224, in __init__
super().__init__(node, root=self, *args, **kwargs)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\stream\dash_manifest.py", line 146, in __init__
raise MPDParsingError("root tag did not match the expected tag: {}".format(self.__tag__))
streamlink.stream.dash_manifest.MPDParsingError: root tag did not match the expected tag: MPD
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/utils/parse.py`
Content:
```
1 import json
2 import re
3 from urllib.parse import parse_qsl
4
5 from lxml.etree import HTML, XML
6
7 from streamlink.plugin import PluginError
8
9
10 def _parse(parser, data, name, exception, schema, *args, **kwargs):
11 try:
12 parsed = parser(data, *args, **kwargs)
13 except Exception as err:
14 snippet = repr(data)
15 if len(snippet) > 35:
16 snippet = f"{snippet[:35]} ..."
17
18 raise exception(f"Unable to parse {name}: {err} ({snippet})")
19
20 if schema:
21 parsed = schema.validate(parsed, name=name, exception=exception)
22
23 return parsed
24
25
26 def parse_json(
27 data,
28 name="JSON",
29 exception=PluginError,
30 schema=None,
31 *args, **kwargs
32 ):
33 """Wrapper around json.loads.
34
35 Provides these extra features:
36 - Wraps errors in custom exception with a snippet of the data in the message
37 """
38 return _parse(json.loads, data, name, exception, schema, *args, **kwargs)
39
40
41 def parse_html(
42 data,
43 name="HTML",
44 exception=PluginError,
45 schema=None,
46 *args, **kwargs
47 ):
48 """Wrapper around lxml.etree.HTML with some extras.
49
50 Provides these extra features:
51 - Handles incorrectly encoded HTML
52 - Wraps errors in custom exception with a snippet of the data in the message
53 """
54 if isinstance(data, str):
55 data = bytes(data, "utf8")
56
57 return _parse(HTML, data, name, exception, schema, *args, **kwargs)
58
59
60 def parse_xml(
61 data,
62 ignore_ns=False,
63 invalid_char_entities=False,
64 name="XML",
65 exception=PluginError,
66 schema=None,
67 *args, **kwargs
68 ):
69 """Wrapper around lxml.etree.XML with some extras.
70
71 Provides these extra features:
72 - Handles incorrectly encoded XML
73 - Allows stripping namespace information
74 - Wraps errors in custom exception with a snippet of the data in the message
75 """
76 if isinstance(data, str):
77 data = bytes(data, "utf8")
78 if ignore_ns:
79 data = re.sub(br"[\t ]xmlns=\"(.+?)\"", b"", data)
80 if invalid_char_entities:
81 data = re.sub(br"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)", b"&", data)
82
83 return _parse(XML, data, name, exception, schema, *args, **kwargs)
84
85
86 def parse_qsd(
87 data,
88 name="query string",
89 exception=PluginError,
90 schema=None,
91 *args, **kwargs
92 ):
93 """Parses a query string into a dict.
94
95 Provides these extra features:
96 - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value
97 - Wraps errors in custom exception with a snippet of the data in the message
98 """
99 return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py
--- a/src/streamlink/utils/parse.py
+++ b/src/streamlink/utils/parse.py
@@ -76,7 +76,7 @@
if isinstance(data, str):
data = bytes(data, "utf8")
if ignore_ns:
- data = re.sub(br"[\t ]xmlns=\"(.+?)\"", b"", data)
+ data = re.sub(br"\s+xmlns=\"(.+?)\"", b"", data)
if invalid_char_entities:
data = re.sub(br"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)", b"&", data)
|
{"golden_diff": "diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py\n--- a/src/streamlink/utils/parse.py\n+++ b/src/streamlink/utils/parse.py\n@@ -76,7 +76,7 @@\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n- data = re.sub(br\"[\\t ]xmlns=\\\"(.+?)\\\"\", b\"\", data)\n+ data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&\", data)\n", "issue": "dash plugin: MPDParsingError: root tag did not match the expected tag: MPD\n### Checklist\r\n\r\n- [X] This is a plugin issue and not a different kind of issue\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [ ] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\nLatest build from the master branch\r\n\r\n### Description\r\n\r\nThe following steps worked until yesterday:\r\n\r\n1. Connect to a channel livestream of Teleboy (teleboy.ch) or Zattoo (zattoo.com) with my web browser.\r\n2. Get the .mpd URL link of the livestream.\r\n3. Launch the command \"streamlink \"mpd-url-link best\".\r\n4. The livestream opens up in VLC.\r\n \r\nSince today, the command fails.\r\n\r\nHere is an example of the .mpd file content which streamlink fails to play: https://pastebin.com/jXD1NY1F\r\n\r\n\r\n### Debug log\r\n\r\n```text\r\nstreamlink \"https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA\" best --loglevel debug\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.9.6\r\n[cli][debug] Streamlink: 2.4.0\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --rtmp-rtmpdump=rtmpdump.exe\r\n[cli][debug] --rtmpdump=C:\\Program Files (x86)\\Streamlink\\rtmpdump\\rtmpdump.exe\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files (x86)\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin dash for URL https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA\r\n[plugins.dash][debug] Parsing MPD URL: https://zh2-0-dash-live.zahs.tv/HD_orf1/m.mpd?z32=MF2WI2LPL5RW6ZDFMNZT2YLBMMTGG43JMQ6TCNSBII4DSMJSIZBTEMJVIQ2EKLJUGEZDORJSGVCDKOBTIE4ECOKEEZWWC6DSMF2GKPJVGAYDAJTNNFXHEYLUMU6TEMJQEZYHEZLGMVZHEZLEL5WGC3THOVQWOZJ5MZZCM43JM46TSX3FG4YDGMJXG5RGIMRVGNSTCMLBGI4GEMZRG5SDQN3DGRSGGYJXGETHK43FOJPWSZB5ORSWYZLCN54TUORYGM4DSMRXEZ3D2MA\r\nTraceback (most recent call last):\r\n File \"runpy.py\", line 197, in _run_module_as_main\r\n File \"runpy.py\", line 87, in _run_code\r\n File \"C:\\Program Files (x86)\\Streamlink\\bin\\streamlink.exe\\__main__.py\", line 18, in <module>\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 1082, in main\r\n handle_url()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 576, in handle_url\r\n streams = fetch_streams(plugin)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 470, in fetch_streams\r\n return plugin.streams(stream_types=args.stream_types,\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", line 335, in streams\r\n ostreams = self._get_streams()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugins\\dash.py\", line 34, in _get_streams\r\n return DASHStream.parse_manifest(self.session, url)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\stream\\dash.py\", line 185, in parse_manifest\r\n mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\stream\\dash_manifest.py\", line 224, in __init__\r\n super().__init__(node, root=self, *args, **kwargs)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\stream\\dash_manifest.py\", line 146, in __init__\r\n raise MPDParsingError(\"root tag did not match the expected tag: {}\".format(self.__tag__))\r\nstreamlink.stream.dash_manifest.MPDParsingError: root tag did not match the expected tag: MPD\r\n```\r\n\n", "before_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded HTML\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"[\\t ]xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}], "after_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded HTML\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}]}
| 2,963 | 167 |
gh_patches_debug_8843
|
rasdani/github-patches
|
git_diff
|
huggingface__trl-675
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tutorial script missing eval_batch_size
Following this tutorial https://huggingface.co/blog/dpo-trl, I ran into out of memory issues with the DPO script. The problem was that the script missed copying per_device_eval_batch_size to Training Arguments [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L166C12-L166C12). This makes it use the default size of 8 for eval_batch_size instead of 1 as set in this script. The below addition fixes the issue.
```
training_args = TrainingArguments(
per_device_train_batch_size=script_args.per_device_train_batch_size,
--> per_device_eval_batch_size=script_args.per_device_eval_batch_size,
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/research_projects/stack_llama_2/scripts/dpo_llama2.py`
Content:
```
1 # 0. imports
2 import os
3 from dataclasses import dataclass, field
4 from typing import Dict, Optional
5
6 import torch
7 from datasets import Dataset, load_dataset
8 from peft import AutoPeftModelForCausalLM, LoraConfig
9 from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments
10
11 from trl import DPOTrainer
12
13
14 # Define and parse arguments.
15 @dataclass
16 class ScriptArguments:
17 """
18 The arguments for the DPO training script.
19 """
20
21 # data parameters
22 beta: Optional[float] = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"})
23
24 # training parameters
25 model_name_or_path: Optional[str] = field(
26 default="../sft/results/final_checkpoint",
27 metadata={"help": "the location of the SFT model name or path"},
28 )
29 learning_rate: Optional[float] = field(default=5e-4, metadata={"help": "optimizer learning rate"})
30 lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "the lr scheduler type"})
31 warmup_steps: Optional[int] = field(default=100, metadata={"help": "the number of warmup steps"})
32 weight_decay: Optional[float] = field(default=0.05, metadata={"help": "the weight decay"})
33 optimizer_type: Optional[str] = field(default="paged_adamw_32bit", metadata={"help": "the optimizer type"})
34
35 per_device_train_batch_size: Optional[int] = field(default=4, metadata={"help": "train batch size per device"})
36 per_device_eval_batch_size: Optional[int] = field(default=1, metadata={"help": "eval batch size per device"})
37 gradient_accumulation_steps: Optional[int] = field(
38 default=4, metadata={"help": "the number of gradient accumulation steps"}
39 )
40 gradient_checkpointing: Optional[bool] = field(
41 default=True, metadata={"help": "whether to use gradient checkpointing"}
42 )
43
44 lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"})
45 lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"})
46 lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"})
47
48 max_prompt_length: Optional[int] = field(default=512, metadata={"help": "the maximum prompt length"})
49 max_length: Optional[int] = field(default=1024, metadata={"help": "the maximum sequence length"})
50 max_steps: Optional[int] = field(default=1000, metadata={"help": "max number of training steps"})
51 logging_steps: Optional[int] = field(default=10, metadata={"help": "the logging frequency"})
52 save_steps: Optional[int] = field(default=100, metadata={"help": "the saving frequency"})
53 eval_steps: Optional[int] = field(default=100, metadata={"help": "the evaluation frequency"})
54
55 output_dir: Optional[str] = field(default="./results", metadata={"help": "the output directory"})
56 log_freq: Optional[int] = field(default=1, metadata={"help": "the logging frequency"})
57
58 # instrumentation
59 sanity_check: Optional[bool] = field(default=False, metadata={"help": "only train on 1000 samples"})
60 report_to: Optional[str] = field(
61 default="wandb",
62 metadata={
63 "help": 'The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`,'
64 '`"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"`,`"clearml"` and `"wandb"`. '
65 'Use `"all"` to report to all integrations installed, `"none"` for no integrations.'
66 },
67 )
68 # debug argument for distributed training
69 ignore_bias_buffers: Optional[bool] = field(
70 default=False,
71 metadata={
72 "help": "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See"
73 "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992"
74 },
75 )
76
77
78 def get_stack_exchange_paired(
79 data_dir: str = "data/rl",
80 sanity_check: bool = False,
81 cache_dir: str = None,
82 num_proc=24,
83 ) -> Dataset:
84 """Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.
85
86 The dataset is converted to a dictionary with the following structure:
87 {
88 'prompt': List[str],
89 'chosen': List[str],
90 'rejected': List[str],
91 }
92
93 Prompts are structured as follows:
94 "Question: " + <prompt> + "\n\nAnswer: "
95 """
96 dataset = load_dataset(
97 "lvwerra/stack-exchange-paired",
98 split="train",
99 cache_dir=cache_dir,
100 data_dir=data_dir,
101 )
102 original_columns = dataset.column_names
103
104 if sanity_check:
105 dataset = dataset.select(range(min(len(dataset), 1000)))
106
107 def return_prompt_and_responses(samples) -> Dict[str, str]:
108 return {
109 "prompt": ["Question: " + question + "\n\nAnswer: " for question in samples["question"]],
110 "chosen": samples["response_j"],
111 "rejected": samples["response_k"],
112 }
113
114 return dataset.map(
115 return_prompt_and_responses,
116 batched=True,
117 num_proc=num_proc,
118 remove_columns=original_columns,
119 )
120
121
122 if __name__ == "__main__":
123 parser = HfArgumentParser(ScriptArguments)
124 script_args = parser.parse_args_into_dataclasses()[0]
125
126 # 1. load a pretrained model
127 model = AutoPeftModelForCausalLM.from_pretrained(
128 script_args.model_name_or_path,
129 low_cpu_mem_usage=True,
130 torch_dtype=torch.float16,
131 load_in_4bit=True,
132 )
133 model.config.use_cache = False
134
135 if script_args.ignore_bias_buffers:
136 # torch distributed hack
137 model._ddp_params_and_buffers_to_ignore = [
138 name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool
139 ]
140
141 model_ref = AutoPeftModelForCausalLM.from_pretrained(
142 script_args.model_name_or_path,
143 low_cpu_mem_usage=True,
144 torch_dtype=torch.float16,
145 load_in_4bit=True,
146 )
147 tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
148 tokenizer.pad_token = tokenizer.eos_token
149
150 # 2. Load the Stack-exchange paired dataset
151 train_dataset = get_stack_exchange_paired(data_dir="data/rl", sanity_check=script_args.sanity_check)
152 train_dataset = train_dataset.filter(
153 lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
154 and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
155 )
156
157 # 3. Load evaluation dataset
158 eval_dataset = get_stack_exchange_paired(data_dir="data/evaluation", sanity_check=True)
159 eval_dataset = eval_dataset.filter(
160 lambda x: len(x["prompt"]) + len(x["chosen"]) <= script_args.max_length
161 and len(x["prompt"]) + len(x["rejected"]) <= script_args.max_length
162 )
163
164 # 4. initialize training arguments:
165 training_args = TrainingArguments(
166 per_device_train_batch_size=script_args.per_device_train_batch_size,
167 max_steps=script_args.max_steps,
168 logging_steps=script_args.logging_steps,
169 save_steps=script_args.save_steps,
170 gradient_accumulation_steps=script_args.gradient_accumulation_steps,
171 gradient_checkpointing=script_args.gradient_checkpointing,
172 learning_rate=script_args.learning_rate,
173 evaluation_strategy="steps",
174 eval_steps=script_args.eval_steps,
175 output_dir=script_args.output_dir,
176 report_to=script_args.report_to,
177 lr_scheduler_type=script_args.lr_scheduler_type,
178 warmup_steps=script_args.warmup_steps,
179 optim=script_args.optimizer_type,
180 bf16=True,
181 remove_unused_columns=False,
182 run_name="dpo_llama2",
183 )
184
185 peft_config = LoraConfig(
186 r=script_args.lora_r,
187 lora_alpha=script_args.lora_alpha,
188 lora_dropout=script_args.lora_dropout,
189 target_modules=[
190 "q_proj",
191 "v_proj",
192 "k_proj",
193 "out_proj",
194 "fc_in",
195 "fc_out",
196 "wte",
197 ],
198 bias="none",
199 task_type="CAUSAL_LM",
200 )
201
202 # 5. initialize the DPO trainer
203 dpo_trainer = DPOTrainer(
204 model,
205 model_ref,
206 args=training_args,
207 beta=script_args.beta,
208 train_dataset=train_dataset,
209 eval_dataset=eval_dataset,
210 tokenizer=tokenizer,
211 peft_config=peft_config,
212 max_prompt_length=script_args.max_prompt_length,
213 max_length=script_args.max_length,
214 )
215
216 # 6. train
217 dpo_trainer.train()
218 dpo_trainer.save_model(script_args.output_dir)
219
220 # 7. save
221 output_dir = os.path.join(script_args.output_dir, "final_checkpoint")
222 dpo_trainer.model.save_pretrained(output_dir)
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
--- a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
+++ b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py
@@ -164,6 +164,7 @@
# 4. initialize training arguments:
training_args = TrainingArguments(
per_device_train_batch_size=script_args.per_device_train_batch_size,
+ per_device_eval_batch_size=script_args.per_device_eval_batch_size,
max_steps=script_args.max_steps,
logging_steps=script_args.logging_steps,
save_steps=script_args.save_steps,
|
{"golden_diff": "diff --git a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py\n--- a/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py\n+++ b/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py\n@@ -164,6 +164,7 @@\n # 4. initialize training arguments:\n training_args = TrainingArguments(\n per_device_train_batch_size=script_args.per_device_train_batch_size,\n+ per_device_eval_batch_size=script_args.per_device_eval_batch_size,\n max_steps=script_args.max_steps,\n logging_steps=script_args.logging_steps,\n save_steps=script_args.save_steps,\n", "issue": "Tutorial script missing eval_batch_size\nFollowing this tutorial https://huggingface.co/blog/dpo-trl, I ran into out of memory issues with the DPO script. The problem was that the script missed copying per_device_eval_batch_size to Training Arguments [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py#L166C12-L166C12). This makes it use the default size of 8 for eval_batch_size instead of 1 as set in this script. The below addition fixes the issue.\r\n\r\n```\r\ntraining_args = TrainingArguments(\r\n per_device_train_batch_size=script_args.per_device_train_batch_size,\r\n--> per_device_eval_batch_size=script_args.per_device_eval_batch_size,\r\n```\n", "before_files": [{"content": "# 0. imports\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Optional\n\nimport torch\nfrom datasets import Dataset, load_dataset\nfrom peft import AutoPeftModelForCausalLM, LoraConfig\nfrom transformers import AutoTokenizer, HfArgumentParser, TrainingArguments\n\nfrom trl import DPOTrainer\n\n\n# Define and parse arguments.\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The arguments for the DPO training script.\n \"\"\"\n\n # data parameters\n beta: Optional[float] = field(default=0.1, metadata={\"help\": \"the beta parameter for DPO loss\"})\n\n # training parameters\n model_name_or_path: Optional[str] = field(\n default=\"../sft/results/final_checkpoint\",\n metadata={\"help\": \"the location of the SFT model name or path\"},\n )\n learning_rate: Optional[float] = field(default=5e-4, metadata={\"help\": \"optimizer learning rate\"})\n lr_scheduler_type: Optional[str] = field(default=\"cosine\", metadata={\"help\": \"the lr scheduler type\"})\n warmup_steps: Optional[int] = field(default=100, metadata={\"help\": \"the number of warmup steps\"})\n weight_decay: Optional[float] = field(default=0.05, metadata={\"help\": \"the weight decay\"})\n optimizer_type: Optional[str] = field(default=\"paged_adamw_32bit\", metadata={\"help\": \"the optimizer type\"})\n\n per_device_train_batch_size: Optional[int] = field(default=4, metadata={\"help\": \"train batch size per device\"})\n per_device_eval_batch_size: Optional[int] = field(default=1, metadata={\"help\": \"eval batch size per device\"})\n gradient_accumulation_steps: Optional[int] = field(\n default=4, metadata={\"help\": \"the number of gradient accumulation steps\"}\n )\n gradient_checkpointing: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to use gradient checkpointing\"}\n )\n\n lora_alpha: Optional[float] = field(default=16, metadata={\"help\": \"the lora alpha parameter\"})\n lora_dropout: Optional[float] = field(default=0.05, metadata={\"help\": \"the lora dropout parameter\"})\n lora_r: Optional[int] = field(default=8, metadata={\"help\": \"the lora r parameter\"})\n\n max_prompt_length: Optional[int] = field(default=512, metadata={\"help\": \"the maximum prompt length\"})\n max_length: Optional[int] = field(default=1024, metadata={\"help\": \"the maximum sequence length\"})\n max_steps: Optional[int] = field(default=1000, metadata={\"help\": \"max number of training steps\"})\n logging_steps: Optional[int] = field(default=10, metadata={\"help\": \"the logging frequency\"})\n save_steps: Optional[int] = field(default=100, metadata={\"help\": \"the saving frequency\"})\n eval_steps: Optional[int] = field(default=100, metadata={\"help\": \"the evaluation frequency\"})\n\n output_dir: Optional[str] = field(default=\"./results\", metadata={\"help\": \"the output directory\"})\n log_freq: Optional[int] = field(default=1, metadata={\"help\": \"the logging frequency\"})\n\n # instrumentation\n sanity_check: Optional[bool] = field(default=False, metadata={\"help\": \"only train on 1000 samples\"})\n report_to: Optional[str] = field(\n default=\"wandb\",\n metadata={\n \"help\": 'The list of integrations to report the results and logs to. Supported platforms are `\"azure_ml\"`,'\n '`\"comet_ml\"`, `\"mlflow\"`, `\"neptune\"`, `\"tensorboard\"`,`\"clearml\"` and `\"wandb\"`. '\n 'Use `\"all\"` to report to all integrations installed, `\"none\"` for no integrations.'\n },\n )\n # debug argument for distributed training\n ignore_bias_buffers: Optional[bool] = field(\n default=False,\n metadata={\n \"help\": \"fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See\"\n \"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992\"\n },\n )\n\n\ndef get_stack_exchange_paired(\n data_dir: str = \"data/rl\",\n sanity_check: bool = False,\n cache_dir: str = None,\n num_proc=24,\n) -> Dataset:\n \"\"\"Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.\n\n The dataset is converted to a dictionary with the following structure:\n {\n 'prompt': List[str],\n 'chosen': List[str],\n 'rejected': List[str],\n }\n\n Prompts are structured as follows:\n \"Question: \" + <prompt> + \"\\n\\nAnswer: \"\n \"\"\"\n dataset = load_dataset(\n \"lvwerra/stack-exchange-paired\",\n split=\"train\",\n cache_dir=cache_dir,\n data_dir=data_dir,\n )\n original_columns = dataset.column_names\n\n if sanity_check:\n dataset = dataset.select(range(min(len(dataset), 1000)))\n\n def return_prompt_and_responses(samples) -> Dict[str, str]:\n return {\n \"prompt\": [\"Question: \" + question + \"\\n\\nAnswer: \" for question in samples[\"question\"]],\n \"chosen\": samples[\"response_j\"],\n \"rejected\": samples[\"response_k\"],\n }\n\n return dataset.map(\n return_prompt_and_responses,\n batched=True,\n num_proc=num_proc,\n remove_columns=original_columns,\n )\n\n\nif __name__ == \"__main__\":\n parser = HfArgumentParser(ScriptArguments)\n script_args = parser.parse_args_into_dataclasses()[0]\n\n # 1. load a pretrained model\n model = AutoPeftModelForCausalLM.from_pretrained(\n script_args.model_name_or_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch.float16,\n load_in_4bit=True,\n )\n model.config.use_cache = False\n\n if script_args.ignore_bias_buffers:\n # torch distributed hack\n model._ddp_params_and_buffers_to_ignore = [\n name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool\n ]\n\n model_ref = AutoPeftModelForCausalLM.from_pretrained(\n script_args.model_name_or_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch.float16,\n load_in_4bit=True,\n )\n tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-2-7b-hf\")\n tokenizer.pad_token = tokenizer.eos_token\n\n # 2. Load the Stack-exchange paired dataset\n train_dataset = get_stack_exchange_paired(data_dir=\"data/rl\", sanity_check=script_args.sanity_check)\n train_dataset = train_dataset.filter(\n lambda x: len(x[\"prompt\"]) + len(x[\"chosen\"]) <= script_args.max_length\n and len(x[\"prompt\"]) + len(x[\"rejected\"]) <= script_args.max_length\n )\n\n # 3. Load evaluation dataset\n eval_dataset = get_stack_exchange_paired(data_dir=\"data/evaluation\", sanity_check=True)\n eval_dataset = eval_dataset.filter(\n lambda x: len(x[\"prompt\"]) + len(x[\"chosen\"]) <= script_args.max_length\n and len(x[\"prompt\"]) + len(x[\"rejected\"]) <= script_args.max_length\n )\n\n # 4. initialize training arguments:\n training_args = TrainingArguments(\n per_device_train_batch_size=script_args.per_device_train_batch_size,\n max_steps=script_args.max_steps,\n logging_steps=script_args.logging_steps,\n save_steps=script_args.save_steps,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n gradient_checkpointing=script_args.gradient_checkpointing,\n learning_rate=script_args.learning_rate,\n evaluation_strategy=\"steps\",\n eval_steps=script_args.eval_steps,\n output_dir=script_args.output_dir,\n report_to=script_args.report_to,\n lr_scheduler_type=script_args.lr_scheduler_type,\n warmup_steps=script_args.warmup_steps,\n optim=script_args.optimizer_type,\n bf16=True,\n remove_unused_columns=False,\n run_name=\"dpo_llama2\",\n )\n\n peft_config = LoraConfig(\n r=script_args.lora_r,\n lora_alpha=script_args.lora_alpha,\n lora_dropout=script_args.lora_dropout,\n target_modules=[\n \"q_proj\",\n \"v_proj\",\n \"k_proj\",\n \"out_proj\",\n \"fc_in\",\n \"fc_out\",\n \"wte\",\n ],\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n # 5. initialize the DPO trainer\n dpo_trainer = DPOTrainer(\n model,\n model_ref,\n args=training_args,\n beta=script_args.beta,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n tokenizer=tokenizer,\n peft_config=peft_config,\n max_prompt_length=script_args.max_prompt_length,\n max_length=script_args.max_length,\n )\n\n # 6. train\n dpo_trainer.train()\n dpo_trainer.save_model(script_args.output_dir)\n\n # 7. save\n output_dir = os.path.join(script_args.output_dir, \"final_checkpoint\")\n dpo_trainer.model.save_pretrained(output_dir)\n", "path": "examples/research_projects/stack_llama_2/scripts/dpo_llama2.py"}], "after_files": [{"content": "# 0. imports\nimport os\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Optional\n\nimport torch\nfrom datasets import Dataset, load_dataset\nfrom peft import AutoPeftModelForCausalLM, LoraConfig\nfrom transformers import AutoTokenizer, HfArgumentParser, TrainingArguments\n\nfrom trl import DPOTrainer\n\n\n# Define and parse arguments.\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The arguments for the DPO training script.\n \"\"\"\n\n # data parameters\n beta: Optional[float] = field(default=0.1, metadata={\"help\": \"the beta parameter for DPO loss\"})\n\n # training parameters\n model_name_or_path: Optional[str] = field(\n default=\"../sft/results/final_checkpoint\",\n metadata={\"help\": \"the location of the SFT model name or path\"},\n )\n learning_rate: Optional[float] = field(default=5e-4, metadata={\"help\": \"optimizer learning rate\"})\n lr_scheduler_type: Optional[str] = field(default=\"cosine\", metadata={\"help\": \"the lr scheduler type\"})\n warmup_steps: Optional[int] = field(default=100, metadata={\"help\": \"the number of warmup steps\"})\n weight_decay: Optional[float] = field(default=0.05, metadata={\"help\": \"the weight decay\"})\n optimizer_type: Optional[str] = field(default=\"paged_adamw_32bit\", metadata={\"help\": \"the optimizer type\"})\n\n per_device_train_batch_size: Optional[int] = field(default=4, metadata={\"help\": \"train batch size per device\"})\n per_device_eval_batch_size: Optional[int] = field(default=1, metadata={\"help\": \"eval batch size per device\"})\n gradient_accumulation_steps: Optional[int] = field(\n default=4, metadata={\"help\": \"the number of gradient accumulation steps\"}\n )\n gradient_checkpointing: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to use gradient checkpointing\"}\n )\n\n lora_alpha: Optional[float] = field(default=16, metadata={\"help\": \"the lora alpha parameter\"})\n lora_dropout: Optional[float] = field(default=0.05, metadata={\"help\": \"the lora dropout parameter\"})\n lora_r: Optional[int] = field(default=8, metadata={\"help\": \"the lora r parameter\"})\n\n max_prompt_length: Optional[int] = field(default=512, metadata={\"help\": \"the maximum prompt length\"})\n max_length: Optional[int] = field(default=1024, metadata={\"help\": \"the maximum sequence length\"})\n max_steps: Optional[int] = field(default=1000, metadata={\"help\": \"max number of training steps\"})\n logging_steps: Optional[int] = field(default=10, metadata={\"help\": \"the logging frequency\"})\n save_steps: Optional[int] = field(default=100, metadata={\"help\": \"the saving frequency\"})\n eval_steps: Optional[int] = field(default=100, metadata={\"help\": \"the evaluation frequency\"})\n\n output_dir: Optional[str] = field(default=\"./results\", metadata={\"help\": \"the output directory\"})\n log_freq: Optional[int] = field(default=1, metadata={\"help\": \"the logging frequency\"})\n\n # instrumentation\n sanity_check: Optional[bool] = field(default=False, metadata={\"help\": \"only train on 1000 samples\"})\n report_to: Optional[str] = field(\n default=\"wandb\",\n metadata={\n \"help\": 'The list of integrations to report the results and logs to. Supported platforms are `\"azure_ml\"`,'\n '`\"comet_ml\"`, `\"mlflow\"`, `\"neptune\"`, `\"tensorboard\"`,`\"clearml\"` and `\"wandb\"`. '\n 'Use `\"all\"` to report to all integrations installed, `\"none\"` for no integrations.'\n },\n )\n # debug argument for distributed training\n ignore_bias_buffers: Optional[bool] = field(\n default=False,\n metadata={\n \"help\": \"fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See\"\n \"https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992\"\n },\n )\n\n\ndef get_stack_exchange_paired(\n data_dir: str = \"data/rl\",\n sanity_check: bool = False,\n cache_dir: str = None,\n num_proc=24,\n) -> Dataset:\n \"\"\"Load the stack-exchange-paired dataset from Hugging Face and convert it to the necessary format.\n\n The dataset is converted to a dictionary with the following structure:\n {\n 'prompt': List[str],\n 'chosen': List[str],\n 'rejected': List[str],\n }\n\n Prompts are structured as follows:\n \"Question: \" + <prompt> + \"\\n\\nAnswer: \"\n \"\"\"\n dataset = load_dataset(\n \"lvwerra/stack-exchange-paired\",\n split=\"train\",\n cache_dir=cache_dir,\n data_dir=data_dir,\n )\n original_columns = dataset.column_names\n\n if sanity_check:\n dataset = dataset.select(range(min(len(dataset), 1000)))\n\n def return_prompt_and_responses(samples) -> Dict[str, str]:\n return {\n \"prompt\": [\"Question: \" + question + \"\\n\\nAnswer: \" for question in samples[\"question\"]],\n \"chosen\": samples[\"response_j\"],\n \"rejected\": samples[\"response_k\"],\n }\n\n return dataset.map(\n return_prompt_and_responses,\n batched=True,\n num_proc=num_proc,\n remove_columns=original_columns,\n )\n\n\nif __name__ == \"__main__\":\n parser = HfArgumentParser(ScriptArguments)\n script_args = parser.parse_args_into_dataclasses()[0]\n\n # 1. load a pretrained model\n model = AutoPeftModelForCausalLM.from_pretrained(\n script_args.model_name_or_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch.float16,\n load_in_4bit=True,\n )\n model.config.use_cache = False\n\n if script_args.ignore_bias_buffers:\n # torch distributed hack\n model._ddp_params_and_buffers_to_ignore = [\n name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool\n ]\n\n model_ref = AutoPeftModelForCausalLM.from_pretrained(\n script_args.model_name_or_path,\n low_cpu_mem_usage=True,\n torch_dtype=torch.float16,\n load_in_4bit=True,\n )\n tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-2-7b-hf\")\n tokenizer.pad_token = tokenizer.eos_token\n\n # 2. Load the Stack-exchange paired dataset\n train_dataset = get_stack_exchange_paired(data_dir=\"data/rl\", sanity_check=script_args.sanity_check)\n train_dataset = train_dataset.filter(\n lambda x: len(x[\"prompt\"]) + len(x[\"chosen\"]) <= script_args.max_length\n and len(x[\"prompt\"]) + len(x[\"rejected\"]) <= script_args.max_length\n )\n\n # 3. Load evaluation dataset\n eval_dataset = get_stack_exchange_paired(data_dir=\"data/evaluation\", sanity_check=True)\n eval_dataset = eval_dataset.filter(\n lambda x: len(x[\"prompt\"]) + len(x[\"chosen\"]) <= script_args.max_length\n and len(x[\"prompt\"]) + len(x[\"rejected\"]) <= script_args.max_length\n )\n\n # 4. initialize training arguments:\n training_args = TrainingArguments(\n per_device_train_batch_size=script_args.per_device_train_batch_size,\n per_device_eval_batch_size=script_args.per_device_eval_batch_size,\n max_steps=script_args.max_steps,\n logging_steps=script_args.logging_steps,\n save_steps=script_args.save_steps,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n gradient_checkpointing=script_args.gradient_checkpointing,\n learning_rate=script_args.learning_rate,\n evaluation_strategy=\"steps\",\n eval_steps=script_args.eval_steps,\n output_dir=script_args.output_dir,\n report_to=script_args.report_to,\n lr_scheduler_type=script_args.lr_scheduler_type,\n warmup_steps=script_args.warmup_steps,\n optim=script_args.optimizer_type,\n bf16=True,\n remove_unused_columns=False,\n run_name=\"dpo_llama2\",\n )\n\n peft_config = LoraConfig(\n r=script_args.lora_r,\n lora_alpha=script_args.lora_alpha,\n lora_dropout=script_args.lora_dropout,\n target_modules=[\n \"q_proj\",\n \"v_proj\",\n \"k_proj\",\n \"out_proj\",\n \"fc_in\",\n \"fc_out\",\n \"wte\",\n ],\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n )\n\n # 5. initialize the DPO trainer\n dpo_trainer = DPOTrainer(\n model,\n model_ref,\n args=training_args,\n beta=script_args.beta,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n tokenizer=tokenizer,\n peft_config=peft_config,\n max_prompt_length=script_args.max_prompt_length,\n max_length=script_args.max_length,\n )\n\n # 6. train\n dpo_trainer.train()\n dpo_trainer.save_model(script_args.output_dir)\n\n # 7. save\n output_dir = os.path.join(script_args.output_dir, \"final_checkpoint\")\n dpo_trainer.model.save_pretrained(output_dir)\n", "path": "examples/research_projects/stack_llama_2/scripts/dpo_llama2.py"}]}
| 3,077 | 168 |
gh_patches_debug_9214
|
rasdani/github-patches
|
git_diff
|
Frojd__Wagtail-Pipit-35
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Werkzeug
Add django-extensions and Werkzeug to local requirements, and use runserver_plus in docker-entrypoint.sh.
It is a lot faster than regular runserver in Docker.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Example-Project/src/core/settings/local.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 Write local settings here, or override base settings
6 """
7 from __future__ import absolute_import, unicode_literals
8
9 from core.settings.base import * # NOQA
10
11
12 DEBUG = True
13 TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
14
15 DEBUG_TOOLBAR_PATCH_SETTINGS = False
16
17 # Add django debug toolbar when using local version
18 INSTALLED_APPS += [
19 'debug_toolbar',
20 ]
21
22 MIDDLEWARE_CLASSES += [
23 'debug_toolbar.middleware.DebugToolbarMiddleware',
24 ]
25
26 # Allow weak local passwords
27 AUTH_PASSWORD_VALIDATORS = []
28
29 INTERNAL_IPS = get_env('INTERNAL_IPS', default="").split(',')
30
31
32 # Allow django-debug-bar under docker
33 def show_toolbar(request):
34 # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3
35 return not request.is_ajax()
36
37 DEBUG_TOOLBAR_CONFIG = {
38 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',
39 }
40
```
Path: `{{cookiecutter.project_name}}/src/core/settings/local.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 Write local settings here, or override base settings
6 """
7 from __future__ import absolute_import, unicode_literals
8
9 from core.settings.base import * # NOQA
10
11
12 DEBUG = True
13 TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
14
15 DEBUG_TOOLBAR_PATCH_SETTINGS = False
16
17 # Add django debug toolbar when using local version
18 INSTALLED_APPS += [
19 'debug_toolbar',
20 ]
21
22 MIDDLEWARE_CLASSES += [
23 'debug_toolbar.middleware.DebugToolbarMiddleware',
24 ]
25
26 # Allow weak local passwords
27 AUTH_PASSWORD_VALIDATORS = []
28
29 INTERNAL_IPS = get_env('INTERNAL_IPS', default="").split(',')
30
31
32 # Allow django-debug-bar under docker
33 def show_toolbar(request):
34 # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3
35 return not request.is_ajax()
36
37 DEBUG_TOOLBAR_CONFIG = {
38 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',
39 }
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Example-Project/src/core/settings/local.py b/Example-Project/src/core/settings/local.py
--- a/Example-Project/src/core/settings/local.py
+++ b/Example-Project/src/core/settings/local.py
@@ -17,6 +17,7 @@
# Add django debug toolbar when using local version
INSTALLED_APPS += [
'debug_toolbar',
+ 'django_extensions',
]
MIDDLEWARE_CLASSES += [
diff --git a/{{cookiecutter.project_name}}/src/core/settings/local.py b/{{cookiecutter.project_name}}/src/core/settings/local.py
--- a/{{cookiecutter.project_name}}/src/core/settings/local.py
+++ b/{{cookiecutter.project_name}}/src/core/settings/local.py
@@ -17,6 +17,7 @@
# Add django debug toolbar when using local version
INSTALLED_APPS += [
'debug_toolbar',
+ 'django_extensions',
]
MIDDLEWARE_CLASSES += [
|
{"golden_diff": "diff --git a/Example-Project/src/core/settings/local.py b/Example-Project/src/core/settings/local.py\n--- a/Example-Project/src/core/settings/local.py\n+++ b/Example-Project/src/core/settings/local.py\n@@ -17,6 +17,7 @@\n # Add django debug toolbar when using local version\n INSTALLED_APPS += [\n 'debug_toolbar',\n+ 'django_extensions',\n ]\n \n MIDDLEWARE_CLASSES += [\ndiff --git a/{{cookiecutter.project_name}}/src/core/settings/local.py b/{{cookiecutter.project_name}}/src/core/settings/local.py\n--- a/{{cookiecutter.project_name}}/src/core/settings/local.py\n+++ b/{{cookiecutter.project_name}}/src/core/settings/local.py\n@@ -17,6 +17,7 @@\n # Add django debug toolbar when using local version\n INSTALLED_APPS += [\n 'debug_toolbar',\n+ 'django_extensions',\n ]\n \n MIDDLEWARE_CLASSES += [\n", "issue": "Use Werkzeug\nAdd django-extensions and Werkzeug to local requirements, and use runserver_plus in docker-entrypoint.sh.\r\nIt is a lot faster than regular runserver in Docker.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWrite local settings here, or override base settings\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom core.settings.base import * # NOQA\n\n\nDEBUG = True\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n# Add django debug toolbar when using local version\nINSTALLED_APPS += [\n 'debug_toolbar',\n]\n\nMIDDLEWARE_CLASSES += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n# Allow weak local passwords\nAUTH_PASSWORD_VALIDATORS = []\n\nINTERNAL_IPS = get_env('INTERNAL_IPS', default=\"\").split(',')\n\n\n# Allow django-debug-bar under docker\ndef show_toolbar(request):\n # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3\n return not request.is_ajax()\n\nDEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',\n}\n", "path": "Example-Project/src/core/settings/local.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWrite local settings here, or override base settings\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom core.settings.base import * # NOQA\n\n\nDEBUG = True\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n# Add django debug toolbar when using local version\nINSTALLED_APPS += [\n 'debug_toolbar',\n]\n\nMIDDLEWARE_CLASSES += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n# Allow weak local passwords\nAUTH_PASSWORD_VALIDATORS = []\n\nINTERNAL_IPS = get_env('INTERNAL_IPS', default=\"\").split(',')\n\n\n# Allow django-debug-bar under docker\ndef show_toolbar(request):\n # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3\n return not request.is_ajax()\n\nDEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',\n}\n", "path": "{{cookiecutter.project_name}}/src/core/settings/local.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWrite local settings here, or override base settings\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom core.settings.base import * # NOQA\n\n\nDEBUG = True\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n# Add django debug toolbar when using local version\nINSTALLED_APPS += [\n 'debug_toolbar',\n 'django_extensions',\n]\n\nMIDDLEWARE_CLASSES += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n# Allow weak local passwords\nAUTH_PASSWORD_VALIDATORS = []\n\nINTERNAL_IPS = get_env('INTERNAL_IPS', default=\"\").split(',')\n\n\n# Allow django-debug-bar under docker\ndef show_toolbar(request):\n # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3\n return not request.is_ajax()\n\nDEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',\n}\n", "path": "Example-Project/src/core/settings/local.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nWrite local settings here, or override base settings\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom core.settings.base import * # NOQA\n\n\nDEBUG = True\nTEMPLATES[0]['OPTIONS']['debug'] = DEBUG\n\nDEBUG_TOOLBAR_PATCH_SETTINGS = False\n\n# Add django debug toolbar when using local version\nINSTALLED_APPS += [\n 'debug_toolbar',\n 'django_extensions',\n]\n\nMIDDLEWARE_CLASSES += [\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n# Allow weak local passwords\nAUTH_PASSWORD_VALIDATORS = []\n\nINTERNAL_IPS = get_env('INTERNAL_IPS', default=\"\").split(',')\n\n\n# Allow django-debug-bar under docker\ndef show_toolbar(request):\n # https://gist.github.com/douglasmiranda/9de51aaba14543851ca3\n return not request.is_ajax()\n\nDEBUG_TOOLBAR_CONFIG = {\n 'SHOW_TOOLBAR_CALLBACK': 'core.settings.local.show_toolbar',\n}\n", "path": "{{cookiecutter.project_name}}/src/core/settings/local.py"}]}
| 912 | 204 |
gh_patches_debug_393
|
rasdani/github-patches
|
git_diff
|
google-research__t5x-475
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Seg Fault after saving checkpoints
Hi,
I am getting a seg fault sometimes after the model has saved the checkpoint. It is not every checkpoint and seems to be random which checkpoints it crashes after. I am not sure if it is related to issue #340
For example, I am running `prompt_tuning/scripts/sst2-demo-xxl.sh`, and the output is below.
```
317 18:14:56.525280 140415323761728 utils.py:138] Saved Numpy Arrays for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/numpy_checkpoints/checkpoint_1104000
I0317 18:14:56.604028 140415323761728 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.614308 140622481194048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.624289 140590966570048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
I0317 18:14:56.653718 140272509271104 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896
Fatal Python error: Segmentation fault
Thread 0x00007fdb1dc01700 (most recent call first):
File "/home/dptam/.local/lib/python3.8/site-packages/jax/interpreters/pxla.py", line 664 in _sda_value
File "/home/dptam/.local/lib/python3.8/site-packages/jax/_src/device_array.py", line 266 in __array__
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py", line 447 in <lambda>
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoint_importer.py", line 84 in get
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57 in run
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 80 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007f56809df700 (most recent call first):
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 78 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007f56c7aad700 (most recent call first):
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 78 in _worker
File "/usr/lib/python3.8/threading.py", line 870 in run
File "/usr/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/usr/lib/python3.8/threading.py", line 890 in _bootstrap
Thread 0x00007fdde29efc40 (most recent call first):
File "/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py", line 693 in _write_array
https://symbolize.stripped_domain/r/?trace=7fdde2e4203b,7fdde2e420bf,e,5ef27540f,e,26f7c5aff,f,b15f59df&map=
E0317 18:14:57.770066 341059 process_state.cc:1062] RAW: Signal 11 raised at PC: 0x7fdde2e4203b while already in FailureSignalHandler!
E0317 18:14:57.770096 341059 process_state.cc:1065] RAW: tid: 341059 raised new signal
@ 0xf 1440 (unknown)
@ 0x25ed159b0 (unknown) (unknown)
@ 0x10 76231216 (unknown)
@ 0x261cdc840 (unknown) (unknown)
@ 0x2dfdd4780 (unknown) (unknown)
@ 0x5f1f8a120 (unknown) (unknown)
https://symbolize.stripped_domain/r/?trace=7fdde301ffd3,7fddd98d57f9,7fdde2e420bf,7,e,25ed159af,f,261cdc83f,2dfdd477f,5f1f8a11f&map=7a511a57244151c993b16b37978e7ed7:7fddcaefd000-7fddd9c3fd50
E0317 18:14:57.818885 341068 coredump_hook.cc:365] RAW: Remote crash data gathering hook invoked.
E0317 18:14:57.818900 341068 coredump_hook.cc:411] RAW: Skipping coredump since rlimit was 0 at process start.
E0317 18:14:57.818919 341068 client.cc:221] RAW: Coroner client retries enabled (b/136286901), will retry for up to 30 sec.
E0317 18:14:57.818922 341068 coredump_hook.cc:473] RAW: Sending fingerprint to remote end.
E0317 18:14:57.818928 341068 coredump_socket.cc:124] RAW: Stat failed errno=2 on socket /var/google/services/logmanagerd/remote_coredump.socket
E0317 18:14:57.818933 341068 coredump_hook.cc:477] RAW: Cannot send fingerprint to Coroner: [NOT_FOUND] Missing crash reporting socket. Is the listener running?
E0317 18:14:57.818938 341068 coredump_hook.cc:550] RAW: Discarding core.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 337643 Segmentation fault (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 3 failed with return code 139. Continuing.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 334750 Aborted (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 1 failed with return code 134. Continuing.
prompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 335504 Aborted (core dumped) python3 -m t5x.train --gin_search_paths="${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}" --gin_file="prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin" --gin_file="prompt_tuning/configs/prompts/from_class_labels.gin" --gin_file="prompt_tuning/configs/runs/prompt_finetune.gin" --gin.CLASS_LABELS="['positive', 'negative']" --gin.MODEL_DIR="'${MODEL_DIR}'" --gin.MIXTURE_OR_TASK_NAME="'taskless_glue_sst2_v200_examples'" --gin.MIXTURE_OR_TASK_MODULE="'prompt_tuning.data.glue'" --gin.TASK_FEATURE_LENGTHS="{'inputs': 512, 'targets': 8}" --gin.INITIAL_CHECKPOINT_PATH="'${PRETRAINED_MODEL}'" --gin.TRAIN_STEPS="1_212_000" --gin.USE_CACHED_TASKS="False" --gin.BATCH_SIZE="16" --gin.partitioning.PjitPartitioner.model_parallel_submesh="(4, 4, 1, 2)" --tfds_data_dir=${TFDS_DATA_DIR}
##### Command execution on worker 0 failed with return code 134. Continuing.
```
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2022 The T5X Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Install T5X."""
16
17 import os
18 import sys
19 import setuptools
20
21 # To enable importing version.py directly, we add its path to sys.path.
22 version_path = os.path.join(os.path.dirname(__file__), 't5x')
23 sys.path.append(version_path)
24 from version import __version__ # pylint: disable=g-import-not-at-top
25
26 # Get the long description from the README file.
27 with open('README.md') as fp:
28 _LONG_DESCRIPTION = fp.read()
29
30 _jax_version = '0.2.27'
31 _jaxlib_version = '0.1.76'
32
33 setuptools.setup(
34 name='t5x',
35 version=__version__,
36 description='T5-eXtended in JAX',
37 long_description=_LONG_DESCRIPTION,
38 long_description_content_type='text/markdown',
39 author='Google Inc.',
40 author_email='[email protected]',
41 url='http://github.com/google-research/t5x',
42 license='Apache 2.0',
43 packages=setuptools.find_packages(),
44 package_data={
45 '': ['**/*.gin'], # not all subdirectories may have __init__.py.
46 },
47 scripts=[],
48 install_requires=[
49 'absl-py',
50 'cached_property',
51 # TODO(adarob): Replace with 'clu' once >0.0.6 is released.
52 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',
53 'flax @ git+https://github.com/google/flax#egg=flax',
54 'gin-config',
55 f'jax >= {_jax_version}',
56 f'jaxlib >= {_jaxlib_version}',
57 'numpy',
58 'seqio-nightly',
59 't5',
60 'tensorflow',
61 'tensorstore',
62 ],
63 extras_require={
64 'gcp': [
65 'gevent', 'google-api-python-client', 'google-compute-engine',
66 'google-cloud-storage', 'oauth2client'
67 ],
68 'test': ['pytest'],
69
70 # Cloud TPU requirements.
71 'tpu': [f'jax[tpu] >= {_jax_version}'],
72 },
73 classifiers=[
74 'Development Status :: 4 - Beta',
75 'Intended Audience :: Developers',
76 'Intended Audience :: Science/Research',
77 'License :: OSI Approved :: Apache Software License',
78 'Topic :: Scientific/Engineering :: Artificial Intelligence',
79 ],
80 keywords='text nlp machinelearning',
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
'seqio-nightly',
't5',
'tensorflow',
- 'tensorstore',
+ 'tensorstore >= 0.1.20',
],
extras_require={
'gcp': [
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n- 'tensorstore',\n+ 'tensorstore >= 0.1.20',\n ],\n extras_require={\n 'gcp': [\n", "issue": "Seg Fault after saving checkpoints\nHi, \r\n\r\nI am getting a seg fault sometimes after the model has saved the checkpoint. It is not every checkpoint and seems to be random which checkpoints it crashes after. I am not sure if it is related to issue #340\r\n\r\nFor example, I am running `prompt_tuning/scripts/sst2-demo-xxl.sh`, and the output is below. \r\n\r\n```\r\n317 18:14:56.525280 140415323761728 utils.py:138] Saved Numpy Arrays for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/numpy_checkpoints/checkpoint_1104000\r\nI0317 18:14:56.604028 140415323761728 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.614308 140622481194048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.624289 140590966570048 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nI0317 18:14:56.653718 140272509271104 checkpoints.py:600] Saving checkpoint for step 1104000 to gs://nicl/checkpoint_models/sst/full_dataset/prompt-tuning/t5-11b/checkpoint_1104000.tmp-1647540896\r\nFatal Python error: Segmentation fault\r\n\r\n\r\nThread 0x00007fdb1dc01700 (most recent call first):\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/jax/interpreters/pxla.py\", line 664 in _sda_value\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/jax/_src/device_array.py\", line 266 in __array__\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py\", line 447 in <lambda>\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoint_importer.py\", line 84 in get\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 57 in run\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 80 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n\r\nThread 0x00007f56809df700 (most recent call first):\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 78 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n\r\n Thread 0x00007f56c7aad700 (most recent call first):\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 78 in _worker\r\n File \"/usr/lib/python3.8/threading.py\", line 870 in run\r\n File \"/usr/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/usr/lib/python3.8/threading.py\", line 890 in _bootstrap\r\nThread 0x00007fdde29efc40 (most recent call first):\r\n File \"/home/dptam/.local/lib/python3.8/site-packages/t5x/checkpoints.py\", line 693 in _write_array\r\nhttps://symbolize.stripped_domain/r/?trace=7fdde2e4203b,7fdde2e420bf,e,5ef27540f,e,26f7c5aff,f,b15f59df&map= \r\nE0317 18:14:57.770066 341059 process_state.cc:1062] RAW: Signal 11 raised at PC: 0x7fdde2e4203b while already in FailureSignalHandler!\r\nE0317 18:14:57.770096 341059 process_state.cc:1065] RAW: tid: 341059 raised new signal\r\n @ 0xf 1440 (unknown)\r\n @ 0x25ed159b0 (unknown) (unknown)\r\n @ 0x10 76231216 (unknown)\r\n @ 0x261cdc840 (unknown) (unknown)\r\n @ 0x2dfdd4780 (unknown) (unknown)\r\n @ 0x5f1f8a120 (unknown) (unknown)\r\nhttps://symbolize.stripped_domain/r/?trace=7fdde301ffd3,7fddd98d57f9,7fdde2e420bf,7,e,25ed159af,f,261cdc83f,2dfdd477f,5f1f8a11f&map=7a511a57244151c993b16b37978e7ed7:7fddcaefd000-7fddd9c3fd50 \r\nE0317 18:14:57.818885 341068 coredump_hook.cc:365] RAW: Remote crash data gathering hook invoked.\r\nE0317 18:14:57.818900 341068 coredump_hook.cc:411] RAW: Skipping coredump since rlimit was 0 at process start.\r\nE0317 18:14:57.818919 341068 client.cc:221] RAW: Coroner client retries enabled (b/136286901), will retry for up to 30 sec.\r\nE0317 18:14:57.818922 341068 coredump_hook.cc:473] RAW: Sending fingerprint to remote end.\r\nE0317 18:14:57.818928 341068 coredump_socket.cc:124] RAW: Stat failed errno=2 on socket /var/google/services/logmanagerd/remote_coredump.socket\r\nE0317 18:14:57.818933 341068 coredump_hook.cc:477] RAW: Cannot send fingerprint to Coroner: [NOT_FOUND] Missing crash reporting socket. Is the listener running?\r\nE0317 18:14:57.818938 341068 coredump_hook.cc:550] RAW: Discarding core.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 337643 Segmentation fault (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 3 failed with return code 139. Continuing.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 334750 Aborted (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 1 failed with return code 134. Continuing.\r\nprompt_tuning/scripts/sst2-demo-xxl.sh: line 37: 335504 Aborted (core dumped) python3 -m t5x.train --gin_search_paths=\"${T5X_DIR},${FLAXFORMER_DIR},${PROMPT_DIR}\" --gin_file=\"prompt_tuning/configs/models/t5_1_1_xxl_prompt.gin\" --gin_file=\"prompt_tuning/configs/prompts/from_class_labels.gin\" --gin_file=\"prompt_tuning/configs/runs/prompt_finetune.gin\" --gin.CLASS_LABELS=\"['positive', 'negative']\" --gin.MODEL_DIR=\"'${MODEL_DIR}'\" --gin.MIXTURE_OR_TASK_NAME=\"'taskless_glue_sst2_v200_examples'\" --gin.MIXTURE_OR_TASK_MODULE=\"'prompt_tuning.data.glue'\" --gin.TASK_FEATURE_LENGTHS=\"{'inputs': 512, 'targets': 8}\" --gin.INITIAL_CHECKPOINT_PATH=\"'${PRETRAINED_MODEL}'\" --gin.TRAIN_STEPS=\"1_212_000\" --gin.USE_CACHED_TASKS=\"False\" --gin.BATCH_SIZE=\"16\" --gin.partitioning.PjitPartitioner.model_parallel_submesh=\"(4, 4, 1, 2)\" --tfds_data_dir=${TFDS_DATA_DIR}\r\n##### Command execution on worker 0 failed with return code 134. Continuing.\r\n```\r\n\r\nThanks\n", "before_files": [{"content": "# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2022 The T5X Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5X.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5x')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n_jax_version = '0.2.27'\n_jaxlib_version = '0.1.76'\n\nsetuptools.setup(\n name='t5x',\n version=__version__,\n description='T5-eXtended in JAX',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/t5x',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['**/*.gin'], # not all subdirectories may have __init__.py.\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'cached_property',\n # TODO(adarob): Replace with 'clu' once >0.0.6 is released.\n 'clu @ git+https://github.com/google/CommonLoopUtils#egg=clu',\n 'flax @ git+https://github.com/google/flax#egg=flax',\n 'gin-config',\n f'jax >= {_jax_version}',\n f'jaxlib >= {_jaxlib_version}',\n 'numpy',\n 'seqio-nightly',\n 't5',\n 'tensorflow',\n 'tensorstore >= 0.1.20',\n ],\n extras_require={\n 'gcp': [\n 'gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'\n ],\n 'test': ['pytest'],\n\n # Cloud TPU requirements.\n 'tpu': [f'jax[tpu] >= {_jax_version}'],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}]}
| 3,969 | 80 |
gh_patches_debug_40022
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-5062
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ensure_spacing function may be slow
## Description
The `skimage._shared.coord.ensure_spacing` function introduced in #4760 may be slow when the number of input points is large.
## Way to reproduce
```python
In [1]: import numpy as np
In [2]: from skimage._shared.coord import ensure_spacing
In [3]: for n in range(500, 10001, 500):
...: print(f"n = {n}")
...: x = np.random.rand(n, 2)
...: %timeit ensure_spacing(x, 0.2)
...:
n = 500
27.6 ms ± 857 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
n = 1000
64.7 ms ± 1.93 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
n = 1500
105 ms ± 2.27 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
n = 2000
158 ms ± 1.18 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
n = 2500
213 ms ± 7.32 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 3000
299 ms ± 11.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 3500
378 ms ± 3.05 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 4000
428 ms ± 11.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 4500
554 ms ± 14 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 5000
647 ms ± 9.42 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 5500
777 ms ± 7.05 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 6000
870 ms ± 13.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 6500
1.03 s ± 9.56 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 7000
1.17 s ± 12.8 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 7500
1.33 s ± 28 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 8000
1.45 s ± 18.5 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 8500
1.63 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 9000
1.87 s ± 50.4 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 9500
2.07 s ± 23.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
n = 10000
2.29 s ± 51.3 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/_shared/coord.py`
Content:
```
1 import numpy as np
2 from scipy.spatial import cKDTree, distance
3
4
5 def ensure_spacing(coord, spacing=1, p_norm=np.inf):
6 """Returns a subset of coord where a minimum spacing is guaranteed.
7
8 Parameters
9 ----------
10 coord : ndarray
11 The coordinates of the considered points.
12 spacing : float
13 the maximum allowed spacing between the points.
14 p_norm : float
15 Which Minkowski p-norm to use. Should be in the range [1, inf].
16 A finite large p may cause a ValueError if overflow can occur.
17 ``inf`` corresponds to the Chebyshev distance and 2 to the
18 Euclidean distance.
19
20 Returns
21 -------
22 output : ndarray
23 A subset of coord where a minimum spacing is guaranteed.
24
25 """
26
27 output = coord
28 if len(coord):
29 # Use KDtree to find the peaks that are too close to each other
30 tree = cKDTree(coord)
31
32 indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
33 rejected_peaks_indices = set()
34 for idx, candidates in enumerate(indices):
35 if idx not in rejected_peaks_indices:
36 # keep current point and the points at exactly spacing from it
37 candidates.remove(idx)
38 dist = distance.cdist([coord[idx]],
39 coord[candidates],
40 distance.minkowski,
41 p=p_norm).reshape(-1)
42 candidates = [c for c, d in zip(candidates, dist)
43 if d < spacing]
44
45 # candidates.remove(keep)
46 rejected_peaks_indices.update(candidates)
47
48 # Remove the peaks that are too close to each other
49 output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
50
51 return output
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py
--- a/skimage/_shared/coord.py
+++ b/skimage/_shared/coord.py
@@ -2,7 +2,7 @@
from scipy.spatial import cKDTree, distance
-def ensure_spacing(coord, spacing=1, p_norm=np.inf):
+def _ensure_spacing(coord, spacing, p_norm):
"""Returns a subset of coord where a minimum spacing is guaranteed.
Parameters
@@ -24,28 +24,72 @@
"""
- output = coord
- if len(coord):
- # Use KDtree to find the peaks that are too close to each other
- tree = cKDTree(coord)
-
- indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
- rejected_peaks_indices = set()
- for idx, candidates in enumerate(indices):
- if idx not in rejected_peaks_indices:
- # keep current point and the points at exactly spacing from it
- candidates.remove(idx)
- dist = distance.cdist([coord[idx]],
- coord[candidates],
- distance.minkowski,
- p=p_norm).reshape(-1)
- candidates = [c for c, d in zip(candidates, dist)
- if d < spacing]
-
- # candidates.remove(keep)
- rejected_peaks_indices.update(candidates)
-
- # Remove the peaks that are too close to each other
- output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
+ # Use KDtree to find the peaks that are too close to each other
+ tree = cKDTree(coord)
+
+ indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
+ rejected_peaks_indices = set()
+ for idx, candidates in enumerate(indices):
+ if idx not in rejected_peaks_indices:
+ # keep current point and the points at exactly spacing from it
+ candidates.remove(idx)
+ dist = distance.cdist([coord[idx]],
+ coord[candidates],
+ distance.minkowski,
+ p=p_norm).reshape(-1)
+ candidates = [c for c, d in zip(candidates, dist)
+ if d < spacing]
+
+ # candidates.remove(keep)
+ rejected_peaks_indices.update(candidates)
+
+ # Remove the peaks that are too close to each other
+ output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
+
+ return output
+
+
+def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50):
+ """Returns a subset of coord where a minimum spacing is guaranteed.
+
+ Parameters
+ ----------
+ coord : array_like
+ The coordinates of the considered points.
+ spacing : float
+ the maximum allowed spacing between the points.
+ p_norm : float
+ Which Minkowski p-norm to use. Should be in the range [1, inf].
+ A finite large p may cause a ValueError if overflow can occur.
+ ``inf`` corresponds to the Chebyshev distance and 2 to the
+ Euclidean distance.
+ min_split_size : int
+ Minimum split size used to process ``coord`` by batch to save
+ memory. If None, the memory saving strategy is not applied.
+
+ Returns
+ -------
+ output : array_like
+ A subset of coord where a minimum spacing is guaranteed.
+
+ """
+
+ output = coords
+ if len(coords):
+
+ coords = np.atleast_2d(coords)
+ if min_split_size is None:
+ batch_list = [coords]
+ else:
+ coord_count = len(coords)
+ split_count = int(np.log2(coord_count / min_split_size)) + 1
+ split_idx = np.cumsum(
+ [coord_count // (2 ** i) for i in range(1, split_count)])
+ batch_list = np.array_split(coords, split_idx)
+
+ output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
+ for batch in batch_list:
+ output = _ensure_spacing(np.vstack([output, batch]),
+ spacing, p_norm)
return output
|
{"golden_diff": "diff --git a/skimage/_shared/coord.py b/skimage/_shared/coord.py\n--- a/skimage/_shared/coord.py\n+++ b/skimage/_shared/coord.py\n@@ -2,7 +2,7 @@\n from scipy.spatial import cKDTree, distance\n \n \n-def ensure_spacing(coord, spacing=1, p_norm=np.inf):\n+def _ensure_spacing(coord, spacing, p_norm):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n \n Parameters\n@@ -24,28 +24,72 @@\n \n \"\"\"\n \n- output = coord\n- if len(coord):\n- # Use KDtree to find the peaks that are too close to each other\n- tree = cKDTree(coord)\n-\n- indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n- rejected_peaks_indices = set()\n- for idx, candidates in enumerate(indices):\n- if idx not in rejected_peaks_indices:\n- # keep current point and the points at exactly spacing from it\n- candidates.remove(idx)\n- dist = distance.cdist([coord[idx]],\n- coord[candidates],\n- distance.minkowski,\n- p=p_norm).reshape(-1)\n- candidates = [c for c, d in zip(candidates, dist)\n- if d < spacing]\n-\n- # candidates.remove(keep)\n- rejected_peaks_indices.update(candidates)\n-\n- # Remove the peaks that are too close to each other\n- output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n+ # Use KDtree to find the peaks that are too close to each other\n+ tree = cKDTree(coord)\n+\n+ indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n+ rejected_peaks_indices = set()\n+ for idx, candidates in enumerate(indices):\n+ if idx not in rejected_peaks_indices:\n+ # keep current point and the points at exactly spacing from it\n+ candidates.remove(idx)\n+ dist = distance.cdist([coord[idx]],\n+ coord[candidates],\n+ distance.minkowski,\n+ p=p_norm).reshape(-1)\n+ candidates = [c for c, d in zip(candidates, dist)\n+ if d < spacing]\n+\n+ # candidates.remove(keep)\n+ rejected_peaks_indices.update(candidates)\n+\n+ # Remove the peaks that are too close to each other\n+ output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n+\n+ return output\n+\n+\n+def ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50):\n+ \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n+\n+ Parameters\n+ ----------\n+ coord : array_like\n+ The coordinates of the considered points.\n+ spacing : float\n+ the maximum allowed spacing between the points.\n+ p_norm : float\n+ Which Minkowski p-norm to use. Should be in the range [1, inf].\n+ A finite large p may cause a ValueError if overflow can occur.\n+ ``inf`` corresponds to the Chebyshev distance and 2 to the\n+ Euclidean distance.\n+ min_split_size : int\n+ Minimum split size used to process ``coord`` by batch to save\n+ memory. If None, the memory saving strategy is not applied.\n+\n+ Returns\n+ -------\n+ output : array_like\n+ A subset of coord where a minimum spacing is guaranteed.\n+\n+ \"\"\"\n+\n+ output = coords\n+ if len(coords):\n+\n+ coords = np.atleast_2d(coords)\n+ if min_split_size is None:\n+ batch_list = [coords]\n+ else:\n+ coord_count = len(coords)\n+ split_count = int(np.log2(coord_count / min_split_size)) + 1\n+ split_idx = np.cumsum(\n+ [coord_count // (2 ** i) for i in range(1, split_count)])\n+ batch_list = np.array_split(coords, split_idx)\n+\n+ output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n+ for batch in batch_list:\n+ output = _ensure_spacing(np.vstack([output, batch]),\n+ spacing, p_norm)\n \n return output\n", "issue": "ensure_spacing function may be slow\n## Description\r\n\r\nThe `skimage._shared.coord.ensure_spacing` function introduced in #4760 may be slow when the number of input points is large.\r\n\r\n## Way to reproduce\r\n```python\r\nIn [1]: import numpy as np\r\n\r\nIn [2]: from skimage._shared.coord import ensure_spacing\r\n\r\nIn [3]: for n in range(500, 10001, 500):\r\n ...: print(f\"n = {n}\")\r\n ...: x = np.random.rand(n, 2)\r\n ...: %timeit ensure_spacing(x, 0.2)\r\n ...: \r\nn = 500\r\n27.6 ms \u00b1 857 \u00b5s per loop (mean \u00b1 std. dev. of 7 runs, 10 loops each)\r\nn = 1000\r\n64.7 ms \u00b1 1.93 ms per loop (mean \u00b1 std. dev. of 7 runs, 10 loops each)\r\nn = 1500\r\n105 ms \u00b1 2.27 ms per loop (mean \u00b1 std. dev. of 7 runs, 10 loops each)\r\nn = 2000\r\n158 ms \u00b1 1.18 ms per loop (mean \u00b1 std. dev. of 7 runs, 10 loops each)\r\nn = 2500\r\n213 ms \u00b1 7.32 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 3000\r\n299 ms \u00b1 11.9 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 3500\r\n378 ms \u00b1 3.05 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 4000\r\n428 ms \u00b1 11.7 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 4500\r\n554 ms \u00b1 14 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 5000\r\n647 ms \u00b1 9.42 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 5500\r\n777 ms \u00b1 7.05 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 6000\r\n870 ms \u00b1 13.2 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 6500\r\n1.03 s \u00b1 9.56 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 7000\r\n1.17 s \u00b1 12.8 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 7500\r\n1.33 s \u00b1 28 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 8000\r\n1.45 s \u00b1 18.5 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 8500\r\n1.63 s \u00b1 36.6 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 9000\r\n1.87 s \u00b1 50.4 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 9500\r\n2.07 s \u00b1 23.3 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\nn = 10000\r\n2.29 s \u00b1 51.3 ms per loop (mean \u00b1 std. dev. of 7 runs, 1 loop each)\r\n```\r\n\n", "before_files": [{"content": "import numpy as np\nfrom scipy.spatial import cKDTree, distance\n\n\ndef ensure_spacing(coord, spacing=1, p_norm=np.inf):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : ndarray\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n\n Returns\n -------\n output : ndarray\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n output = coord\n if len(coord):\n # Use KDtree to find the peaks that are too close to each other\n tree = cKDTree(coord)\n\n indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n rejected_peaks_indices = set()\n for idx, candidates in enumerate(indices):\n if idx not in rejected_peaks_indices:\n # keep current point and the points at exactly spacing from it\n candidates.remove(idx)\n dist = distance.cdist([coord[idx]],\n coord[candidates],\n distance.minkowski,\n p=p_norm).reshape(-1)\n candidates = [c for c, d in zip(candidates, dist)\n if d < spacing]\n\n # candidates.remove(keep)\n rejected_peaks_indices.update(candidates)\n\n # Remove the peaks that are too close to each other\n output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n\n return output\n", "path": "skimage/_shared/coord.py"}], "after_files": [{"content": "import numpy as np\nfrom scipy.spatial import cKDTree, distance\n\n\ndef _ensure_spacing(coord, spacing, p_norm):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : ndarray\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n\n Returns\n -------\n output : ndarray\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n # Use KDtree to find the peaks that are too close to each other\n tree = cKDTree(coord)\n\n indices = tree.query_ball_point(coord, r=spacing, p=p_norm)\n rejected_peaks_indices = set()\n for idx, candidates in enumerate(indices):\n if idx not in rejected_peaks_indices:\n # keep current point and the points at exactly spacing from it\n candidates.remove(idx)\n dist = distance.cdist([coord[idx]],\n coord[candidates],\n distance.minkowski,\n p=p_norm).reshape(-1)\n candidates = [c for c, d in zip(candidates, dist)\n if d < spacing]\n\n # candidates.remove(keep)\n rejected_peaks_indices.update(candidates)\n\n # Remove the peaks that are too close to each other\n output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)\n\n return output\n\n\ndef ensure_spacing(coords, spacing=1, p_norm=np.inf, min_split_size=50):\n \"\"\"Returns a subset of coord where a minimum spacing is guaranteed.\n\n Parameters\n ----------\n coord : array_like\n The coordinates of the considered points.\n spacing : float\n the maximum allowed spacing between the points.\n p_norm : float\n Which Minkowski p-norm to use. Should be in the range [1, inf].\n A finite large p may cause a ValueError if overflow can occur.\n ``inf`` corresponds to the Chebyshev distance and 2 to the\n Euclidean distance.\n min_split_size : int\n Minimum split size used to process ``coord`` by batch to save\n memory. If None, the memory saving strategy is not applied.\n\n Returns\n -------\n output : array_like\n A subset of coord where a minimum spacing is guaranteed.\n\n \"\"\"\n\n output = coords\n if len(coords):\n\n coords = np.atleast_2d(coords)\n if min_split_size is None:\n batch_list = [coords]\n else:\n coord_count = len(coords)\n split_count = int(np.log2(coord_count / min_split_size)) + 1\n split_idx = np.cumsum(\n [coord_count // (2 ** i) for i in range(1, split_count)])\n batch_list = np.array_split(coords, split_idx)\n\n output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)\n for batch in batch_list:\n output = _ensure_spacing(np.vstack([output, batch]),\n spacing, p_norm)\n\n return output\n", "path": "skimage/_shared/coord.py"}]}
| 1,644 | 952 |
gh_patches_debug_20229
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2949
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disallow empty filters
Currently rules can add whatever filters they like. We should guard against simple placeholders like empty strings that will allow everything.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/core/filters.py`
Content:
```
1 """
2 The filters module allows developers to apply filters to datasources,
3 by adding them directly or through dependent components like parsers
4 and combiners. A filter is a simple string, and it matches if it is contained
5 anywhere within a line.
6
7 If a datasource has filters defined, it will return only lines matching at
8 least one of them. If a datasource has no filters, it will return all lines.
9
10 Filters can be added to components like parsers and combiners, to apply consistent
11 filtering to multiple underlying datasources that are configured as filterable.
12
13 Filters aren't applicable to "raw" datasources, which are created with
14 ``kind=RawFileProvider`` and have ``RegistryPoint`` instances with ``raw=True``.
15
16 The addition of a single filter can cause a datasource to change from returning
17 all lines to returning just those that match. Therefore, any filtered
18 datasource should have at least one filter in the commit introducing it so
19 downstream components don't inadvertently change its behavior.
20
21 The benefit of this fragility is the ability to drastically reduce in-memory
22 footprint and archive sizes. An additional benefit is the ability to evaluate
23 only lines known to be free of sensitive information.
24
25 Filters added to a ``RegistryPoint`` will be applied to all datasources that
26 implement it. Filters added to a datasource implementation apply only to that
27 implementation.
28
29 For example, a filter added to ``Specs.ps_auxww`` will apply to
30 ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
31 ``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will
32 only apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``
33 for those classes.
34
35 Filtering can be disabled globally by setting the environment variable
36 ``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be
37 filtered even if filters are defined for them.
38 """
39 import os
40 import pkgutil
41 import six
42 import yaml as ser
43 from collections import defaultdict
44
45 import insights
46 from insights.core import dr, plugins
47 from insights.util import parse_bool
48
49 _CACHE = {}
50 FILTERS = defaultdict(set)
51 ENABLED = parse_bool(os.environ.get("INSIGHTS_FILTERS_ENABLED"), default=True)
52
53
54 def add_filter(component, patterns):
55 """
56 Add a filter or list of filters to a component. When the component is
57 a datasource, the filter will be directly added to that datasouce.
58 In cases when the component is a parser or combiner, the filter will be
59 added to underlying filterable datasources by traversing dependency graph.
60 A filter is a simple string, and it matches if it is contained anywhere
61 within a line.
62
63 Args:
64 component (component): The component to filter, can be datasource,
65 parser or combiner.
66 patterns (str, [str]): A string, list of strings, or set of strings to
67 add to the datasource's filters.
68 """
69 def inner(component, patterns):
70 if component in _CACHE:
71 del _CACHE[component]
72 if isinstance(patterns, six.string_types):
73 FILTERS[component].add(patterns)
74 elif isinstance(patterns, list):
75 FILTERS[component] |= set(patterns)
76 elif isinstance(patterns, set):
77 FILTERS[component] |= patterns
78 else:
79 raise TypeError("patterns must be string, list, or set.")
80
81 if not plugins.is_datasource(component):
82 for dep in dr.run_order(dr.get_dependency_graph(component)):
83 if plugins.is_datasource(dep):
84 d = dr.get_delegate(dep)
85 if d.filterable:
86 inner(dep, patterns)
87 else:
88 delegate = dr.get_delegate(component)
89
90 if delegate.raw:
91 raise Exception("Filters aren't applicable to raw datasources.")
92
93 if not delegate.filterable:
94 raise Exception("Filters aren't applicable to %s." % dr.get_name(component))
95
96 inner(component, patterns)
97
98
99 _add_filter = add_filter
100
101
102 def get_filters(component):
103 """
104 Get the set of filters for the given datasource.
105
106 Filters added to a ``RegistryPoint`` will be applied to all datasources that
107 implement it. Filters added to a datasource implementation apply only to
108 that implementation.
109
110 For example, a filter added to ``Specs.ps_auxww`` will apply to
111 ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
112 ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
113 will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
114 ``insights.specs`` for those classes.
115
116 Args:
117 component (a datasource): The target datasource
118
119 Returns:
120 set: The set of filters defined for the datasource
121 """
122 def inner(c, filters=None):
123 filters = filters or set()
124 if not ENABLED:
125 return filters
126
127 if not plugins.is_datasource(c):
128 return filters
129
130 if c in FILTERS:
131 filters |= FILTERS[c]
132
133 for d in dr.get_dependents(c):
134 filters |= inner(d, filters)
135 return filters
136
137 if component not in _CACHE:
138 _CACHE[component] = inner(component)
139 return _CACHE[component]
140
141
142 def apply_filters(target, lines):
143 """
144 Applys filters to the lines of a datasource. This function is used only in
145 integration tests. Filters are applied in an equivalent but more performant
146 way at run time.
147 """
148 filters = get_filters(target)
149 if filters:
150 for l in lines:
151 if any(f in l for f in filters):
152 yield l
153 else:
154 for l in lines:
155 yield l
156
157
158 _filename = ".".join(["filters", ser.__name__])
159 _dumps = ser.dump
160 _loads = ser.safe_load
161
162
163 def loads(string):
164 """Loads the filters dictionary given a string."""
165 d = _loads(string)
166 for k, v in d.items():
167 FILTERS[dr.get_component(k) or k] = set(v)
168
169
170 def load(stream=None):
171 """
172 Loads filters from a stream, normally an open file. If one is
173 not passed, filters are loaded from a default location within
174 the project.
175 """
176 if stream:
177 loads(stream.read())
178 else:
179 data = pkgutil.get_data(insights.__name__, _filename)
180 return loads(data) if data else None
181
182
183 def dumps():
184 """Returns a string representation of the FILTERS dictionary."""
185 d = {}
186 for k, v in FILTERS.items():
187 d[dr.get_name(k)] = list(v)
188 return _dumps(d)
189
190
191 def dump(stream=None):
192 """
193 Dumps a string representation of `FILTERS` to a stream, normally an
194 open file. If none is passed, `FILTERS` is dumped to a default location
195 within the project.
196 """
197 if stream:
198 stream.write(dumps())
199 else:
200 path = os.path.join(os.path.dirname(insights.__file__), _filename)
201 with open(path, "w") as f:
202 f.write(dumps())
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/core/filters.py b/insights/core/filters.py
--- a/insights/core/filters.py
+++ b/insights/core/filters.py
@@ -69,14 +69,21 @@
def inner(component, patterns):
if component in _CACHE:
del _CACHE[component]
+
+ types = six.string_types + (list, set)
+ if not isinstance(patterns, types):
+ raise TypeError("Filter patterns must be of type string, list, or set.")
+
if isinstance(patterns, six.string_types):
- FILTERS[component].add(patterns)
+ patterns = set([patterns])
elif isinstance(patterns, list):
- FILTERS[component] |= set(patterns)
- elif isinstance(patterns, set):
- FILTERS[component] |= patterns
- else:
- raise TypeError("patterns must be string, list, or set.")
+ patterns = set(patterns)
+
+ for pat in patterns:
+ if not pat:
+ raise Exception("Filter patterns must not be empy.")
+
+ FILTERS[component] |= patterns
if not plugins.is_datasource(component):
for dep in dr.run_order(dr.get_dependency_graph(component)):
|
{"golden_diff": "diff --git a/insights/core/filters.py b/insights/core/filters.py\n--- a/insights/core/filters.py\n+++ b/insights/core/filters.py\n@@ -69,14 +69,21 @@\n def inner(component, patterns):\n if component in _CACHE:\n del _CACHE[component]\n+\n+ types = six.string_types + (list, set)\n+ if not isinstance(patterns, types):\n+ raise TypeError(\"Filter patterns must be of type string, list, or set.\")\n+\n if isinstance(patterns, six.string_types):\n- FILTERS[component].add(patterns)\n+ patterns = set([patterns])\n elif isinstance(patterns, list):\n- FILTERS[component] |= set(patterns)\n- elif isinstance(patterns, set):\n- FILTERS[component] |= patterns\n- else:\n- raise TypeError(\"patterns must be string, list, or set.\")\n+ patterns = set(patterns)\n+\n+ for pat in patterns:\n+ if not pat:\n+ raise Exception(\"Filter patterns must not be empy.\")\n+\n+ FILTERS[component] |= patterns\n \n if not plugins.is_datasource(component):\n for dep in dr.run_order(dr.get_dependency_graph(component)):\n", "issue": "Disallow empty filters\nCurrently rules can add whatever filters they like. We should guard against simple placeholders like empty strings that will allow everything.\n", "before_files": [{"content": "\"\"\"\nThe filters module allows developers to apply filters to datasources,\nby adding them directly or through dependent components like parsers\nand combiners. A filter is a simple string, and it matches if it is contained\nanywhere within a line.\n\nIf a datasource has filters defined, it will return only lines matching at\nleast one of them. If a datasource has no filters, it will return all lines.\n\nFilters can be added to components like parsers and combiners, to apply consistent\nfiltering to multiple underlying datasources that are configured as filterable.\n\nFilters aren't applicable to \"raw\" datasources, which are created with\n``kind=RawFileProvider`` and have ``RegistryPoint`` instances with ``raw=True``.\n\nThe addition of a single filter can cause a datasource to change from returning\nall lines to returning just those that match. Therefore, any filtered\ndatasource should have at least one filter in the commit introducing it so\ndownstream components don't inadvertently change its behavior.\n\nThe benefit of this fragility is the ability to drastically reduce in-memory\nfootprint and archive sizes. An additional benefit is the ability to evaluate\nonly lines known to be free of sensitive information.\n\nFilters added to a ``RegistryPoint`` will be applied to all datasources that\nimplement it. Filters added to a datasource implementation apply only to that\nimplementation.\n\nFor example, a filter added to ``Specs.ps_auxww`` will apply to\n``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,\n``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will\nonly apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``\nfor those classes.\n\nFiltering can be disabled globally by setting the environment variable\n``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be\nfiltered even if filters are defined for them.\n\"\"\"\nimport os\nimport pkgutil\nimport six\nimport yaml as ser\nfrom collections import defaultdict\n\nimport insights\nfrom insights.core import dr, plugins\nfrom insights.util import parse_bool\n\n_CACHE = {}\nFILTERS = defaultdict(set)\nENABLED = parse_bool(os.environ.get(\"INSIGHTS_FILTERS_ENABLED\"), default=True)\n\n\ndef add_filter(component, patterns):\n \"\"\"\n Add a filter or list of filters to a component. When the component is\n a datasource, the filter will be directly added to that datasouce.\n In cases when the component is a parser or combiner, the filter will be\n added to underlying filterable datasources by traversing dependency graph.\n A filter is a simple string, and it matches if it is contained anywhere\n within a line.\n\n Args:\n component (component): The component to filter, can be datasource,\n parser or combiner.\n patterns (str, [str]): A string, list of strings, or set of strings to\n add to the datasource's filters.\n \"\"\"\n def inner(component, patterns):\n if component in _CACHE:\n del _CACHE[component]\n if isinstance(patterns, six.string_types):\n FILTERS[component].add(patterns)\n elif isinstance(patterns, list):\n FILTERS[component] |= set(patterns)\n elif isinstance(patterns, set):\n FILTERS[component] |= patterns\n else:\n raise TypeError(\"patterns must be string, list, or set.\")\n\n if not plugins.is_datasource(component):\n for dep in dr.run_order(dr.get_dependency_graph(component)):\n if plugins.is_datasource(dep):\n d = dr.get_delegate(dep)\n if d.filterable:\n inner(dep, patterns)\n else:\n delegate = dr.get_delegate(component)\n\n if delegate.raw:\n raise Exception(\"Filters aren't applicable to raw datasources.\")\n\n if not delegate.filterable:\n raise Exception(\"Filters aren't applicable to %s.\" % dr.get_name(component))\n\n inner(component, patterns)\n\n\n_add_filter = add_filter\n\n\ndef get_filters(component):\n \"\"\"\n Get the set of filters for the given datasource.\n\n Filters added to a ``RegistryPoint`` will be applied to all datasources that\n implement it. Filters added to a datasource implementation apply only to\n that implementation.\n\n For example, a filter added to ``Specs.ps_auxww`` will apply to\n ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,\n ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``\n will only apply to ``DefaultSpecs.ps_auxww``. See the modules in\n ``insights.specs`` for those classes.\n\n Args:\n component (a datasource): The target datasource\n\n Returns:\n set: The set of filters defined for the datasource\n \"\"\"\n def inner(c, filters=None):\n filters = filters or set()\n if not ENABLED:\n return filters\n\n if not plugins.is_datasource(c):\n return filters\n\n if c in FILTERS:\n filters |= FILTERS[c]\n\n for d in dr.get_dependents(c):\n filters |= inner(d, filters)\n return filters\n\n if component not in _CACHE:\n _CACHE[component] = inner(component)\n return _CACHE[component]\n\n\ndef apply_filters(target, lines):\n \"\"\"\n Applys filters to the lines of a datasource. This function is used only in\n integration tests. Filters are applied in an equivalent but more performant\n way at run time.\n \"\"\"\n filters = get_filters(target)\n if filters:\n for l in lines:\n if any(f in l for f in filters):\n yield l\n else:\n for l in lines:\n yield l\n\n\n_filename = \".\".join([\"filters\", ser.__name__])\n_dumps = ser.dump\n_loads = ser.safe_load\n\n\ndef loads(string):\n \"\"\"Loads the filters dictionary given a string.\"\"\"\n d = _loads(string)\n for k, v in d.items():\n FILTERS[dr.get_component(k) or k] = set(v)\n\n\ndef load(stream=None):\n \"\"\"\n Loads filters from a stream, normally an open file. If one is\n not passed, filters are loaded from a default location within\n the project.\n \"\"\"\n if stream:\n loads(stream.read())\n else:\n data = pkgutil.get_data(insights.__name__, _filename)\n return loads(data) if data else None\n\n\ndef dumps():\n \"\"\"Returns a string representation of the FILTERS dictionary.\"\"\"\n d = {}\n for k, v in FILTERS.items():\n d[dr.get_name(k)] = list(v)\n return _dumps(d)\n\n\ndef dump(stream=None):\n \"\"\"\n Dumps a string representation of `FILTERS` to a stream, normally an\n open file. If none is passed, `FILTERS` is dumped to a default location\n within the project.\n \"\"\"\n if stream:\n stream.write(dumps())\n else:\n path = os.path.join(os.path.dirname(insights.__file__), _filename)\n with open(path, \"w\") as f:\n f.write(dumps())\n", "path": "insights/core/filters.py"}], "after_files": [{"content": "\"\"\"\nThe filters module allows developers to apply filters to datasources,\nby adding them directly or through dependent components like parsers\nand combiners. A filter is a simple string, and it matches if it is contained\nanywhere within a line.\n\nIf a datasource has filters defined, it will return only lines matching at\nleast one of them. If a datasource has no filters, it will return all lines.\n\nFilters can be added to components like parsers and combiners, to apply consistent\nfiltering to multiple underlying datasources that are configured as filterable.\n\nFilters aren't applicable to \"raw\" datasources, which are created with\n``kind=RawFileProvider`` and have ``RegistryPoint`` instances with ``raw=True``.\n\nThe addition of a single filter can cause a datasource to change from returning\nall lines to returning just those that match. Therefore, any filtered\ndatasource should have at least one filter in the commit introducing it so\ndownstream components don't inadvertently change its behavior.\n\nThe benefit of this fragility is the ability to drastically reduce in-memory\nfootprint and archive sizes. An additional benefit is the ability to evaluate\nonly lines known to be free of sensitive information.\n\nFilters added to a ``RegistryPoint`` will be applied to all datasources that\nimplement it. Filters added to a datasource implementation apply only to that\nimplementation.\n\nFor example, a filter added to ``Specs.ps_auxww`` will apply to\n``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,\n``SosSpecs.ps_auxww``, etc. But a filter added to `DefaultSpecs.ps_auxww` will\nonly apply to ``DefaultSpecs.ps_auxww``. See the modules in ``insights.specs``\nfor those classes.\n\nFiltering can be disabled globally by setting the environment variable\n``INSIGHTS_FILTERS_ENABLED=False``. This means that no datasources will be\nfiltered even if filters are defined for them.\n\"\"\"\nimport os\nimport pkgutil\nimport six\nimport yaml as ser\nfrom collections import defaultdict\n\nimport insights\nfrom insights.core import dr, plugins\nfrom insights.util import parse_bool\n\n_CACHE = {}\nFILTERS = defaultdict(set)\nENABLED = parse_bool(os.environ.get(\"INSIGHTS_FILTERS_ENABLED\"), default=True)\n\n\ndef add_filter(component, patterns):\n \"\"\"\n Add a filter or list of filters to a component. When the component is\n a datasource, the filter will be directly added to that datasouce.\n In cases when the component is a parser or combiner, the filter will be\n added to underlying filterable datasources by traversing dependency graph.\n A filter is a simple string, and it matches if it is contained anywhere\n within a line.\n\n Args:\n component (component): The component to filter, can be datasource,\n parser or combiner.\n patterns (str, [str]): A string, list of strings, or set of strings to\n add to the datasource's filters.\n \"\"\"\n def inner(component, patterns):\n if component in _CACHE:\n del _CACHE[component]\n\n types = six.string_types + (list, set)\n if not isinstance(patterns, types):\n raise TypeError(\"Filter patterns must be of type string, list, or set.\")\n\n if isinstance(patterns, six.string_types):\n patterns = set([patterns])\n elif isinstance(patterns, list):\n patterns = set(patterns)\n\n for pat in patterns:\n if not pat:\n raise Exception(\"Filter patterns must not be empy.\")\n\n FILTERS[component] |= patterns\n\n if not plugins.is_datasource(component):\n for dep in dr.run_order(dr.get_dependency_graph(component)):\n if plugins.is_datasource(dep):\n d = dr.get_delegate(dep)\n if d.filterable:\n inner(dep, patterns)\n else:\n delegate = dr.get_delegate(component)\n\n if delegate.raw:\n raise Exception(\"Filters aren't applicable to raw datasources.\")\n\n if not delegate.filterable:\n raise Exception(\"Filters aren't applicable to %s.\" % dr.get_name(component))\n\n inner(component, patterns)\n\n\n_add_filter = add_filter\n\n\ndef get_filters(component):\n \"\"\"\n Get the set of filters for the given datasource.\n\n Filters added to a ``RegistryPoint`` will be applied to all datasources that\n implement it. Filters added to a datasource implementation apply only to\n that implementation.\n\n For example, a filter added to ``Specs.ps_auxww`` will apply to\n ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,\n ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``\n will only apply to ``DefaultSpecs.ps_auxww``. See the modules in\n ``insights.specs`` for those classes.\n\n Args:\n component (a datasource): The target datasource\n\n Returns:\n set: The set of filters defined for the datasource\n \"\"\"\n def inner(c, filters=None):\n filters = filters or set()\n if not ENABLED:\n return filters\n\n if not plugins.is_datasource(c):\n return filters\n\n if c in FILTERS:\n filters |= FILTERS[c]\n\n for d in dr.get_dependents(c):\n filters |= inner(d, filters)\n return filters\n\n if component not in _CACHE:\n _CACHE[component] = inner(component)\n return _CACHE[component]\n\n\ndef apply_filters(target, lines):\n \"\"\"\n Applys filters to the lines of a datasource. This function is used only in\n integration tests. Filters are applied in an equivalent but more performant\n way at run time.\n \"\"\"\n filters = get_filters(target)\n if filters:\n for l in lines:\n if any(f in l for f in filters):\n yield l\n else:\n for l in lines:\n yield l\n\n\n_filename = \".\".join([\"filters\", ser.__name__])\n_dumps = ser.dump\n_loads = ser.safe_load\n\n\ndef loads(string):\n \"\"\"Loads the filters dictionary given a string.\"\"\"\n d = _loads(string)\n for k, v in d.items():\n FILTERS[dr.get_component(k) or k] = set(v)\n\n\ndef load(stream=None):\n \"\"\"\n Loads filters from a stream, normally an open file. If one is\n not passed, filters are loaded from a default location within\n the project.\n \"\"\"\n if stream:\n loads(stream.read())\n else:\n data = pkgutil.get_data(insights.__name__, _filename)\n return loads(data) if data else None\n\n\ndef dumps():\n \"\"\"Returns a string representation of the FILTERS dictionary.\"\"\"\n d = {}\n for k, v in FILTERS.items():\n d[dr.get_name(k)] = list(v)\n return _dumps(d)\n\n\ndef dump(stream=None):\n \"\"\"\n Dumps a string representation of `FILTERS` to a stream, normally an\n open file. If none is passed, `FILTERS` is dumped to a default location\n within the project.\n \"\"\"\n if stream:\n stream.write(dumps())\n else:\n path = os.path.join(os.path.dirname(insights.__file__), _filename)\n with open(path, \"w\") as f:\n f.write(dumps())\n", "path": "insights/core/filters.py"}]}
| 2,327 | 273 |
gh_patches_debug_12945
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-333
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Device Name Filter to Secrets API
Filter secrets by the devices they're associated to.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/secrets/filters.py`
Content:
```
1 import django_filters
2
3 from .models import Secret, SecretRole
4
5
6 class SecretFilter(django_filters.FilterSet):
7 role_id = django_filters.ModelMultipleChoiceFilter(
8 name='role',
9 queryset=SecretRole.objects.all(),
10 label='Role (ID)',
11 )
12 role = django_filters.ModelMultipleChoiceFilter(
13 name='role',
14 queryset=SecretRole.objects.all(),
15 to_field_name='slug',
16 label='Role (slug)',
17 )
18
19 class Meta:
20 model = Secret
21 fields = ['name', 'role_id', 'role']
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/secrets/filters.py b/netbox/secrets/filters.py
--- a/netbox/secrets/filters.py
+++ b/netbox/secrets/filters.py
@@ -1,6 +1,7 @@
import django_filters
from .models import Secret, SecretRole
+from dcim.models import Device
class SecretFilter(django_filters.FilterSet):
@@ -15,7 +16,13 @@
to_field_name='slug',
label='Role (slug)',
)
+ device = django_filters.ModelMultipleChoiceFilter(
+ name='device',
+ queryset=Device.objects.all(),
+ to_field_name='name',
+ label='Device (Name)',
+ )
class Meta:
model = Secret
- fields = ['name', 'role_id', 'role']
+ fields = ['name', 'role_id', 'role', 'device']
|
{"golden_diff": "diff --git a/netbox/secrets/filters.py b/netbox/secrets/filters.py\n--- a/netbox/secrets/filters.py\n+++ b/netbox/secrets/filters.py\n@@ -1,6 +1,7 @@\n import django_filters\n \n from .models import Secret, SecretRole\n+from dcim.models import Device\n \n \n class SecretFilter(django_filters.FilterSet):\n@@ -15,7 +16,13 @@\n to_field_name='slug',\n label='Role (slug)',\n )\n+ device = django_filters.ModelMultipleChoiceFilter(\n+ name='device',\n+ queryset=Device.objects.all(),\n+ to_field_name='name',\n+ label='Device (Name)',\n+ )\n \n class Meta:\n model = Secret\n- fields = ['name', 'role_id', 'role']\n+ fields = ['name', 'role_id', 'role', 'device']\n", "issue": "Add Device Name Filter to Secrets API\nFilter secrets by the devices they're associated to.\n\n", "before_files": [{"content": "import django_filters\n\nfrom .models import Secret, SecretRole\n\n\nclass SecretFilter(django_filters.FilterSet):\n role_id = django_filters.ModelMultipleChoiceFilter(\n name='role',\n queryset=SecretRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n name='role',\n queryset=SecretRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n\n class Meta:\n model = Secret\n fields = ['name', 'role_id', 'role']\n", "path": "netbox/secrets/filters.py"}], "after_files": [{"content": "import django_filters\n\nfrom .models import Secret, SecretRole\nfrom dcim.models import Device\n\n\nclass SecretFilter(django_filters.FilterSet):\n role_id = django_filters.ModelMultipleChoiceFilter(\n name='role',\n queryset=SecretRole.objects.all(),\n label='Role (ID)',\n )\n role = django_filters.ModelMultipleChoiceFilter(\n name='role',\n queryset=SecretRole.objects.all(),\n to_field_name='slug',\n label='Role (slug)',\n )\n device = django_filters.ModelMultipleChoiceFilter(\n name='device',\n queryset=Device.objects.all(),\n to_field_name='name',\n label='Device (Name)',\n )\n\n class Meta:\n model = Secret\n fields = ['name', 'role_id', 'role', 'device']\n", "path": "netbox/secrets/filters.py"}]}
| 434 | 195 |
gh_patches_debug_4955
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-3557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Long-running space reclamation task
**Version**
```
"versions": [
{
"component": "core",
"version": "3.21.0",
"package": "pulpcore"
},
{
"component": "container",
"version": "2.14.2",
"package": "pulp-container"
},
{
"component": "rpm",
"version": "3.18.5",
"package": "pulp-rpm"
},
{
"component": "python",
"version": "3.7.2",
"package": "pulp-python"
},
{
"component": "ostree",
"version": "2.0.0a6",
"package": "pulp-ostree"
},
{
"component": "file",
"version": "1.11.1",
"package": "pulp-file"
},
{
"component": "deb",
"version": "2.20.0",
"package": "pulp_deb"
},
{
"component": "certguard",
"version": "1.5.5",
"package": "pulp-certguard"
},
{
"component": "ansible",
"version": "0.15.0",
"package": "pulp-ansible"
}
],
```
Katello nightly (4.7)
**Describe the bug**
I noticed my reclaim space task was taking over 20 minutes in an environment with 63 repositories and 91485 rpm content units (to give some perspective). PostgresSQL was being heavily taxed and taking 100% of one CPU core. I tried to cancel it, but the cancellation was stuck so I needed to restart Pulpcore to stop the space reclamation.
Here's the task output after it was canceled forcefully:
```
{
"pulp_href": "/pulp/api/v3/tasks/bce46114-a5d9-445a-a898-217210bf1975/",
"pulp_created": "2022-11-15T16:38:50.639518Z",
"state": "failed",
"name": "pulpcore.app.tasks.reclaim_space.reclaim_space",
"logging_cid": "c658f06c-3b76-49f6-a514-b19dd3bfbe52",
"started_at": "2022-11-15T16:38:50.688113Z",
"finished_at": "2022-11-15T17:09:06.918179Z",
"error": {
"reason": "Worker has gone missing."
},
"worker": "/pulp/api/v3/workers/80173b0a-f731-4c7b-b3ec-ed993369044e/",
"parent_task": null,
"child_tasks": [],
"task_group": null,
"progress_reports": [],
"created_resources": [],
"reserved_resources_record": [
"shared:/pulp/api/v3/repositories/rpm/rpm/4ad1fb8e-ef06-42e6-a83a-00da97551dce/",
"shared:/pulp/api/v3/repositories/rpm/rpm/3224e11b-ec85-4e3d-8d7b-fd44dcfd184d/",
"shared:/pulp/api/v3/repositories/rpm/rpm/d0f49692-31dd-4709-9e52-27be83167a3f/",
"shared:/pulp/api/v3/repositories/rpm/rpm/bef78a95-9555-467b-9fe6-66650c081757/",
"shared:/pulp/api/v3/repositories/rpm/rpm/e5838919-ba35-4497-b8a0-98c10af8941b/",
"shared:/pulp/api/v3/repositories/rpm/rpm/7987e671-61e6-4d07-9c9b-ca7a07367d91/",
"shared:/pulp/api/v3/repositories/rpm/rpm/acd01e87-640a-4584-b52f-c999e937b55f/",
"shared:/pulp/api/v3/repositories/rpm/rpm/b01a1f40-c195-48c0-a05c-77b7748d6338/",
"shared:/pulp/api/v3/repositories/rpm/rpm/504b40fe-5d7f-456e-bc95-683878609791/",
"shared:/pulp/api/v3/repositories/rpm/rpm/8a1a3998-ff6c-460c-b26b-010ac57023a9/",
"shared:/pulp/api/v3/repositories/rpm/rpm/a1a44856-a028-4a2e-a539-aa73d3ef9ff3/",
"shared:/pulp/api/v3/repositories/rpm/rpm/1cde5855-eab1-4ac3-ac2f-f02a22541619/",
"shared:/pulp/api/v3/repositories/deb/apt/509de38c-7ae7-4f7b-a37c-db8404488a51/",
"shared:/pulp/api/v3/repositories/rpm/rpm/cdd44804-8324-48ce-9e61-4ae6770d0427/",
"shared:/pulp/api/v3/repositories/rpm/rpm/dfe18547-f2bf-4c41-9b9e-32d6cb1e2f5e/",
"shared:/pulp/api/v3/repositories/rpm/rpm/d867837e-c35f-475d-9bb5-9c9bde465b19/",
"shared:/pulp/api/v3/repositories/rpm/rpm/a0bcd8d6-8e6d-4e05-83d1-8cbfbc28d8d9/",
"shared:/pulp/api/v3/repositories/rpm/rpm/b0169f69-55cc-4ce1-830c-f444152c6853/"
]
}
```
**To Reproduce**
Run space reclamation on an environment with a similar amount of repositories and content to what I posted above.
**Expected behavior**
After chatting with @dralley it sounds like this may be slower performance than expected.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/tasks/reclaim_space.py`
Content:
```
1 from pulpcore.app.models import (
2 Artifact,
3 Content,
4 ContentArtifact,
5 ProgressReport,
6 PublishedMetadata,
7 Repository,
8 RepositoryVersion,
9 )
10
11
12 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):
13 """
14 This task frees-up disk space by removing Artifact files from the filesystem for Content
15 exclusive to the list of provided repos.
16
17 Note: content marked as `proctected` will be excluded from the reclaim disk space.
18
19 Kwargs:
20 repo_pks (list): A list of repo pks the disk reclaim space is performed on.
21 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim
22 disk space.
23 force (bool): If True, uploaded content will be taken into account.
24
25 """
26 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)
27 for repo in reclaimed_repos:
28 repo.invalidate_cache(everything=True)
29
30 rest_of_repos = Repository.objects.exclude(pk__in=repo_pks)
31 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)
32 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)
33 c_reclaim_qs = c_reclaim_qs.exclude(
34 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()
35 )
36
37 if keeplist_rv_pks:
38 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)
39 rv_content = Content.objects.none()
40 for rv in rv_qs.iterator():
41 rv_content |= rv.content
42 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)
43
44 content_distinct = c_reclaim_qs.distinct("pulp_type")
45 unprotected = []
46 for content in content_distinct:
47 if not content.cast().PROTECTED_FROM_RECLAIM:
48 unprotected.append(content.pulp_type)
49
50 ca_qs = ContentArtifact.objects.filter(
51 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
52 )
53 if not force:
54 ca_qs = ca_qs.filter(remoteartifact__isnull=False)
55 artifact_pks = set()
56 ca_to_update = []
57 for ca in ca_qs.iterator():
58 if ca.content.pulp_type in unprotected:
59 artifact_pks.add(ca.artifact.pk)
60 ca.artifact = None
61 ca_to_update.append(ca)
62
63 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000)
64 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)
65 progress_bar = ProgressReport(
66 message="Reclaim disk space",
67 total=artifacts_to_delete.count(),
68 code="reclaim-space.artifact",
69 done=0,
70 state="running",
71 )
72 progress_bar.save()
73
74 counter = 0
75 interval = 100
76 for artifact in artifacts_to_delete.iterator():
77 # we need to manually call delete() because it cleans up the file on the filesystem
78 artifact.delete()
79 progress_bar.done += 1
80 counter += 1
81
82 if counter >= interval:
83 progress_bar.save()
84 counter = 0
85
86 progress_bar.state = "completed"
87 progress_bar.save()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py
--- a/pulpcore/app/tasks/reclaim_space.py
+++ b/pulpcore/app/tasks/reclaim_space.py
@@ -47,7 +47,7 @@
if not content.cast().PROTECTED_FROM_RECLAIM:
unprotected.append(content.pulp_type)
- ca_qs = ContentArtifact.objects.filter(
+ ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter(
content__in=c_reclaim_qs.values("pk"), artifact__isnull=False
)
if not force:
|
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -47,7 +47,7 @@\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n \n- ca_qs = ContentArtifact.objects.filter(\n+ ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n", "issue": "Long-running space reclamation task\n**Version**\r\n```\r\n \"versions\": [\r\n {\r\n \"component\": \"core\",\r\n \"version\": \"3.21.0\",\r\n \"package\": \"pulpcore\"\r\n },\r\n {\r\n \"component\": \"container\",\r\n \"version\": \"2.14.2\",\r\n \"package\": \"pulp-container\"\r\n },\r\n {\r\n \"component\": \"rpm\",\r\n \"version\": \"3.18.5\",\r\n \"package\": \"pulp-rpm\"\r\n },\r\n {\r\n \"component\": \"python\",\r\n \"version\": \"3.7.2\",\r\n \"package\": \"pulp-python\"\r\n },\r\n {\r\n \"component\": \"ostree\",\r\n \"version\": \"2.0.0a6\",\r\n \"package\": \"pulp-ostree\"\r\n },\r\n {\r\n \"component\": \"file\",\r\n \"version\": \"1.11.1\",\r\n \"package\": \"pulp-file\"\r\n },\r\n {\r\n \"component\": \"deb\",\r\n \"version\": \"2.20.0\",\r\n \"package\": \"pulp_deb\"\r\n },\r\n {\r\n \"component\": \"certguard\",\r\n \"version\": \"1.5.5\",\r\n \"package\": \"pulp-certguard\"\r\n },\r\n {\r\n \"component\": \"ansible\",\r\n \"version\": \"0.15.0\",\r\n \"package\": \"pulp-ansible\"\r\n }\r\n ],\r\n```\r\n\r\nKatello nightly (4.7)\r\n\r\n**Describe the bug**\r\n\r\nI noticed my reclaim space task was taking over 20 minutes in an environment with 63 repositories and 91485 rpm content units (to give some perspective). PostgresSQL was being heavily taxed and taking 100% of one CPU core. I tried to cancel it, but the cancellation was stuck so I needed to restart Pulpcore to stop the space reclamation.\r\n\r\nHere's the task output after it was canceled forcefully:\r\n\r\n```\r\n{\r\n \"pulp_href\": \"/pulp/api/v3/tasks/bce46114-a5d9-445a-a898-217210bf1975/\",\r\n \"pulp_created\": \"2022-11-15T16:38:50.639518Z\",\r\n \"state\": \"failed\",\r\n \"name\": \"pulpcore.app.tasks.reclaim_space.reclaim_space\",\r\n \"logging_cid\": \"c658f06c-3b76-49f6-a514-b19dd3bfbe52\",\r\n \"started_at\": \"2022-11-15T16:38:50.688113Z\",\r\n \"finished_at\": \"2022-11-15T17:09:06.918179Z\",\r\n \"error\": {\r\n \"reason\": \"Worker has gone missing.\"\r\n },\r\n \"worker\": \"/pulp/api/v3/workers/80173b0a-f731-4c7b-b3ec-ed993369044e/\",\r\n \"parent_task\": null,\r\n \"child_tasks\": [],\r\n \"task_group\": null,\r\n \"progress_reports\": [],\r\n \"created_resources\": [],\r\n \"reserved_resources_record\": [\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/4ad1fb8e-ef06-42e6-a83a-00da97551dce/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/3224e11b-ec85-4e3d-8d7b-fd44dcfd184d/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/d0f49692-31dd-4709-9e52-27be83167a3f/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/bef78a95-9555-467b-9fe6-66650c081757/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/e5838919-ba35-4497-b8a0-98c10af8941b/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/7987e671-61e6-4d07-9c9b-ca7a07367d91/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/acd01e87-640a-4584-b52f-c999e937b55f/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/b01a1f40-c195-48c0-a05c-77b7748d6338/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/504b40fe-5d7f-456e-bc95-683878609791/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/8a1a3998-ff6c-460c-b26b-010ac57023a9/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/a1a44856-a028-4a2e-a539-aa73d3ef9ff3/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/1cde5855-eab1-4ac3-ac2f-f02a22541619/\",\r\n \"shared:/pulp/api/v3/repositories/deb/apt/509de38c-7ae7-4f7b-a37c-db8404488a51/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/cdd44804-8324-48ce-9e61-4ae6770d0427/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/dfe18547-f2bf-4c41-9b9e-32d6cb1e2f5e/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/d867837e-c35f-475d-9bb5-9c9bde465b19/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/a0bcd8d6-8e6d-4e05-83d1-8cbfbc28d8d9/\",\r\n \"shared:/pulp/api/v3/repositories/rpm/rpm/b0169f69-55cc-4ce1-830c-f444152c6853/\"\r\n ]\r\n}\r\n```\r\n\r\n**To Reproduce**\r\nRun space reclamation on an environment with a similar amount of repositories and content to what I posted above.\r\n\r\n**Expected behavior**\r\nAfter chatting with @dralley it sounds like this may be slower performance than expected.\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n rest_of_repos = Repository.objects.exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}], "after_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n rest_of_repos = Repository.objects.exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
| 2,830 | 134 |
gh_patches_debug_12737
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-495
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failing `socket_info.getpeername()` with uvloop against a UDS.
When running against a unix domain socket, we seem to occassionaly get an OSError when looking up the remote address. This appears to only occur with `uvloop`.
```
ERROR: Exception in callback HttpToolsProtocol.connection_made(<_SelectorSoc...e, bufsize=0>>)
handle: <Handle HttpToolsProtocol.connection_made(<_SelectorSoc...e, bufsize=0>>)>
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/events.py", line 127, in _run
self._callback(*self._args)
File "/Users/tomchristie/GitHub/encode/uvicorn/uvicorn/protocols/http/httptools_impl.py", line 129, in connection_made
self.client = get_remote_addr(transport)
File "/Users/tomchristie/GitHub/encode/uvicorn/uvicorn/protocols/utils.py", line 8, in get_remote_addr
info = socket_info.getpeername()
OSError: [Errno 22] Invalid argument
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/utils.py`
Content:
```
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 info = socket_info.getpeername()
8 family = socket_info.family
9 if family in (socket.AF_INET, socket.AF_INET6):
10 return (str(info[0]), int(info[1]))
11 return None
12 info = transport.get_extra_info("peername")
13 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
14 return (str(info[0]), int(info[1]))
15 return None
16
17
18 def get_local_addr(transport):
19 socket_info = transport.get_extra_info("socket")
20 if socket_info is not None:
21 info = socket_info.getsockname()
22 family = socket_info.family
23 if family in (socket.AF_INET, socket.AF_INET6):
24 return (str(info[0]), int(info[1]))
25 return None
26 info = transport.get_extra_info("sockname")
27 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
28 return (str(info[0]), int(info[1]))
29 return None
30
31
32 def is_ssl(transport):
33 return bool(transport.get_extra_info("sslcontext"))
34
35
36 def get_client_addr(scope):
37 client = scope.get("client")
38 if not client:
39 return ""
40 return "%s:%d" % client
41
42
43 def get_path_with_query_string(scope):
44 path_with_query_string = scope.get("root_path", "") + scope["path"]
45 if scope["query_string"]:
46 path_with_query_string = "{}?{}".format(
47 path_with_query_string, scope["query_string"].decode("ascii")
48 )
49 return path_with_query_string
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -4,8 +4,16 @@
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
if socket_info is not None:
- info = socket_info.getpeername()
- family = socket_info.family
+ try:
+ info = socket_info.getpeername()
+ except OSError:
+ # This case appears to inconsistently occur with uvloop
+ # bound to a unix domain socket.
+ family = None
+ info = None
+ else:
+ family = socket_info.family
+
if family in (socket.AF_INET, socket.AF_INET6):
return (str(info[0]), int(info[1]))
return None
|
{"golden_diff": "diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -4,8 +4,16 @@\n def get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n- info = socket_info.getpeername()\n- family = socket_info.family\n+ try:\n+ info = socket_info.getpeername()\n+ except OSError:\n+ # This case appears to inconsistently occur with uvloop\n+ # bound to a unix domain socket.\n+ family = None\n+ info = None\n+ else:\n+ family = socket_info.family\n+\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n", "issue": "Failing `socket_info.getpeername()` with uvloop against a UDS.\nWhen running against a unix domain socket, we seem to occassionaly get an OSError when looking up the remote address. This appears to only occur with `uvloop`.\r\n\r\n```\r\nERROR: Exception in callback HttpToolsProtocol.connection_made(<_SelectorSoc...e, bufsize=0>>)\r\nhandle: <Handle HttpToolsProtocol.connection_made(<_SelectorSoc...e, bufsize=0>>)>\r\nTraceback (most recent call last):\r\n File \"/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/events.py\", line 127, in _run\r\n self._callback(*self._args)\r\n File \"/Users/tomchristie/GitHub/encode/uvicorn/uvicorn/protocols/http/httptools_impl.py\", line 129, in connection_made\r\n self.client = get_remote_addr(transport)\r\n File \"/Users/tomchristie/GitHub/encode/uvicorn/uvicorn/protocols/utils.py\", line 8, in get_remote_addr\r\n info = socket_info.getpeername()\r\nOSError: [Errno 22] Invalid argument\r\n```\n", "before_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getpeername()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}], "after_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]}
| 993 | 194 |
gh_patches_debug_20967
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-17081
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pants `export` doesn't work on all machines
**Describe the bug**
I am able to run `./pants export ::` successfully on my machine. When I ran this through our CI system, I got a permission denied error when trying to clone a VCS requirement from our requirements.txt file. I know the CI worker had access to the repos because it cloned a repo to get this far (the VCS requirement is another internal repo).
I already have the following added to `pants.toml`.
```
[subprocess-environment]
env_vars.add = ["SSH_AUTH_SOCK"]
```
I'm curious if it's possible this isn't getting passed through on my CI system and causing the export step to fail.
**Pants version**
2.11.0
**OS**
ubuntu 20.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/util_rules/pex_cli.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import dataclasses
7 from dataclasses import dataclass
8 from typing import Iterable, List, Mapping, Optional, Tuple
9
10 from pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem
11 from pants.backend.python.util_rules import pex_environment
12 from pants.backend.python.util_rules.pex_environment import (
13 PexEnvironment,
14 PexSubsystem,
15 PythonExecutable,
16 )
17 from pants.core.util_rules import external_tool
18 from pants.core.util_rules.external_tool import (
19 DownloadedExternalTool,
20 ExternalToolRequest,
21 TemplatedExternalTool,
22 )
23 from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests
24 from pants.engine.internals.selectors import MultiGet
25 from pants.engine.platform import Platform
26 from pants.engine.process import Process, ProcessCacheScope
27 from pants.engine.rules import Get, collect_rules, rule
28 from pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content
29 from pants.util.frozendict import FrozenDict
30 from pants.util.logging import LogLevel
31 from pants.util.meta import classproperty, frozen_after_init
32 from pants.util.strutil import create_path_env_var
33
34
35 class PexCli(TemplatedExternalTool):
36 options_scope = "pex-cli"
37 name = "pex"
38 help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
39
40 default_version = "v2.1.103"
41 default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
42 version_constraints = ">=2.1.103,<3.0"
43
44 @classproperty
45 def default_known_versions(cls):
46 return [
47 "|".join(
48 (
49 cls.default_version,
50 plat,
51 "4d45336511484100ae4e2bab24542a8b86b12c8cb89230463593c60d08c4b8d3",
52 "3814407",
53 )
54 )
55 for plat in ["macos_arm64", "macos_x86_64", "linux_x86_64", "linux_arm64"]
56 ]
57
58
59 @frozen_after_init
60 @dataclass(unsafe_hash=True)
61 class PexCliProcess:
62 subcommand: tuple[str, ...]
63 extra_args: tuple[str, ...]
64 set_resolve_args: bool
65 description: str = dataclasses.field(compare=False)
66 additional_input_digest: Optional[Digest]
67 extra_env: Optional[FrozenDict[str, str]]
68 output_files: Optional[Tuple[str, ...]]
69 output_directories: Optional[Tuple[str, ...]]
70 python: Optional[PythonExecutable]
71 level: LogLevel
72 concurrency_available: int
73 cache_scope: ProcessCacheScope
74
75 def __init__(
76 self,
77 *,
78 subcommand: Iterable[str],
79 extra_args: Iterable[str],
80 description: str,
81 set_resolve_args: bool = True,
82 additional_input_digest: Optional[Digest] = None,
83 extra_env: Optional[Mapping[str, str]] = None,
84 output_files: Optional[Iterable[str]] = None,
85 output_directories: Optional[Iterable[str]] = None,
86 python: Optional[PythonExecutable] = None,
87 level: LogLevel = LogLevel.INFO,
88 concurrency_available: int = 0,
89 cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
90 ) -> None:
91 self.subcommand = tuple(subcommand)
92 self.extra_args = tuple(extra_args)
93 self.set_resolve_args = set_resolve_args
94 self.description = description
95 self.additional_input_digest = additional_input_digest
96 self.extra_env = FrozenDict(extra_env) if extra_env else None
97 self.output_files = tuple(output_files) if output_files else None
98 self.output_directories = tuple(output_directories) if output_directories else None
99 self.python = python
100 self.level = level
101 self.concurrency_available = concurrency_available
102 self.cache_scope = cache_scope
103 self.__post_init__()
104
105 def __post_init__(self) -> None:
106 if "--pex-root-path" in self.extra_args:
107 raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
108
109
110 class PexPEX(DownloadedExternalTool):
111 """The Pex PEX binary."""
112
113
114 @rule
115 async def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:
116 pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))
117 return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)
118
119
120 @rule
121 async def setup_pex_cli_process(
122 request: PexCliProcess,
123 pex_pex: PexPEX,
124 pex_env: PexEnvironment,
125 python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,
126 global_options: GlobalOptions,
127 pex_subsystem: PexSubsystem,
128 ) -> Process:
129 tmpdir = ".tmp"
130 gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
131
132 cert_args = []
133 if global_options.ca_certs_path:
134 ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)
135 gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))
136 cert_args = ["--cert", ca_certs_fc.path]
137
138 digests_to_merge = [pex_pex.digest]
139 digests_to_merge.extend(await MultiGet(gets))
140 if request.additional_input_digest:
141 digests_to_merge.append(request.additional_input_digest)
142 input_digest = await Get(Digest, MergeDigests(digests_to_merge))
143
144 global_args = [
145 # Ensure Pex and its subprocesses create temporary files in the the process execution
146 # sandbox. It may make sense to do this generally for Processes, but in the short term we
147 # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
148 # perform. Making the TMPDIR local to the sandbox allows control via
149 # --local-execution-root-dir for the local case and should work well with remote cases where
150 # a remoting implementation has to allow for processes producing large binaries in a
151 # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
152 # environment variable allows Pex to absolutize the path ensuring subprocesses that change
153 # CWD can find the TMPDIR.
154 "--tmpdir",
155 tmpdir,
156 ]
157
158 if request.concurrency_available > 0:
159 global_args.extend(["--jobs", "{pants_concurrency}"])
160
161 verbosity_args = [f"-{'v' * pex_subsystem.verbosity}"] if pex_subsystem.verbosity > 0 else []
162
163 resolve_args = (
164 [*cert_args, "--python-path", create_path_env_var(pex_env.interpreter_search_paths)]
165 if request.set_resolve_args
166 else []
167 )
168 args = [
169 *request.subcommand,
170 *global_args,
171 *verbosity_args,
172 *resolve_args,
173 # NB: This comes at the end because it may use `--` passthrough args, # which must come at
174 # the end.
175 *request.extra_args,
176 ]
177
178 complete_pex_env = pex_env.in_sandbox(working_directory=None)
179 normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args, python=request.python)
180 env = {
181 **complete_pex_env.environment_dict(python_configured=request.python is not None),
182 **python_native_code.subprocess_env_vars,
183 **(request.extra_env or {}),
184 # If a subcommand is used, we need to use the `pex3` console script.
185 **({"PEX_SCRIPT": "pex3"} if request.subcommand else {}),
186 }
187
188 return Process(
189 normalized_argv,
190 description=request.description,
191 input_digest=input_digest,
192 env=env,
193 output_files=request.output_files,
194 output_directories=request.output_directories,
195 append_only_caches=complete_pex_env.append_only_caches,
196 level=request.level,
197 concurrency_available=request.concurrency_available,
198 cache_scope=request.cache_scope,
199 )
200
201
202 def rules():
203 return [
204 *collect_rules(),
205 *external_tool.rules(),
206 *pex_environment.rules(),
207 ]
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py
--- a/src/python/pants/backend/python/util_rules/pex_cli.py
+++ b/src/python/pants/backend/python/util_rules/pex_cli.py
@@ -37,7 +37,7 @@
name = "pex"
help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."
- default_version = "v2.1.103"
+ default_version = "v2.1.107"
default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
version_constraints = ">=2.1.103,<3.0"
@@ -48,8 +48,8 @@
(
cls.default_version,
plat,
- "4d45336511484100ae4e2bab24542a8b86b12c8cb89230463593c60d08c4b8d3",
- "3814407",
+ "bfc19b16e0f298742dd933289bd8057dd503f9ad0678310412d382800d48b3ae",
+ "3840814",
)
)
for plat in ["macos_arm64", "macos_x86_64", "linux_x86_64", "linux_arm64"]
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py\n--- a/src/python/pants/backend/python/util_rules/pex_cli.py\n+++ b/src/python/pants/backend/python/util_rules/pex_cli.py\n@@ -37,7 +37,7 @@\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n \n- default_version = \"v2.1.103\"\n+ default_version = \"v2.1.107\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.103,<3.0\"\n \n@@ -48,8 +48,8 @@\n (\n cls.default_version,\n plat,\n- \"4d45336511484100ae4e2bab24542a8b86b12c8cb89230463593c60d08c4b8d3\",\n- \"3814407\",\n+ \"bfc19b16e0f298742dd933289bd8057dd503f9ad0678310412d382800d48b3ae\",\n+ \"3840814\",\n )\n )\n for plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n", "issue": "Pants `export` doesn't work on all machines\n**Describe the bug**\r\nI am able to run `./pants export ::` successfully on my machine. When I ran this through our CI system, I got a permission denied error when trying to clone a VCS requirement from our requirements.txt file. I know the CI worker had access to the repos because it cloned a repo to get this far (the VCS requirement is another internal repo).\r\n\r\nI already have the following added to `pants.toml`.\r\n```\r\n[subprocess-environment]\r\nenv_vars.add = [\"SSH_AUTH_SOCK\"]\r\n```\r\n\r\nI'm curious if it's possible this isn't getting passed through on my CI system and causing the export step to fail.\r\n\r\n**Pants version**\r\n2.11.0\r\n\r\n**OS**\r\nubuntu 20.04\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import (\n PexEnvironment,\n PexSubsystem,\n PythonExecutable,\n)\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty, frozen_after_init\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.103\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.103,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"4d45336511484100ae4e2bab24542a8b86b12c8cb89230463593c60d08c4b8d3\",\n \"3814407\",\n )\n )\n for plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n set_resolve_args: bool\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n python: Optional[PythonExecutable]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n set_resolve_args: bool = True,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n python: Optional[PythonExecutable] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n self.subcommand = tuple(subcommand)\n self.extra_args = tuple(extra_args)\n self.set_resolve_args = set_resolve_args\n self.description = description\n self.additional_input_digest = additional_input_digest\n self.extra_env = FrozenDict(extra_env) if extra_env else None\n self.output_files = tuple(output_files) if output_files else None\n self.output_directories = tuple(output_directories) if output_directories else None\n self.python = python\n self.level = level\n self.concurrency_available = concurrency_available\n self.cache_scope = cache_scope\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n resolve_args = (\n [*cert_args, \"--python-path\", create_path_env_var(pex_env.interpreter_search_paths)]\n if request.set_resolve_args\n else []\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args, python=request.python)\n env = {\n **complete_pex_env.environment_dict(python_configured=request.python is not None),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import (\n PexEnvironment,\n PexSubsystem,\n PythonExecutable,\n)\nfrom pants.core.util_rules import external_tool\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty, frozen_after_init\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.107\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.103,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"bfc19b16e0f298742dd933289bd8057dd503f9ad0678310412d382800d48b3ae\",\n \"3840814\",\n )\n )\n for plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n set_resolve_args: bool\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n python: Optional[PythonExecutable]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n set_resolve_args: bool = True,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n python: Optional[PythonExecutable] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n self.subcommand = tuple(subcommand)\n self.extra_args = tuple(extra_args)\n self.set_resolve_args = set_resolve_args\n self.description = description\n self.additional_input_digest = additional_input_digest\n self.extra_env = FrozenDict(extra_env) if extra_env else None\n self.output_files = tuple(output_files) if output_files else None\n self.output_directories = tuple(output_directories) if output_directories else None\n self.python = python\n self.level = level\n self.concurrency_available = concurrency_available\n self.cache_scope = cache_scope\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n resolve_args = (\n [*cert_args, \"--python-path\", create_path_env_var(pex_env.interpreter_search_paths)]\n if request.set_resolve_args\n else []\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args, python=request.python)\n env = {\n **complete_pex_env.environment_dict(python_configured=request.python is not None),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}]}
| 2,818 | 388 |
gh_patches_debug_40654
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1186
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change availability endpoint to not require a user to be logged in
### Issue description and context
Current api/v1/available requires a user to be logged in, even though the login required tag was removed already. Troubleshoot this and ensure that a non-logged in user can access the availability endpoint (api/v1/available)
### Acceptance criteria
- [ ] a user not logged in can access api/v1/available/<domain> and will see the response to whether the domain string passed is in fact available.
### Links to other issues
Blocks: #476
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/api/views.py`
Content:
```
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import JsonResponse
5
6 import requests
7
8 from cachetools.func import ttl_cache
9
10
11 DOMAIN_FILE_URL = (
12 "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
13 )
14
15
16 DOMAIN_API_MESSAGES = {
17 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
18 " For example, if you want www.city.gov, you would enter “city”"
19 " (without the quotes).",
20 "extra_dots": "Enter the .gov domain you want without any periods.",
21 "unavailable": "That domain isn’t available. Try entering another one."
22 " Contact us if you need help coming up with a domain.",
23 "invalid": "Enter a domain using only letters,"
24 " numbers, or hyphens (though we don't recommend using hyphens).",
25 "success": "That domain is available!",
26 }
27
28
29 # this file doesn't change that often, nor is it that big, so cache the result
30 # in memory for ten minutes
31 @ttl_cache(ttl=600)
32 def _domains():
33 """Return a list of the current .gov domains.
34
35 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
36 lowercase everything and return the list.
37 """
38 DraftDomain = apps.get_model("registrar.DraftDomain")
39 # 5 second timeout
40 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
41 domains = set()
42 # skip the first line
43 for line in file_contents.splitlines()[1:]:
44 # get the domain before the first comma
45 domain = line.split(",", 1)[0]
46 # sanity-check the string we got from the file here
47 if DraftDomain.string_could_be_domain(domain):
48 # lowercase everything when we put it in domains
49 domains.add(domain.lower())
50 return domains
51
52
53 def in_domains(domain):
54 """Return true if the given domain is in the domains list.
55
56 The given domain is lowercased to match against the domains list. If the
57 given domain doesn't end with .gov, ".gov" is added when looking for
58 a match.
59 """
60 Domain = apps.get_model("registrar.Domain")
61 if domain.endswith(".gov"):
62 return Domain.available(domain)
63 else:
64 # domain search string doesn't end with .gov, add it on here
65 return Domain.available(domain + ".gov")
66
67
68 @require_http_methods(["GET"])
69 def available(request, domain=""):
70 """Is a given domain available or not.
71
72 Response is a JSON dictionary with the key "available" and value true or
73 false.
74 """
75 DraftDomain = apps.get_model("registrar.DraftDomain")
76 # validate that the given domain could be a domain name and fail early if
77 # not.
78 if not (
79 DraftDomain.string_could_be_domain(domain)
80 or DraftDomain.string_could_be_domain(domain + ".gov")
81 ):
82 return JsonResponse(
83 {"available": False, "message": DOMAIN_API_MESSAGES["invalid"]}
84 )
85 # a domain is available if it is NOT in the list of current domains
86 if in_domains(domain):
87 return JsonResponse(
88 {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
89 )
90 else:
91 return JsonResponse(
92 {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
93 )
94
```
Path: `src/registrar/models/utility/domain_helper.py`
Content:
```
1 import re
2
3 from api.views import in_domains
4 from registrar.utility import errors
5
6
7 class DomainHelper:
8 """Utility functions and constants for domain names."""
9
10 # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't
11 # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters
12 DOMAIN_REGEX = re.compile(r"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\.[A-Za-z]{2,6}$")
13
14 # a domain name is alphanumeric or hyphen, has at least 2 dots, doesn't
15 # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters
16 HOST_REGEX = re.compile(r"^((?!-)[A-Za-z0-9-]{1,63}(?<!-)\.){2,}([A-Za-z]){2,6}$")
17
18 # a domain can be no longer than 253 characters in total
19 MAX_LENGTH = 253
20
21 @classmethod
22 def string_could_be_domain(cls, domain: str | None) -> bool:
23 """Return True if the string could be a domain name, otherwise False."""
24 if not isinstance(domain, str):
25 return False
26 return bool(cls.DOMAIN_REGEX.match(domain))
27
28 @classmethod
29 def validate(cls, domain: str | None, blank_ok=False) -> str:
30 """Attempt to determine if a domain name could be requested."""
31 if domain is None:
32 raise errors.BlankValueError()
33 if not isinstance(domain, str):
34 raise ValueError("Domain name must be a string")
35 domain = domain.lower().strip()
36 if domain == "":
37 if blank_ok:
38 return domain
39 else:
40 raise errors.BlankValueError()
41 if domain.endswith(".gov"):
42 domain = domain[:-4]
43 if "." in domain:
44 raise errors.ExtraDotsError()
45 if not DomainHelper.string_could_be_domain(domain + ".gov"):
46 raise ValueError()
47 if in_domains(domain):
48 raise errors.DomainUnavailableError()
49 return domain
50
51 @classmethod
52 def sld(cls, domain: str):
53 """
54 Get the second level domain. Example: `gsa.gov` -> `gsa`.
55
56 If no TLD is present, returns the original string.
57 """
58 return domain.split(".")[0]
59
60 @classmethod
61 def tld(cls, domain: str):
62 """Get the top level domain. Example: `gsa.gov` -> `gov`."""
63 parts = domain.rsplit(".")
64 return parts[-1] if len(parts) > 1 else ""
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -5,6 +5,8 @@
import requests
+from login_required import login_not_required
+
from cachetools.func import ttl_cache
@@ -23,6 +25,7 @@
"invalid": "Enter a domain using only letters,"
" numbers, or hyphens (though we don't recommend using hyphens).",
"success": "That domain is available!",
+ "error": "Error finding domain availability.",
}
@@ -50,22 +53,26 @@
return domains
-def in_domains(domain):
- """Return true if the given domain is in the domains list.
+def check_domain_available(domain):
+ """Return true if the given domain is available.
The given domain is lowercased to match against the domains list. If the
given domain doesn't end with .gov, ".gov" is added when looking for
a match.
"""
Domain = apps.get_model("registrar.Domain")
- if domain.endswith(".gov"):
- return Domain.available(domain)
- else:
- # domain search string doesn't end with .gov, add it on here
- return Domain.available(domain + ".gov")
+ try:
+ if domain.endswith(".gov"):
+ return Domain.available(domain)
+ else:
+ # domain search string doesn't end with .gov, add it on here
+ return Domain.available(domain + ".gov")
+ except Exception:
+ return False
@require_http_methods(["GET"])
+@login_not_required
def available(request, domain=""):
"""Is a given domain available or not.
@@ -83,11 +90,16 @@
{"available": False, "message": DOMAIN_API_MESSAGES["invalid"]}
)
# a domain is available if it is NOT in the list of current domains
- if in_domains(domain):
- return JsonResponse(
- {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
- )
- else:
+ try:
+ if check_domain_available(domain):
+ return JsonResponse(
+ {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
+ )
+ else:
+ return JsonResponse(
+ {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
+ )
+ except Exception:
return JsonResponse(
- {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
+ {"available": False, "message": DOMAIN_API_MESSAGES["error"]}
)
diff --git a/src/registrar/models/utility/domain_helper.py b/src/registrar/models/utility/domain_helper.py
--- a/src/registrar/models/utility/domain_helper.py
+++ b/src/registrar/models/utility/domain_helper.py
@@ -1,6 +1,6 @@
import re
-from api.views import in_domains
+from api.views import check_domain_available
from registrar.utility import errors
@@ -44,7 +44,7 @@
raise errors.ExtraDotsError()
if not DomainHelper.string_could_be_domain(domain + ".gov"):
raise ValueError()
- if in_domains(domain):
+ if not check_domain_available(domain):
raise errors.DomainUnavailableError()
return domain
|
{"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -5,6 +5,8 @@\n \n import requests\n \n+from login_required import login_not_required\n+\n from cachetools.func import ttl_cache\n \n \n@@ -23,6 +25,7 @@\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n+ \"error\": \"Error finding domain availability.\",\n }\n \n \n@@ -50,22 +53,26 @@\n return domains\n \n \n-def in_domains(domain):\n- \"\"\"Return true if the given domain is in the domains list.\n+def check_domain_available(domain):\n+ \"\"\"Return true if the given domain is available.\n \n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n- if domain.endswith(\".gov\"):\n- return Domain.available(domain)\n- else:\n- # domain search string doesn't end with .gov, add it on here\n- return Domain.available(domain + \".gov\")\n+ try:\n+ if domain.endswith(\".gov\"):\n+ return Domain.available(domain)\n+ else:\n+ # domain search string doesn't end with .gov, add it on here\n+ return Domain.available(domain + \".gov\")\n+ except Exception:\n+ return False\n \n \n @require_http_methods([\"GET\"])\n+@login_not_required\n def available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n \n@@ -83,11 +90,16 @@\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n- if in_domains(domain):\n- return JsonResponse(\n- {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n- )\n- else:\n+ try:\n+ if check_domain_available(domain):\n+ return JsonResponse(\n+ {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n+ )\n+ else:\n+ return JsonResponse(\n+ {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n+ )\n+ except Exception:\n return JsonResponse(\n- {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n+ {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"error\"]}\n )\ndiff --git a/src/registrar/models/utility/domain_helper.py b/src/registrar/models/utility/domain_helper.py\n--- a/src/registrar/models/utility/domain_helper.py\n+++ b/src/registrar/models/utility/domain_helper.py\n@@ -1,6 +1,6 @@\n import re\n \n-from api.views import in_domains\n+from api.views import check_domain_available\n from registrar.utility import errors\n \n \n@@ -44,7 +44,7 @@\n raise errors.ExtraDotsError()\n if not DomainHelper.string_could_be_domain(domain + \".gov\"):\n raise ValueError()\n- if in_domains(domain):\n+ if not check_domain_available(domain):\n raise errors.DomainUnavailableError()\n return domain\n", "issue": "Change availability endpoint to not require a user to be logged in\n### Issue description and context\n\nCurrent api/v1/available requires a user to be logged in, even though the login required tag was removed already. Troubleshoot this and ensure that a non-logged in user can access the availability endpoint (api/v1/available)\n\n### Acceptance criteria\n\n- [ ] a user not logged in can access api/v1/available/<domain> and will see the response to whether the domain string passed is in fact available.\n\n### Links to other issues\n\nBlocks: #476\n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n\n\n@require_http_methods([\"GET\"])\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}, {"content": "import re\n\nfrom api.views import in_domains\nfrom registrar.utility import errors\n\n\nclass DomainHelper:\n \"\"\"Utility functions and constants for domain names.\"\"\"\n\n # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n DOMAIN_REGEX = re.compile(r\"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.[A-Za-z]{2,6}$\")\n\n # a domain name is alphanumeric or hyphen, has at least 2 dots, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n HOST_REGEX = re.compile(r\"^((?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.){2,}([A-Za-z]){2,6}$\")\n\n # a domain can be no longer than 253 characters in total\n MAX_LENGTH = 253\n\n @classmethod\n def string_could_be_domain(cls, domain: str | None) -> bool:\n \"\"\"Return True if the string could be a domain name, otherwise False.\"\"\"\n if not isinstance(domain, str):\n return False\n return bool(cls.DOMAIN_REGEX.match(domain))\n\n @classmethod\n def validate(cls, domain: str | None, blank_ok=False) -> str:\n \"\"\"Attempt to determine if a domain name could be requested.\"\"\"\n if domain is None:\n raise errors.BlankValueError()\n if not isinstance(domain, str):\n raise ValueError(\"Domain name must be a string\")\n domain = domain.lower().strip()\n if domain == \"\":\n if blank_ok:\n return domain\n else:\n raise errors.BlankValueError()\n if domain.endswith(\".gov\"):\n domain = domain[:-4]\n if \".\" in domain:\n raise errors.ExtraDotsError()\n if not DomainHelper.string_could_be_domain(domain + \".gov\"):\n raise ValueError()\n if in_domains(domain):\n raise errors.DomainUnavailableError()\n return domain\n\n @classmethod\n def sld(cls, domain: str):\n \"\"\"\n Get the second level domain. Example: `gsa.gov` -> `gsa`.\n\n If no TLD is present, returns the original string.\n \"\"\"\n return domain.split(\".\")[0]\n\n @classmethod\n def tld(cls, domain: str):\n \"\"\"Get the top level domain. Example: `gsa.gov` -> `gov`.\"\"\"\n parts = domain.rsplit(\".\")\n return parts[-1] if len(parts) > 1 else \"\"\n", "path": "src/registrar/models/utility/domain_helper.py"}], "after_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nimport requests\n\nfrom login_required import login_not_required\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n \"error\": \"Error finding domain availability.\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef check_domain_available(domain):\n \"\"\"Return true if the given domain is available.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n try:\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n except Exception:\n return False\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n try:\n if check_domain_available(domain):\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n else:\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n except Exception:\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"error\"]}\n )\n", "path": "src/api/views.py"}, {"content": "import re\n\nfrom api.views import check_domain_available\nfrom registrar.utility import errors\n\n\nclass DomainHelper:\n \"\"\"Utility functions and constants for domain names.\"\"\"\n\n # a domain name is alphanumeric or hyphen, up to 63 characters, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n DOMAIN_REGEX = re.compile(r\"^(?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.[A-Za-z]{2,6}$\")\n\n # a domain name is alphanumeric or hyphen, has at least 2 dots, doesn't\n # begin or end with a hyphen, followed by a TLD of 2-6 alphabetic characters\n HOST_REGEX = re.compile(r\"^((?!-)[A-Za-z0-9-]{1,63}(?<!-)\\.){2,}([A-Za-z]){2,6}$\")\n\n # a domain can be no longer than 253 characters in total\n MAX_LENGTH = 253\n\n @classmethod\n def string_could_be_domain(cls, domain: str | None) -> bool:\n \"\"\"Return True if the string could be a domain name, otherwise False.\"\"\"\n if not isinstance(domain, str):\n return False\n return bool(cls.DOMAIN_REGEX.match(domain))\n\n @classmethod\n def validate(cls, domain: str | None, blank_ok=False) -> str:\n \"\"\"Attempt to determine if a domain name could be requested.\"\"\"\n if domain is None:\n raise errors.BlankValueError()\n if not isinstance(domain, str):\n raise ValueError(\"Domain name must be a string\")\n domain = domain.lower().strip()\n if domain == \"\":\n if blank_ok:\n return domain\n else:\n raise errors.BlankValueError()\n if domain.endswith(\".gov\"):\n domain = domain[:-4]\n if \".\" in domain:\n raise errors.ExtraDotsError()\n if not DomainHelper.string_could_be_domain(domain + \".gov\"):\n raise ValueError()\n if not check_domain_available(domain):\n raise errors.DomainUnavailableError()\n return domain\n\n @classmethod\n def sld(cls, domain: str):\n \"\"\"\n Get the second level domain. Example: `gsa.gov` -> `gsa`.\n\n If no TLD is present, returns the original string.\n \"\"\"\n return domain.split(\".\")[0]\n\n @classmethod\n def tld(cls, domain: str):\n \"\"\"Get the top level domain. Example: `gsa.gov` -> `gov`.\"\"\"\n parts = domain.rsplit(\".\")\n return parts[-1] if len(parts) > 1 else \"\"\n", "path": "src/registrar/models/utility/domain_helper.py"}]}
| 2,029 | 720 |
gh_patches_debug_33685
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-1593
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Wrong formulation of the PeriodicKernel
# 🐛 Bug
The current formulation of the periodic kernel is the following:
<img src="https://render.githubusercontent.com/render/math?math=k(\mathbf{x^1, x^2}) = \exp ( \frac{-2 \sin^2(\frac{\pi}{p} \sum_i | x^{1}_{i} - x^{2}_{i} |)}{l^2} )">
However, the original formula proposed [Mackay, 1998](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf) (Eq. 47) is the following:
<img src="https://render.githubusercontent.com/render/math?math=k(\mathbf{x^1, x^2}) = \exp ( \frac{-0.5 \sum_i \sin^2(\frac{\pi}{p} | x^{1}_{i} - x^{2}_{i} |)}{l^2} )">
The constant is not -2 or -0.5 is not really important (it will have an impact on the derivative only). However, the original formulation is taking the the sum of the sine squared of the difference. Currently, the formulation is the sine squared of the sum of the difference. Note that the confusion could come from the fact that in the literature, the kernel is used for 1D signal and thus there is no sum, thus both formulations are equivalent.
In addition, looking at the implementation, it seems that the distance computed between the samples is norm-2 distance and not the norm-1 distance.
The current formulation make it that the kernel is not PSD. In addition, there is still another bug with a missing square over the length scale as reported in https://github.com/cornellius-gp/gpytorch/issues/1020.
Note: I found this issue because I am currently working on a similar issues in scikit-learn:
https://github.com/scikit-learn/scikit-learn/issues/19343
https://github.com/scikit-learn/scikit-learn/pull/20070
## To reproduce
** Code snippet to reproduce **
```python
import numpy as np
import torch
from gpytorch.kernels import PeriodicKernel
r = np.linspace(0, 1, num=4)
train_x, train_y = np.meshgrid(r, r)
X = np.stack((train_x.flatten(), train_y.flatten()), axis=-1)
periodicity, length_scale=1.5, 0.5
kernel_pytorch = PeriodicKernel(
period=periodicity, length_scale=length_scale
)
K_pytorch = kernel_pytorch(torch.from_numpy(X))
np.linalg.eigh(K_pytorch.numpy())[0]
```
** Stack trace/error message **
```
array([-1.27815495, -1.21435487, -1.21435487, -0.93566031, -0.75456704,
-0.24301193, -0.24301193, 0.47435512, 0.72370254, 1.58773803,
1.58773803, 2.42356711, 2.67356606, 2.67356606, 3.10565507,
6.63322787])
```
There is some negative eigenvalues.
## Expected Behavior
We are investigating the issue in scikit-learn and already implemented a naive approach: https://github.com/scikit-learn/scikit-learn/pull/20070 for tracking the bug. With the code of the PR, there is no negative eigenvalues:
```python
from sklearn.gaussian_process.kernels import ExpSineSquared
kernel_sklearn = ExpSineSquared(
periodicity=periodicity, length_scale=length_scale,
)
K_sklearn = kernel_sklearn(X)
np.linalg.eigh(K_sklearn)[0]
```
```
array([0.88422032, 0.92031869, 0.92031869, 0.95789078, 0.95961332,
0.95961332, 0.99716727, 0.99716727, 0.99878962, 0.99878962,
1.03787671, 1.03787671, 1.0414347 , 1.08219069, 1.08219069,
1.12454163])
```
On the process, we also worked with the implementation of tensorflow-probability that follow the equation of the paper, and we have consistent results:
```python
from tensorflow_probability import math
kernel_tfp = math.psd_kernels.ExpSinSquared(
period=periodicity, length_scale=length_scale
)
K_tfp = kernel_tfp.matrix(X, X)
np.linalg.eigh(K_tfp.numpy())[0]
```
```
array([0.8842203 , 0.92031866, 0.92031866, 0.95789075, 0.9596133 ,
0.9596133 , 0.9971673 , 0.9971673 , 0.9987896 , 0.9987896 ,
1.0378767 , 1.0378767 , 1.0414346 , 1.0821906 , 1.0821906 ,
1.1245416 ], dtype=float32)
```
## System information
**Please complete the following information:**
- GPyTorch version: `1.4.1`
- PyTorch version: `1.8.1+cpu`
- Linux 5.8.0-50-generic #56-Ubuntu SMP Mon Apr 12 17:18:36 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/periodic_kernel.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import math
4
5 import torch
6
7 from ..constraints import Positive
8 from .kernel import Kernel
9
10
11 class PeriodicKernel(Kernel):
12 r"""Computes a covariance matrix based on the periodic kernel
13 between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
14
15 .. math::
16
17 \begin{equation*}
18 k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
19 \frac{2 \sin^2 \left( \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_1 / p \right) }
20 { \ell^2 } \right)
21 \end{equation*}
22
23 where
24
25 * :math:`p` is the periord length parameter.
26 * :math:`\ell` is a lengthscale parameter.
27
28 .. note::
29
30 This kernel does not have an `outputscale` parameter. To add a scaling parameter,
31 decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.
32
33 .. note::
34
35 This kernel does not have an ARD lengthscale option.
36
37 Args:
38 :attr:`batch_shape` (torch.Size, optional):
39 Set this if you want a separate lengthscale for each
40 batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.
41 :attr:`active_dims` (tuple of ints, optional):
42 Set this if you want to compute the covariance of only a few input dimensions. The ints
43 corresponds to the indices of the dimensions. Default: `None`.
44 :attr:`period_length_prior` (Prior, optional):
45 Set this if you want to apply a prior to the period length parameter. Default: `None`.
46 :attr:`lengthscale_prior` (Prior, optional):
47 Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.
48 :attr:`lengthscale_constraint` (Constraint, optional):
49 Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.
50 :attr:`period_length_constraint` (Constraint, optional):
51 Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.
52 :attr:`eps` (float):
53 The minimum value that the lengthscale/period length can take
54 (prevents divide by zero errors). Default: `1e-6`.
55
56 Attributes:
57 :attr:`lengthscale` (Tensor):
58 The lengthscale parameter. Size = `*batch_shape x 1 x 1`.
59 :attr:`period_length` (Tensor):
60 The period length parameter. Size = `*batch_shape x 1 x 1`.
61
62 Example:
63 >>> x = torch.randn(10, 5)
64 >>> # Non-batch: Simple option
65 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
66 >>>
67 >>> batch_x = torch.randn(2, 10, 5)
68 >>> # Batch: Simple option
69 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
70 >>> # Batch: different lengthscale for each batch
71 >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))
72 >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)
73 """
74
75 has_lengthscale = True
76
77 def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):
78 super(PeriodicKernel, self).__init__(**kwargs)
79 if period_length_constraint is None:
80 period_length_constraint = Positive()
81
82 self.register_parameter(
83 name="raw_period_length", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))
84 )
85
86 if period_length_prior is not None:
87 self.register_prior(
88 "period_length_prior",
89 period_length_prior,
90 lambda m: m.period_length,
91 lambda m, v: m._set_period_length(v),
92 )
93
94 self.register_constraint("raw_period_length", period_length_constraint)
95
96 @property
97 def period_length(self):
98 return self.raw_period_length_constraint.transform(self.raw_period_length)
99
100 @period_length.setter
101 def period_length(self, value):
102 self._set_period_length(value)
103
104 def _set_period_length(self, value):
105 if not torch.is_tensor(value):
106 value = torch.as_tensor(value).to(self.raw_period_length)
107 self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))
108
109 def forward(self, x1, x2, diag=False, **params):
110 x1_ = x1.div(self.period_length)
111 x2_ = x2.div(self.period_length)
112 diff = self.covar_dist(x1_, x2_, diag=diag, **params)
113 res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
114 if diag:
115 res = res.squeeze(0)
116 return res
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py
--- a/gpytorch/kernels/periodic_kernel.py
+++ b/gpytorch/kernels/periodic_kernel.py
@@ -14,16 +14,22 @@
.. math::
- \begin{equation*}
- k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
- \frac{2 \sin^2 \left( \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_1 / p \right) }
- { \ell^2 } \right)
- \end{equation*}
+ \begin{equation*}
+ k_{\text{Periodic}}(\mathbf{x_1}, \mathbf{x_2}) = \exp \left(
+ -2 \sum_i
+ \frac{\sin ^2 \left( \frac{\pi}{p} (\mathbf{x_{1,i}} - \mathbf{x_{2,i}} ) \right)}{\lambda}
+ \right)
+ \end{equation*}
where
- * :math:`p` is the periord length parameter.
- * :math:`\ell` is a lengthscale parameter.
+ * :math:`p` is the period length parameter.
+ * :math:`\lambda` is a lengthscale parameter.
+
+ Equation is based on [David Mackay's Introduction to Gaussian Processes equation 47]
+ (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf)
+ albeit without feature-specific lengthscales and period lengths. The exponential
+ coefficient was changed and lengthscale is not squared to maintain backwards compatibility
.. note::
@@ -32,7 +38,7 @@
.. note::
- This kernel does not have an ARD lengthscale option.
+ This kernel does not have an ARD lengthscale or period length option.
Args:
:attr:`batch_shape` (torch.Size, optional):
@@ -107,10 +113,10 @@
self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))
def forward(self, x1, x2, diag=False, **params):
- x1_ = x1.div(self.period_length)
- x2_ = x2.div(self.period_length)
- diff = self.covar_dist(x1_, x2_, diag=diag, **params)
- res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
+ x1_ = x1.div(self.period_length).mul(math.pi)
+ x2_ = x2.div(self.period_length).mul(math.pi)
+ diff = x1_.unsqueeze(-2) - x2_.unsqueeze(-3)
+ res = diff.sin().pow(2).sum(dim=-1).div(self.lengthscale).mul(-2.0).exp_()
if diag:
res = res.squeeze(0)
return res
|
{"golden_diff": "diff --git a/gpytorch/kernels/periodic_kernel.py b/gpytorch/kernels/periodic_kernel.py\n--- a/gpytorch/kernels/periodic_kernel.py\n+++ b/gpytorch/kernels/periodic_kernel.py\n@@ -14,16 +14,22 @@\n \n .. math::\n \n- \\begin{equation*}\n- k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n- \\frac{2 \\sin^2 \\left( \\pi \\Vert \\mathbf{x_1} - \\mathbf{x_2} \\Vert_1 / p \\right) }\n- { \\ell^2 } \\right)\n- \\end{equation*}\n+ \\begin{equation*}\n+ k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n+ -2 \\sum_i\n+ \\frac{\\sin ^2 \\left( \\frac{\\pi}{p} (\\mathbf{x_{1,i}} - \\mathbf{x_{2,i}} ) \\right)}{\\lambda}\n+ \\right)\n+ \\end{equation*}\n \n where\n \n- * :math:`p` is the periord length parameter.\n- * :math:`\\ell` is a lengthscale parameter.\n+ * :math:`p` is the period length parameter.\n+ * :math:`\\lambda` is a lengthscale parameter.\n+\n+ Equation is based on [David Mackay's Introduction to Gaussian Processes equation 47]\n+ (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf)\n+ albeit without feature-specific lengthscales and period lengths. The exponential\n+ coefficient was changed and lengthscale is not squared to maintain backwards compatibility\n \n .. note::\n \n@@ -32,7 +38,7 @@\n \n .. note::\n \n- This kernel does not have an ARD lengthscale option.\n+ This kernel does not have an ARD lengthscale or period length option.\n \n Args:\n :attr:`batch_shape` (torch.Size, optional):\n@@ -107,10 +113,10 @@\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n \n def forward(self, x1, x2, diag=False, **params):\n- x1_ = x1.div(self.period_length)\n- x2_ = x2.div(self.period_length)\n- diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n- res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n+ x1_ = x1.div(self.period_length).mul(math.pi)\n+ x2_ = x2.div(self.period_length).mul(math.pi)\n+ diff = x1_.unsqueeze(-2) - x2_.unsqueeze(-3)\n+ res = diff.sin().pow(2).sum(dim=-1).div(self.lengthscale).mul(-2.0).exp_()\n if diag:\n res = res.squeeze(0)\n return res\n", "issue": "[Bug] Wrong formulation of the PeriodicKernel\n# \ud83d\udc1b Bug\r\n\r\nThe current formulation of the periodic kernel is the following:\r\n\r\n<img src=\"https://render.githubusercontent.com/render/math?math=k(\\mathbf{x^1, x^2}) = \\exp ( \\frac{-2 \\sin^2(\\frac{\\pi}{p} \\sum_i | x^{1}_{i} - x^{2}_{i} |)}{l^2} )\">\r\n\r\nHowever, the original formula proposed [Mackay, 1998](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf) (Eq. 47) is the following:\r\n\r\n<img src=\"https://render.githubusercontent.com/render/math?math=k(\\mathbf{x^1, x^2}) = \\exp ( \\frac{-0.5 \\sum_i \\sin^2(\\frac{\\pi}{p} | x^{1}_{i} - x^{2}_{i} |)}{l^2} )\">\r\n\r\nThe constant is not -2 or -0.5 is not really important (it will have an impact on the derivative only). However, the original formulation is taking the the sum of the sine squared of the difference. Currently, the formulation is the sine squared of the sum of the difference. Note that the confusion could come from the fact that in the literature, the kernel is used for 1D signal and thus there is no sum, thus both formulations are equivalent.\r\n\r\nIn addition, looking at the implementation, it seems that the distance computed between the samples is norm-2 distance and not the norm-1 distance.\r\n\r\nThe current formulation make it that the kernel is not PSD. In addition, there is still another bug with a missing square over the length scale as reported in https://github.com/cornellius-gp/gpytorch/issues/1020.\r\n\r\nNote: I found this issue because I am currently working on a similar issues in scikit-learn:\r\nhttps://github.com/scikit-learn/scikit-learn/issues/19343\r\nhttps://github.com/scikit-learn/scikit-learn/pull/20070\r\n\r\n## To reproduce\r\n\r\n** Code snippet to reproduce **\r\n```python\r\nimport numpy as np\r\nimport torch\r\nfrom gpytorch.kernels import PeriodicKernel\r\n\r\nr = np.linspace(0, 1, num=4)\r\ntrain_x, train_y = np.meshgrid(r, r)\r\nX = np.stack((train_x.flatten(), train_y.flatten()), axis=-1)\r\n\r\nperiodicity, length_scale=1.5, 0.5\r\nkernel_pytorch = PeriodicKernel(\r\n period=periodicity, length_scale=length_scale\r\n)\r\nK_pytorch = kernel_pytorch(torch.from_numpy(X))\r\n\r\nnp.linalg.eigh(K_pytorch.numpy())[0]\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\narray([-1.27815495, -1.21435487, -1.21435487, -0.93566031, -0.75456704,\r\n -0.24301193, -0.24301193, 0.47435512, 0.72370254, 1.58773803,\r\n 1.58773803, 2.42356711, 2.67356606, 2.67356606, 3.10565507,\r\n 6.63322787])\r\n```\r\n\r\nThere is some negative eigenvalues.\r\n\r\n## Expected Behavior\r\n\r\nWe are investigating the issue in scikit-learn and already implemented a naive approach: https://github.com/scikit-learn/scikit-learn/pull/20070 for tracking the bug. With the code of the PR, there is no negative eigenvalues:\r\n\r\n```python\r\nfrom sklearn.gaussian_process.kernels import ExpSineSquared\r\n\r\nkernel_sklearn = ExpSineSquared(\r\n periodicity=periodicity, length_scale=length_scale,\r\n)\r\nK_sklearn = kernel_sklearn(X)\r\nnp.linalg.eigh(K_sklearn)[0]\r\n```\r\n\r\n```\r\narray([0.88422032, 0.92031869, 0.92031869, 0.95789078, 0.95961332,\r\n 0.95961332, 0.99716727, 0.99716727, 0.99878962, 0.99878962,\r\n 1.03787671, 1.03787671, 1.0414347 , 1.08219069, 1.08219069,\r\n 1.12454163])\r\n```\r\n\r\nOn the process, we also worked with the implementation of tensorflow-probability that follow the equation of the paper, and we have consistent results:\r\n\r\n```python\r\nfrom tensorflow_probability import math\r\n\r\nkernel_tfp = math.psd_kernels.ExpSinSquared(\r\n period=periodicity, length_scale=length_scale\r\n)\r\nK_tfp = kernel_tfp.matrix(X, X)\r\nnp.linalg.eigh(K_tfp.numpy())[0]\r\n``` \r\n\r\n```\r\narray([0.8842203 , 0.92031866, 0.92031866, 0.95789075, 0.9596133 ,\r\n 0.9596133 , 0.9971673 , 0.9971673 , 0.9987896 , 0.9987896 ,\r\n 1.0378767 , 1.0378767 , 1.0414346 , 1.0821906 , 1.0821906 ,\r\n 1.1245416 ], dtype=float32)\r\n```\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\n- GPyTorch version: `1.4.1`\r\n- PyTorch version: `1.8.1+cpu`\r\n- Linux 5.8.0-50-generic #56-Ubuntu SMP Mon Apr 12 17:18:36 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\n\nfrom ..constraints import Positive\nfrom .kernel import Kernel\n\n\nclass PeriodicKernel(Kernel):\n r\"\"\"Computes a covariance matrix based on the periodic kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n\n \\begin{equation*}\n k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n \\frac{2 \\sin^2 \\left( \\pi \\Vert \\mathbf{x_1} - \\mathbf{x_2} \\Vert_1 / p \\right) }\n { \\ell^2 } \\right)\n \\end{equation*}\n\n where\n\n * :math:`p` is the periord length parameter.\n * :math:`\\ell` is a lengthscale parameter.\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n .. note::\n\n This kernel does not have an ARD lengthscale option.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`period_length_prior` (Prior, optional):\n Set this if you want to apply a prior to the period length parameter. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.\n :attr:`period_length_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale/period length can take\n (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size = `*batch_shape x 1 x 1`.\n :attr:`period_length` (Tensor):\n The period length parameter. Size = `*batch_shape x 1 x 1`.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))\n >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):\n super(PeriodicKernel, self).__init__(**kwargs)\n if period_length_constraint is None:\n period_length_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_period_length\", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))\n )\n\n if period_length_prior is not None:\n self.register_prior(\n \"period_length_prior\",\n period_length_prior,\n lambda m: m.period_length,\n lambda m, v: m._set_period_length(v),\n )\n\n self.register_constraint(\"raw_period_length\", period_length_constraint)\n\n @property\n def period_length(self):\n return self.raw_period_length_constraint.transform(self.raw_period_length)\n\n @period_length.setter\n def period_length(self, value):\n self._set_period_length(value)\n\n def _set_period_length(self, value):\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_period_length)\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.period_length)\n x2_ = x2.div(self.period_length)\n diff = self.covar_dist(x1_, x2_, diag=diag, **params)\n res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()\n if diag:\n res = res.squeeze(0)\n return res\n", "path": "gpytorch/kernels/periodic_kernel.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\n\nfrom ..constraints import Positive\nfrom .kernel import Kernel\n\n\nclass PeriodicKernel(Kernel):\n r\"\"\"Computes a covariance matrix based on the periodic kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n\n \\begin{equation*}\n k_{\\text{Periodic}}(\\mathbf{x_1}, \\mathbf{x_2}) = \\exp \\left(\n -2 \\sum_i\n \\frac{\\sin ^2 \\left( \\frac{\\pi}{p} (\\mathbf{x_{1,i}} - \\mathbf{x_{2,i}} ) \\right)}{\\lambda}\n \\right)\n \\end{equation*}\n\n where\n\n * :math:`p` is the period length parameter.\n * :math:`\\lambda` is a lengthscale parameter.\n\n Equation is based on [David Mackay's Introduction to Gaussian Processes equation 47]\n (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.81.1927&rep=rep1&type=pdf)\n albeit without feature-specific lengthscales and period lengths. The exponential\n coefficient was changed and lengthscale is not squared to maintain backwards compatibility\n\n .. note::\n\n This kernel does not have an `outputscale` parameter. To add a scaling parameter,\n decorate this kernel with a :class:`gpytorch.kernels.ScaleKernel`.\n\n .. note::\n\n This kernel does not have an ARD lengthscale or period length option.\n\n Args:\n :attr:`batch_shape` (torch.Size, optional):\n Set this if you want a separate lengthscale for each\n batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `torch.Size([])`.\n :attr:`active_dims` (tuple of ints, optional):\n Set this if you want to compute the covariance of only a few input dimensions. The ints\n corresponds to the indices of the dimensions. Default: `None`.\n :attr:`period_length_prior` (Prior, optional):\n Set this if you want to apply a prior to the period length parameter. Default: `None`.\n :attr:`lengthscale_prior` (Prior, optional):\n Set this if you want to apply a prior to the lengthscale parameter. Default: `None`.\n :attr:`lengthscale_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the lengthscale. Default: `Positive`.\n :attr:`period_length_constraint` (Constraint, optional):\n Set this if you want to apply a constraint to the value of the period length. Default: `Positive`.\n :attr:`eps` (float):\n The minimum value that the lengthscale/period length can take\n (prevents divide by zero errors). Default: `1e-6`.\n\n Attributes:\n :attr:`lengthscale` (Tensor):\n The lengthscale parameter. Size = `*batch_shape x 1 x 1`.\n :attr:`period_length` (Tensor):\n The period length parameter. Size = `*batch_shape x 1 x 1`.\n\n Example:\n >>> x = torch.randn(10, 5)\n >>> # Non-batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>>\n >>> batch_x = torch.randn(2, 10, 5)\n >>> # Batch: Simple option\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())\n >>> # Batch: different lengthscale for each batch\n >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel(batch_size=2))\n >>> covar = covar_module(x) # Output: LazyVariable of size (2 x 10 x 10)\n \"\"\"\n\n has_lengthscale = True\n\n def __init__(self, period_length_prior=None, period_length_constraint=None, **kwargs):\n super(PeriodicKernel, self).__init__(**kwargs)\n if period_length_constraint is None:\n period_length_constraint = Positive()\n\n self.register_parameter(\n name=\"raw_period_length\", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))\n )\n\n if period_length_prior is not None:\n self.register_prior(\n \"period_length_prior\",\n period_length_prior,\n lambda m: m.period_length,\n lambda m, v: m._set_period_length(v),\n )\n\n self.register_constraint(\"raw_period_length\", period_length_constraint)\n\n @property\n def period_length(self):\n return self.raw_period_length_constraint.transform(self.raw_period_length)\n\n @period_length.setter\n def period_length(self, value):\n self._set_period_length(value)\n\n def _set_period_length(self, value):\n if not torch.is_tensor(value):\n value = torch.as_tensor(value).to(self.raw_period_length)\n self.initialize(raw_period_length=self.raw_period_length_constraint.inverse_transform(value))\n\n def forward(self, x1, x2, diag=False, **params):\n x1_ = x1.div(self.period_length).mul(math.pi)\n x2_ = x2.div(self.period_length).mul(math.pi)\n diff = x1_.unsqueeze(-2) - x2_.unsqueeze(-3)\n res = diff.sin().pow(2).sum(dim=-1).div(self.lengthscale).mul(-2.0).exp_()\n if diag:\n res = res.squeeze(0)\n return res\n", "path": "gpytorch/kernels/periodic_kernel.py"}]}
| 3,195 | 725 |
gh_patches_debug_41978
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-3239
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
break history page into multiple pages
Each week should get its own page, with paging nav added in the box.
break history page into multiple pages
Each week should get its own page, with paging nav added in the box.
Total Given on history pages for teams includes takes
On the history pages for teams, the "Total Given" amount includes both tips to people **and** takes by members, which is not _correct_.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/utils/history.py`
Content:
```
1 def iter_payday_events(db, participant):
2 """Yields payday events for the given participant.
3 """
4 username = participant.username
5 exchanges = db.all("""
6 SELECT *
7 FROM exchanges
8 WHERE participant=%s
9 """, (username,), back_as=dict)
10 transfers = db.all("""
11 SELECT *
12 FROM transfers
13 WHERE tipper=%(username)s OR tippee=%(username)s
14 """, locals(), back_as=dict)
15
16 if not (exchanges or transfers):
17 return
18
19 if transfers:
20 yield dict(
21 kind='totals',
22 given=sum(t['amount'] for t in transfers if t['tipper'] == username),
23 received=sum(t['amount'] for t in transfers if t['tippee'] == username),
24 )
25
26 payday_dates = db.all("""
27 SELECT ts_start::date
28 FROM paydays
29 ORDER BY ts_start ASC
30 """)
31
32 balance = participant.balance
33 prev_date = None
34 get_timestamp = lambda e: e['timestamp']
35 events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)
36 for event in events:
37
38 event['balance'] = balance
39
40 event_date = event['timestamp'].date()
41 if event_date != prev_date:
42 if prev_date:
43 yield dict(kind='day-close', balance=balance)
44 day_open = dict(kind='day-open', date=event_date, balance=balance)
45 if payday_dates:
46 while payday_dates and payday_dates[-1] > event_date:
47 payday_dates.pop()
48 payday_date = payday_dates[-1] if payday_dates else None
49 if event_date == payday_date:
50 day_open['payday_number'] = len(payday_dates) - 1
51 yield day_open
52 prev_date = event_date
53
54 if 'fee' in event:
55 if event['amount'] > 0:
56 kind = 'charge'
57 if event['status'] in (None, 'succeeded'):
58 balance -= event['amount']
59 else:
60 kind = 'credit'
61 if event['status'] != 'failed':
62 balance -= event['amount'] - event['fee']
63 else:
64 kind = 'transfer'
65 if event['tippee'] == username:
66 balance -= event['amount']
67 else:
68 balance += event['amount']
69 event['kind'] = kind
70
71 yield event
72
73 yield dict(kind='day-close', balance='0.00')
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py
--- a/gratipay/utils/history.py
+++ b/gratipay/utils/history.py
@@ -1,16 +1,83 @@
-def iter_payday_events(db, participant):
+from datetime import datetime
+from decimal import Decimal
+
+from psycopg2 import IntegrityError
+
+
+def get_end_of_year_balance(db, participant, year, current_year):
+ if year == current_year:
+ return participant.balance
+ if year < participant.claimed_time.year:
+ return Decimal('0.00')
+
+ balance = db.one("""
+ SELECT balance
+ FROM balances_at
+ WHERE participant = %s
+ AND "at" = %s
+ """, (participant.id, datetime(year+1, 1, 1)))
+ if balance is not None:
+ return balance
+
+ username = participant.username
+ start_balance = get_end_of_year_balance(db, participant, year-1, current_year)
+ delta = db.one("""
+ SELECT (
+ SELECT COALESCE(sum(amount), 0) AS a
+ FROM exchanges
+ WHERE participant = %(username)s
+ AND extract(year from timestamp) = %(year)s
+ AND amount > 0
+ AND (status is null OR status = 'succeeded')
+ ) + (
+ SELECT COALESCE(sum(amount-fee), 0) AS a
+ FROM exchanges
+ WHERE participant = %(username)s
+ AND extract(year from timestamp) = %(year)s
+ AND amount < 0
+ AND (status is null OR status <> 'failed')
+ ) + (
+ SELECT COALESCE(sum(-amount), 0) AS a
+ FROM transfers
+ WHERE tipper = %(username)s
+ AND extract(year from timestamp) = %(year)s
+ ) + (
+ SELECT COALESCE(sum(amount), 0) AS a
+ FROM transfers
+ WHERE tippee = %(username)s
+ AND extract(year from timestamp) = %(year)s
+ ) AS delta
+ """, locals())
+ balance = start_balance + delta
+ try:
+ db.run("""
+ INSERT INTO balances_at
+ (participant, at, balance)
+ VALUES (%s, %s, %s)
+ """, (participant.id, datetime(year+1, 1, 1), balance))
+ except IntegrityError:
+ pass
+ return balance
+
+
+def iter_payday_events(db, participant, year=None):
"""Yields payday events for the given participant.
"""
+ current_year = datetime.utcnow().year
+ year = year or current_year
+
username = participant.username
exchanges = db.all("""
SELECT *
FROM exchanges
- WHERE participant=%s
- """, (username,), back_as=dict)
+ WHERE participant=%(username)s
+ AND extract(year from timestamp) = %(year)s
+ """, locals(), back_as=dict)
transfers = db.all("""
SELECT *
FROM transfers
- WHERE tipper=%(username)s OR tippee=%(username)s
+ WHERE (tipper=%(username)s OR tippee=%(username)s)
+ AND extract(year from timestamp) = %(year)s
""", locals(), back_as=dict)
if not (exchanges or transfers):
@@ -19,7 +86,7 @@
if transfers:
yield dict(
kind='totals',
- given=sum(t['amount'] for t in transfers if t['tipper'] == username),
+ given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),
received=sum(t['amount'] for t in transfers if t['tippee'] == username),
)
@@ -29,7 +96,7 @@
ORDER BY ts_start ASC
""")
- balance = participant.balance
+ balance = get_end_of_year_balance(db, participant, year, current_year)
prev_date = None
get_timestamp = lambda e: e['timestamp']
events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)
@@ -70,4 +137,4 @@
yield event
- yield dict(kind='day-close', balance='0.00')
+ yield dict(kind='day-close', balance=balance)
|
{"golden_diff": "diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py\n--- a/gratipay/utils/history.py\n+++ b/gratipay/utils/history.py\n@@ -1,16 +1,83 @@\n-def iter_payday_events(db, participant):\n+from datetime import datetime\n+from decimal import Decimal\n+\n+from psycopg2 import IntegrityError\n+\n+\n+def get_end_of_year_balance(db, participant, year, current_year):\n+ if year == current_year:\n+ return participant.balance\n+ if year < participant.claimed_time.year:\n+ return Decimal('0.00')\n+\n+ balance = db.one(\"\"\"\n+ SELECT balance\n+ FROM balances_at\n+ WHERE participant = %s\n+ AND \"at\" = %s\n+ \"\"\", (participant.id, datetime(year+1, 1, 1)))\n+ if balance is not None:\n+ return balance\n+\n+ username = participant.username\n+ start_balance = get_end_of_year_balance(db, participant, year-1, current_year)\n+ delta = db.one(\"\"\"\n+ SELECT (\n+ SELECT COALESCE(sum(amount), 0) AS a\n+ FROM exchanges\n+ WHERE participant = %(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ AND amount > 0\n+ AND (status is null OR status = 'succeeded')\n+ ) + (\n+ SELECT COALESCE(sum(amount-fee), 0) AS a\n+ FROM exchanges\n+ WHERE participant = %(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ AND amount < 0\n+ AND (status is null OR status <> 'failed')\n+ ) + (\n+ SELECT COALESCE(sum(-amount), 0) AS a\n+ FROM transfers\n+ WHERE tipper = %(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ ) + (\n+ SELECT COALESCE(sum(amount), 0) AS a\n+ FROM transfers\n+ WHERE tippee = %(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ ) AS delta\n+ \"\"\", locals())\n+ balance = start_balance + delta\n+ try:\n+ db.run(\"\"\"\n+ INSERT INTO balances_at\n+ (participant, at, balance)\n+ VALUES (%s, %s, %s)\n+ \"\"\", (participant.id, datetime(year+1, 1, 1), balance))\n+ except IntegrityError:\n+ pass\n+ return balance\n+\n+\n+def iter_payday_events(db, participant, year=None):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n+ current_year = datetime.utcnow().year\n+ year = year or current_year\n+\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n- WHERE participant=%s\n- \"\"\", (username,), back_as=dict)\n+ WHERE participant=%(username)s\n+ AND extract(year from timestamp) = %(year)s\n+ \"\"\", locals(), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n- WHERE tipper=%(username)s OR tippee=%(username)s\n+ WHERE (tipper=%(username)s OR tippee=%(username)s)\n+ AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n \n if not (exchanges or transfers):\n@@ -19,7 +86,7 @@\n if transfers:\n yield dict(\n kind='totals',\n- given=sum(t['amount'] for t in transfers if t['tipper'] == username),\n+ given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),\n received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n )\n \n@@ -29,7 +96,7 @@\n ORDER BY ts_start ASC\n \"\"\")\n \n- balance = participant.balance\n+ balance = get_end_of_year_balance(db, participant, year, current_year)\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n@@ -70,4 +137,4 @@\n \n yield event\n \n- yield dict(kind='day-close', balance='0.00')\n+ yield dict(kind='day-close', balance=balance)\n", "issue": "break history page into multiple pages\nEach week should get its own page, with paging nav added in the box.\n\nbreak history page into multiple pages\nEach week should get its own page, with paging nav added in the box.\n\nTotal Given on history pages for teams includes takes\nOn the history pages for teams, the \"Total Given\" amount includes both tips to people **and** takes by members, which is not _correct_. \n\n", "before_files": [{"content": "def iter_payday_events(db, participant):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%s\n \"\"\", (username,), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE tipper=%(username)s OR tippee=%(username)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n if transfers:\n yield dict(\n kind='totals',\n given=sum(t['amount'] for t in transfers if t['tipper'] == username),\n received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n )\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = participant.balance\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance='0.00')\n", "path": "gratipay/utils/history.py"}], "after_files": [{"content": "from datetime import datetime\nfrom decimal import Decimal\n\nfrom psycopg2 import IntegrityError\n\n\ndef get_end_of_year_balance(db, participant, year, current_year):\n if year == current_year:\n return participant.balance\n if year < participant.claimed_time.year:\n return Decimal('0.00')\n\n balance = db.one(\"\"\"\n SELECT balance\n FROM balances_at\n WHERE participant = %s\n AND \"at\" = %s\n \"\"\", (participant.id, datetime(year+1, 1, 1)))\n if balance is not None:\n return balance\n\n username = participant.username\n start_balance = get_end_of_year_balance(db, participant, year-1, current_year)\n delta = db.one(\"\"\"\n SELECT (\n SELECT COALESCE(sum(amount), 0) AS a\n FROM exchanges\n WHERE participant = %(username)s\n AND extract(year from timestamp) = %(year)s\n AND amount > 0\n AND (status is null OR status = 'succeeded')\n ) + (\n SELECT COALESCE(sum(amount-fee), 0) AS a\n FROM exchanges\n WHERE participant = %(username)s\n AND extract(year from timestamp) = %(year)s\n AND amount < 0\n AND (status is null OR status <> 'failed')\n ) + (\n SELECT COALESCE(sum(-amount), 0) AS a\n FROM transfers\n WHERE tipper = %(username)s\n AND extract(year from timestamp) = %(year)s\n ) + (\n SELECT COALESCE(sum(amount), 0) AS a\n FROM transfers\n WHERE tippee = %(username)s\n AND extract(year from timestamp) = %(year)s\n ) AS delta\n \"\"\", locals())\n balance = start_balance + delta\n try:\n db.run(\"\"\"\n INSERT INTO balances_at\n (participant, at, balance)\n VALUES (%s, %s, %s)\n \"\"\", (participant.id, datetime(year+1, 1, 1), balance))\n except IntegrityError:\n pass\n return balance\n\n\ndef iter_payday_events(db, participant, year=None):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n current_year = datetime.utcnow().year\n year = year or current_year\n\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%(username)s\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE (tipper=%(username)s OR tippee=%(username)s)\n AND extract(year from timestamp) = %(year)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n if transfers:\n yield dict(\n kind='totals',\n given=sum(t['amount'] for t in transfers if t['tipper'] == username and t['context'] != 'take'),\n received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n )\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = get_end_of_year_balance(db, participant, year, current_year)\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance=balance)\n", "path": "gratipay/utils/history.py"}]}
| 1,011 | 990 |
gh_patches_debug_5260
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-535
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive for Z121 when using `_` for unused var names
# Bug report
## What's wrong
This code triggers `Z121`:
```python
def logo_and_user():
user, _ = User.objects.get_or_create(username='sobolevn')
logo, _ = Logo.objects.get_or_create(username='sobolevn') # here
```
Log: `test.py:4:11: Z121 Found usage of a variable marked as unused: _`
But it should not, since these variables are unused.
## How is that should be
It should not raise any `Z121` violations here.
## System information
I am using `8.0.1`
False positive Z309 for keys
# Bug report
<!--
Hi, thanks for submitting a bug. We appreciate that.
But, we will need some information about what's wrong to help you.
-->
## What's wrong
```python
def test(office, returned_item):
assert returned_item['id'] == office.id
```
raises `Z309 Found reversed comparison order` violation
## How is that should be
It should not raise a violation. We use variables on both sides.
## System information
I am using `0.8.1` release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/functions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import ClassVar, Dict, List, Optional, Union
5
6 from wemake_python_styleguide.constants import (
7 FUNCTIONS_BLACKLIST,
8 UNUSED_VARIABLE,
9 )
10 from wemake_python_styleguide.logics import functions
11 from wemake_python_styleguide.logics.naming import access
12 from wemake_python_styleguide.types import AnyFunctionDef, AnyNodes, final
13 from wemake_python_styleguide.violations.best_practices import (
14 BooleanPositionalArgumentViolation,
15 ComplexDefaultValuesViolation,
16 IncorrectSuperCallViolation,
17 WrongFunctionCallViolation,
18 )
19 from wemake_python_styleguide.violations.naming import (
20 UnusedVariableIsUsedViolation,
21 )
22 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
23 from wemake_python_styleguide.visitors.decorators import alias
24
25 LocalVariable = Union[ast.Name, ast.ExceptHandler]
26
27
28 @final
29 class WrongFunctionCallVisitor(BaseNodeVisitor):
30 """
31 Responsible for restricting some dangerous function calls.
32
33 All these functions are defined in ``FUNCTIONS_BLACKLIST``.
34 """
35
36 def _check_wrong_function_called(self, node: ast.Call) -> None:
37 function_name = functions.given_function_called(
38 node, FUNCTIONS_BLACKLIST,
39 )
40 if function_name:
41 self.add_violation(
42 WrongFunctionCallViolation(node, text=function_name),
43 )
44
45 def _check_boolean_arguments(self, node: ast.Call) -> None:
46 for arg in node.args:
47 if isinstance(arg, ast.NameConstant):
48 # We do not check for `None` values here:
49 if arg.value is True or arg.value is False:
50 self.add_violation(
51 BooleanPositionalArgumentViolation(
52 arg, text=str(arg.value),
53 ),
54 )
55
56 def _ensure_super_context(self, node: ast.Call) -> None:
57 parent_context = getattr(node, 'wps_context', None)
58 if isinstance(parent_context, (ast.FunctionDef, ast.AsyncFunctionDef)):
59 grand_context = getattr(parent_context, 'wps_context', None)
60 if isinstance(grand_context, ast.ClassDef):
61 return
62 self.add_violation(
63 IncorrectSuperCallViolation(node, text='not inside method'),
64 )
65
66 def _ensure_super_arguments(self, node: ast.Call) -> None:
67 if len(node.args) > 0 or len(node.keywords) > 0:
68 self.add_violation(
69 IncorrectSuperCallViolation(node, text='remove arguments'),
70 )
71
72 def _check_super_call(self, node: ast.Call) -> None:
73 function_name = functions.given_function_called(node, ['super'])
74 if function_name:
75 self._ensure_super_context(node)
76 self._ensure_super_arguments(node)
77
78 def visit_Call(self, node: ast.Call) -> None:
79 """
80 Used to find ``FUNCTIONS_BLACKLIST`` calls.
81
82 Raises:
83 BooleanPositionalArgumentViolation
84 WrongFunctionCallViolation
85 IncorrectSuperCallViolation
86
87 """
88 self._check_wrong_function_called(node)
89 self._check_boolean_arguments(node)
90 self._check_super_call(node)
91 self.generic_visit(node)
92
93
94 @final
95 @alias('visit_any_function', (
96 'visit_AsyncFunctionDef',
97 'visit_FunctionDef',
98 ))
99 class FunctionDefinitionVisitor(BaseNodeVisitor):
100 """Responsible for checking function internals."""
101
102 _allowed_default_value_types: ClassVar[AnyNodes] = (
103 ast.Name,
104 ast.Attribute,
105 ast.Str,
106 ast.NameConstant,
107 ast.Tuple,
108 ast.Bytes,
109 ast.Num,
110 )
111
112 def _check_used_variables(
113 self,
114 local_variables: Dict[str, List[LocalVariable]],
115 ) -> None:
116 for varname, usages in local_variables.items():
117 for node in usages:
118 if access.is_protected(varname) or varname == UNUSED_VARIABLE:
119 self.add_violation(
120 UnusedVariableIsUsedViolation(node, text=varname),
121 )
122
123 def _maybe_update_variable(
124 self,
125 sub_node: LocalVariable,
126 var_name: str,
127 local_variables: Dict[str, List[LocalVariable]],
128 ) -> None:
129 if var_name in local_variables:
130 local_variables[var_name].append(sub_node)
131 return
132
133 is_name_def = isinstance(
134 sub_node, ast.Name,
135 ) and isinstance(
136 sub_node.ctx, ast.Store,
137 )
138
139 if is_name_def or isinstance(sub_node, ast.ExceptHandler):
140 local_variables[var_name] = []
141
142 def _get_variable_name(self, node: LocalVariable) -> Optional[str]:
143 if isinstance(node, ast.Name):
144 return node.id
145 return getattr(node, 'name', None)
146
147 def _check_unused_variables(self, node: AnyFunctionDef) -> None:
148 local_variables: Dict[str, List[LocalVariable]] = {}
149 for body_item in node.body:
150 for sub_node in ast.walk(body_item):
151 if not isinstance(sub_node, (ast.Name, ast.ExceptHandler)):
152 continue
153
154 var_name = self._get_variable_name(sub_node)
155 if not var_name:
156 continue
157
158 self._maybe_update_variable(
159 sub_node, var_name, local_variables,
160 )
161 self._check_used_variables(local_variables)
162
163 def _check_argument_default_values(self, node: AnyFunctionDef) -> None:
164
165 for arg in node.args.defaults:
166 if not isinstance(arg, self._allowed_default_value_types):
167 self.add_violation(
168 ComplexDefaultValuesViolation(node, text='Test text'),
169 )
170
171 def visit_any_function(self, node: AnyFunctionDef) -> None:
172 """
173 Checks regular, lambda, and async functions.
174
175 Raises:
176 UnusedVariableIsUsedViolation
177 ComplexDefaultValuesViolation
178
179 """
180 self._check_argument_default_values(node)
181 self._check_unused_variables(node)
182 self.generic_visit(node)
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/visitors/ast/functions.py b/wemake_python_styleguide/visitors/ast/functions.py
--- a/wemake_python_styleguide/visitors/ast/functions.py
+++ b/wemake_python_styleguide/visitors/ast/functions.py
@@ -127,6 +127,9 @@
local_variables: Dict[str, List[LocalVariable]],
) -> None:
if var_name in local_variables:
+ if var_name == UNUSED_VARIABLE:
+ if isinstance(getattr(sub_node, 'ctx', None), ast.Store):
+ return
local_variables[var_name].append(sub_node)
return
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/functions.py b/wemake_python_styleguide/visitors/ast/functions.py\n--- a/wemake_python_styleguide/visitors/ast/functions.py\n+++ b/wemake_python_styleguide/visitors/ast/functions.py\n@@ -127,6 +127,9 @@\n local_variables: Dict[str, List[LocalVariable]],\n ) -> None:\n if var_name in local_variables:\n+ if var_name == UNUSED_VARIABLE:\n+ if isinstance(getattr(sub_node, 'ctx', None), ast.Store):\n+ return\n local_variables[var_name].append(sub_node)\n return\n", "issue": "False positive for Z121 when using `_` for unused var names\n# Bug report\r\n\r\n## What's wrong\r\n\r\nThis code triggers `Z121`:\r\n\r\n```python\r\ndef logo_and_user():\r\n user, _ = User.objects.get_or_create(username='sobolevn')\r\n logo, _ = Logo.objects.get_or_create(username='sobolevn') # here\r\n```\r\n\r\nLog: `test.py:4:11: Z121 Found usage of a variable marked as unused: _`\r\nBut it should not, since these variables are unused.\r\n\r\n## How is that should be\r\n\r\nIt should not raise any `Z121` violations here.\r\n\r\n## System information\r\n\r\nI am using `8.0.1`\r\n\nFalse positive Z309 for keys\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\n```python\r\ndef test(office, returned_item):\r\n assert returned_item['id'] == office.id\r\n```\r\n\r\nraises `Z309 Found reversed comparison order` violation\r\n\r\n## How is that should be\r\n\r\nIt should not raise a violation. We use variables on both sides.\r\n\r\n## System information\r\n\r\nI am using `0.8.1` release.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Dict, List, Optional, Union\n\nfrom wemake_python_styleguide.constants import (\n FUNCTIONS_BLACKLIST,\n UNUSED_VARIABLE,\n)\nfrom wemake_python_styleguide.logics import functions\nfrom wemake_python_styleguide.logics.naming import access\nfrom wemake_python_styleguide.types import AnyFunctionDef, AnyNodes, final\nfrom wemake_python_styleguide.violations.best_practices import (\n BooleanPositionalArgumentViolation,\n ComplexDefaultValuesViolation,\n IncorrectSuperCallViolation,\n WrongFunctionCallViolation,\n)\nfrom wemake_python_styleguide.violations.naming import (\n UnusedVariableIsUsedViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nLocalVariable = Union[ast.Name, ast.ExceptHandler]\n\n\n@final\nclass WrongFunctionCallVisitor(BaseNodeVisitor):\n \"\"\"\n Responsible for restricting some dangerous function calls.\n\n All these functions are defined in ``FUNCTIONS_BLACKLIST``.\n \"\"\"\n\n def _check_wrong_function_called(self, node: ast.Call) -> None:\n function_name = functions.given_function_called(\n node, FUNCTIONS_BLACKLIST,\n )\n if function_name:\n self.add_violation(\n WrongFunctionCallViolation(node, text=function_name),\n )\n\n def _check_boolean_arguments(self, node: ast.Call) -> None:\n for arg in node.args:\n if isinstance(arg, ast.NameConstant):\n # We do not check for `None` values here:\n if arg.value is True or arg.value is False:\n self.add_violation(\n BooleanPositionalArgumentViolation(\n arg, text=str(arg.value),\n ),\n )\n\n def _ensure_super_context(self, node: ast.Call) -> None:\n parent_context = getattr(node, 'wps_context', None)\n if isinstance(parent_context, (ast.FunctionDef, ast.AsyncFunctionDef)):\n grand_context = getattr(parent_context, 'wps_context', None)\n if isinstance(grand_context, ast.ClassDef):\n return\n self.add_violation(\n IncorrectSuperCallViolation(node, text='not inside method'),\n )\n\n def _ensure_super_arguments(self, node: ast.Call) -> None:\n if len(node.args) > 0 or len(node.keywords) > 0:\n self.add_violation(\n IncorrectSuperCallViolation(node, text='remove arguments'),\n )\n\n def _check_super_call(self, node: ast.Call) -> None:\n function_name = functions.given_function_called(node, ['super'])\n if function_name:\n self._ensure_super_context(node)\n self._ensure_super_arguments(node)\n\n def visit_Call(self, node: ast.Call) -> None:\n \"\"\"\n Used to find ``FUNCTIONS_BLACKLIST`` calls.\n\n Raises:\n BooleanPositionalArgumentViolation\n WrongFunctionCallViolation\n IncorrectSuperCallViolation\n\n \"\"\"\n self._check_wrong_function_called(node)\n self._check_boolean_arguments(node)\n self._check_super_call(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_function', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass FunctionDefinitionVisitor(BaseNodeVisitor):\n \"\"\"Responsible for checking function internals.\"\"\"\n\n _allowed_default_value_types: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Attribute,\n ast.Str,\n ast.NameConstant,\n ast.Tuple,\n ast.Bytes,\n ast.Num,\n )\n\n def _check_used_variables(\n self,\n local_variables: Dict[str, List[LocalVariable]],\n ) -> None:\n for varname, usages in local_variables.items():\n for node in usages:\n if access.is_protected(varname) or varname == UNUSED_VARIABLE:\n self.add_violation(\n UnusedVariableIsUsedViolation(node, text=varname),\n )\n\n def _maybe_update_variable(\n self,\n sub_node: LocalVariable,\n var_name: str,\n local_variables: Dict[str, List[LocalVariable]],\n ) -> None:\n if var_name in local_variables:\n local_variables[var_name].append(sub_node)\n return\n\n is_name_def = isinstance(\n sub_node, ast.Name,\n ) and isinstance(\n sub_node.ctx, ast.Store,\n )\n\n if is_name_def or isinstance(sub_node, ast.ExceptHandler):\n local_variables[var_name] = []\n\n def _get_variable_name(self, node: LocalVariable) -> Optional[str]:\n if isinstance(node, ast.Name):\n return node.id\n return getattr(node, 'name', None)\n\n def _check_unused_variables(self, node: AnyFunctionDef) -> None:\n local_variables: Dict[str, List[LocalVariable]] = {}\n for body_item in node.body:\n for sub_node in ast.walk(body_item):\n if not isinstance(sub_node, (ast.Name, ast.ExceptHandler)):\n continue\n\n var_name = self._get_variable_name(sub_node)\n if not var_name:\n continue\n\n self._maybe_update_variable(\n sub_node, var_name, local_variables,\n )\n self._check_used_variables(local_variables)\n\n def _check_argument_default_values(self, node: AnyFunctionDef) -> None:\n\n for arg in node.args.defaults:\n if not isinstance(arg, self._allowed_default_value_types):\n self.add_violation(\n ComplexDefaultValuesViolation(node, text='Test text'),\n )\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks regular, lambda, and async functions.\n\n Raises:\n UnusedVariableIsUsedViolation\n ComplexDefaultValuesViolation\n\n \"\"\"\n self._check_argument_default_values(node)\n self._check_unused_variables(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/functions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, Dict, List, Optional, Union\n\nfrom wemake_python_styleguide.constants import (\n FUNCTIONS_BLACKLIST,\n UNUSED_VARIABLE,\n)\nfrom wemake_python_styleguide.logics import functions\nfrom wemake_python_styleguide.logics.naming import access\nfrom wemake_python_styleguide.types import AnyFunctionDef, AnyNodes, final\nfrom wemake_python_styleguide.violations.best_practices import (\n BooleanPositionalArgumentViolation,\n ComplexDefaultValuesViolation,\n IncorrectSuperCallViolation,\n WrongFunctionCallViolation,\n)\nfrom wemake_python_styleguide.violations.naming import (\n UnusedVariableIsUsedViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nLocalVariable = Union[ast.Name, ast.ExceptHandler]\n\n\n@final\nclass WrongFunctionCallVisitor(BaseNodeVisitor):\n \"\"\"\n Responsible for restricting some dangerous function calls.\n\n All these functions are defined in ``FUNCTIONS_BLACKLIST``.\n \"\"\"\n\n def _check_wrong_function_called(self, node: ast.Call) -> None:\n function_name = functions.given_function_called(\n node, FUNCTIONS_BLACKLIST,\n )\n if function_name:\n self.add_violation(\n WrongFunctionCallViolation(node, text=function_name),\n )\n\n def _check_boolean_arguments(self, node: ast.Call) -> None:\n for arg in node.args:\n if isinstance(arg, ast.NameConstant):\n # We do not check for `None` values here:\n if arg.value is True or arg.value is False:\n self.add_violation(\n BooleanPositionalArgumentViolation(\n arg, text=str(arg.value),\n ),\n )\n\n def _ensure_super_context(self, node: ast.Call) -> None:\n parent_context = getattr(node, 'wps_context', None)\n if isinstance(parent_context, (ast.FunctionDef, ast.AsyncFunctionDef)):\n grand_context = getattr(parent_context, 'wps_context', None)\n if isinstance(grand_context, ast.ClassDef):\n return\n self.add_violation(\n IncorrectSuperCallViolation(node, text='not inside method'),\n )\n\n def _ensure_super_arguments(self, node: ast.Call) -> None:\n if len(node.args) > 0 or len(node.keywords) > 0:\n self.add_violation(\n IncorrectSuperCallViolation(node, text='remove arguments'),\n )\n\n def _check_super_call(self, node: ast.Call) -> None:\n function_name = functions.given_function_called(node, ['super'])\n if function_name:\n self._ensure_super_context(node)\n self._ensure_super_arguments(node)\n\n def visit_Call(self, node: ast.Call) -> None:\n \"\"\"\n Used to find ``FUNCTIONS_BLACKLIST`` calls.\n\n Raises:\n BooleanPositionalArgumentViolation\n WrongFunctionCallViolation\n IncorrectSuperCallViolation\n\n \"\"\"\n self._check_wrong_function_called(node)\n self._check_boolean_arguments(node)\n self._check_super_call(node)\n self.generic_visit(node)\n\n\n@final\n@alias('visit_any_function', (\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass FunctionDefinitionVisitor(BaseNodeVisitor):\n \"\"\"Responsible for checking function internals.\"\"\"\n\n _allowed_default_value_types: ClassVar[AnyNodes] = (\n ast.Name,\n ast.Attribute,\n ast.Str,\n ast.NameConstant,\n ast.Tuple,\n ast.Bytes,\n ast.Num,\n )\n\n def _check_used_variables(\n self,\n local_variables: Dict[str, List[LocalVariable]],\n ) -> None:\n for varname, usages in local_variables.items():\n for node in usages:\n if access.is_protected(varname) or varname == UNUSED_VARIABLE:\n self.add_violation(\n UnusedVariableIsUsedViolation(node, text=varname),\n )\n\n def _maybe_update_variable(\n self,\n sub_node: LocalVariable,\n var_name: str,\n local_variables: Dict[str, List[LocalVariable]],\n ) -> None:\n if var_name in local_variables:\n if var_name == UNUSED_VARIABLE:\n if isinstance(getattr(sub_node, 'ctx', None), ast.Store):\n return\n local_variables[var_name].append(sub_node)\n return\n\n is_name_def = isinstance(\n sub_node, ast.Name,\n ) and isinstance(\n sub_node.ctx, ast.Store,\n )\n\n if is_name_def or isinstance(sub_node, ast.ExceptHandler):\n local_variables[var_name] = []\n\n def _get_variable_name(self, node: LocalVariable) -> Optional[str]:\n if isinstance(node, ast.Name):\n return node.id\n return getattr(node, 'name', None)\n\n def _check_unused_variables(self, node: AnyFunctionDef) -> None:\n local_variables: Dict[str, List[LocalVariable]] = {}\n for body_item in node.body:\n for sub_node in ast.walk(body_item):\n if not isinstance(sub_node, (ast.Name, ast.ExceptHandler)):\n continue\n\n var_name = self._get_variable_name(sub_node)\n if not var_name:\n continue\n\n self._maybe_update_variable(\n sub_node, var_name, local_variables,\n )\n self._check_used_variables(local_variables)\n\n def _check_argument_default_values(self, node: AnyFunctionDef) -> None:\n\n for arg in node.args.defaults:\n if not isinstance(arg, self._allowed_default_value_types):\n self.add_violation(\n ComplexDefaultValuesViolation(node, text='Test text'),\n )\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks regular, lambda, and async functions.\n\n Raises:\n UnusedVariableIsUsedViolation\n ComplexDefaultValuesViolation\n\n \"\"\"\n self._check_argument_default_values(node)\n self._check_unused_variables(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/functions.py"}]}
| 2,253 | 146 |
gh_patches_debug_13737
|
rasdani/github-patches
|
git_diff
|
translate__pootle-4191
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A user can't download the terminology files from a Terminology project
I know we switched off download of `pootle-terminology.po` files as they cause a number of problem mostly because there is no associated file. But this seems to have extended to also include the 'Terminology' project.
I can't recall if at the time we decided it was an OK compromise. But now with @phlax's work which means we can have non-existent files we should likely be able to bring this back. At the very least we shouldn't be disabling download of files within Terminology ad only restrict `pootle-terminology.po`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_translationproject/views.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.conf import settings
11 from django.shortcuts import render
12
13 from import_export.views import handle_upload_form
14 from pootle.core.browser import (get_children, get_table_headings, get_parent,
15 get_vfolders)
16 from pootle.core.decorators import (get_path_obj, get_resource,
17 permission_required)
18 from pootle.core.helpers import (get_export_view_context, get_browser_context,
19 get_sidebar_announcements_context,
20 get_translation_context, SIDEBAR_COOKIE_NAME)
21 from pootle.core.utils.json import jsonify
22 from pootle_app.models.permissions import check_permission
23 from pootle_app.views.admin.permissions import admin_permissions as admin_perms
24
25
26 @get_path_obj
27 @permission_required('administrate')
28 def admin_permissions(request, translation_project):
29 language = translation_project.language
30 project = translation_project.project
31
32 ctx = {
33 'page': 'admin-permissions',
34
35 'translation_project': translation_project,
36 'project': project,
37 'language': language,
38 'directory': translation_project.directory,
39 }
40
41 return admin_perms(request, translation_project.directory,
42 'translation_projects/admin/permissions.html', ctx)
43
44
45 @get_path_obj
46 @permission_required('view')
47 @get_resource
48 def browse(request, translation_project, dir_path, filename=None):
49 project = translation_project.project
50 language = translation_project.language
51
52 directory = request.directory
53 store = request.store
54 is_admin = check_permission('administrate', request)
55
56 ctx, cookie_data = get_sidebar_announcements_context(
57 request,
58 (project, language, translation_project, ),
59 )
60
61 ctx.update(get_browser_context(request))
62
63 # TODO improve plugin logic
64 if "import_export" in settings.INSTALLED_APPS:
65 if not translation_project.is_terminology_project:
66 if request.user.is_authenticated():
67 if check_permission('translate', request):
68 ctx.update(handle_upload_form(request, project))
69 ctx.update({'display_download': True,
70 'has_sidebar': True})
71
72 stats = request.resource_obj.get_stats()
73
74 if store is None:
75 table_fields = ['name', 'progress', 'total', 'need-translation',
76 'suggestions', 'critical', 'last-updated', 'activity']
77 ctx.update({
78 'table': {
79 'id': 'tp',
80 'fields': table_fields,
81 'headings': get_table_headings(table_fields),
82 'items': get_children(directory),
83 }
84 })
85
86 if 'virtualfolder' in settings.INSTALLED_APPS:
87 vfolders = get_vfolders(directory, all_vfolders=is_admin)
88 if len(vfolders) > 0:
89 table_fields = ['name', 'priority', 'progress', 'total',
90 'need-translation', 'suggestions', 'critical',
91 'last-updated', 'activity']
92 ctx.update({
93 'vfolders': {
94 'id': 'vfolders',
95 'fields': table_fields,
96 'headings': get_table_headings(table_fields),
97 'items': vfolders,
98 },
99 })
100
101 # FIXME: set vfolders stats in the resource, don't inject them
102 # here.
103 stats['vfolders'] = {}
104
105 for vfolder_treeitem in directory.vf_treeitems.iterator():
106 if request.user.is_superuser or vfolder_treeitem.is_visible:
107 stats['vfolders'][vfolder_treeitem.code] = \
108 vfolder_treeitem.get_stats(include_children=False)
109
110 ctx.update({
111 'parent': get_parent(directory if store is None else store),
112 'translation_project': translation_project,
113 'project': project,
114 'language': language,
115 'stats': jsonify(stats),
116 'is_admin': is_admin,
117 'is_store': store is not None,
118
119 'browser_extends': 'translation_projects/base.html',
120 })
121
122 response = render(request, 'browser/index.html', ctx)
123
124 if cookie_data:
125 response.set_cookie(SIDEBAR_COOKIE_NAME, cookie_data)
126
127 return response
128
129
130 @get_path_obj
131 @permission_required('view')
132 @get_resource
133 def translate(request, translation_project, dir_path, filename):
134 project = translation_project.project
135
136 ctx = get_translation_context(request)
137
138 ctx.update({
139 'language': translation_project.language,
140 'project': project,
141 'translation_project': translation_project,
142
143 'editor_extends': 'translation_projects/base.html',
144 })
145
146 return render(request, "editor/main.html", ctx)
147
148
149 @get_path_obj
150 @permission_required('view')
151 @get_resource
152 def export_view(request, translation_project, dir_path, filename=None):
153 """Displays a list of units with filters applied."""
154 ctx = get_export_view_context(request)
155 ctx.update({
156 'source_language': translation_project.project.source_language,
157 'language': translation_project.language,
158 'project': translation_project.project,
159 })
160
161 return render(request, 'editor/export_view.html', ctx)
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/apps/pootle_translationproject/views.py b/pootle/apps/pootle_translationproject/views.py
--- a/pootle/apps/pootle_translationproject/views.py
+++ b/pootle/apps/pootle_translationproject/views.py
@@ -62,12 +62,11 @@
# TODO improve plugin logic
if "import_export" in settings.INSTALLED_APPS:
- if not translation_project.is_terminology_project:
- if request.user.is_authenticated():
- if check_permission('translate', request):
- ctx.update(handle_upload_form(request, project))
- ctx.update({'display_download': True,
- 'has_sidebar': True})
+ if request.user.is_authenticated():
+ if check_permission('translate', request):
+ ctx.update(handle_upload_form(request, project))
+ ctx.update({'display_download': True,
+ 'has_sidebar': True})
stats = request.resource_obj.get_stats()
|
{"golden_diff": "diff --git a/pootle/apps/pootle_translationproject/views.py b/pootle/apps/pootle_translationproject/views.py\n--- a/pootle/apps/pootle_translationproject/views.py\n+++ b/pootle/apps/pootle_translationproject/views.py\n@@ -62,12 +62,11 @@\n \n # TODO improve plugin logic\n if \"import_export\" in settings.INSTALLED_APPS:\n- if not translation_project.is_terminology_project:\n- if request.user.is_authenticated():\n- if check_permission('translate', request):\n- ctx.update(handle_upload_form(request, project))\n- ctx.update({'display_download': True,\n- 'has_sidebar': True})\n+ if request.user.is_authenticated():\n+ if check_permission('translate', request):\n+ ctx.update(handle_upload_form(request, project))\n+ ctx.update({'display_download': True,\n+ 'has_sidebar': True})\n \n stats = request.resource_obj.get_stats()\n", "issue": "A user can't download the terminology files from a Terminology project\nI know we switched off download of `pootle-terminology.po` files as they cause a number of problem mostly because there is no associated file. But this seems to have extended to also include the 'Terminology' project.\n\nI can't recall if at the time we decided it was an OK compromise. But now with @phlax's work which means we can have non-existent files we should likely be able to bring this back. At the very least we shouldn't be disabling download of files within Terminology ad only restrict `pootle-terminology.po`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.shortcuts import render\n\nfrom import_export.views import handle_upload_form\nfrom pootle.core.browser import (get_children, get_table_headings, get_parent,\n get_vfolders)\nfrom pootle.core.decorators import (get_path_obj, get_resource,\n permission_required)\nfrom pootle.core.helpers import (get_export_view_context, get_browser_context,\n get_sidebar_announcements_context,\n get_translation_context, SIDEBAR_COOKIE_NAME)\nfrom pootle.core.utils.json import jsonify\nfrom pootle_app.models.permissions import check_permission\nfrom pootle_app.views.admin.permissions import admin_permissions as admin_perms\n\n\n@get_path_obj\n@permission_required('administrate')\ndef admin_permissions(request, translation_project):\n language = translation_project.language\n project = translation_project.project\n\n ctx = {\n 'page': 'admin-permissions',\n\n 'translation_project': translation_project,\n 'project': project,\n 'language': language,\n 'directory': translation_project.directory,\n }\n\n return admin_perms(request, translation_project.directory,\n 'translation_projects/admin/permissions.html', ctx)\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef browse(request, translation_project, dir_path, filename=None):\n project = translation_project.project\n language = translation_project.language\n\n directory = request.directory\n store = request.store\n is_admin = check_permission('administrate', request)\n\n ctx, cookie_data = get_sidebar_announcements_context(\n request,\n (project, language, translation_project, ),\n )\n\n ctx.update(get_browser_context(request))\n\n # TODO improve plugin logic\n if \"import_export\" in settings.INSTALLED_APPS:\n if not translation_project.is_terminology_project:\n if request.user.is_authenticated():\n if check_permission('translate', request):\n ctx.update(handle_upload_form(request, project))\n ctx.update({'display_download': True,\n 'has_sidebar': True})\n\n stats = request.resource_obj.get_stats()\n\n if store is None:\n table_fields = ['name', 'progress', 'total', 'need-translation',\n 'suggestions', 'critical', 'last-updated', 'activity']\n ctx.update({\n 'table': {\n 'id': 'tp',\n 'fields': table_fields,\n 'headings': get_table_headings(table_fields),\n 'items': get_children(directory),\n }\n })\n\n if 'virtualfolder' in settings.INSTALLED_APPS:\n vfolders = get_vfolders(directory, all_vfolders=is_admin)\n if len(vfolders) > 0:\n table_fields = ['name', 'priority', 'progress', 'total',\n 'need-translation', 'suggestions', 'critical',\n 'last-updated', 'activity']\n ctx.update({\n 'vfolders': {\n 'id': 'vfolders',\n 'fields': table_fields,\n 'headings': get_table_headings(table_fields),\n 'items': vfolders,\n },\n })\n\n # FIXME: set vfolders stats in the resource, don't inject them\n # here.\n stats['vfolders'] = {}\n\n for vfolder_treeitem in directory.vf_treeitems.iterator():\n if request.user.is_superuser or vfolder_treeitem.is_visible:\n stats['vfolders'][vfolder_treeitem.code] = \\\n vfolder_treeitem.get_stats(include_children=False)\n\n ctx.update({\n 'parent': get_parent(directory if store is None else store),\n 'translation_project': translation_project,\n 'project': project,\n 'language': language,\n 'stats': jsonify(stats),\n 'is_admin': is_admin,\n 'is_store': store is not None,\n\n 'browser_extends': 'translation_projects/base.html',\n })\n\n response = render(request, 'browser/index.html', ctx)\n\n if cookie_data:\n response.set_cookie(SIDEBAR_COOKIE_NAME, cookie_data)\n\n return response\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef translate(request, translation_project, dir_path, filename):\n project = translation_project.project\n\n ctx = get_translation_context(request)\n\n ctx.update({\n 'language': translation_project.language,\n 'project': project,\n 'translation_project': translation_project,\n\n 'editor_extends': 'translation_projects/base.html',\n })\n\n return render(request, \"editor/main.html\", ctx)\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef export_view(request, translation_project, dir_path, filename=None):\n \"\"\"Displays a list of units with filters applied.\"\"\"\n ctx = get_export_view_context(request)\n ctx.update({\n 'source_language': translation_project.project.source_language,\n 'language': translation_project.language,\n 'project': translation_project.project,\n })\n\n return render(request, 'editor/export_view.html', ctx)\n", "path": "pootle/apps/pootle_translationproject/views.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.shortcuts import render\n\nfrom import_export.views import handle_upload_form\nfrom pootle.core.browser import (get_children, get_table_headings, get_parent,\n get_vfolders)\nfrom pootle.core.decorators import (get_path_obj, get_resource,\n permission_required)\nfrom pootle.core.helpers import (get_export_view_context, get_browser_context,\n get_sidebar_announcements_context,\n get_translation_context, SIDEBAR_COOKIE_NAME)\nfrom pootle.core.utils.json import jsonify\nfrom pootle_app.models.permissions import check_permission\nfrom pootle_app.views.admin.permissions import admin_permissions as admin_perms\n\n\n@get_path_obj\n@permission_required('administrate')\ndef admin_permissions(request, translation_project):\n language = translation_project.language\n project = translation_project.project\n\n ctx = {\n 'page': 'admin-permissions',\n\n 'translation_project': translation_project,\n 'project': project,\n 'language': language,\n 'directory': translation_project.directory,\n }\n\n return admin_perms(request, translation_project.directory,\n 'translation_projects/admin/permissions.html', ctx)\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef browse(request, translation_project, dir_path, filename=None):\n project = translation_project.project\n language = translation_project.language\n\n directory = request.directory\n store = request.store\n is_admin = check_permission('administrate', request)\n\n ctx, cookie_data = get_sidebar_announcements_context(\n request,\n (project, language, translation_project, ),\n )\n\n ctx.update(get_browser_context(request))\n\n # TODO improve plugin logic\n if \"import_export\" in settings.INSTALLED_APPS:\n if request.user.is_authenticated():\n if check_permission('translate', request):\n ctx.update(handle_upload_form(request, project))\n ctx.update({'display_download': True,\n 'has_sidebar': True})\n\n stats = request.resource_obj.get_stats()\n\n if store is None:\n table_fields = ['name', 'progress', 'total', 'need-translation',\n 'suggestions', 'critical', 'last-updated', 'activity']\n ctx.update({\n 'table': {\n 'id': 'tp',\n 'fields': table_fields,\n 'headings': get_table_headings(table_fields),\n 'items': get_children(directory),\n }\n })\n\n if 'virtualfolder' in settings.INSTALLED_APPS:\n vfolders = get_vfolders(directory, all_vfolders=is_admin)\n if len(vfolders) > 0:\n table_fields = ['name', 'priority', 'progress', 'total',\n 'need-translation', 'suggestions', 'critical',\n 'last-updated', 'activity']\n ctx.update({\n 'vfolders': {\n 'id': 'vfolders',\n 'fields': table_fields,\n 'headings': get_table_headings(table_fields),\n 'items': vfolders,\n },\n })\n\n # FIXME: set vfolders stats in the resource, don't inject them\n # here.\n stats['vfolders'] = {}\n\n for vfolder_treeitem in directory.vf_treeitems.iterator():\n if request.user.is_superuser or vfolder_treeitem.is_visible:\n stats['vfolders'][vfolder_treeitem.code] = \\\n vfolder_treeitem.get_stats(include_children=False)\n\n ctx.update({\n 'parent': get_parent(directory if store is None else store),\n 'translation_project': translation_project,\n 'project': project,\n 'language': language,\n 'stats': jsonify(stats),\n 'is_admin': is_admin,\n 'is_store': store is not None,\n\n 'browser_extends': 'translation_projects/base.html',\n })\n\n response = render(request, 'browser/index.html', ctx)\n\n if cookie_data:\n response.set_cookie(SIDEBAR_COOKIE_NAME, cookie_data)\n\n return response\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef translate(request, translation_project, dir_path, filename):\n project = translation_project.project\n\n ctx = get_translation_context(request)\n\n ctx.update({\n 'language': translation_project.language,\n 'project': project,\n 'translation_project': translation_project,\n\n 'editor_extends': 'translation_projects/base.html',\n })\n\n return render(request, \"editor/main.html\", ctx)\n\n\n@get_path_obj\n@permission_required('view')\n@get_resource\ndef export_view(request, translation_project, dir_path, filename=None):\n \"\"\"Displays a list of units with filters applied.\"\"\"\n ctx = get_export_view_context(request)\n ctx.update({\n 'source_language': translation_project.project.source_language,\n 'language': translation_project.language,\n 'project': translation_project.project,\n })\n\n return render(request, 'editor/export_view.html', ctx)\n", "path": "pootle/apps/pootle_translationproject/views.py"}]}
| 1,898 | 207 |
gh_patches_debug_18652
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-536
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When running the calculate perplexity, got the error
Here is the error, sounds likely it needs GPU, would you suggestion what I should I modify in the code of perplexity calculation?
File "/flair/models/language_model.py", line 69, in forward
encoded = self.encoder(input)
File ".local/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File ".local/lib/python3.6/site-packages/torch/nn/modules/sparse.py", line 110, in forward
self.norm_type, self.scale_grad_by_freq, self.sparse)
File ".local/lib/python3.6/site-packages/torch/nn/functional.py", line 1110, in embedding
return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
RuntimeError: Expected object of type torch.cuda.LongTensor but found type torch.LongTensor for argument #3 'index'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/language_model.py`
Content:
```
1 from pathlib import Path
2
3 import torch.nn as nn
4 import torch
5 import math
6 from typing import Union, Tuple
7 from typing import List
8
9 from torch.optim import Optimizer
10
11 import flair
12 from flair.data import Dictionary
13
14
15 class LanguageModel(nn.Module):
16 """Container module with an encoder, a recurrent module, and a decoder."""
17
18 def __init__(self,
19 dictionary: Dictionary,
20 is_forward_lm: bool,
21 hidden_size: int,
22 nlayers: int,
23 embedding_size: int = 100,
24 nout=None,
25 dropout=0.1):
26
27 super(LanguageModel, self).__init__()
28
29 self.dictionary = dictionary
30 self.is_forward_lm: bool = is_forward_lm
31
32 self.dropout = dropout
33 self.hidden_size = hidden_size
34 self.embedding_size = embedding_size
35 self.nlayers = nlayers
36
37 self.drop = nn.Dropout(dropout)
38 self.encoder = nn.Embedding(len(dictionary), embedding_size)
39
40 if nlayers == 1:
41 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)
42 else:
43 self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)
44
45 self.hidden = None
46
47 self.nout = nout
48 if nout is not None:
49 self.proj = nn.Linear(hidden_size, nout)
50 self.initialize(self.proj.weight)
51 self.decoder = nn.Linear(nout, len(dictionary))
52 else:
53 self.proj = None
54 self.decoder = nn.Linear(hidden_size, len(dictionary))
55
56 self.init_weights()
57
58 # auto-spawn on GPU if available
59 self.to(flair.device)
60
61 def init_weights(self):
62 initrange = 0.1
63 self.encoder.weight.detach().uniform_(-initrange, initrange)
64 self.decoder.bias.detach().fill_(0)
65 self.decoder.weight.detach().uniform_(-initrange, initrange)
66
67 def set_hidden(self, hidden):
68 self.hidden = hidden
69
70 def forward(self, input, hidden, ordered_sequence_lengths=None):
71 encoded = self.encoder(input)
72 emb = self.drop(encoded)
73
74 self.rnn.flatten_parameters()
75
76 output, hidden = self.rnn(emb, hidden)
77
78 if self.proj is not None:
79 output = self.proj(output)
80
81 output = self.drop(output)
82
83 decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
84
85 return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden
86
87 def init_hidden(self, bsz):
88 weight = next(self.parameters()).detach()
89 return (weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),
90 weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach())
91
92 def get_representation(self, strings: List[str], chars_per_chunk: int = 512):
93
94 # cut up the input into chunks of max charlength = chunk_size
95 longest = len(strings[0])
96 chunks = []
97 splice_begin = 0
98 for splice_end in range(chars_per_chunk, longest, chars_per_chunk):
99 chunks.append([text[splice_begin:splice_end] for text in strings])
100 splice_begin = splice_end
101
102 chunks.append([text[splice_begin:longest] for text in strings])
103 hidden = self.init_hidden(len(chunks[0]))
104
105 output_parts = []
106
107 # push each chunk through the RNN language model
108 for chunk in chunks:
109
110 sequences_as_char_indices: List[List[int]] = []
111 for string in chunk:
112 char_indices = [self.dictionary.get_idx_for_item(char) for char in string]
113 sequences_as_char_indices.append(char_indices)
114
115 batch = torch.LongTensor(sequences_as_char_indices).transpose(0, 1)
116 batch = batch.to(flair.device)
117
118 prediction, rnn_output, hidden = self.forward(batch, hidden)
119 rnn_output = rnn_output.detach()
120
121 output_parts.append(rnn_output)
122
123 # concatenate all chunks to make final output
124 output = torch.cat(output_parts)
125
126 return output
127
128 def get_output(self, text: str):
129 char_indices = [self.dictionary.get_idx_for_item(char) for char in text]
130 input_vector = torch.LongTensor([char_indices]).transpose(0, 1)
131
132 hidden = self.init_hidden(1)
133 prediction, rnn_output, hidden = self.forward(input_vector, hidden)
134
135 return self.repackage_hidden(hidden)
136
137 def repackage_hidden(self, h):
138 """Wraps hidden states in new Variables, to detach them from their history."""
139 if type(h) == torch.Tensor:
140 return h.clone().detach()
141 else:
142 return tuple(self.repackage_hidden(v) for v in h)
143
144 def initialize(self, matrix):
145 in_, out_ = matrix.size()
146 stdv = math.sqrt(3. / (in_ + out_))
147 matrix.detach().uniform_(-stdv, stdv)
148
149 @classmethod
150 def load_language_model(cls, model_file: Union[Path, str]):
151
152 state = torch.load(str(model_file), map_location=flair.device)
153
154 model = LanguageModel(state['dictionary'],
155 state['is_forward_lm'],
156 state['hidden_size'],
157 state['nlayers'],
158 state['embedding_size'],
159 state['nout'],
160 state['dropout'])
161 model.load_state_dict(state['state_dict'])
162 model.eval()
163 model.to(flair.device)
164
165 return model
166
167 @classmethod
168 def load_checkpoint(cls, model_file: Path):
169 state = torch.load(str(model_file), map_location=flair.device)
170
171 epoch = state['epoch'] if 'epoch' in state else None
172 split = state['split'] if 'split' in state else None
173 loss = state['loss'] if 'loss' in state else None
174 optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None
175
176 model = LanguageModel(state['dictionary'],
177 state['is_forward_lm'],
178 state['hidden_size'],
179 state['nlayers'],
180 state['embedding_size'],
181 state['nout'],
182 state['dropout'])
183 model.load_state_dict(state['state_dict'])
184 model.eval()
185 model.to(flair.device)
186
187 return {'model': model, 'epoch': epoch, 'split': split, 'loss': loss,
188 'optimizer_state_dict': optimizer_state_dict}
189
190 def save_checkpoint(self, file: Path, optimizer: Optimizer, epoch: int, split: int, loss: float):
191 model_state = {
192 'state_dict': self.state_dict(),
193 'dictionary': self.dictionary,
194 'is_forward_lm': self.is_forward_lm,
195 'hidden_size': self.hidden_size,
196 'nlayers': self.nlayers,
197 'embedding_size': self.embedding_size,
198 'nout': self.nout,
199 'dropout': self.dropout,
200 'optimizer_state_dict': optimizer.state_dict(),
201 'epoch': epoch,
202 'split': split,
203 'loss': loss
204 }
205
206 torch.save(model_state, str(file), pickle_protocol=4)
207
208 def save(self, file: Path):
209 model_state = {
210 'state_dict': self.state_dict(),
211 'dictionary': self.dictionary,
212 'is_forward_lm': self.is_forward_lm,
213 'hidden_size': self.hidden_size,
214 'nlayers': self.nlayers,
215 'embedding_size': self.embedding_size,
216 'nout': self.nout,
217 'dropout': self.dropout
218 }
219
220 torch.save(model_state, str(file), pickle_protocol=4)
221
222 def generate_text(self, prefix: str = '\n', number_of_characters: int = 1000, temperature: float = 1.0,
223 break_on_suffix=None) -> Tuple[str, float]:
224
225 if prefix == '':
226 prefix = '\n'
227
228 with torch.no_grad():
229 characters = []
230
231 idx2item = self.dictionary.idx2item
232
233 # initial hidden state
234 hidden = self.init_hidden(1)
235
236 if len(prefix) > 1:
237
238 char_tensors = []
239 for character in prefix[:-1]:
240 char_tensors.append(
241 torch.tensor(self.dictionary.get_idx_for_item(character)).unsqueeze(0).unsqueeze(0))
242
243 input = torch.cat(char_tensors)
244 if torch.cuda.is_available():
245 input = input.cuda()
246
247 prediction, _, hidden = self.forward(input, hidden)
248
249 input = torch.tensor(self.dictionary.get_idx_for_item(prefix[-1])).unsqueeze(0).unsqueeze(0)
250
251 log_prob = 0.
252
253 for i in range(number_of_characters):
254
255 if torch.cuda.is_available():
256 input = input.cuda()
257
258 # get predicted weights
259 prediction, _, hidden = self.forward(input, hidden)
260 prediction = prediction.squeeze().detach()
261 decoder_output = prediction
262
263 # divide by temperature
264 prediction = prediction.div(temperature)
265
266 # to prevent overflow problem with small temperature values, substract largest value from all
267 # this makes a vector in which the largest value is 0
268 max = torch.max(prediction)
269 prediction -= max
270
271 # compute word weights with exponential function
272 word_weights = prediction.exp().cpu()
273
274 # try sampling multinomial distribution for next character
275 try:
276 word_idx = torch.multinomial(word_weights, 1)[0]
277 except:
278 word_idx = torch.tensor(0)
279
280 # print(word_idx)
281 prob = decoder_output[word_idx]
282 log_prob += prob
283
284 input = word_idx.detach().unsqueeze(0).unsqueeze(0)
285 word = idx2item[word_idx].decode('UTF-8')
286 characters.append(word)
287
288 if break_on_suffix is not None:
289 if ''.join(characters).endswith(break_on_suffix):
290 break
291
292 text = prefix + ''.join(characters)
293
294 log_prob = log_prob.item()
295 log_prob /= len(characters)
296
297 if not self.is_forward_lm:
298 text = text[::-1]
299
300 return text, log_prob
301
302 def calculate_perplexity(self, text: str) -> float:
303
304 if not self.is_forward_lm:
305 text = text[::-1]
306
307 # input ids
308 input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)
309
310 # push list of character IDs through model
311 hidden = self.init_hidden(1)
312 prediction, _, hidden = self.forward(input, hidden)
313
314 # the target is always the next character
315 targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])
316
317 # use cross entropy loss to compare output of forward pass with targets
318 cross_entroy_loss = torch.nn.CrossEntropyLoss()
319 loss = cross_entroy_loss(prediction.view(-1, len(self.dictionary)), targets).item()
320
321 # exponentiate cross-entropy loss to calculate perplexity
322 perplexity = math.exp(loss)
323
324 return perplexity
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/models/language_model.py b/flair/models/language_model.py
--- a/flair/models/language_model.py
+++ b/flair/models/language_model.py
@@ -306,6 +306,7 @@
# input ids
input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)
+ input = input.to(flair.device)
# push list of character IDs through model
hidden = self.init_hidden(1)
@@ -313,6 +314,7 @@
# the target is always the next character
targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])
+ targets = targets.to(flair.device)
# use cross entropy loss to compare output of forward pass with targets
cross_entroy_loss = torch.nn.CrossEntropyLoss()
|
{"golden_diff": "diff --git a/flair/models/language_model.py b/flair/models/language_model.py\n--- a/flair/models/language_model.py\n+++ b/flair/models/language_model.py\n@@ -306,6 +306,7 @@\n \n # input ids\n input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)\n+ input = input.to(flair.device)\n \n # push list of character IDs through model\n hidden = self.init_hidden(1)\n@@ -313,6 +314,7 @@\n \n # the target is always the next character\n targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])\n+ targets = targets.to(flair.device)\n \n # use cross entropy loss to compare output of forward pass with targets\n cross_entroy_loss = torch.nn.CrossEntropyLoss()\n", "issue": "When running the calculate perplexity, got the error\nHere is the error, sounds likely it needs GPU, would you suggestion what I should I modify in the code of perplexity calculation?\r\nFile \"/flair/models/language_model.py\", line 69, in forward\r\n encoded = self.encoder(input)\r\n File \".local/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 477, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \".local/lib/python3.6/site-packages/torch/nn/modules/sparse.py\", line 110, in forward\r\n self.norm_type, self.scale_grad_by_freq, self.sparse)\r\n File \".local/lib/python3.6/site-packages/torch/nn/functional.py\", line 1110, in embedding\r\n return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)\r\nRuntimeError: Expected object of type torch.cuda.LongTensor but found type torch.LongTensor for argument #3 'index'\n", "before_files": [{"content": "from pathlib import Path\n\nimport torch.nn as nn\nimport torch\nimport math\nfrom typing import Union, Tuple\nfrom typing import List\n\nfrom torch.optim import Optimizer\n\nimport flair\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.1):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.detach().uniform_(-initrange, initrange)\n self.decoder.bias.detach().fill_(0)\n self.decoder.weight.detach().uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).detach()\n return (weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),\n weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach())\n\n def get_representation(self, strings: List[str], chars_per_chunk: int = 512):\n\n # cut up the input into chunks of max charlength = chunk_size\n longest = len(strings[0])\n chunks = []\n splice_begin = 0\n for splice_end in range(chars_per_chunk, longest, chars_per_chunk):\n chunks.append([text[splice_begin:splice_end] for text in strings])\n splice_begin = splice_end\n\n chunks.append([text[splice_begin:longest] for text in strings])\n hidden = self.init_hidden(len(chunks[0]))\n\n output_parts = []\n\n # push each chunk through the RNN language model\n for chunk in chunks:\n\n sequences_as_char_indices: List[List[int]] = []\n for string in chunk:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = torch.LongTensor(sequences_as_char_indices).transpose(0, 1)\n batch = batch.to(flair.device)\n\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n rnn_output = rnn_output.detach()\n\n output_parts.append(rnn_output)\n\n # concatenate all chunks to make final output\n output = torch.cat(output_parts)\n\n return output\n\n def get_output(self, text: str):\n char_indices = [self.dictionary.get_idx_for_item(char) for char in text]\n input_vector = torch.LongTensor([char_indices]).transpose(0, 1)\n\n hidden = self.init_hidden(1)\n prediction, rnn_output, hidden = self.forward(input_vector, hidden)\n\n return self.repackage_hidden(hidden)\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return h.clone().detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.detach().uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file: Union[Path, str]):\n\n state = torch.load(str(model_file), map_location=flair.device)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Path):\n state = torch.load(str(model_file), map_location=flair.device)\n\n epoch = state['epoch'] if 'epoch' in state else None\n split = state['split'] if 'split' in state else None\n loss = state['loss'] if 'loss' in state else None\n optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return {'model': model, 'epoch': epoch, 'split': split, 'loss': loss,\n 'optimizer_state_dict': optimizer_state_dict}\n\n def save_checkpoint(self, file: Path, optimizer: Optimizer, epoch: int, split: int, loss: float):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout,\n 'optimizer_state_dict': optimizer.state_dict(),\n 'epoch': epoch,\n 'split': split,\n 'loss': loss\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def save(self, file: Path):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def generate_text(self, prefix: str = '\\n', number_of_characters: int = 1000, temperature: float = 1.0,\n break_on_suffix=None) -> Tuple[str, float]:\n\n if prefix == '':\n prefix = '\\n'\n\n with torch.no_grad():\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n\n if len(prefix) > 1:\n\n char_tensors = []\n for character in prefix[:-1]:\n char_tensors.append(\n torch.tensor(self.dictionary.get_idx_for_item(character)).unsqueeze(0).unsqueeze(0))\n\n input = torch.cat(char_tensors)\n if torch.cuda.is_available():\n input = input.cuda()\n\n prediction, _, hidden = self.forward(input, hidden)\n\n input = torch.tensor(self.dictionary.get_idx_for_item(prefix[-1])).unsqueeze(0).unsqueeze(0)\n\n log_prob = 0.\n\n for i in range(number_of_characters):\n\n if torch.cuda.is_available():\n input = input.cuda()\n\n # get predicted weights\n prediction, _, hidden = self.forward(input, hidden)\n prediction = prediction.squeeze().detach()\n decoder_output = prediction\n\n # divide by temperature\n prediction = prediction.div(temperature)\n\n # to prevent overflow problem with small temperature values, substract largest value from all\n # this makes a vector in which the largest value is 0\n max = torch.max(prediction)\n prediction -= max\n\n # compute word weights with exponential function\n word_weights = prediction.exp().cpu()\n\n # try sampling multinomial distribution for next character\n try:\n word_idx = torch.multinomial(word_weights, 1)[0]\n except:\n word_idx = torch.tensor(0)\n\n # print(word_idx)\n prob = decoder_output[word_idx]\n log_prob += prob\n\n input = word_idx.detach().unsqueeze(0).unsqueeze(0)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n if break_on_suffix is not None:\n if ''.join(characters).endswith(break_on_suffix):\n break\n\n text = prefix + ''.join(characters)\n\n log_prob = log_prob.item()\n log_prob /= len(characters)\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n return text, log_prob\n\n def calculate_perplexity(self, text: str) -> float:\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n # input ids\n input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)\n\n # push list of character IDs through model\n hidden = self.init_hidden(1)\n prediction, _, hidden = self.forward(input, hidden)\n\n # the target is always the next character\n targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])\n\n # use cross entropy loss to compare output of forward pass with targets\n cross_entroy_loss = torch.nn.CrossEntropyLoss()\n loss = cross_entroy_loss(prediction.view(-1, len(self.dictionary)), targets).item()\n\n # exponentiate cross-entropy loss to calculate perplexity\n perplexity = math.exp(loss)\n\n return perplexity\n", "path": "flair/models/language_model.py"}], "after_files": [{"content": "from pathlib import Path\n\nimport torch.nn as nn\nimport torch\nimport math\nfrom typing import Union, Tuple\nfrom typing import List\n\nfrom torch.optim import Optimizer\n\nimport flair\nfrom flair.data import Dictionary\n\n\nclass LanguageModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self,\n dictionary: Dictionary,\n is_forward_lm: bool,\n hidden_size: int,\n nlayers: int,\n embedding_size: int = 100,\n nout=None,\n dropout=0.1):\n\n super(LanguageModel, self).__init__()\n\n self.dictionary = dictionary\n self.is_forward_lm: bool = is_forward_lm\n\n self.dropout = dropout\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.nlayers = nlayers\n\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(len(dictionary), embedding_size)\n\n if nlayers == 1:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers)\n else:\n self.rnn = nn.LSTM(embedding_size, hidden_size, nlayers, dropout=dropout)\n\n self.hidden = None\n\n self.nout = nout\n if nout is not None:\n self.proj = nn.Linear(hidden_size, nout)\n self.initialize(self.proj.weight)\n self.decoder = nn.Linear(nout, len(dictionary))\n else:\n self.proj = None\n self.decoder = nn.Linear(hidden_size, len(dictionary))\n\n self.init_weights()\n\n # auto-spawn on GPU if available\n self.to(flair.device)\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.detach().uniform_(-initrange, initrange)\n self.decoder.bias.detach().fill_(0)\n self.decoder.weight.detach().uniform_(-initrange, initrange)\n\n def set_hidden(self, hidden):\n self.hidden = hidden\n\n def forward(self, input, hidden, ordered_sequence_lengths=None):\n encoded = self.encoder(input)\n emb = self.drop(encoded)\n\n self.rnn.flatten_parameters()\n\n output, hidden = self.rnn(emb, hidden)\n\n if self.proj is not None:\n output = self.proj(output)\n\n output = self.drop(output)\n\n decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))\n\n return decoded.view(output.size(0), output.size(1), decoded.size(1)), output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).detach()\n return (weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach(),\n weight.new(self.nlayers, bsz, self.hidden_size).zero_().clone().detach())\n\n def get_representation(self, strings: List[str], chars_per_chunk: int = 512):\n\n # cut up the input into chunks of max charlength = chunk_size\n longest = len(strings[0])\n chunks = []\n splice_begin = 0\n for splice_end in range(chars_per_chunk, longest, chars_per_chunk):\n chunks.append([text[splice_begin:splice_end] for text in strings])\n splice_begin = splice_end\n\n chunks.append([text[splice_begin:longest] for text in strings])\n hidden = self.init_hidden(len(chunks[0]))\n\n output_parts = []\n\n # push each chunk through the RNN language model\n for chunk in chunks:\n\n sequences_as_char_indices: List[List[int]] = []\n for string in chunk:\n char_indices = [self.dictionary.get_idx_for_item(char) for char in string]\n sequences_as_char_indices.append(char_indices)\n\n batch = torch.LongTensor(sequences_as_char_indices).transpose(0, 1)\n batch = batch.to(flair.device)\n\n prediction, rnn_output, hidden = self.forward(batch, hidden)\n rnn_output = rnn_output.detach()\n\n output_parts.append(rnn_output)\n\n # concatenate all chunks to make final output\n output = torch.cat(output_parts)\n\n return output\n\n def get_output(self, text: str):\n char_indices = [self.dictionary.get_idx_for_item(char) for char in text]\n input_vector = torch.LongTensor([char_indices]).transpose(0, 1)\n\n hidden = self.init_hidden(1)\n prediction, rnn_output, hidden = self.forward(input_vector, hidden)\n\n return self.repackage_hidden(hidden)\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == torch.Tensor:\n return h.clone().detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initialize(self, matrix):\n in_, out_ = matrix.size()\n stdv = math.sqrt(3. / (in_ + out_))\n matrix.detach().uniform_(-stdv, stdv)\n\n @classmethod\n def load_language_model(cls, model_file: Union[Path, str]):\n\n state = torch.load(str(model_file), map_location=flair.device)\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return model\n\n @classmethod\n def load_checkpoint(cls, model_file: Path):\n state = torch.load(str(model_file), map_location=flair.device)\n\n epoch = state['epoch'] if 'epoch' in state else None\n split = state['split'] if 'split' in state else None\n loss = state['loss'] if 'loss' in state else None\n optimizer_state_dict = state['optimizer_state_dict'] if 'optimizer_state_dict' in state else None\n\n model = LanguageModel(state['dictionary'],\n state['is_forward_lm'],\n state['hidden_size'],\n state['nlayers'],\n state['embedding_size'],\n state['nout'],\n state['dropout'])\n model.load_state_dict(state['state_dict'])\n model.eval()\n model.to(flair.device)\n\n return {'model': model, 'epoch': epoch, 'split': split, 'loss': loss,\n 'optimizer_state_dict': optimizer_state_dict}\n\n def save_checkpoint(self, file: Path, optimizer: Optimizer, epoch: int, split: int, loss: float):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout,\n 'optimizer_state_dict': optimizer.state_dict(),\n 'epoch': epoch,\n 'split': split,\n 'loss': loss\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def save(self, file: Path):\n model_state = {\n 'state_dict': self.state_dict(),\n 'dictionary': self.dictionary,\n 'is_forward_lm': self.is_forward_lm,\n 'hidden_size': self.hidden_size,\n 'nlayers': self.nlayers,\n 'embedding_size': self.embedding_size,\n 'nout': self.nout,\n 'dropout': self.dropout\n }\n\n torch.save(model_state, str(file), pickle_protocol=4)\n\n def generate_text(self, prefix: str = '\\n', number_of_characters: int = 1000, temperature: float = 1.0,\n break_on_suffix=None) -> Tuple[str, float]:\n\n if prefix == '':\n prefix = '\\n'\n\n with torch.no_grad():\n characters = []\n\n idx2item = self.dictionary.idx2item\n\n # initial hidden state\n hidden = self.init_hidden(1)\n\n if len(prefix) > 1:\n\n char_tensors = []\n for character in prefix[:-1]:\n char_tensors.append(\n torch.tensor(self.dictionary.get_idx_for_item(character)).unsqueeze(0).unsqueeze(0))\n\n input = torch.cat(char_tensors)\n if torch.cuda.is_available():\n input = input.cuda()\n\n prediction, _, hidden = self.forward(input, hidden)\n\n input = torch.tensor(self.dictionary.get_idx_for_item(prefix[-1])).unsqueeze(0).unsqueeze(0)\n\n log_prob = 0.\n\n for i in range(number_of_characters):\n\n if torch.cuda.is_available():\n input = input.cuda()\n\n # get predicted weights\n prediction, _, hidden = self.forward(input, hidden)\n prediction = prediction.squeeze().detach()\n decoder_output = prediction\n\n # divide by temperature\n prediction = prediction.div(temperature)\n\n # to prevent overflow problem with small temperature values, substract largest value from all\n # this makes a vector in which the largest value is 0\n max = torch.max(prediction)\n prediction -= max\n\n # compute word weights with exponential function\n word_weights = prediction.exp().cpu()\n\n # try sampling multinomial distribution for next character\n try:\n word_idx = torch.multinomial(word_weights, 1)[0]\n except:\n word_idx = torch.tensor(0)\n\n # print(word_idx)\n prob = decoder_output[word_idx]\n log_prob += prob\n\n input = word_idx.detach().unsqueeze(0).unsqueeze(0)\n word = idx2item[word_idx].decode('UTF-8')\n characters.append(word)\n\n if break_on_suffix is not None:\n if ''.join(characters).endswith(break_on_suffix):\n break\n\n text = prefix + ''.join(characters)\n\n log_prob = log_prob.item()\n log_prob /= len(characters)\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n return text, log_prob\n\n def calculate_perplexity(self, text: str) -> float:\n\n if not self.is_forward_lm:\n text = text[::-1]\n\n # input ids\n input = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[:-1]]).unsqueeze(1)\n input = input.to(flair.device)\n\n # push list of character IDs through model\n hidden = self.init_hidden(1)\n prediction, _, hidden = self.forward(input, hidden)\n\n # the target is always the next character\n targets = torch.tensor([self.dictionary.get_idx_for_item(char) for char in text[1:]])\n targets = targets.to(flair.device)\n\n # use cross entropy loss to compare output of forward pass with targets\n cross_entroy_loss = torch.nn.CrossEntropyLoss()\n loss = cross_entroy_loss(prediction.view(-1, len(self.dictionary)), targets).item()\n\n # exponentiate cross-entropy loss to calculate perplexity\n perplexity = math.exp(loss)\n\n return perplexity\n", "path": "flair/models/language_model.py"}]}
| 3,729 | 197 |
gh_patches_debug_11596
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-5881
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weight type should use WeightUnitsEnum
### What I'm trying to achieve
To have `Weight` type defined as
```graphql
type Weight {
unit: WeightUnitsEnum!
value: Float!
}
```
to maintain consistency with `Shop.defaultWeightUnit` field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/core/types/common.py`
Content:
```
1 import graphene
2
3 from ....product.templatetags.product_images import get_thumbnail
4 from ...translations.enums import LanguageCodeEnum
5 from ..enums import (
6 AccountErrorCode,
7 AppErrorCode,
8 CheckoutErrorCode,
9 DiscountErrorCode,
10 ExportErrorCode,
11 GiftCardErrorCode,
12 InvoiceErrorCode,
13 JobStatusEnum,
14 MenuErrorCode,
15 MetadataErrorCode,
16 OrderErrorCode,
17 PageErrorCode,
18 PaymentErrorCode,
19 PermissionEnum,
20 PermissionGroupErrorCode,
21 PluginErrorCode,
22 ProductErrorCode,
23 ShippingErrorCode,
24 ShopErrorCode,
25 StockErrorCode,
26 TranslationErrorCode,
27 WarehouseErrorCode,
28 WebhookErrorCode,
29 WishlistErrorCode,
30 )
31 from .money import VAT
32
33
34 class CountryDisplay(graphene.ObjectType):
35 code = graphene.String(description="Country code.", required=True)
36 country = graphene.String(description="Country name.", required=True)
37 vat = graphene.Field(VAT, description="Country tax.")
38
39
40 class LanguageDisplay(graphene.ObjectType):
41 code = LanguageCodeEnum(
42 description="ISO 639 representation of the language name.", required=True
43 )
44 language = graphene.String(description="Full name of the language.", required=True)
45
46
47 class Permission(graphene.ObjectType):
48 code = PermissionEnum(description="Internal code for permission.", required=True)
49 name = graphene.String(
50 description="Describe action(s) allowed to do by permission.", required=True
51 )
52
53 class Meta:
54 description = "Represents a permission object in a friendly form."
55
56
57 class Error(graphene.ObjectType):
58 field = graphene.String(
59 description=(
60 "Name of a field that caused the error. A value of `null` indicates that "
61 "the error isn't associated with a particular field."
62 ),
63 required=False,
64 )
65 message = graphene.String(description="The error message.")
66
67 class Meta:
68 description = "Represents an error in the input of a mutation."
69
70
71 class AccountError(Error):
72 code = AccountErrorCode(description="The error code.", required=True)
73
74
75 class AppError(Error):
76 code = AppErrorCode(description="The error code.", required=True)
77 permissions = graphene.List(
78 graphene.NonNull(PermissionEnum),
79 description="List of permissions which causes the error.",
80 required=False,
81 )
82
83
84 class StaffError(AccountError):
85 permissions = graphene.List(
86 graphene.NonNull(PermissionEnum),
87 description="List of permissions which causes the error.",
88 required=False,
89 )
90 groups = graphene.List(
91 graphene.NonNull(graphene.ID),
92 description="List of permission group IDs which cause the error.",
93 required=False,
94 )
95 users = graphene.List(
96 graphene.NonNull(graphene.ID),
97 description="List of user IDs which causes the error.",
98 required=False,
99 )
100
101
102 class CheckoutError(Error):
103 code = CheckoutErrorCode(description="The error code.", required=True)
104
105
106 class DiscountError(Error):
107 code = DiscountErrorCode(description="The error code.", required=True)
108
109
110 class ExportError(Error):
111 code = ExportErrorCode(description="The error code.", required=True)
112
113
114 class MenuError(Error):
115 code = MenuErrorCode(description="The error code.", required=True)
116
117
118 class MetadataError(Error):
119 code = MetadataErrorCode(description="The error code.", required=True)
120
121
122 class OrderError(Error):
123 code = OrderErrorCode(description="The error code.", required=True)
124 warehouse = graphene.ID(
125 description="Warehouse ID which causes the error.", required=False,
126 )
127 order_line = graphene.ID(
128 description="Order line ID which causes the error.", required=False,
129 )
130
131
132 class InvoiceError(Error):
133 code = InvoiceErrorCode(description="The error code.", required=True)
134
135
136 class PermissionGroupError(Error):
137 code = PermissionGroupErrorCode(description="The error code.", required=True)
138 permissions = graphene.List(
139 graphene.NonNull(PermissionEnum),
140 description="List of permissions which causes the error.",
141 required=False,
142 )
143 users = graphene.List(
144 graphene.NonNull(graphene.ID),
145 description="List of user IDs which causes the error.",
146 required=False,
147 )
148
149
150 class ProductError(Error):
151 code = ProductErrorCode(description="The error code.", required=True)
152
153
154 class ProductAttributeError(ProductError):
155 attributes = graphene.List(
156 graphene.NonNull(graphene.ID),
157 description="List of attributes IDs which causes the error.",
158 required=False,
159 )
160
161
162 class BulkProductError(ProductError):
163 index = graphene.Int(
164 description="Index of an input list item that caused the error."
165 )
166 warehouses = graphene.List(
167 graphene.NonNull(graphene.ID),
168 description="List of warehouse IDs which causes the error.",
169 required=False,
170 )
171
172
173 class ShopError(Error):
174 code = ShopErrorCode(description="The error code.", required=True)
175
176
177 class ShippingError(Error):
178 code = ShippingErrorCode(description="The error code.", required=True)
179 warehouses = graphene.List(
180 graphene.NonNull(graphene.ID),
181 description="List of warehouse IDs which causes the error.",
182 required=False,
183 )
184
185
186 class PageError(Error):
187 code = PageErrorCode(description="The error code.", required=True)
188
189
190 class PaymentError(Error):
191 code = PaymentErrorCode(description="The error code.", required=True)
192
193
194 class GiftCardError(Error):
195 code = GiftCardErrorCode(description="The error code.", required=True)
196
197
198 class PluginError(Error):
199 code = PluginErrorCode(description="The error code.", required=True)
200
201
202 class StockError(Error):
203 code = StockErrorCode(description="The error code.", required=True)
204
205
206 class BulkStockError(ProductError):
207 index = graphene.Int(
208 description="Index of an input list item that caused the error."
209 )
210
211
212 class WarehouseError(Error):
213 code = WarehouseErrorCode(description="The error code.", required=True)
214
215
216 class WebhookError(Error):
217 code = WebhookErrorCode(description="The error code.", required=True)
218
219
220 class WishlistError(Error):
221 code = WishlistErrorCode(description="The error code.", required=True)
222
223
224 class TranslationError(Error):
225 code = TranslationErrorCode(description="The error code.", required=True)
226
227
228 class SeoInput(graphene.InputObjectType):
229 title = graphene.String(description="SEO title.")
230 description = graphene.String(description="SEO description.")
231
232
233 class Weight(graphene.ObjectType):
234 unit = graphene.String(description="Weight unit.", required=True)
235 value = graphene.Float(description="Weight value.", required=True)
236
237 class Meta:
238 description = "Represents weight value in a specific weight unit."
239
240
241 class Image(graphene.ObjectType):
242 url = graphene.String(required=True, description="The URL of the image.")
243 alt = graphene.String(description="Alt text for an image.")
244
245 class Meta:
246 description = "Represents an image."
247
248 @staticmethod
249 def get_adjusted(image, alt, size, rendition_key_set, info):
250 """Return Image adjusted with given size."""
251 if size:
252 url = get_thumbnail(
253 image_file=image,
254 size=size,
255 method="thumbnail",
256 rendition_key_set=rendition_key_set,
257 )
258 else:
259 url = image.url
260 url = info.context.build_absolute_uri(url)
261 return Image(url, alt)
262
263
264 class PriceRangeInput(graphene.InputObjectType):
265 gte = graphene.Float(description="Price greater than or equal to.", required=False)
266 lte = graphene.Float(description="Price less than or equal to.", required=False)
267
268
269 class DateRangeInput(graphene.InputObjectType):
270 gte = graphene.Date(description="Start date.", required=False)
271 lte = graphene.Date(description="End date.", required=False)
272
273
274 class DateTimeRangeInput(graphene.InputObjectType):
275 gte = graphene.DateTime(description="Start date.", required=False)
276 lte = graphene.DateTime(description="End date.", required=False)
277
278
279 class IntRangeInput(graphene.InputObjectType):
280 gte = graphene.Int(description="Value greater than or equal to.", required=False)
281 lte = graphene.Int(description="Value less than or equal to.", required=False)
282
283
284 class TaxType(graphene.ObjectType):
285 """Representation of tax types fetched from tax gateway."""
286
287 description = graphene.String(description="Description of the tax type.")
288 tax_code = graphene.String(
289 description="External tax code used to identify given tax group."
290 )
291
292
293 class Job(graphene.Interface):
294 status = JobStatusEnum(description="Job status.", required=True)
295 created_at = graphene.DateTime(
296 description="Created date time of job in ISO 8601 format.", required=True
297 )
298 updated_at = graphene.DateTime(
299 description="Date time of job last update in ISO 8601 format.", required=True
300 )
301 message = graphene.String(description="Job message.")
302
303 @classmethod
304 def resolve_type(cls, instance, _info):
305 """Map a data object to a Graphene type."""
306 MODEL_TO_TYPE_MAP = {
307 # <DjangoModel>: <GrapheneType>
308 }
309 return MODEL_TO_TYPE_MAP.get(type(instance))
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/graphql/core/types/common.py b/saleor/graphql/core/types/common.py
--- a/saleor/graphql/core/types/common.py
+++ b/saleor/graphql/core/types/common.py
@@ -26,6 +26,7 @@
TranslationErrorCode,
WarehouseErrorCode,
WebhookErrorCode,
+ WeightUnitsEnum,
WishlistErrorCode,
)
from .money import VAT
@@ -231,7 +232,7 @@
class Weight(graphene.ObjectType):
- unit = graphene.String(description="Weight unit.", required=True)
+ unit = WeightUnitsEnum(description="Weight unit.", required=True)
value = graphene.Float(description="Weight value.", required=True)
class Meta:
|
{"golden_diff": "diff --git a/saleor/graphql/core/types/common.py b/saleor/graphql/core/types/common.py\n--- a/saleor/graphql/core/types/common.py\n+++ b/saleor/graphql/core/types/common.py\n@@ -26,6 +26,7 @@\n TranslationErrorCode,\n WarehouseErrorCode,\n WebhookErrorCode,\n+ WeightUnitsEnum,\n WishlistErrorCode,\n )\n from .money import VAT\n@@ -231,7 +232,7 @@\n \n \n class Weight(graphene.ObjectType):\n- unit = graphene.String(description=\"Weight unit.\", required=True)\n+ unit = WeightUnitsEnum(description=\"Weight unit.\", required=True)\n value = graphene.Float(description=\"Weight value.\", required=True)\n \n class Meta:\n", "issue": "Weight type should use WeightUnitsEnum\n### What I'm trying to achieve\r\nTo have `Weight` type defined as\r\n```graphql\r\ntype Weight {\r\n unit: WeightUnitsEnum!\r\n value: Float!\r\n}\r\n```\r\nto maintain consistency with `Shop.defaultWeightUnit` field.\n", "before_files": [{"content": "import graphene\n\nfrom ....product.templatetags.product_images import get_thumbnail\nfrom ...translations.enums import LanguageCodeEnum\nfrom ..enums import (\n AccountErrorCode,\n AppErrorCode,\n CheckoutErrorCode,\n DiscountErrorCode,\n ExportErrorCode,\n GiftCardErrorCode,\n InvoiceErrorCode,\n JobStatusEnum,\n MenuErrorCode,\n MetadataErrorCode,\n OrderErrorCode,\n PageErrorCode,\n PaymentErrorCode,\n PermissionEnum,\n PermissionGroupErrorCode,\n PluginErrorCode,\n ProductErrorCode,\n ShippingErrorCode,\n ShopErrorCode,\n StockErrorCode,\n TranslationErrorCode,\n WarehouseErrorCode,\n WebhookErrorCode,\n WishlistErrorCode,\n)\nfrom .money import VAT\n\n\nclass CountryDisplay(graphene.ObjectType):\n code = graphene.String(description=\"Country code.\", required=True)\n country = graphene.String(description=\"Country name.\", required=True)\n vat = graphene.Field(VAT, description=\"Country tax.\")\n\n\nclass LanguageDisplay(graphene.ObjectType):\n code = LanguageCodeEnum(\n description=\"ISO 639 representation of the language name.\", required=True\n )\n language = graphene.String(description=\"Full name of the language.\", required=True)\n\n\nclass Permission(graphene.ObjectType):\n code = PermissionEnum(description=\"Internal code for permission.\", required=True)\n name = graphene.String(\n description=\"Describe action(s) allowed to do by permission.\", required=True\n )\n\n class Meta:\n description = \"Represents a permission object in a friendly form.\"\n\n\nclass Error(graphene.ObjectType):\n field = graphene.String(\n description=(\n \"Name of a field that caused the error. A value of `null` indicates that \"\n \"the error isn't associated with a particular field.\"\n ),\n required=False,\n )\n message = graphene.String(description=\"The error message.\")\n\n class Meta:\n description = \"Represents an error in the input of a mutation.\"\n\n\nclass AccountError(Error):\n code = AccountErrorCode(description=\"The error code.\", required=True)\n\n\nclass AppError(Error):\n code = AppErrorCode(description=\"The error code.\", required=True)\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n\n\nclass StaffError(AccountError):\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n groups = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of permission group IDs which cause the error.\",\n required=False,\n )\n users = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of user IDs which causes the error.\",\n required=False,\n )\n\n\nclass CheckoutError(Error):\n code = CheckoutErrorCode(description=\"The error code.\", required=True)\n\n\nclass DiscountError(Error):\n code = DiscountErrorCode(description=\"The error code.\", required=True)\n\n\nclass ExportError(Error):\n code = ExportErrorCode(description=\"The error code.\", required=True)\n\n\nclass MenuError(Error):\n code = MenuErrorCode(description=\"The error code.\", required=True)\n\n\nclass MetadataError(Error):\n code = MetadataErrorCode(description=\"The error code.\", required=True)\n\n\nclass OrderError(Error):\n code = OrderErrorCode(description=\"The error code.\", required=True)\n warehouse = graphene.ID(\n description=\"Warehouse ID which causes the error.\", required=False,\n )\n order_line = graphene.ID(\n description=\"Order line ID which causes the error.\", required=False,\n )\n\n\nclass InvoiceError(Error):\n code = InvoiceErrorCode(description=\"The error code.\", required=True)\n\n\nclass PermissionGroupError(Error):\n code = PermissionGroupErrorCode(description=\"The error code.\", required=True)\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n users = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of user IDs which causes the error.\",\n required=False,\n )\n\n\nclass ProductError(Error):\n code = ProductErrorCode(description=\"The error code.\", required=True)\n\n\nclass ProductAttributeError(ProductError):\n attributes = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of attributes IDs which causes the error.\",\n required=False,\n )\n\n\nclass BulkProductError(ProductError):\n index = graphene.Int(\n description=\"Index of an input list item that caused the error.\"\n )\n warehouses = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of warehouse IDs which causes the error.\",\n required=False,\n )\n\n\nclass ShopError(Error):\n code = ShopErrorCode(description=\"The error code.\", required=True)\n\n\nclass ShippingError(Error):\n code = ShippingErrorCode(description=\"The error code.\", required=True)\n warehouses = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of warehouse IDs which causes the error.\",\n required=False,\n )\n\n\nclass PageError(Error):\n code = PageErrorCode(description=\"The error code.\", required=True)\n\n\nclass PaymentError(Error):\n code = PaymentErrorCode(description=\"The error code.\", required=True)\n\n\nclass GiftCardError(Error):\n code = GiftCardErrorCode(description=\"The error code.\", required=True)\n\n\nclass PluginError(Error):\n code = PluginErrorCode(description=\"The error code.\", required=True)\n\n\nclass StockError(Error):\n code = StockErrorCode(description=\"The error code.\", required=True)\n\n\nclass BulkStockError(ProductError):\n index = graphene.Int(\n description=\"Index of an input list item that caused the error.\"\n )\n\n\nclass WarehouseError(Error):\n code = WarehouseErrorCode(description=\"The error code.\", required=True)\n\n\nclass WebhookError(Error):\n code = WebhookErrorCode(description=\"The error code.\", required=True)\n\n\nclass WishlistError(Error):\n code = WishlistErrorCode(description=\"The error code.\", required=True)\n\n\nclass TranslationError(Error):\n code = TranslationErrorCode(description=\"The error code.\", required=True)\n\n\nclass SeoInput(graphene.InputObjectType):\n title = graphene.String(description=\"SEO title.\")\n description = graphene.String(description=\"SEO description.\")\n\n\nclass Weight(graphene.ObjectType):\n unit = graphene.String(description=\"Weight unit.\", required=True)\n value = graphene.Float(description=\"Weight value.\", required=True)\n\n class Meta:\n description = \"Represents weight value in a specific weight unit.\"\n\n\nclass Image(graphene.ObjectType):\n url = graphene.String(required=True, description=\"The URL of the image.\")\n alt = graphene.String(description=\"Alt text for an image.\")\n\n class Meta:\n description = \"Represents an image.\"\n\n @staticmethod\n def get_adjusted(image, alt, size, rendition_key_set, info):\n \"\"\"Return Image adjusted with given size.\"\"\"\n if size:\n url = get_thumbnail(\n image_file=image,\n size=size,\n method=\"thumbnail\",\n rendition_key_set=rendition_key_set,\n )\n else:\n url = image.url\n url = info.context.build_absolute_uri(url)\n return Image(url, alt)\n\n\nclass PriceRangeInput(graphene.InputObjectType):\n gte = graphene.Float(description=\"Price greater than or equal to.\", required=False)\n lte = graphene.Float(description=\"Price less than or equal to.\", required=False)\n\n\nclass DateRangeInput(graphene.InputObjectType):\n gte = graphene.Date(description=\"Start date.\", required=False)\n lte = graphene.Date(description=\"End date.\", required=False)\n\n\nclass DateTimeRangeInput(graphene.InputObjectType):\n gte = graphene.DateTime(description=\"Start date.\", required=False)\n lte = graphene.DateTime(description=\"End date.\", required=False)\n\n\nclass IntRangeInput(graphene.InputObjectType):\n gte = graphene.Int(description=\"Value greater than or equal to.\", required=False)\n lte = graphene.Int(description=\"Value less than or equal to.\", required=False)\n\n\nclass TaxType(graphene.ObjectType):\n \"\"\"Representation of tax types fetched from tax gateway.\"\"\"\n\n description = graphene.String(description=\"Description of the tax type.\")\n tax_code = graphene.String(\n description=\"External tax code used to identify given tax group.\"\n )\n\n\nclass Job(graphene.Interface):\n status = JobStatusEnum(description=\"Job status.\", required=True)\n created_at = graphene.DateTime(\n description=\"Created date time of job in ISO 8601 format.\", required=True\n )\n updated_at = graphene.DateTime(\n description=\"Date time of job last update in ISO 8601 format.\", required=True\n )\n message = graphene.String(description=\"Job message.\")\n\n @classmethod\n def resolve_type(cls, instance, _info):\n \"\"\"Map a data object to a Graphene type.\"\"\"\n MODEL_TO_TYPE_MAP = {\n # <DjangoModel>: <GrapheneType>\n }\n return MODEL_TO_TYPE_MAP.get(type(instance))\n", "path": "saleor/graphql/core/types/common.py"}], "after_files": [{"content": "import graphene\n\nfrom ....product.templatetags.product_images import get_thumbnail\nfrom ...translations.enums import LanguageCodeEnum\nfrom ..enums import (\n AccountErrorCode,\n AppErrorCode,\n CheckoutErrorCode,\n DiscountErrorCode,\n ExportErrorCode,\n GiftCardErrorCode,\n InvoiceErrorCode,\n JobStatusEnum,\n MenuErrorCode,\n MetadataErrorCode,\n OrderErrorCode,\n PageErrorCode,\n PaymentErrorCode,\n PermissionEnum,\n PermissionGroupErrorCode,\n PluginErrorCode,\n ProductErrorCode,\n ShippingErrorCode,\n ShopErrorCode,\n StockErrorCode,\n TranslationErrorCode,\n WarehouseErrorCode,\n WebhookErrorCode,\n WeightUnitsEnum,\n WishlistErrorCode,\n)\nfrom .money import VAT\n\n\nclass CountryDisplay(graphene.ObjectType):\n code = graphene.String(description=\"Country code.\", required=True)\n country = graphene.String(description=\"Country name.\", required=True)\n vat = graphene.Field(VAT, description=\"Country tax.\")\n\n\nclass LanguageDisplay(graphene.ObjectType):\n code = LanguageCodeEnum(\n description=\"ISO 639 representation of the language name.\", required=True\n )\n language = graphene.String(description=\"Full name of the language.\", required=True)\n\n\nclass Permission(graphene.ObjectType):\n code = PermissionEnum(description=\"Internal code for permission.\", required=True)\n name = graphene.String(\n description=\"Describe action(s) allowed to do by permission.\", required=True\n )\n\n class Meta:\n description = \"Represents a permission object in a friendly form.\"\n\n\nclass Error(graphene.ObjectType):\n field = graphene.String(\n description=(\n \"Name of a field that caused the error. A value of `null` indicates that \"\n \"the error isn't associated with a particular field.\"\n ),\n required=False,\n )\n message = graphene.String(description=\"The error message.\")\n\n class Meta:\n description = \"Represents an error in the input of a mutation.\"\n\n\nclass AccountError(Error):\n code = AccountErrorCode(description=\"The error code.\", required=True)\n\n\nclass AppError(Error):\n code = AppErrorCode(description=\"The error code.\", required=True)\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n\n\nclass StaffError(AccountError):\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n groups = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of permission group IDs which cause the error.\",\n required=False,\n )\n users = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of user IDs which causes the error.\",\n required=False,\n )\n\n\nclass CheckoutError(Error):\n code = CheckoutErrorCode(description=\"The error code.\", required=True)\n\n\nclass DiscountError(Error):\n code = DiscountErrorCode(description=\"The error code.\", required=True)\n\n\nclass ExportError(Error):\n code = ExportErrorCode(description=\"The error code.\", required=True)\n\n\nclass MenuError(Error):\n code = MenuErrorCode(description=\"The error code.\", required=True)\n\n\nclass MetadataError(Error):\n code = MetadataErrorCode(description=\"The error code.\", required=True)\n\n\nclass OrderError(Error):\n code = OrderErrorCode(description=\"The error code.\", required=True)\n warehouse = graphene.ID(\n description=\"Warehouse ID which causes the error.\", required=False,\n )\n order_line = graphene.ID(\n description=\"Order line ID which causes the error.\", required=False,\n )\n\n\nclass InvoiceError(Error):\n code = InvoiceErrorCode(description=\"The error code.\", required=True)\n\n\nclass PermissionGroupError(Error):\n code = PermissionGroupErrorCode(description=\"The error code.\", required=True)\n permissions = graphene.List(\n graphene.NonNull(PermissionEnum),\n description=\"List of permissions which causes the error.\",\n required=False,\n )\n users = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of user IDs which causes the error.\",\n required=False,\n )\n\n\nclass ProductError(Error):\n code = ProductErrorCode(description=\"The error code.\", required=True)\n\n\nclass ProductAttributeError(ProductError):\n attributes = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of attributes IDs which causes the error.\",\n required=False,\n )\n\n\nclass BulkProductError(ProductError):\n index = graphene.Int(\n description=\"Index of an input list item that caused the error.\"\n )\n warehouses = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of warehouse IDs which causes the error.\",\n required=False,\n )\n\n\nclass ShopError(Error):\n code = ShopErrorCode(description=\"The error code.\", required=True)\n\n\nclass ShippingError(Error):\n code = ShippingErrorCode(description=\"The error code.\", required=True)\n warehouses = graphene.List(\n graphene.NonNull(graphene.ID),\n description=\"List of warehouse IDs which causes the error.\",\n required=False,\n )\n\n\nclass PageError(Error):\n code = PageErrorCode(description=\"The error code.\", required=True)\n\n\nclass PaymentError(Error):\n code = PaymentErrorCode(description=\"The error code.\", required=True)\n\n\nclass GiftCardError(Error):\n code = GiftCardErrorCode(description=\"The error code.\", required=True)\n\n\nclass PluginError(Error):\n code = PluginErrorCode(description=\"The error code.\", required=True)\n\n\nclass StockError(Error):\n code = StockErrorCode(description=\"The error code.\", required=True)\n\n\nclass BulkStockError(ProductError):\n index = graphene.Int(\n description=\"Index of an input list item that caused the error.\"\n )\n\n\nclass WarehouseError(Error):\n code = WarehouseErrorCode(description=\"The error code.\", required=True)\n\n\nclass WebhookError(Error):\n code = WebhookErrorCode(description=\"The error code.\", required=True)\n\n\nclass WishlistError(Error):\n code = WishlistErrorCode(description=\"The error code.\", required=True)\n\n\nclass TranslationError(Error):\n code = TranslationErrorCode(description=\"The error code.\", required=True)\n\n\nclass SeoInput(graphene.InputObjectType):\n title = graphene.String(description=\"SEO title.\")\n description = graphene.String(description=\"SEO description.\")\n\n\nclass Weight(graphene.ObjectType):\n unit = WeightUnitsEnum(description=\"Weight unit.\", required=True)\n value = graphene.Float(description=\"Weight value.\", required=True)\n\n class Meta:\n description = \"Represents weight value in a specific weight unit.\"\n\n\nclass Image(graphene.ObjectType):\n url = graphene.String(required=True, description=\"The URL of the image.\")\n alt = graphene.String(description=\"Alt text for an image.\")\n\n class Meta:\n description = \"Represents an image.\"\n\n @staticmethod\n def get_adjusted(image, alt, size, rendition_key_set, info):\n \"\"\"Return Image adjusted with given size.\"\"\"\n if size:\n url = get_thumbnail(\n image_file=image,\n size=size,\n method=\"thumbnail\",\n rendition_key_set=rendition_key_set,\n )\n else:\n url = image.url\n url = info.context.build_absolute_uri(url)\n return Image(url, alt)\n\n\nclass PriceRangeInput(graphene.InputObjectType):\n gte = graphene.Float(description=\"Price greater than or equal to.\", required=False)\n lte = graphene.Float(description=\"Price less than or equal to.\", required=False)\n\n\nclass DateRangeInput(graphene.InputObjectType):\n gte = graphene.Date(description=\"Start date.\", required=False)\n lte = graphene.Date(description=\"End date.\", required=False)\n\n\nclass DateTimeRangeInput(graphene.InputObjectType):\n gte = graphene.DateTime(description=\"Start date.\", required=False)\n lte = graphene.DateTime(description=\"End date.\", required=False)\n\n\nclass IntRangeInput(graphene.InputObjectType):\n gte = graphene.Int(description=\"Value greater than or equal to.\", required=False)\n lte = graphene.Int(description=\"Value less than or equal to.\", required=False)\n\n\nclass TaxType(graphene.ObjectType):\n \"\"\"Representation of tax types fetched from tax gateway.\"\"\"\n\n description = graphene.String(description=\"Description of the tax type.\")\n tax_code = graphene.String(\n description=\"External tax code used to identify given tax group.\"\n )\n\n\nclass Job(graphene.Interface):\n status = JobStatusEnum(description=\"Job status.\", required=True)\n created_at = graphene.DateTime(\n description=\"Created date time of job in ISO 8601 format.\", required=True\n )\n updated_at = graphene.DateTime(\n description=\"Date time of job last update in ISO 8601 format.\", required=True\n )\n message = graphene.String(description=\"Job message.\")\n\n @classmethod\n def resolve_type(cls, instance, _info):\n \"\"\"Map a data object to a Graphene type.\"\"\"\n MODEL_TO_TYPE_MAP = {\n # <DjangoModel>: <GrapheneType>\n }\n return MODEL_TO_TYPE_MAP.get(type(instance))\n", "path": "saleor/graphql/core/types/common.py"}]}
| 3,014 | 155 |
gh_patches_debug_34415
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-639
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean library/frameworks from backtraces
We should clean all non-user code from backtraces as much as possible. Here's a heroku app running 2.13.0:
<img width="890" alt="Screenshot 2020-04-13 14 39 56" src="https://user-images.githubusercontent.com/102774/79160522-26a36e80-7d97-11ea-8376-f8dfcfd6ece0.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/core/backtrace.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5 import traceback
6
7 # Maximum non-Scout frames to target retrieving
8 LIMIT = 50
9 # How many upper frames from inside Scout to ignore
10 IGNORED = 1
11
12
13 if sys.version_info >= (3, 5):
14
15 def capture():
16 return [
17 {"file": frame.filename, "line": frame.lineno, "function": frame.name}
18 for frame in reversed(
19 traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]
20 )
21 ]
22
23
24 else:
25
26 def capture():
27 return [
28 {"file": frame[0], "line": frame[1], "function": frame[3]}
29 for frame in reversed(
30 traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]
31 )
32 ]
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py
--- a/src/scout_apm/core/backtrace.py
+++ b/src/scout_apm/core/backtrace.py
@@ -1,7 +1,9 @@
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
+import itertools
import sys
+import sysconfig
import traceback
# Maximum non-Scout frames to target retrieving
@@ -10,23 +12,55 @@
IGNORED = 1
+def filter_frames(frames):
+ """Filter the stack trace frames down to non-library code."""
+ paths = sysconfig.get_paths()
+ library_paths = {paths["purelib"], paths["platlib"]}
+ for frame in frames:
+ if not any(frame["file"].startswith(exclusion) for exclusion in library_paths):
+ yield frame
+
+
if sys.version_info >= (3, 5):
+ def frame_walker():
+ """Iterate over each frame of the stack.
+
+ Taken from python3/traceback.ExtractSummary.extract to support
+ iterating over the entire stack, but without creating a large
+ data structure.
+ """
+ for frame, lineno in traceback.walk_stack(sys._getframe().f_back):
+ co = frame.f_code
+ filename = co.co_filename
+ name = co.co_name
+ yield {"file": filename, "line": lineno, "function": name}
+
def capture():
- return [
- {"file": frame.filename, "line": frame.lineno, "function": frame.name}
- for frame in reversed(
- traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]
- )
- ]
+ return list(itertools.islice(filter_frames(frame_walker()), LIMIT))
else:
+ def frame_walker():
+ """Iterate over each frame of the stack.
+
+ Taken from python2.7/traceback.extract_stack to support iterating
+ over the entire stack, but without creating a large data structure.
+ """
+ try:
+ raise ZeroDivisionError
+ except ZeroDivisionError:
+ # Get the current frame
+ f = sys.exc_info()[2].tb_frame.f_back
+
+ while f is not None:
+ lineno = f.f_lineno
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+ yield {"file": filename, "line": lineno, "function": name}
+ f = f.f_back
+
def capture():
- return [
- {"file": frame[0], "line": frame[1], "function": frame[3]}
- for frame in reversed(
- traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]
- )
- ]
+ return list(itertools.islice(filter_frames(frame_walker()), LIMIT))
|
{"golden_diff": "diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py\n--- a/src/scout_apm/core/backtrace.py\n+++ b/src/scout_apm/core/backtrace.py\n@@ -1,7 +1,9 @@\n # coding=utf-8\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import itertools\n import sys\n+import sysconfig\n import traceback\n \n # Maximum non-Scout frames to target retrieving\n@@ -10,23 +12,55 @@\n IGNORED = 1\n \n \n+def filter_frames(frames):\n+ \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n+ paths = sysconfig.get_paths()\n+ library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n+ for frame in frames:\n+ if not any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n+ yield frame\n+\n+\n if sys.version_info >= (3, 5):\n \n+ def frame_walker():\n+ \"\"\"Iterate over each frame of the stack.\n+\n+ Taken from python3/traceback.ExtractSummary.extract to support\n+ iterating over the entire stack, but without creating a large\n+ data structure.\n+ \"\"\"\n+ for frame, lineno in traceback.walk_stack(sys._getframe().f_back):\n+ co = frame.f_code\n+ filename = co.co_filename\n+ name = co.co_name\n+ yield {\"file\": filename, \"line\": lineno, \"function\": name}\n+\n def capture():\n- return [\n- {\"file\": frame.filename, \"line\": frame.lineno, \"function\": frame.name}\n- for frame in reversed(\n- traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]\n- )\n- ]\n+ return list(itertools.islice(filter_frames(frame_walker()), LIMIT))\n \n \n else:\n \n+ def frame_walker():\n+ \"\"\"Iterate over each frame of the stack.\n+\n+ Taken from python2.7/traceback.extract_stack to support iterating\n+ over the entire stack, but without creating a large data structure.\n+ \"\"\"\n+ try:\n+ raise ZeroDivisionError\n+ except ZeroDivisionError:\n+ # Get the current frame\n+ f = sys.exc_info()[2].tb_frame.f_back\n+\n+ while f is not None:\n+ lineno = f.f_lineno\n+ co = f.f_code\n+ filename = co.co_filename\n+ name = co.co_name\n+ yield {\"file\": filename, \"line\": lineno, \"function\": name}\n+ f = f.f_back\n+\n def capture():\n- return [\n- {\"file\": frame[0], \"line\": frame[1], \"function\": frame[3]}\n- for frame in reversed(\n- traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]\n- )\n- ]\n+ return list(itertools.islice(filter_frames(frame_walker()), LIMIT))\n", "issue": "Clean library/frameworks from backtraces\nWe should clean all non-user code from backtraces as much as possible. Here's a heroku app running 2.13.0:\r\n\r\n<img width=\"890\" alt=\"Screenshot 2020-04-13 14 39 56\" src=\"https://user-images.githubusercontent.com/102774/79160522-26a36e80-7d97-11ea-8376-f8dfcfd6ece0.png\">\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nimport traceback\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\nif sys.version_info >= (3, 5):\n\n def capture():\n return [\n {\"file\": frame.filename, \"line\": frame.lineno, \"function\": frame.name}\n for frame in reversed(\n traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]\n )\n ]\n\n\nelse:\n\n def capture():\n return [\n {\"file\": frame[0], \"line\": frame[1], \"function\": frame[3]}\n for frame in reversed(\n traceback.extract_stack(limit=LIMIT + IGNORED)[:-IGNORED]\n )\n ]\n", "path": "src/scout_apm/core/backtrace.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport sys\nimport sysconfig\nimport traceback\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\nif sys.version_info >= (3, 5):\n\n def frame_walker():\n \"\"\"Iterate over each frame of the stack.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n for frame, lineno in traceback.walk_stack(sys._getframe().f_back):\n co = frame.f_code\n filename = co.co_filename\n name = co.co_name\n yield {\"file\": filename, \"line\": lineno, \"function\": name}\n\n def capture():\n return list(itertools.islice(filter_frames(frame_walker()), LIMIT))\n\n\nelse:\n\n def frame_walker():\n \"\"\"Iterate over each frame of the stack.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n f = sys.exc_info()[2].tb_frame.f_back\n\n while f is not None:\n lineno = f.f_lineno\n co = f.f_code\n filename = co.co_filename\n name = co.co_name\n yield {\"file\": filename, \"line\": lineno, \"function\": name}\n f = f.f_back\n\n def capture():\n return list(itertools.islice(filter_frames(frame_walker()), LIMIT))\n", "path": "src/scout_apm/core/backtrace.py"}]}
| 640 | 654 |
gh_patches_debug_571
|
rasdani/github-patches
|
git_diff
|
Uberspace__lab-28
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change project name to lab in config
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Uberspace 7 lab documentation build configuration file, created by
5 # sphinx-quickstart on Tue Feb 13 12:19:29 2018.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23
24 import sphinx_rtd_theme
25
26 # -- General configuration ------------------------------------------------
27
28 # If your documentation needs a minimal Sphinx version, state it here.
29 #
30 # needs_sphinx = '1.0'
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = []
36
37 # Add any paths that contain templates here, relative to this directory.
38 templates_path = ['_templates']
39
40 # The suffix(es) of source filenames.
41 # You can specify multiple suffix as a list of string:
42 #
43 # source_suffix = ['.rst', '.md']
44 source_suffix = '.rst'
45
46 # The master toctree document.
47 master_doc = 'index'
48
49 # General information about the project.
50 project = 'Uberspace 7 Lab'
51 copyright = '2018, uberspace.de'
52 author = 'uberspace.de'
53
54 # The version info for the project you're documenting, acts as replacement for
55 # |version| and |release|, also used in various other places throughout the
56 # built documents.
57 #
58 # The short X.Y version.
59 release = version = '7'
60
61 # The language for content autogenerated by Sphinx. Refer to documentation
62 # for a list of supported languages.
63 #
64 # This is also used if you do content translation via gettext catalogs.
65 # Usually you set "language" from the command line for these cases.
66 language = None
67
68 # List of patterns, relative to source directory, that match files and
69 # directories to ignore when looking for source files.
70 # This patterns also effect to html_static_path and html_extra_path
71 exclude_patterns = []
72
73 # The name of the Pygments (syntax highlighting) style to use.
74 pygments_style = 'sphinx'
75
76 # If true, `todo` and `todoList` produce output, else they produce nothing.
77 todo_include_todos = False
78
79
80 # -- Options for HTML output ----------------------------------------------
81
82 html_theme = 'sphinx_rtd_theme'
83 html_theme_options = {
84 'display_version': False,
85 'navigation_depth': 2,
86 'collapse_navigation': True
87 }
88 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
89 html_last_updated_fmt = '%b %d, %Y'
90 html_context = {
91 'css_files': ['_static/css/custom.css'],
92 'display_github': True,
93 'github_user': 'Uberspace',
94 'github_repo': 'lab',
95 'github_version': 'master',
96 'conf_py_path': '/source/'
97 }
98 html_show_copyright = False
99 html_favicon = '_static/favicon.ico'
100
101
102 # Theme options are theme-specific and customize the look and feel of a theme
103 # further. For a list of options available for each theme, see the
104 # documentation.
105 #
106 # html_theme_options = {}
107
108 # Add any paths that contain custom static files (such as style sheets) here,
109 # relative to this directory. They are copied after the builtin static files,
110 # so a file named "default.css" will overwrite the builtin "default.css".
111 html_static_path = ['_static']
112
113 # Custom sidebar templates, must be a dictionary that maps document names
114 # to template names.
115 #
116 # This is required for the alabaster theme
117 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
118 html_sidebars = {
119 '**': [
120 'relations.html', # needs 'show_related': True theme option to display
121 'searchbox.html',
122 ]
123 }
124
125
126 # -- Options for HTMLHelp output ------------------------------------------
127
128 # Output file base name for HTML help builder.
129 htmlhelp_basename = 'Uberspace7labdoc'
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/conf.py b/source/conf.py
--- a/source/conf.py
+++ b/source/conf.py
@@ -47,7 +47,7 @@
master_doc = 'index'
# General information about the project.
-project = 'Uberspace 7 Lab'
+project = 'UberLab'
copyright = '2018, uberspace.de'
author = 'uberspace.de'
|
{"golden_diff": "diff --git a/source/conf.py b/source/conf.py\n--- a/source/conf.py\n+++ b/source/conf.py\n@@ -47,7 +47,7 @@\n master_doc = 'index'\n \n # General information about the project.\n-project = 'Uberspace 7 Lab'\n+project = 'UberLab'\n copyright = '2018, uberspace.de'\n author = 'uberspace.de'\n", "issue": "Change project name to lab in config\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Uberspace 7 lab documentation build configuration file, created by\n# sphinx-quickstart on Tue Feb 13 12:19:29 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport sphinx_rtd_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Uberspace 7 Lab'\ncopyright = '2018, uberspace.de'\nauthor = 'uberspace.de'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nrelease = version = '7'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'display_version': False,\n 'navigation_depth': 2,\n 'collapse_navigation': True\n}\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_last_updated_fmt = '%b %d, %Y'\nhtml_context = {\n 'css_files': ['_static/css/custom.css'],\n 'display_github': True,\n 'github_user': 'Uberspace', \n 'github_repo': 'lab', \n 'github_version': 'master',\n 'conf_py_path': '/source/'\n}\nhtml_show_copyright = False\nhtml_favicon = '_static/favicon.ico'\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Uberspace7labdoc'\n", "path": "source/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Uberspace 7 lab documentation build configuration file, created by\n# sphinx-quickstart on Tue Feb 13 12:19:29 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nimport sphinx_rtd_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'UberLab'\ncopyright = '2018, uberspace.de'\nauthor = 'uberspace.de'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nrelease = version = '7'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'display_version': False,\n 'navigation_depth': 2,\n 'collapse_navigation': True\n}\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_last_updated_fmt = '%b %d, %Y'\nhtml_context = {\n 'css_files': ['_static/css/custom.css'],\n 'display_github': True,\n 'github_user': 'Uberspace', \n 'github_repo': 'lab', \n 'github_version': 'master',\n 'conf_py_path': '/source/'\n}\nhtml_show_copyright = False\nhtml_favicon = '_static/favicon.ico'\n\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Uberspace7labdoc'\n", "path": "source/conf.py"}]}
| 1,524 | 88 |
gh_patches_debug_33354
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-15728
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Heise] ERROR: Unable to extract container ID
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.12.10*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.12.10**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
---
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
```
youtube-dl -v "https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html"
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-v', 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html']
[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252
[debug] youtube-dl version 2017.12.10
[debug] Python version 3.4.4 - Windows-10-10.0.10240
[debug] exe versions: ffmpeg 3.4, ffprobe 3.4
[debug] Proxy map: {}
[Heise] 3700244: Downloading webpage
ERROR: Unable to extract container ID; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp9arpqqmf\build\youtube_dl\YoutubeDL.py", line 784, in extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp9arpqqmf\build\youtube_dl\extractor\common.py", line 437, in extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp9arpqqmf\build\youtube_dl\extractor\heise.py", line 72, in _real_extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmp9arpqqmf\build\youtube_dl\extractor\common.py", line 792, in _search_regex
youtube_dl.utils.RegexNotFoundError: Unable to extract container ID; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.<end of log>
```
---
### Description of your *issue*, suggested solution and other information
Heise extractor seems to fail with videos on https://www.heise.de/video/ - getting ERROR: Unable to extract container ID
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/heise.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 from .youtube import YoutubeIE
6 from ..utils import (
7 determine_ext,
8 int_or_none,
9 parse_iso8601,
10 xpath_text,
11 )
12
13
14 class HeiseIE(InfoExtractor):
15 _VALID_URL = r'https?://(?:www\.)?heise\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\.html'
16 _TESTS = [{
17 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html',
18 'md5': 'ffed432483e922e88545ad9f2f15d30e',
19 'info_dict': {
20 'id': '2404147',
21 'ext': 'mp4',
22 'title': "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone",
23 'format_id': 'mp4_720p',
24 'timestamp': 1411812600,
25 'upload_date': '20140927',
26 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20',
27 'thumbnail': r're:^https?://.*/gallery/$',
28 }
29 }, {
30 # YouTube embed
31 'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html',
32 'md5': 'e403d2b43fea8e405e88e3f8623909f1',
33 'info_dict': {
34 'id': '6kmWbXleKW4',
35 'ext': 'mp4',
36 'title': 'NEU IM SEPTEMBER | Netflix',
37 'description': 'md5:2131f3c7525e540d5fd841de938bd452',
38 'upload_date': '20170830',
39 'uploader': 'Netflix Deutschland, Österreich und Schweiz',
40 'uploader_id': 'netflixdach',
41 },
42 'params': {
43 'skip_download': True,
44 },
45 }, {
46 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html',
47 'only_matching': True,
48 }, {
49 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom',
50 'only_matching': True,
51 }, {
52 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html',
53 'only_matching': True,
54 }]
55
56 def _real_extract(self, url):
57 video_id = self._match_id(url)
58 webpage = self._download_webpage(url, video_id)
59
60 title = self._html_search_meta('fulltitle', webpage, default=None)
61 if not title or title == "c't":
62 title = self._search_regex(
63 r'<div[^>]+class="videoplayerjw"[^>]+data-title="([^"]+)"',
64 webpage, 'title')
65
66 yt_urls = YoutubeIE._extract_urls(webpage)
67 if yt_urls:
68 return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())
69
70 container_id = self._search_regex(
71 r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"',
72 webpage, 'container ID')
73 sequenz_id = self._search_regex(
74 r'<div class="videoplayerjw"[^>]+data-sequenz="([0-9]+)"',
75 webpage, 'sequenz ID')
76
77 doc = self._download_xml(
78 'http://www.heise.de/videout/feed', video_id, query={
79 'container': container_id,
80 'sequenz': sequenz_id,
81 })
82
83 formats = []
84 for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
85 label = source_node.attrib['label']
86 height = int_or_none(self._search_regex(
87 r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
88 video_url = source_node.attrib['file']
89 ext = determine_ext(video_url, '')
90 formats.append({
91 'url': video_url,
92 'format_note': label,
93 'format_id': '%s_%s' % (ext, label),
94 'height': height,
95 })
96 self._sort_formats(formats)
97
98 description = self._og_search_description(
99 webpage, default=None) or self._html_search_meta(
100 'description', webpage)
101
102 return {
103 'id': video_id,
104 'title': title,
105 'description': description,
106 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or
107 self._og_search_thumbnail(webpage)),
108 'timestamp': parse_iso8601(
109 self._html_search_meta('date', webpage)),
110 'formats': formats,
111 }
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py
--- a/youtube_dl/extractor/heise.py
+++ b/youtube_dl/extractor/heise.py
@@ -2,11 +2,13 @@
from __future__ import unicode_literals
from .common import InfoExtractor
+from .kaltura import KalturaIE
from .youtube import YoutubeIE
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
+ smuggle_url,
xpath_text,
)
@@ -42,6 +44,19 @@
'params': {
'skip_download': True,
},
+ }, {
+ 'url': 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html',
+ 'md5': '4b58058b46625bdbd841fc2804df95fc',
+ 'info_dict': {
+ 'id': '1_ntrmio2s',
+ 'timestamp': 1512470717,
+ 'upload_date': '20171205',
+ 'ext': 'mp4',
+ 'title': 'ct10 nachgehakt hos restrictor',
+ },
+ 'params': {
+ 'skip_download': True,
+ },
}, {
'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html',
'only_matching': True,
@@ -67,9 +82,14 @@
if yt_urls:
return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())
+ kaltura_url = KalturaIE._extract_url(webpage)
+ if kaltura_url:
+ return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
+
container_id = self._search_regex(
r'<div class="videoplayerjw"[^>]+data-container="([0-9]+)"',
webpage, 'container ID')
+
sequenz_id = self._search_regex(
r'<div class="videoplayerjw"[^>]+data-sequenz="([0-9]+)"',
webpage, 'sequenz ID')
|
{"golden_diff": "diff --git a/youtube_dl/extractor/heise.py b/youtube_dl/extractor/heise.py\n--- a/youtube_dl/extractor/heise.py\n+++ b/youtube_dl/extractor/heise.py\n@@ -2,11 +2,13 @@\n from __future__ import unicode_literals\n \n from .common import InfoExtractor\n+from .kaltura import KalturaIE\n from .youtube import YoutubeIE\n from ..utils import (\n determine_ext,\n int_or_none,\n parse_iso8601,\n+ smuggle_url,\n xpath_text,\n )\n \n@@ -42,6 +44,19 @@\n 'params': {\n 'skip_download': True,\n },\n+ }, {\n+ 'url': 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html',\n+ 'md5': '4b58058b46625bdbd841fc2804df95fc',\n+ 'info_dict': {\n+ 'id': '1_ntrmio2s',\n+ 'timestamp': 1512470717,\n+ 'upload_date': '20171205',\n+ 'ext': 'mp4',\n+ 'title': 'ct10 nachgehakt hos restrictor',\n+ },\n+ 'params': {\n+ 'skip_download': True,\n+ },\n }, {\n 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html',\n 'only_matching': True,\n@@ -67,9 +82,14 @@\n if yt_urls:\n return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())\n \n+ kaltura_url = KalturaIE._extract_url(webpage)\n+ if kaltura_url:\n+ return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())\n+\n container_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-container=\"([0-9]+)\"',\n webpage, 'container ID')\n+\n sequenz_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-sequenz=\"([0-9]+)\"',\n webpage, 'sequenz ID')\n", "issue": "[Heise] ERROR: Unable to extract container ID\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.12.10*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.12.10**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** you run youtube-dl with (`youtube-dl -v <your command line>`), copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n\r\n```\r\nyoutube-dl -v \"https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html\"\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-v', 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html']\r\n[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252\r\n[debug] youtube-dl version 2017.12.10\r\n[debug] Python version 3.4.4 - Windows-10-10.0.10240\r\n[debug] exe versions: ffmpeg 3.4, ffprobe 3.4\r\n[debug] Proxy map: {}\r\n[Heise] 3700244: Downloading webpage\r\nERROR: Unable to extract container ID; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp9arpqqmf\\build\\youtube_dl\\YoutubeDL.py\", line 784, in extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp9arpqqmf\\build\\youtube_dl\\extractor\\common.py\", line 437, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp9arpqqmf\\build\\youtube_dl\\extractor\\heise.py\", line 72, in _real_extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmp9arpqqmf\\build\\youtube_dl\\extractor\\common.py\", line 792, in _search_regex\r\nyoutube_dl.utils.RegexNotFoundError: Unable to extract container ID; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.<end of log>\r\n```\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nHeise extractor seems to fail with videos on https://www.heise.de/video/ - getting ERROR: Unable to extract container ID\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom .youtube import YoutubeIE\nfrom ..utils import (\n determine_ext,\n int_or_none,\n parse_iso8601,\n xpath_text,\n)\n\n\nclass HeiseIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?heise\\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\\.html'\n _TESTS = [{\n 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html',\n 'md5': 'ffed432483e922e88545ad9f2f15d30e',\n 'info_dict': {\n 'id': '2404147',\n 'ext': 'mp4',\n 'title': \"Podcast: c't uplink 3.3 \u2013 Owncloud / Tastaturen / Peilsender Smartphone\",\n 'format_id': 'mp4_720p',\n 'timestamp': 1411812600,\n 'upload_date': '20140927',\n 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20',\n 'thumbnail': r're:^https?://.*/gallery/$',\n }\n }, {\n # YouTube embed\n 'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html',\n 'md5': 'e403d2b43fea8e405e88e3f8623909f1',\n 'info_dict': {\n 'id': '6kmWbXleKW4',\n 'ext': 'mp4',\n 'title': 'NEU IM SEPTEMBER | Netflix',\n 'description': 'md5:2131f3c7525e540d5fd841de938bd452',\n 'upload_date': '20170830',\n 'uploader': 'Netflix Deutschland, \u00d6sterreich und Schweiz',\n 'uploader_id': 'netflixdach',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html',\n 'only_matching': True,\n }, {\n 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom',\n 'only_matching': True,\n }, {\n 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_meta('fulltitle', webpage, default=None)\n if not title or title == \"c't\":\n title = self._search_regex(\n r'<div[^>]+class=\"videoplayerjw\"[^>]+data-title=\"([^\"]+)\"',\n webpage, 'title')\n\n yt_urls = YoutubeIE._extract_urls(webpage)\n if yt_urls:\n return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())\n\n container_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-container=\"([0-9]+)\"',\n webpage, 'container ID')\n sequenz_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-sequenz=\"([0-9]+)\"',\n webpage, 'sequenz ID')\n\n doc = self._download_xml(\n 'http://www.heise.de/videout/feed', video_id, query={\n 'container': container_id,\n 'sequenz': sequenz_id,\n })\n\n formats = []\n for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):\n label = source_node.attrib['label']\n height = int_or_none(self._search_regex(\n r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))\n video_url = source_node.attrib['file']\n ext = determine_ext(video_url, '')\n formats.append({\n 'url': video_url,\n 'format_note': label,\n 'format_id': '%s_%s' % (ext, label),\n 'height': height,\n })\n self._sort_formats(formats)\n\n description = self._og_search_description(\n webpage, default=None) or self._html_search_meta(\n 'description', webpage)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or\n self._og_search_thumbnail(webpage)),\n 'timestamp': parse_iso8601(\n self._html_search_meta('date', webpage)),\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/heise.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom .kaltura import KalturaIE\nfrom .youtube import YoutubeIE\nfrom ..utils import (\n determine_ext,\n int_or_none,\n parse_iso8601,\n smuggle_url,\n xpath_text,\n)\n\n\nclass HeiseIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?heise\\.de/(?:[^/]+/)+[^/]+-(?P<id>[0-9]+)\\.html'\n _TESTS = [{\n 'url': 'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html',\n 'md5': 'ffed432483e922e88545ad9f2f15d30e',\n 'info_dict': {\n 'id': '2404147',\n 'ext': 'mp4',\n 'title': \"Podcast: c't uplink 3.3 \u2013 Owncloud / Tastaturen / Peilsender Smartphone\",\n 'format_id': 'mp4_720p',\n 'timestamp': 1411812600,\n 'upload_date': '20140927',\n 'description': 'md5:c934cbfb326c669c2bcabcbe3d3fcd20',\n 'thumbnail': r're:^https?://.*/gallery/$',\n }\n }, {\n # YouTube embed\n 'url': 'http://www.heise.de/newsticker/meldung/Netflix-In-20-Jahren-vom-Videoverleih-zum-TV-Revolutionaer-3814130.html',\n 'md5': 'e403d2b43fea8e405e88e3f8623909f1',\n 'info_dict': {\n 'id': '6kmWbXleKW4',\n 'ext': 'mp4',\n 'title': 'NEU IM SEPTEMBER | Netflix',\n 'description': 'md5:2131f3c7525e540d5fd841de938bd452',\n 'upload_date': '20170830',\n 'uploader': 'Netflix Deutschland, \u00d6sterreich und Schweiz',\n 'uploader_id': 'netflixdach',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www.heise.de/video/artikel/nachgehakt-Wie-sichert-das-c-t-Tool-Restric-tor-Windows-10-ab-3700244.html',\n 'md5': '4b58058b46625bdbd841fc2804df95fc',\n 'info_dict': {\n 'id': '1_ntrmio2s',\n 'timestamp': 1512470717,\n 'upload_date': '20171205',\n 'ext': 'mp4',\n 'title': 'ct10 nachgehakt hos restrictor',\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'http://www.heise.de/ct/artikel/c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2403911.html',\n 'only_matching': True,\n }, {\n 'url': 'http://www.heise.de/newsticker/meldung/c-t-uplink-Owncloud-Tastaturen-Peilsender-Smartphone-2404251.html?wt_mc=rss.ho.beitrag.atom',\n 'only_matching': True,\n }, {\n 'url': 'http://www.heise.de/ct/ausgabe/2016-12-Spiele-3214137.html',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_meta('fulltitle', webpage, default=None)\n if not title or title == \"c't\":\n title = self._search_regex(\n r'<div[^>]+class=\"videoplayerjw\"[^>]+data-title=\"([^\"]+)\"',\n webpage, 'title')\n\n yt_urls = YoutubeIE._extract_urls(webpage)\n if yt_urls:\n return self.playlist_from_matches(yt_urls, video_id, title, ie=YoutubeIE.ie_key())\n\n kaltura_url = KalturaIE._extract_url(webpage)\n if kaltura_url:\n return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())\n\n container_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-container=\"([0-9]+)\"',\n webpage, 'container ID')\n\n sequenz_id = self._search_regex(\n r'<div class=\"videoplayerjw\"[^>]+data-sequenz=\"([0-9]+)\"',\n webpage, 'sequenz ID')\n\n doc = self._download_xml(\n 'http://www.heise.de/videout/feed', video_id, query={\n 'container': container_id,\n 'sequenz': sequenz_id,\n })\n\n formats = []\n for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):\n label = source_node.attrib['label']\n height = int_or_none(self._search_regex(\n r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))\n video_url = source_node.attrib['file']\n ext = determine_ext(video_url, '')\n formats.append({\n 'url': video_url,\n 'format_note': label,\n 'format_id': '%s_%s' % (ext, label),\n 'height': height,\n })\n self._sort_formats(formats)\n\n description = self._og_search_description(\n webpage, default=None) or self._html_search_meta(\n 'description', webpage)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'thumbnail': (xpath_text(doc, './/{http://rss.jwpcdn.com/}image') or\n self._og_search_thumbnail(webpage)),\n 'timestamp': parse_iso8601(\n self._html_search_meta('date', webpage)),\n 'formats': formats,\n }\n", "path": "youtube_dl/extractor/heise.py"}]}
| 2,923 | 572 |
gh_patches_debug_5357
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1011
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not specifying an `on_select` callback for Table leads to a TypeError
**Describe the bug**
If you leave out a `on_select` callable on `Table` and click a row, you get a traceback in the terminal:
```python
Traceback (most recent call last):
File "C:\\Users\\brcan\\Desktop\\snippets\\.venv\\lib\\site-packages\\toga_winforms\\widgets\\table.py", line 74, in _native_item_selection_changed
self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
TypeError : 'NoneType' object is not callable
```
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://toga.readthedocs.io/en/latest/tutorial/tutorial-2.html
2. Run the example
3. Click on any row of the table
4. See error
**Expected behavior**
No error.
**Environment:**
- Operating System: Windows 10
- Python version: CPython 3.8.3
- Software versions:
- Briefcase:
- Toga: 0.3.0dev22
- ...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/winforms/toga_winforms/widgets/table.py`
Content:
```
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class Table(Widget):
9 def create(self):
10 self._container = self
11 self.native = WinForms.ListView()
12 self.native.View = WinForms.View.Details
13 self._cache = []
14 self._first_item = 0
15
16 dataColumn = []
17 for i, (heading, accessor) in enumerate(zip(
18 self.interface.headings,
19 self.interface._accessors
20 )):
21 dataColumn.append(self._create_column(heading, accessor))
22
23 self.native.FullRowSelect = True
24 self.native.MultiSelect = self.interface.multiple_select
25 self.native.DoubleBuffered = True
26 self.native.VirtualMode = True
27 self.native.Columns.AddRange(dataColumn)
28
29 self.native.ItemSelectionChanged += self._native_item_selection_changed
30 self.native.RetrieveVirtualItem += self._native_retrieve_virtual_item
31 self.native.CacheVirtualItems += self._native_cache_virtual_items
32 self.native.VirtualItemsSelectionRangeChanged += self._native_virtual_item_selection_range_changed
33
34 def _native_virtual_item_selection_range_changed(self, sender, e):
35 # update selection interface property
36 self.interface._selection = self._selected_rows()
37
38 # `Shift` key or Range selection handler
39 if e.IsSelected and self.interface.multiple_select and self.interface.on_select:
40 # call on select with the last row of the multi selection
41 selected = self.interface.data[e.EndIndex]
42 self.interface.on_select(self.interface, row=selected)
43
44 def _native_retrieve_virtual_item(self, sender, e):
45 # Because ListView is in VirtualMode, it's necessary implement
46 # VirtualItemsSelectionRangeChanged event to create ListViewItem when it's needed
47 if self._cache and e.ItemIndex >= self._first_item and \
48 e.ItemIndex < self._first_item + len(self._cache):
49 e.Item = self._cache[e.ItemIndex - self._first_item]
50 else:
51 e.Item = WinForms.ListViewItem(self.row_data(self.interface.data[e.ItemIndex]))
52
53 def _native_cache_virtual_items(self, sender, e):
54 if self._cache and e.StartIndex >= self._first_item and \
55 e.EndIndex <= self._first_item + len(self._cache):
56 # If the newly requested cache is a subset of the old cache,
57 # no need to rebuild everything, so do nothing
58 return
59
60 # Now we need to rebuild the cache.
61 self._first_item = e.StartIndex
62 new_length = e.EndIndex - e.StartIndex + 1
63 self._cache = []
64
65 # Fill the cache with the appropriate ListViewItems.
66 for i in range(new_length):
67 self._cache.append(WinForms.ListViewItem(self.row_data(self.interface.data[i])))
68
69 def _native_item_selection_changed(self, sender, e):
70 # update selection interface property
71 self.interface._selection = self._selected_rows()
72
73 if e.IsSelected:
74 self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
75
76 def _selected_rows(self):
77 if not self.native.SelectedIndices.Count:
78 return None
79
80 if self.interface.multiple_select:
81 selected = [row for i, row in enumerate(self.interface.data) if i in self.native.SelectedIndices]
82 return selected
83 else:
84 return self.interface.data[self.native.SelectedIndices[0]]
85
86 def _create_column(self, heading, accessor):
87 col = WinForms.ColumnHeader()
88 col.Text = heading
89 col.Name = accessor
90 return col
91
92 def change_source(self, source):
93 self.update_data()
94
95 def row_data(self, item):
96 # TODO: Winforms can't support icons in tree cells; so, if the data source
97 # specifies an icon, strip it when converting to row data.
98 def strip_icon(item, attr):
99 val = getattr(item, attr, self.interface.missing_value)
100
101 if isinstance(val, tuple):
102 return str(val[1])
103 return str(val)
104
105 return [
106 strip_icon(item, attr)
107 for attr in self.interface._accessors
108 ]
109
110 def update_data(self):
111 self.native.VirtualListSize = len(self.interface.data)
112 self._cache = []
113
114 def insert(self, index, item):
115 self.update_data()
116
117 def change(self, item):
118 self.interface.factory.not_implemented('Table.change()')
119
120 def remove(self, item):
121 self.update_data()
122
123 def clear(self):
124 self.update_data()
125
126 def set_on_select(self, handler):
127 pass
128
129 def scroll_to_row(self, row):
130 self.native.EnsureVisible(row)
131
132 def rehint(self):
133 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
134 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
135
136 def remove_column(self, accessor):
137 self.native.Columns.RemoveByKey(accessor)
138
139 def add_column(self, heading, accessor):
140 self.native.Columns.Add(self._create_column(heading, accessor))
141 self.update_data()
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py
--- a/src/winforms/toga_winforms/widgets/table.py
+++ b/src/winforms/toga_winforms/widgets/table.py
@@ -70,7 +70,7 @@
# update selection interface property
self.interface._selection = self._selected_rows()
- if e.IsSelected:
+ if e.IsSelected and self.interface.on_select:
self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
def _selected_rows(self):
|
{"golden_diff": "diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py\n--- a/src/winforms/toga_winforms/widgets/table.py\n+++ b/src/winforms/toga_winforms/widgets/table.py\n@@ -70,7 +70,7 @@\n # update selection interface property\n self.interface._selection = self._selected_rows()\n \n- if e.IsSelected:\n+ if e.IsSelected and self.interface.on_select:\n self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])\n \n def _selected_rows(self):\n", "issue": "Not specifying an `on_select` callback for Table leads to a TypeError\n**Describe the bug**\r\nIf you leave out a `on_select` callable on `Table` and click a row, you get a traceback in the terminal:\r\n```python\r\nTraceback (most recent call last):\r\n File \"C:\\\\Users\\\\brcan\\\\Desktop\\\\snippets\\\\.venv\\\\lib\\\\site-packages\\\\toga_winforms\\\\widgets\\\\table.py\", line 74, in _native_item_selection_changed\r\n self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])\r\nTypeError : 'NoneType' object is not callable\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://toga.readthedocs.io/en/latest/tutorial/tutorial-2.html\r\n2. Run the example\r\n3. Click on any row of the table\r\n4. See error\r\n\r\n**Expected behavior**\r\nNo error.\r\n\r\n**Environment:**\r\n - Operating System: Windows 10\r\n - Python version: CPython 3.8.3\r\n - Software versions:\r\n - Briefcase:\r\n - Toga: 0.3.0dev22\r\n - ...\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n self._cache = []\n self._first_item = 0\n\n dataColumn = []\n for i, (heading, accessor) in enumerate(zip(\n self.interface.headings,\n self.interface._accessors\n )):\n dataColumn.append(self._create_column(heading, accessor))\n\n self.native.FullRowSelect = True\n self.native.MultiSelect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.VirtualMode = True\n self.native.Columns.AddRange(dataColumn)\n\n self.native.ItemSelectionChanged += self._native_item_selection_changed\n self.native.RetrieveVirtualItem += self._native_retrieve_virtual_item\n self.native.CacheVirtualItems += self._native_cache_virtual_items\n self.native.VirtualItemsSelectionRangeChanged += self._native_virtual_item_selection_range_changed\n\n def _native_virtual_item_selection_range_changed(self, sender, e):\n # update selection interface property\n self.interface._selection = self._selected_rows()\n\n # `Shift` key or Range selection handler\n if e.IsSelected and self.interface.multiple_select and self.interface.on_select:\n # call on select with the last row of the multi selection\n selected = self.interface.data[e.EndIndex]\n self.interface.on_select(self.interface, row=selected)\n\n def _native_retrieve_virtual_item(self, sender, e):\n # Because ListView is in VirtualMode, it's necessary implement\n # VirtualItemsSelectionRangeChanged event to create ListViewItem when it's needed\n if self._cache and e.ItemIndex >= self._first_item and \\\n e.ItemIndex < self._first_item + len(self._cache):\n e.Item = self._cache[e.ItemIndex - self._first_item]\n else:\n e.Item = WinForms.ListViewItem(self.row_data(self.interface.data[e.ItemIndex]))\n\n def _native_cache_virtual_items(self, sender, e):\n if self._cache and e.StartIndex >= self._first_item and \\\n e.EndIndex <= self._first_item + len(self._cache):\n # If the newly requested cache is a subset of the old cache,\n # no need to rebuild everything, so do nothing\n return\n\n # Now we need to rebuild the cache.\n self._first_item = e.StartIndex\n new_length = e.EndIndex - e.StartIndex + 1\n self._cache = []\n\n # Fill the cache with the appropriate ListViewItems.\n for i in range(new_length):\n self._cache.append(WinForms.ListViewItem(self.row_data(self.interface.data[i])))\n\n def _native_item_selection_changed(self, sender, e):\n # update selection interface property\n self.interface._selection = self._selected_rows()\n\n if e.IsSelected:\n self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])\n\n def _selected_rows(self):\n if not self.native.SelectedIndices.Count:\n return None\n\n if self.interface.multiple_select:\n selected = [row for i, row in enumerate(self.interface.data) if i in self.native.SelectedIndices]\n return selected\n else:\n return self.interface.data[self.native.SelectedIndices[0]]\n\n def _create_column(self, heading, accessor):\n col = WinForms.ColumnHeader()\n col.Text = heading\n col.Name = accessor\n return col\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr, self.interface.missing_value)\n\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.VirtualListSize = len(self.interface.data)\n self._cache = []\n\n def insert(self, index, item):\n self.update_data()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.update_data()\n\n def set_on_select(self, handler):\n pass\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n\n def remove_column(self, accessor):\n self.native.Columns.RemoveByKey(accessor)\n\n def add_column(self, heading, accessor):\n self.native.Columns.Add(self._create_column(heading, accessor))\n self.update_data()\n", "path": "src/winforms/toga_winforms/widgets/table.py"}], "after_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n self._cache = []\n self._first_item = 0\n\n dataColumn = []\n for i, (heading, accessor) in enumerate(zip(\n self.interface.headings,\n self.interface._accessors\n )):\n dataColumn.append(self._create_column(heading, accessor))\n\n self.native.FullRowSelect = True\n self.native.MultiSelect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.VirtualMode = True\n self.native.Columns.AddRange(dataColumn)\n\n self.native.ItemSelectionChanged += self._native_item_selection_changed\n self.native.RetrieveVirtualItem += self._native_retrieve_virtual_item\n self.native.CacheVirtualItems += self._native_cache_virtual_items\n self.native.VirtualItemsSelectionRangeChanged += self._native_virtual_item_selection_range_changed\n\n def _native_virtual_item_selection_range_changed(self, sender, e):\n # update selection interface property\n self.interface._selection = self._selected_rows()\n\n # `Shift` key or Range selection handler\n if e.IsSelected and self.interface.multiple_select and self.interface.on_select:\n # call on select with the last row of the multi selection\n selected = self.interface.data[e.EndIndex]\n self.interface.on_select(self.interface, row=selected)\n\n def _native_retrieve_virtual_item(self, sender, e):\n # Because ListView is in VirtualMode, it's necessary implement\n # VirtualItemsSelectionRangeChanged event to create ListViewItem when it's needed\n if self._cache and e.ItemIndex >= self._first_item and \\\n e.ItemIndex < self._first_item + len(self._cache):\n e.Item = self._cache[e.ItemIndex - self._first_item]\n else:\n e.Item = WinForms.ListViewItem(self.row_data(self.interface.data[e.ItemIndex]))\n\n def _native_cache_virtual_items(self, sender, e):\n if self._cache and e.StartIndex >= self._first_item and \\\n e.EndIndex <= self._first_item + len(self._cache):\n # If the newly requested cache is a subset of the old cache,\n # no need to rebuild everything, so do nothing\n return\n\n # Now we need to rebuild the cache.\n self._first_item = e.StartIndex\n new_length = e.EndIndex - e.StartIndex + 1\n self._cache = []\n\n # Fill the cache with the appropriate ListViewItems.\n for i in range(new_length):\n self._cache.append(WinForms.ListViewItem(self.row_data(self.interface.data[i])))\n\n def _native_item_selection_changed(self, sender, e):\n # update selection interface property\n self.interface._selection = self._selected_rows()\n\n if e.IsSelected and self.interface.on_select:\n self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])\n\n def _selected_rows(self):\n if not self.native.SelectedIndices.Count:\n return None\n\n if self.interface.multiple_select:\n selected = [row for i, row in enumerate(self.interface.data) if i in self.native.SelectedIndices]\n return selected\n else:\n return self.interface.data[self.native.SelectedIndices[0]]\n\n def _create_column(self, heading, accessor):\n col = WinForms.ColumnHeader()\n col.Text = heading\n col.Name = accessor\n return col\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr, self.interface.missing_value)\n\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.VirtualListSize = len(self.interface.data)\n self._cache = []\n\n def insert(self, index, item):\n self.update_data()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.update_data()\n\n def set_on_select(self, handler):\n pass\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n\n def remove_column(self, accessor):\n self.native.Columns.RemoveByKey(accessor)\n\n def add_column(self, heading, accessor):\n self.native.Columns.Add(self._create_column(heading, accessor))\n self.update_data()\n", "path": "src/winforms/toga_winforms/widgets/table.py"}]}
| 1,915 | 126 |
gh_patches_debug_28387
|
rasdani/github-patches
|
git_diff
|
apache__airflow-8265
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move Kubernetes tests and kind setup out of the container to host
**Description**
We should move kind setup out of the container for CI to the host environment
**Use case / motivation**
Makes CI image smaller and setup easier for local testing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/kubernetes/volume_mount.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """
18 Classes for interacting with Kubernetes API
19 """
20
21 import copy
22
23 import kubernetes.client.models as k8s
24
25 from airflow.kubernetes.k8s_model import K8SModel
26
27
28 class VolumeMount(K8SModel):
29 """
30 Initialize a Kubernetes Volume Mount. Used to mount pod level volumes to
31 running container.
32
33 :param name: the name of the volume mount
34 :type name: str
35 :param mount_path:
36 :type mount_path: str
37 :param sub_path: subpath within the volume mount
38 :type sub_path: str
39 :param read_only: whether to access pod with read-only mode
40 :type read_only: bool
41 """
42 def __init__(self, name, mount_path, sub_path, read_only):
43 self.name = name
44 self.mount_path = mount_path
45 self.sub_path = sub_path
46 self.read_only = read_only
47
48 def to_k8s_client_obj(self) -> k8s.V1VolumeMount:
49 """
50 Converts to k8s object.
51
52 :return Volume Mount k8s object
53
54 """
55 return k8s.V1VolumeMount(
56 name=self.name,
57 mount_path=self.mount_path,
58 sub_path=self.sub_path,
59 read_only=self.read_only
60 )
61
62 def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
63 """
64 Attaches to pod
65
66 :return Copy of the Pod object
67
68 """
69 cp_pod = copy.deepcopy(pod)
70 volume_mount = self.to_k8s_client_obj()
71 cp_pod.spec.containers[0].volume_mounts = pod.spec.containers[0].volume_mounts or []
72 cp_pod.spec.containers[0].volume_mounts.append(volume_mount)
73 return cp_pod
74
```
Path: `airflow/kubernetes/refresh_config.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 """
19 NOTE: this module can be removed once upstream client supports token refresh
20 see: https://github.com/kubernetes-client/python/issues/741
21 """
22
23 import calendar
24 import logging
25 import os
26 import time
27 from datetime import datetime
28
29 import yaml
30 from kubernetes.client import Configuration
31 from kubernetes.config.exec_provider import ExecProvider
32 from kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION, KubeConfigLoader
33
34
35 class RefreshKubeConfigLoader(KubeConfigLoader):
36 """
37 Patched KubeConfigLoader, this subclass takes expirationTimestamp into
38 account and sets api key refresh callback hook in Configuration object
39 """
40 def __init__(self, *args, **kwargs):
41 KubeConfigLoader.__init__(self, *args, **kwargs)
42 self.api_key_expire_ts = None
43
44 def _load_from_exec_plugin(self):
45 """
46 We override _load_from_exec_plugin method to also read and store
47 expiration timestamp for aws-iam-authenticator. It will be later
48 used for api token refresh.
49 """
50 if 'exec' not in self._user:
51 return None
52 try:
53 status = ExecProvider(self._user['exec']).run()
54 if 'token' not in status:
55 logging.error('exec: missing token field in plugin output')
56 return None
57 self.token = "Bearer %s" % status['token'] # pylint: disable=W0201
58 ts_str = status.get('expirationTimestamp')
59 if ts_str:
60 self.api_key_expire_ts = calendar.timegm(
61 datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z").timetuple(),
62 )
63 return True
64 except Exception as e: # pylint: disable=W0703
65 logging.error(str(e))
66
67 def refresh_api_key(self, client_configuration):
68 """
69 Refresh API key if expired
70 """
71 if self.api_key_expire_ts and time.time() >= self.api_key_expire_ts:
72 self.load_and_set(client_configuration)
73
74 def load_and_set(self, client_configuration):
75 KubeConfigLoader.load_and_set(self, client_configuration)
76 client_configuration.refresh_api_key = self.refresh_api_key
77
78
79 class RefreshConfiguration(Configuration):
80 """
81 Patched Configuration, this subclass taskes api key refresh callback hook
82 into account
83 """
84 def __init__(self, *args, **kwargs):
85 Configuration.__init__(self, *args, **kwargs)
86 self.refresh_api_key = None
87
88 def get_api_key_with_prefix(self, identifier):
89 if self.refresh_api_key:
90 self.refresh_api_key(self) # pylint: disable=E1102
91 return Configuration.get_api_key_with_prefix(self, identifier)
92
93
94 def _get_kube_config_loader_for_yaml_file(filename, **kwargs):
95 """
96 Adapted from the upstream _get_kube_config_loader_for_yaml_file function, changed
97 KubeConfigLoader to RefreshKubeConfigLoader
98 """
99 with open(filename) as f:
100 return RefreshKubeConfigLoader(
101 config_dict=yaml.safe_load(f),
102 config_base_path=os.path.abspath(os.path.dirname(filename)),
103 **kwargs)
104
105
106 def load_kube_config(client_configuration, config_file=None, context=None):
107 """
108 Adapted from the upstream load_kube_config function, changes:
109 - removed persist_config argument since it's not being used
110 - remove `client_configuration is None` branch since we always pass
111 in client configuration
112 """
113 if config_file is None:
114 config_file = os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION)
115
116 loader = _get_kube_config_loader_for_yaml_file(
117 config_file, active_context=context, config_persister=None)
118
119 loader.load_and_set(client_configuration)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/kubernetes/refresh_config.py b/airflow/kubernetes/refresh_config.py
--- a/airflow/kubernetes/refresh_config.py
+++ b/airflow/kubernetes/refresh_config.py
@@ -25,6 +25,7 @@
import os
import time
from datetime import datetime
+from typing import Optional
import yaml
from kubernetes.client import Configuration
@@ -91,7 +92,7 @@
return Configuration.get_api_key_with_prefix(self, identifier)
-def _get_kube_config_loader_for_yaml_file(filename, **kwargs):
+def _get_kube_config_loader_for_yaml_file(filename, **kwargs) -> Optional[RefreshKubeConfigLoader]:
"""
Adapted from the upstream _get_kube_config_loader_for_yaml_file function, changed
KubeConfigLoader to RefreshKubeConfigLoader
@@ -115,5 +116,4 @@
loader = _get_kube_config_loader_for_yaml_file(
config_file, active_context=context, config_persister=None)
-
loader.load_and_set(client_configuration)
diff --git a/airflow/kubernetes/volume_mount.py b/airflow/kubernetes/volume_mount.py
--- a/airflow/kubernetes/volume_mount.py
+++ b/airflow/kubernetes/volume_mount.py
@@ -35,7 +35,7 @@
:param mount_path:
:type mount_path: str
:param sub_path: subpath within the volume mount
- :type sub_path: str
+ :type sub_path: Optional[str]
:param read_only: whether to access pod with read-only mode
:type read_only: bool
"""
|
{"golden_diff": "diff --git a/airflow/kubernetes/refresh_config.py b/airflow/kubernetes/refresh_config.py\n--- a/airflow/kubernetes/refresh_config.py\n+++ b/airflow/kubernetes/refresh_config.py\n@@ -25,6 +25,7 @@\n import os\n import time\n from datetime import datetime\n+from typing import Optional\n \n import yaml\n from kubernetes.client import Configuration\n@@ -91,7 +92,7 @@\n return Configuration.get_api_key_with_prefix(self, identifier)\n \n \n-def _get_kube_config_loader_for_yaml_file(filename, **kwargs):\n+def _get_kube_config_loader_for_yaml_file(filename, **kwargs) -> Optional[RefreshKubeConfigLoader]:\n \"\"\"\n Adapted from the upstream _get_kube_config_loader_for_yaml_file function, changed\n KubeConfigLoader to RefreshKubeConfigLoader\n@@ -115,5 +116,4 @@\n \n loader = _get_kube_config_loader_for_yaml_file(\n config_file, active_context=context, config_persister=None)\n-\n loader.load_and_set(client_configuration)\ndiff --git a/airflow/kubernetes/volume_mount.py b/airflow/kubernetes/volume_mount.py\n--- a/airflow/kubernetes/volume_mount.py\n+++ b/airflow/kubernetes/volume_mount.py\n@@ -35,7 +35,7 @@\n :param mount_path:\n :type mount_path: str\n :param sub_path: subpath within the volume mount\n- :type sub_path: str\n+ :type sub_path: Optional[str]\n :param read_only: whether to access pod with read-only mode\n :type read_only: bool\n \"\"\"\n", "issue": "Move Kubernetes tests and kind setup out of the container to host\n**Description**\r\n\r\nWe should move kind setup out of the container for CI to the host environment\r\n\r\n**Use case / motivation**\r\n\r\nMakes CI image smaller and setup easier for local testing.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nClasses for interacting with Kubernetes API\n\"\"\"\n\nimport copy\n\nimport kubernetes.client.models as k8s\n\nfrom airflow.kubernetes.k8s_model import K8SModel\n\n\nclass VolumeMount(K8SModel):\n \"\"\"\n Initialize a Kubernetes Volume Mount. Used to mount pod level volumes to\n running container.\n\n :param name: the name of the volume mount\n :type name: str\n :param mount_path:\n :type mount_path: str\n :param sub_path: subpath within the volume mount\n :type sub_path: str\n :param read_only: whether to access pod with read-only mode\n :type read_only: bool\n \"\"\"\n def __init__(self, name, mount_path, sub_path, read_only):\n self.name = name\n self.mount_path = mount_path\n self.sub_path = sub_path\n self.read_only = read_only\n\n def to_k8s_client_obj(self) -> k8s.V1VolumeMount:\n \"\"\"\n Converts to k8s object.\n\n :return Volume Mount k8s object\n\n \"\"\"\n return k8s.V1VolumeMount(\n name=self.name,\n mount_path=self.mount_path,\n sub_path=self.sub_path,\n read_only=self.read_only\n )\n\n def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:\n \"\"\"\n Attaches to pod\n\n :return Copy of the Pod object\n\n \"\"\"\n cp_pod = copy.deepcopy(pod)\n volume_mount = self.to_k8s_client_obj()\n cp_pod.spec.containers[0].volume_mounts = pod.spec.containers[0].volume_mounts or []\n cp_pod.spec.containers[0].volume_mounts.append(volume_mount)\n return cp_pod\n", "path": "airflow/kubernetes/volume_mount.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nNOTE: this module can be removed once upstream client supports token refresh\nsee: https://github.com/kubernetes-client/python/issues/741\n\"\"\"\n\nimport calendar\nimport logging\nimport os\nimport time\nfrom datetime import datetime\n\nimport yaml\nfrom kubernetes.client import Configuration\nfrom kubernetes.config.exec_provider import ExecProvider\nfrom kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION, KubeConfigLoader\n\n\nclass RefreshKubeConfigLoader(KubeConfigLoader):\n \"\"\"\n Patched KubeConfigLoader, this subclass takes expirationTimestamp into\n account and sets api key refresh callback hook in Configuration object\n \"\"\"\n def __init__(self, *args, **kwargs):\n KubeConfigLoader.__init__(self, *args, **kwargs)\n self.api_key_expire_ts = None\n\n def _load_from_exec_plugin(self):\n \"\"\"\n We override _load_from_exec_plugin method to also read and store\n expiration timestamp for aws-iam-authenticator. It will be later\n used for api token refresh.\n \"\"\"\n if 'exec' not in self._user:\n return None\n try:\n status = ExecProvider(self._user['exec']).run()\n if 'token' not in status:\n logging.error('exec: missing token field in plugin output')\n return None\n self.token = \"Bearer %s\" % status['token'] # pylint: disable=W0201\n ts_str = status.get('expirationTimestamp')\n if ts_str:\n self.api_key_expire_ts = calendar.timegm(\n datetime.strptime(ts_str, \"%Y-%m-%dT%H:%M:%S%z\").timetuple(),\n )\n return True\n except Exception as e: # pylint: disable=W0703\n logging.error(str(e))\n\n def refresh_api_key(self, client_configuration):\n \"\"\"\n Refresh API key if expired\n \"\"\"\n if self.api_key_expire_ts and time.time() >= self.api_key_expire_ts:\n self.load_and_set(client_configuration)\n\n def load_and_set(self, client_configuration):\n KubeConfigLoader.load_and_set(self, client_configuration)\n client_configuration.refresh_api_key = self.refresh_api_key\n\n\nclass RefreshConfiguration(Configuration):\n \"\"\"\n Patched Configuration, this subclass taskes api key refresh callback hook\n into account\n \"\"\"\n def __init__(self, *args, **kwargs):\n Configuration.__init__(self, *args, **kwargs)\n self.refresh_api_key = None\n\n def get_api_key_with_prefix(self, identifier):\n if self.refresh_api_key:\n self.refresh_api_key(self) # pylint: disable=E1102\n return Configuration.get_api_key_with_prefix(self, identifier)\n\n\ndef _get_kube_config_loader_for_yaml_file(filename, **kwargs):\n \"\"\"\n Adapted from the upstream _get_kube_config_loader_for_yaml_file function, changed\n KubeConfigLoader to RefreshKubeConfigLoader\n \"\"\"\n with open(filename) as f:\n return RefreshKubeConfigLoader(\n config_dict=yaml.safe_load(f),\n config_base_path=os.path.abspath(os.path.dirname(filename)),\n **kwargs)\n\n\ndef load_kube_config(client_configuration, config_file=None, context=None):\n \"\"\"\n Adapted from the upstream load_kube_config function, changes:\n - removed persist_config argument since it's not being used\n - remove `client_configuration is None` branch since we always pass\n in client configuration\n \"\"\"\n if config_file is None:\n config_file = os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION)\n\n loader = _get_kube_config_loader_for_yaml_file(\n config_file, active_context=context, config_persister=None)\n\n loader.load_and_set(client_configuration)\n", "path": "airflow/kubernetes/refresh_config.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nClasses for interacting with Kubernetes API\n\"\"\"\n\nimport copy\n\nimport kubernetes.client.models as k8s\n\nfrom airflow.kubernetes.k8s_model import K8SModel\n\n\nclass VolumeMount(K8SModel):\n \"\"\"\n Initialize a Kubernetes Volume Mount. Used to mount pod level volumes to\n running container.\n\n :param name: the name of the volume mount\n :type name: str\n :param mount_path:\n :type mount_path: str\n :param sub_path: subpath within the volume mount\n :type sub_path: Optional[str]\n :param read_only: whether to access pod with read-only mode\n :type read_only: bool\n \"\"\"\n def __init__(self, name, mount_path, sub_path, read_only):\n self.name = name\n self.mount_path = mount_path\n self.sub_path = sub_path\n self.read_only = read_only\n\n def to_k8s_client_obj(self) -> k8s.V1VolumeMount:\n \"\"\"\n Converts to k8s object.\n\n :return Volume Mount k8s object\n\n \"\"\"\n return k8s.V1VolumeMount(\n name=self.name,\n mount_path=self.mount_path,\n sub_path=self.sub_path,\n read_only=self.read_only\n )\n\n def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:\n \"\"\"\n Attaches to pod\n\n :return Copy of the Pod object\n\n \"\"\"\n cp_pod = copy.deepcopy(pod)\n volume_mount = self.to_k8s_client_obj()\n cp_pod.spec.containers[0].volume_mounts = pod.spec.containers[0].volume_mounts or []\n cp_pod.spec.containers[0].volume_mounts.append(volume_mount)\n return cp_pod\n", "path": "airflow/kubernetes/volume_mount.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nNOTE: this module can be removed once upstream client supports token refresh\nsee: https://github.com/kubernetes-client/python/issues/741\n\"\"\"\n\nimport calendar\nimport logging\nimport os\nimport time\nfrom datetime import datetime\nfrom typing import Optional\n\nimport yaml\nfrom kubernetes.client import Configuration\nfrom kubernetes.config.exec_provider import ExecProvider\nfrom kubernetes.config.kube_config import KUBE_CONFIG_DEFAULT_LOCATION, KubeConfigLoader\n\n\nclass RefreshKubeConfigLoader(KubeConfigLoader):\n \"\"\"\n Patched KubeConfigLoader, this subclass takes expirationTimestamp into\n account and sets api key refresh callback hook in Configuration object\n \"\"\"\n def __init__(self, *args, **kwargs):\n KubeConfigLoader.__init__(self, *args, **kwargs)\n self.api_key_expire_ts = None\n\n def _load_from_exec_plugin(self):\n \"\"\"\n We override _load_from_exec_plugin method to also read and store\n expiration timestamp for aws-iam-authenticator. It will be later\n used for api token refresh.\n \"\"\"\n if 'exec' not in self._user:\n return None\n try:\n status = ExecProvider(self._user['exec']).run()\n if 'token' not in status:\n logging.error('exec: missing token field in plugin output')\n return None\n self.token = \"Bearer %s\" % status['token'] # pylint: disable=W0201\n ts_str = status.get('expirationTimestamp')\n if ts_str:\n self.api_key_expire_ts = calendar.timegm(\n datetime.strptime(ts_str, \"%Y-%m-%dT%H:%M:%S%z\").timetuple(),\n )\n return True\n except Exception as e: # pylint: disable=W0703\n logging.error(str(e))\n\n def refresh_api_key(self, client_configuration):\n \"\"\"\n Refresh API key if expired\n \"\"\"\n if self.api_key_expire_ts and time.time() >= self.api_key_expire_ts:\n self.load_and_set(client_configuration)\n\n def load_and_set(self, client_configuration):\n KubeConfigLoader.load_and_set(self, client_configuration)\n client_configuration.refresh_api_key = self.refresh_api_key\n\n\nclass RefreshConfiguration(Configuration):\n \"\"\"\n Patched Configuration, this subclass taskes api key refresh callback hook\n into account\n \"\"\"\n def __init__(self, *args, **kwargs):\n Configuration.__init__(self, *args, **kwargs)\n self.refresh_api_key = None\n\n def get_api_key_with_prefix(self, identifier):\n if self.refresh_api_key:\n self.refresh_api_key(self) # pylint: disable=E1102\n return Configuration.get_api_key_with_prefix(self, identifier)\n\n\ndef _get_kube_config_loader_for_yaml_file(filename, **kwargs) -> Optional[RefreshKubeConfigLoader]:\n \"\"\"\n Adapted from the upstream _get_kube_config_loader_for_yaml_file function, changed\n KubeConfigLoader to RefreshKubeConfigLoader\n \"\"\"\n with open(filename) as f:\n return RefreshKubeConfigLoader(\n config_dict=yaml.safe_load(f),\n config_base_path=os.path.abspath(os.path.dirname(filename)),\n **kwargs)\n\n\ndef load_kube_config(client_configuration, config_file=None, context=None):\n \"\"\"\n Adapted from the upstream load_kube_config function, changes:\n - removed persist_config argument since it's not being used\n - remove `client_configuration is None` branch since we always pass\n in client configuration\n \"\"\"\n if config_file is None:\n config_file = os.path.expanduser(KUBE_CONFIG_DEFAULT_LOCATION)\n\n loader = _get_kube_config_loader_for_yaml_file(\n config_file, active_context=context, config_persister=None)\n loader.load_and_set(client_configuration)\n", "path": "airflow/kubernetes/refresh_config.py"}]}
| 2,265 | 359 |
gh_patches_debug_1093
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-755
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle NoneType comparison in _scores_to_ranks
```
TypeError: '<' not supported between instances of 'NoneType' and 'float'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/evaluation/utils.py`
Content:
```
1 from collections import OrderedDict
2 from typing import Tuple, NamedTuple, List, Callable, Iterable, Dict
3
4 from grandchallenge.evaluation.models import Result
5 from grandchallenge.evaluation.templatetags.evaluation_extras import (
6 get_jsonpath
7 )
8
9
10 class Metric(NamedTuple):
11 path: str
12 reverse: bool
13
14
15 class Positions(NamedTuple):
16 ranks: Dict[str, float]
17 rank_scores: Dict[str, float]
18 rank_per_metric: Dict[str, Dict[str, float]]
19
20
21 def rank_results(
22 *,
23 results: Tuple[Result, ...],
24 metrics: Tuple[Metric, ...],
25 score_method: Callable,
26 ) -> Positions:
27 """
28 Calculates the overall rank for each result, along with the rank_score
29 and the rank per metric.
30 """
31
32 results = _filter_valid_results(results=results, metrics=metrics)
33
34 rank_per_metric = _get_rank_per_metric(results=results, metrics=metrics)
35
36 rank_scores = {
37 pk: score_method([m for m in metrics.values()])
38 for pk, metrics in rank_per_metric.items()
39 }
40
41 return Positions(
42 ranks=_scores_to_ranks(scores=rank_scores, reverse=False),
43 rank_scores=rank_scores,
44 rank_per_metric=rank_per_metric,
45 )
46
47
48 def _filter_valid_results(
49 *, results: Iterable[Result], metrics: Tuple[Metric, ...]
50 ) -> List[Result]:
51 """ Ensure that all of the metrics are in every result """
52 return [
53 res
54 for res in results
55 if all(get_jsonpath(res.metrics, m.path) != "" for m in metrics)
56 ]
57
58
59 def _get_rank_per_metric(
60 *, results: Iterable[Result], metrics: Tuple[Metric, ...]
61 ) -> Dict[str, Dict[str, float]]:
62 """
63 Takes results and calculates the rank for each of the individual metrics
64
65 Returns a dictionary where the key is the pk of the result, and the
66 values is another dictionary where the key is the path of the metric and
67 the value is the rank of this result for this metric
68 """
69 metric_rank = {}
70 for metric in metrics:
71 # Extract the value of the metric for this primary key and sort on the
72 # value of the metric
73 metric_scores = {
74 res.pk: get_jsonpath(res.metrics, metric.path) for res in results
75 }
76 metric_rank[metric.path] = _scores_to_ranks(
77 scores=metric_scores, reverse=metric.reverse
78 )
79
80 return {
81 res.pk: {
82 metric_path: ranks[res.pk]
83 for metric_path, ranks in metric_rank.items()
84 }
85 for res in results
86 }
87
88
89 def _scores_to_ranks(
90 *, scores: Dict, reverse: bool = False
91 ) -> Dict[str, float]:
92 """
93 Go from a score (a scalar) to a rank (integer). If two scalars are the
94 same then they will have the same rank.
95
96 Takes a dictionary where the keys are the pk of the results and the values
97 are the scores.
98
99 Outputs a dictionary where they keys are the pk of the results and the
100 values are the ranks.
101 """
102 scores = OrderedDict(
103 sorted(scores.items(), key=lambda t: t[1], reverse=reverse)
104 )
105
106 ranks = {}
107 current_score = current_rank = None
108
109 for idx, (pk, score) in enumerate(scores.items()):
110 if score != current_score:
111 current_score = score
112 current_rank = idx + 1
113
114 ranks[pk] = current_rank
115
116 return ranks
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/evaluation/utils.py b/app/grandchallenge/evaluation/utils.py
--- a/app/grandchallenge/evaluation/utils.py
+++ b/app/grandchallenge/evaluation/utils.py
@@ -52,7 +52,10 @@
return [
res
for res in results
- if all(get_jsonpath(res.metrics, m.path) != "" for m in metrics)
+ if all(
+ get_jsonpath(res.metrics, m.path) not in ["", None]
+ for m in metrics
+ )
]
|
{"golden_diff": "diff --git a/app/grandchallenge/evaluation/utils.py b/app/grandchallenge/evaluation/utils.py\n--- a/app/grandchallenge/evaluation/utils.py\n+++ b/app/grandchallenge/evaluation/utils.py\n@@ -52,7 +52,10 @@\n return [\n res\n for res in results\n- if all(get_jsonpath(res.metrics, m.path) != \"\" for m in metrics)\n+ if all(\n+ get_jsonpath(res.metrics, m.path) not in [\"\", None]\n+ for m in metrics\n+ )\n ]\n", "issue": "Handle NoneType comparison in _scores_to_ranks\n```\r\nTypeError: '<' not supported between instances of 'NoneType' and 'float'\r\n```\n", "before_files": [{"content": "from collections import OrderedDict\nfrom typing import Tuple, NamedTuple, List, Callable, Iterable, Dict\n\nfrom grandchallenge.evaluation.models import Result\nfrom grandchallenge.evaluation.templatetags.evaluation_extras import (\n get_jsonpath\n)\n\n\nclass Metric(NamedTuple):\n path: str\n reverse: bool\n\n\nclass Positions(NamedTuple):\n ranks: Dict[str, float]\n rank_scores: Dict[str, float]\n rank_per_metric: Dict[str, Dict[str, float]]\n\n\ndef rank_results(\n *,\n results: Tuple[Result, ...],\n metrics: Tuple[Metric, ...],\n score_method: Callable,\n) -> Positions:\n \"\"\"\n Calculates the overall rank for each result, along with the rank_score\n and the rank per metric.\n \"\"\"\n\n results = _filter_valid_results(results=results, metrics=metrics)\n\n rank_per_metric = _get_rank_per_metric(results=results, metrics=metrics)\n\n rank_scores = {\n pk: score_method([m for m in metrics.values()])\n for pk, metrics in rank_per_metric.items()\n }\n\n return Positions(\n ranks=_scores_to_ranks(scores=rank_scores, reverse=False),\n rank_scores=rank_scores,\n rank_per_metric=rank_per_metric,\n )\n\n\ndef _filter_valid_results(\n *, results: Iterable[Result], metrics: Tuple[Metric, ...]\n) -> List[Result]:\n \"\"\" Ensure that all of the metrics are in every result \"\"\"\n return [\n res\n for res in results\n if all(get_jsonpath(res.metrics, m.path) != \"\" for m in metrics)\n ]\n\n\ndef _get_rank_per_metric(\n *, results: Iterable[Result], metrics: Tuple[Metric, ...]\n) -> Dict[str, Dict[str, float]]:\n \"\"\"\n Takes results and calculates the rank for each of the individual metrics\n\n Returns a dictionary where the key is the pk of the result, and the\n values is another dictionary where the key is the path of the metric and\n the value is the rank of this result for this metric\n \"\"\"\n metric_rank = {}\n for metric in metrics:\n # Extract the value of the metric for this primary key and sort on the\n # value of the metric\n metric_scores = {\n res.pk: get_jsonpath(res.metrics, metric.path) for res in results\n }\n metric_rank[metric.path] = _scores_to_ranks(\n scores=metric_scores, reverse=metric.reverse\n )\n\n return {\n res.pk: {\n metric_path: ranks[res.pk]\n for metric_path, ranks in metric_rank.items()\n }\n for res in results\n }\n\n\ndef _scores_to_ranks(\n *, scores: Dict, reverse: bool = False\n) -> Dict[str, float]:\n \"\"\"\n Go from a score (a scalar) to a rank (integer). If two scalars are the\n same then they will have the same rank.\n\n Takes a dictionary where the keys are the pk of the results and the values\n are the scores.\n\n Outputs a dictionary where they keys are the pk of the results and the\n values are the ranks.\n \"\"\"\n scores = OrderedDict(\n sorted(scores.items(), key=lambda t: t[1], reverse=reverse)\n )\n\n ranks = {}\n current_score = current_rank = None\n\n for idx, (pk, score) in enumerate(scores.items()):\n if score != current_score:\n current_score = score\n current_rank = idx + 1\n\n ranks[pk] = current_rank\n\n return ranks\n", "path": "app/grandchallenge/evaluation/utils.py"}], "after_files": [{"content": "from collections import OrderedDict\nfrom typing import Tuple, NamedTuple, List, Callable, Iterable, Dict\n\nfrom grandchallenge.evaluation.models import Result\nfrom grandchallenge.evaluation.templatetags.evaluation_extras import (\n get_jsonpath\n)\n\n\nclass Metric(NamedTuple):\n path: str\n reverse: bool\n\n\nclass Positions(NamedTuple):\n ranks: Dict[str, float]\n rank_scores: Dict[str, float]\n rank_per_metric: Dict[str, Dict[str, float]]\n\n\ndef rank_results(\n *,\n results: Tuple[Result, ...],\n metrics: Tuple[Metric, ...],\n score_method: Callable,\n) -> Positions:\n \"\"\"\n Calculates the overall rank for each result, along with the rank_score\n and the rank per metric.\n \"\"\"\n\n results = _filter_valid_results(results=results, metrics=metrics)\n\n rank_per_metric = _get_rank_per_metric(results=results, metrics=metrics)\n\n rank_scores = {\n pk: score_method([m for m in metrics.values()])\n for pk, metrics in rank_per_metric.items()\n }\n\n return Positions(\n ranks=_scores_to_ranks(scores=rank_scores, reverse=False),\n rank_scores=rank_scores,\n rank_per_metric=rank_per_metric,\n )\n\n\ndef _filter_valid_results(\n *, results: Iterable[Result], metrics: Tuple[Metric, ...]\n) -> List[Result]:\n \"\"\" Ensure that all of the metrics are in every result \"\"\"\n return [\n res\n for res in results\n if all(\n get_jsonpath(res.metrics, m.path) not in [\"\", None]\n for m in metrics\n )\n ]\n\n\ndef _get_rank_per_metric(\n *, results: Iterable[Result], metrics: Tuple[Metric, ...]\n) -> Dict[str, Dict[str, float]]:\n \"\"\"\n Takes results and calculates the rank for each of the individual metrics\n\n Returns a dictionary where the key is the pk of the result, and the\n values is another dictionary where the key is the path of the metric and\n the value is the rank of this result for this metric\n \"\"\"\n metric_rank = {}\n for metric in metrics:\n # Extract the value of the metric for this primary key and sort on the\n # value of the metric\n metric_scores = {\n res.pk: get_jsonpath(res.metrics, metric.path) for res in results\n }\n metric_rank[metric.path] = _scores_to_ranks(\n scores=metric_scores, reverse=metric.reverse\n )\n\n return {\n res.pk: {\n metric_path: ranks[res.pk]\n for metric_path, ranks in metric_rank.items()\n }\n for res in results\n }\n\n\ndef _scores_to_ranks(\n *, scores: Dict, reverse: bool = False\n) -> Dict[str, float]:\n \"\"\"\n Go from a score (a scalar) to a rank (integer). If two scalars are the\n same then they will have the same rank.\n\n Takes a dictionary where the keys are the pk of the results and the values\n are the scores.\n\n Outputs a dictionary where they keys are the pk of the results and the\n values are the ranks.\n \"\"\"\n scores = OrderedDict(\n sorted(scores.items(), key=lambda t: t[1], reverse=reverse)\n )\n\n ranks = {}\n current_score = current_rank = None\n\n for idx, (pk, score) in enumerate(scores.items()):\n if score != current_score:\n current_score = score\n current_rank = idx + 1\n\n ranks[pk] = current_rank\n\n return ranks\n", "path": "app/grandchallenge/evaluation/utils.py"}]}
| 1,314 | 123 |
gh_patches_debug_7340
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-808
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Debugger raising IndexError
Microsoft/vscode-python#2499
```
PS C:\Users\Hugh.Groves> cd '\\global.arup.com\americas\Jobs\STJ\230000\230597-00(ACDO)\4-04 Engineering\4-04-02 Concrete\02 Calculations\19 Staged Construction\99-Tools\CreateBatches'; ${env:PYTHONIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFNIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFERED}='1'; ${env:PYTHONPATH}='c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd'; & 'C:\Users\Hugh.Groves\AppDthon.exe' '-m' 'ptvsd' '--host' 'localhoata\Local\Continuum\Anaconda3\envs\p3\python.exe' '-m' 'ptvsd' '--host' 'localhost' '--port' '53026' '\\global.arup.com\americas\Jobs\STJ\230000\230597-00(ACDO)\4-04 Engineering\4-04-02 Concrete\02 Calculations\19 Staged Construction\99-Tools\CreateBatches\CreateBatches_02.py'
Traceback (most recent call last):
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\wrapper.py", line 1254, in done
fut.result()
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\futures.py", line 40, in result
reraise(self._exc_info)
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\reraise3.py", line 8, in reraise
raise exc_info[1].with_traceback(exc_info[2])
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\futures.py", line 155, in callback
x = it.send(fut.result())
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\wrapper.py", line 1605, in on_stackTrace
norm_path = self.path_casing.un_normcase(unquote(str(xframe['file']))) # noqa
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\pathutils.py", line 30, in un_normcase
file_path_to_return = self._get_actual_filename(file_path)
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\pathutils.py", line 60, in _get_actual_filename
fragments += ["{}[{}]".format(d[:-1], d[-1])]
IndexError: string index out of range
Traceback (most recent call last):
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\wrapper.py", line 1254, in donetal\ptvsd\ptvsd\wrapper.py", line 1254, in done
fut.result() tal\ptvsd\ptvsd\futures.py", line 40, in result
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\futures.py", line 40, in result tal\ptvsd\ptvsd\reraise3.py", line 8, in reraise
reraise(self._exc_info)
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\futures.py", line 155, in callbacktal\ptvsd\ptvsd\reraise3.py", line 8, in reraise
raise exc_info[1].with_traceback(exc_info[2])
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\futures.py", line 155, in callback x = it.send(fut.result())
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\wrapper.py", line 1605, in on_stackTrace
norm_path = self.path_casing.un_normcase(unquote(str(xframe['file']))) # noqa File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\pathutils.py", line 30, in un_normcase
file_path_to_return = self._get_actual_filename(file_path)
File "c:\Users\Hugh.Groves\.vscode\extensions\ms-python.python-2018.8.0\pythonFiles\experimental\ptvsd\ptvsd\pathutils.py", line 60, in _get_actual_filename fragments += ["{}[{}]".format(d[:-1], d[-1])]
IndexError: string index out of range
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/pathutils.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from __future__ import print_function, with_statement, absolute_import
6
7 from glob import glob
8 import os.path
9 import platform
10
11
12 MAX_FILES_TO_CACHE = 1000
13
14
15 class PathUnNormcase(object):
16 """Ensures path names of files are returned as they exist on the fs."""
17
18 def __init__(self):
19 self._dict = {}
20 self._enabled = False
21
22 def enable(self):
23 self._enabled = platform.system() == 'Windows'
24
25 def un_normcase(self, file_path):
26 if not self._enabled or len(file_path) == 0:
27 return file_path
28 if file_path in self._dict:
29 return self._dict[file_path]
30 file_path_to_return = self._get_actual_filename(file_path)
31 self.track_file_path_case(file_path_to_return)
32 return file_path_to_return
33
34 def track_file_path_case(self, file_path):
35 if not self._enabled:
36 return
37 if len(self._dict) > MAX_FILES_TO_CACHE:
38 self._dict.clear()
39 self._dict[file_path] = file_path
40
41 def _get_actual_filename(self, name):
42 """
43 Use glob to search for a file by building a regex.
44 Original source from https://stackoverflow.com/a/30374360/4443457
45 (Modified to match file name as well).
46 """
47
48 sep = os.path.sep
49 parts = os.path.normpath(name).split(sep)
50 dirs = parts[0:-1]
51 filename = '{}[{}]'.format(parts[-1][:-1], parts[-1][-1:])
52 path_fragment1 = dirs[0].upper()
53 if dirs[0] == os.path.splitdrive(name)[0]:
54 fragments = [path_fragment1]
55 else:
56 dir_names = os.listdir(os.getcwd())
57 fragments = list(filter(
58 lambda x: x.upper() == path_fragment1, dir_names))
59 for d in dirs[1:]:
60 fragments += ["{}[{}]".format(d[:-1], d[-1])]
61 path = glob(sep.join(fragments))
62 if not path:
63 return name
64 res = glob(sep.join((path[0], filename)))
65 if not res:
66 return name
67 return res[0]
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/pathutils.py b/ptvsd/pathutils.py
--- a/ptvsd/pathutils.py
+++ b/ptvsd/pathutils.py
@@ -56,8 +56,7 @@
dir_names = os.listdir(os.getcwd())
fragments = list(filter(
lambda x: x.upper() == path_fragment1, dir_names))
- for d in dirs[1:]:
- fragments += ["{}[{}]".format(d[:-1], d[-1])]
+ fragments += list(d for d in dirs[1:] if d)
path = glob(sep.join(fragments))
if not path:
return name
|
{"golden_diff": "diff --git a/ptvsd/pathutils.py b/ptvsd/pathutils.py\n--- a/ptvsd/pathutils.py\n+++ b/ptvsd/pathutils.py\n@@ -56,8 +56,7 @@\n dir_names = os.listdir(os.getcwd())\n fragments = list(filter(\n lambda x: x.upper() == path_fragment1, dir_names))\n- for d in dirs[1:]:\n- fragments += [\"{}[{}]\".format(d[:-1], d[-1])]\n+ fragments += list(d for d in dirs[1:] if d)\n path = glob(sep.join(fragments))\n if not path:\n return name\n", "issue": "Debugger raising IndexError\nMicrosoft/vscode-python#2499\r\n\r\n```\r\nPS C:\\Users\\Hugh.Groves> cd '\\\\global.arup.com\\americas\\Jobs\\STJ\\230000\\230597-00(ACDO)\\4-04 Engineering\\4-04-02 Concrete\\02 Calculations\\19 Staged Construction\\99-Tools\\CreateBatches'; ${env:PYTHONIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFNIOENCODING}='UTF-8'; ${env:PYTHONUNBUFFERED}='1'; ${env:PYTHONPATH}='c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd'; & 'C:\\Users\\Hugh.Groves\\AppDthon.exe' '-m' 'ptvsd' '--host' 'localhoata\\Local\\Continuum\\Anaconda3\\envs\\p3\\python.exe' '-m' 'ptvsd' '--host' 'localhost' '--port' '53026' '\\\\global.arup.com\\americas\\Jobs\\STJ\\230000\\230597-00(ACDO)\\4-04 Engineering\\4-04-02 Concrete\\02 Calculations\\19 Staged Construction\\99-Tools\\CreateBatches\\CreateBatches_02.py'\r\nTraceback (most recent call last):\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\wrapper.py\", line 1254, in done\r\n fut.result()\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\futures.py\", line 40, in result\r\n reraise(self._exc_info)\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\reraise3.py\", line 8, in reraise\r\n raise exc_info[1].with_traceback(exc_info[2])\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\futures.py\", line 155, in callback\r\n x = it.send(fut.result())\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\wrapper.py\", line 1605, in on_stackTrace\r\n norm_path = self.path_casing.un_normcase(unquote(str(xframe['file']))) # noqa\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\pathutils.py\", line 30, in un_normcase\r\n file_path_to_return = self._get_actual_filename(file_path)\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\pathutils.py\", line 60, in _get_actual_filename\r\n fragments += [\"{}[{}]\".format(d[:-1], d[-1])]\r\nIndexError: string index out of range\r\nTraceback (most recent call last):\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\wrapper.py\", line 1254, in donetal\\ptvsd\\ptvsd\\wrapper.py\", line 1254, in done\r\n fut.result() tal\\ptvsd\\ptvsd\\futures.py\", line 40, in result\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\futures.py\", line 40, in result tal\\ptvsd\\ptvsd\\reraise3.py\", line 8, in reraise\r\n reraise(self._exc_info)\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\futures.py\", line 155, in callbacktal\\ptvsd\\ptvsd\\reraise3.py\", line 8, in reraise\r\n raise exc_info[1].with_traceback(exc_info[2])\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\futures.py\", line 155, in callback x = it.send(fut.result())\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\wrapper.py\", line 1605, in on_stackTrace\r\n norm_path = self.path_casing.un_normcase(unquote(str(xframe['file']))) # noqa File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\pathutils.py\", line 30, in un_normcase\r\n file_path_to_return = self._get_actual_filename(file_path)\r\n File \"c:\\Users\\Hugh.Groves\\.vscode\\extensions\\ms-python.python-2018.8.0\\pythonFiles\\experimental\\ptvsd\\ptvsd\\pathutils.py\", line 60, in _get_actual_filename fragments += [\"{}[{}]\".format(d[:-1], d[-1])]\r\nIndexError: string index out of range\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nfrom glob import glob\nimport os.path\nimport platform\n\n\nMAX_FILES_TO_CACHE = 1000\n\n\nclass PathUnNormcase(object):\n \"\"\"Ensures path names of files are returned as they exist on the fs.\"\"\"\n\n def __init__(self):\n self._dict = {}\n self._enabled = False\n\n def enable(self):\n self._enabled = platform.system() == 'Windows'\n\n def un_normcase(self, file_path):\n if not self._enabled or len(file_path) == 0:\n return file_path\n if file_path in self._dict:\n return self._dict[file_path]\n file_path_to_return = self._get_actual_filename(file_path)\n self.track_file_path_case(file_path_to_return)\n return file_path_to_return\n\n def track_file_path_case(self, file_path):\n if not self._enabled:\n return\n if len(self._dict) > MAX_FILES_TO_CACHE:\n self._dict.clear()\n self._dict[file_path] = file_path\n\n def _get_actual_filename(self, name):\n \"\"\"\n Use glob to search for a file by building a regex.\n Original source from https://stackoverflow.com/a/30374360/4443457\n (Modified to match file name as well).\n \"\"\"\n\n sep = os.path.sep\n parts = os.path.normpath(name).split(sep)\n dirs = parts[0:-1]\n filename = '{}[{}]'.format(parts[-1][:-1], parts[-1][-1:])\n path_fragment1 = dirs[0].upper()\n if dirs[0] == os.path.splitdrive(name)[0]:\n fragments = [path_fragment1]\n else:\n dir_names = os.listdir(os.getcwd())\n fragments = list(filter(\n lambda x: x.upper() == path_fragment1, dir_names))\n for d in dirs[1:]:\n fragments += [\"{}[{}]\".format(d[:-1], d[-1])]\n path = glob(sep.join(fragments))\n if not path:\n return name\n res = glob(sep.join((path[0], filename)))\n if not res:\n return name\n return res[0]\n", "path": "ptvsd/pathutils.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nfrom glob import glob\nimport os.path\nimport platform\n\n\nMAX_FILES_TO_CACHE = 1000\n\n\nclass PathUnNormcase(object):\n \"\"\"Ensures path names of files are returned as they exist on the fs.\"\"\"\n\n def __init__(self):\n self._dict = {}\n self._enabled = False\n\n def enable(self):\n self._enabled = platform.system() == 'Windows'\n\n def un_normcase(self, file_path):\n if not self._enabled or len(file_path) == 0:\n return file_path\n if file_path in self._dict:\n return self._dict[file_path]\n file_path_to_return = self._get_actual_filename(file_path)\n self.track_file_path_case(file_path_to_return)\n return file_path_to_return\n\n def track_file_path_case(self, file_path):\n if not self._enabled:\n return\n if len(self._dict) > MAX_FILES_TO_CACHE:\n self._dict.clear()\n self._dict[file_path] = file_path\n\n def _get_actual_filename(self, name):\n \"\"\"\n Use glob to search for a file by building a regex.\n Original source from https://stackoverflow.com/a/30374360/4443457\n (Modified to match file name as well).\n \"\"\"\n\n sep = os.path.sep\n parts = os.path.normpath(name).split(sep)\n dirs = parts[0:-1]\n filename = '{}[{}]'.format(parts[-1][:-1], parts[-1][-1:])\n path_fragment1 = dirs[0].upper()\n if dirs[0] == os.path.splitdrive(name)[0]:\n fragments = [path_fragment1]\n else:\n dir_names = os.listdir(os.getcwd())\n fragments = list(filter(\n lambda x: x.upper() == path_fragment1, dir_names))\n fragments += list(d for d in dirs[1:] if d)\n path = glob(sep.join(fragments))\n if not path:\n return name\n res = glob(sep.join((path[0], filename)))\n if not res:\n return name\n return res[0]\n", "path": "ptvsd/pathutils.py"}]}
| 2,344 | 144 |
gh_patches_debug_6584
|
rasdani/github-patches
|
git_diff
|
spack__spack-4017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
spack install broken on python3
Using 1e18ace6472c94893cc567ca757032269e02460f and python3 I still get:
```
$ spack install pax-utils
==> Installing pax-utils
==> Fetching https://dev.gentoo.org/~vapier/dist/pax-utils-1.2.2.tar.xz
######################################################################## 100.0%
==> Staging archive: /Users/junghans/spack/var/spack/stage/pax-utils-1.2.2-xiaxjymvkqyzqrlkwwxr33nggcitgidv/pax-utils-1.2.2.tar.xz
==> Created stage in /Users/junghans/spack/var/spack/stage/pax-utils-1.2.2-xiaxjymvkqyzqrlkwwxr33nggcitgidv
==> Ran patch() for pax-utils
==> Building pax-utils [AutotoolsPackage]
==> Executing phase : 'autoreconf'
==> Executing phase : 'configure'
==> Executing phase : 'build'
==> Executing phase : 'install'
==> Error: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf8 in position 40: invalid start byte
/Users/junghans/spack/lib/spack/spack/package.py:1291, in build_process:
1219 def build_process(input_stream):
1220 """Forked for each build. Has its own process and python
1221 module space set up by build_environment.fork()."""
1222
1223 # We are in the child process. This means that our sys.stdin is
1224 # equal to open(os.devnull). Python did this to prevent our process
1225 # and the parent process from possible simultaneous reading from
1226 # the original standard input. But we assume that the parent
1227 # process is not going to read from it till we are done here,
1228 # otherwise it should not have passed us the copy of the stream.
1229 # Thus, we are free to work with the the copy (input_stream)
1230 # however we want. For example, we might want to call functions
1231 # (e.g. input()) that implicitly read from whatever stream is
1232 # assigned to sys.stdin. Since we want them to work with the
1233 # original input stream, we are making the following assignment:
1234 sys.stdin = input_stream
1235
1236 start_time = time.time()
1237 if not fake:
1238 if not skip_patch:
1239 self.do_patch()
1240 else:
1241 self.do_stage()
1242
1243 tty.msg(
1244 'Building {0} [{1}]'.format(self.name, self.build_system_class)
1245 )
1246
1247 self.stage.keep = keep_stage
1248 with self._stage_and_write_lock():
1249 # Run the pre-install hook in the child process after
1250 # the directory is created.
1251 spack.hooks.pre_install(self.spec)
1252 if fake:
1253 self.do_fake_install()
1254 else:
1255 # Do the real install in the source directory.
1256 self.stage.chdir_to_source()
1257
1258 # Save the build environment in a file before building.
1259 env_path = join_path(os.getcwd(), 'spack-build.env')
1260
1261 # Redirect I/O to a build log (and optionally to
1262 # the terminal)
1263 log_path = join_path(os.getcwd(), 'spack-build.out')
1264
1265 # FIXME : refactor this assignment
1266 self.log_path = log_path
1267 self.env_path = env_path
1268 dump_environment(env_path)
1269
1270 # Spawn a daemon that reads from a pipe and redirects
1271 # everything to log_path
1272 redirection_context = log_output(
1273 log_path,
1274 echo=verbose,
1275 force_color=sys.stdout.isatty(),
1276 debug=True,
1277 input_stream=input_stream
1278 )
1279 with redirection_context as log_redirection:
1280 for phase_name, phase in zip(
1281 self.phases, self._InstallPhase_phases):
1282 tty.msg(
1283 'Executing phase : \'{0}\''.format(phase_name)
1284 )
1285 # Redirect stdout and stderr to daemon pipe
1286 with log_redirection:
1287 getattr(self, phase)(
1288 self.spec, self.prefix)
1289 self.log()
1290 # Run post install hooks before build stage is removed.
>> 1291 spack.hooks.post_install(self.spec)
1292
1293 # Stop timer.
1294 self._total_time = time.time() - start_time
1295 build_time = self._total_time - self._fetch_time
1296
1297 tty.msg("Successfully installed %s" % self.name,
1298 "Fetch: %s. Build: %s. Total: %s." %
1299 (_hms(self._fetch_time), _hms(build_time),
1300 _hms(self._total_time)))
1301 print_pkg(self.prefix)
See build log for details:
/tmp/junghans/spack-stage/spack-stage-vsdhukt3/pax-utils-1.2.2/spack-build.out
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/hooks/sbang.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 import os
26 import stat
27 import re
28
29 import llnl.util.tty as tty
30
31 import spack
32 import spack.modules
33
34 # Character limit for shebang line. Using Linux's 127 characters
35 # here, as it is the shortest I could find on a modern OS.
36 shebang_limit = 127
37
38
39 def shebang_too_long(path):
40 """Detects whether a file has a shebang line that is too long."""
41 with open(path, 'r') as script:
42 bytes = script.read(2)
43 if bytes != '#!':
44 return False
45
46 line = bytes + script.readline()
47 return len(line) > shebang_limit
48
49
50 def filter_shebang(path):
51 """Adds a second shebang line, using sbang, at the beginning of a file."""
52 with open(path, 'r') as original_file:
53 original = original_file.read()
54
55 # This line will be prepended to file
56 new_sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
57
58 # Skip files that are already using sbang.
59 if original.startswith(new_sbang_line):
60 return
61
62 # Use --! instead of #! on second line for lua.
63 if re.search(r'^#!(/[^/]*)*lua\b', original):
64 original = re.sub(r'^#', '--', original)
65
66 # Change non-writable files to be writable if needed.
67 saved_mode = None
68 if not os.access(path, os.W_OK):
69 st = os.stat(path)
70 saved_mode = st.st_mode
71 os.chmod(path, saved_mode | stat.S_IWRITE)
72
73 with open(path, 'w') as new_file:
74 new_file.write(new_sbang_line)
75 new_file.write(original)
76
77 # Restore original permissions.
78 if saved_mode is not None:
79 os.chmod(path, saved_mode)
80
81 tty.warn("Patched overlong shebang in %s" % path)
82
83
84 def filter_shebangs_in_directory(directory, filenames=None):
85 if filenames is None:
86 filenames = os.listdir(directory)
87 for file in filenames:
88 path = os.path.join(directory, file)
89
90 # only handle files
91 if not os.path.isfile(path):
92 continue
93
94 # only handle links that resolve within THIS package's prefix.
95 if os.path.islink(path):
96 real_path = os.path.realpath(path)
97 if not real_path.startswith(directory + os.sep):
98 continue
99
100 # test the file for a long shebang, and filter
101 if shebang_too_long(path):
102 filter_shebang(path)
103
104
105 def post_install(spec):
106 """This hook edits scripts so that they call /bin/bash
107 $spack_prefix/bin/sbang instead of something longer than the
108 shebang limit.
109 """
110 if spec.external:
111 tty.debug('SKIP: shebang filtering [external package]')
112 return
113
114 for directory, _, filenames in os.walk(spec.prefix):
115 filter_shebangs_in_directory(directory, filenames)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py
--- a/lib/spack/spack/hooks/sbang.py
+++ b/lib/spack/spack/hooks/sbang.py
@@ -38,9 +38,12 @@
def shebang_too_long(path):
"""Detects whether a file has a shebang line that is too long."""
- with open(path, 'r') as script:
+ if not os.path.isfile(path):
+ return False
+
+ with open(path, 'rb') as script:
bytes = script.read(2)
- if bytes != '#!':
+ if bytes != b'#!':
return False
line = bytes + script.readline()
|
{"golden_diff": "diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py\n--- a/lib/spack/spack/hooks/sbang.py\n+++ b/lib/spack/spack/hooks/sbang.py\n@@ -38,9 +38,12 @@\n \n def shebang_too_long(path):\n \"\"\"Detects whether a file has a shebang line that is too long.\"\"\"\n- with open(path, 'r') as script:\n+ if not os.path.isfile(path):\n+ return False\n+\n+ with open(path, 'rb') as script:\n bytes = script.read(2)\n- if bytes != '#!':\n+ if bytes != b'#!':\n return False\n \n line = bytes + script.readline()\n", "issue": "spack install broken on python3\nUsing 1e18ace6472c94893cc567ca757032269e02460f and python3 I still get:\r\n```\r\n$ spack install pax-utils\r\n==> Installing pax-utils\r\n==> Fetching https://dev.gentoo.org/~vapier/dist/pax-utils-1.2.2.tar.xz\r\n######################################################################## 100.0%\r\n==> Staging archive: /Users/junghans/spack/var/spack/stage/pax-utils-1.2.2-xiaxjymvkqyzqrlkwwxr33nggcitgidv/pax-utils-1.2.2.tar.xz\r\n==> Created stage in /Users/junghans/spack/var/spack/stage/pax-utils-1.2.2-xiaxjymvkqyzqrlkwwxr33nggcitgidv\r\n==> Ran patch() for pax-utils\r\n==> Building pax-utils [AutotoolsPackage]\r\n==> Executing phase : 'autoreconf'\r\n==> Executing phase : 'configure'\r\n==> Executing phase : 'build'\r\n==> Executing phase : 'install'\r\n==> Error: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf8 in position 40: invalid start byte\r\n/Users/junghans/spack/lib/spack/spack/package.py:1291, in build_process:\r\n 1219 def build_process(input_stream):\r\n 1220 \"\"\"Forked for each build. Has its own process and python\r\n 1221 module space set up by build_environment.fork().\"\"\"\r\n 1222\r\n 1223 # We are in the child process. This means that our sys.stdin is\r\n 1224 # equal to open(os.devnull). Python did this to prevent our process\r\n 1225 # and the parent process from possible simultaneous reading from\r\n 1226 # the original standard input. But we assume that the parent\r\n 1227 # process is not going to read from it till we are done here,\r\n 1228 # otherwise it should not have passed us the copy of the stream.\r\n 1229 # Thus, we are free to work with the the copy (input_stream)\r\n 1230 # however we want. For example, we might want to call functions\r\n 1231 # (e.g. input()) that implicitly read from whatever stream is\r\n 1232 # assigned to sys.stdin. Since we want them to work with the\r\n 1233 # original input stream, we are making the following assignment:\r\n 1234 sys.stdin = input_stream\r\n 1235\r\n 1236 start_time = time.time()\r\n 1237 if not fake:\r\n 1238 if not skip_patch:\r\n 1239 self.do_patch()\r\n 1240 else:\r\n 1241 self.do_stage()\r\n 1242\r\n 1243 tty.msg(\r\n 1244 'Building {0} [{1}]'.format(self.name, self.build_system_class)\r\n 1245 )\r\n 1246\r\n 1247 self.stage.keep = keep_stage\r\n 1248 with self._stage_and_write_lock():\r\n 1249 # Run the pre-install hook in the child process after\r\n 1250 # the directory is created.\r\n 1251 spack.hooks.pre_install(self.spec)\r\n 1252 if fake:\r\n 1253 self.do_fake_install()\r\n 1254 else:\r\n 1255 # Do the real install in the source directory.\r\n 1256 self.stage.chdir_to_source()\r\n 1257\r\n 1258 # Save the build environment in a file before building.\r\n 1259 env_path = join_path(os.getcwd(), 'spack-build.env')\r\n 1260\r\n 1261 # Redirect I/O to a build log (and optionally to\r\n 1262 # the terminal)\r\n 1263 log_path = join_path(os.getcwd(), 'spack-build.out')\r\n 1264\r\n 1265 # FIXME : refactor this assignment\r\n 1266 self.log_path = log_path\r\n 1267 self.env_path = env_path\r\n 1268 dump_environment(env_path)\r\n 1269\r\n 1270 # Spawn a daemon that reads from a pipe and redirects\r\n 1271 # everything to log_path\r\n 1272 redirection_context = log_output(\r\n 1273 log_path,\r\n 1274 echo=verbose,\r\n 1275 force_color=sys.stdout.isatty(),\r\n 1276 debug=True,\r\n 1277 input_stream=input_stream\r\n 1278 )\r\n 1279 with redirection_context as log_redirection:\r\n 1280 for phase_name, phase in zip(\r\n 1281 self.phases, self._InstallPhase_phases):\r\n 1282 tty.msg(\r\n 1283 'Executing phase : \\'{0}\\''.format(phase_name)\r\n 1284 )\r\n 1285 # Redirect stdout and stderr to daemon pipe\r\n 1286 with log_redirection:\r\n 1287 getattr(self, phase)(\r\n 1288 self.spec, self.prefix)\r\n 1289 self.log()\r\n 1290 # Run post install hooks before build stage is removed.\r\n >> 1291 spack.hooks.post_install(self.spec)\r\n 1292\r\n 1293 # Stop timer.\r\n 1294 self._total_time = time.time() - start_time\r\n 1295 build_time = self._total_time - self._fetch_time\r\n 1296\r\n 1297 tty.msg(\"Successfully installed %s\" % self.name,\r\n 1298 \"Fetch: %s. Build: %s. Total: %s.\" %\r\n 1299 (_hms(self._fetch_time), _hms(build_time),\r\n 1300 _hms(self._total_time)))\r\n 1301 print_pkg(self.prefix)\r\n\r\nSee build log for details:\r\n /tmp/junghans/spack-stage/spack-stage-vsdhukt3/pax-utils-1.2.2/spack-build.out\r\n```\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nimport os\nimport stat\nimport re\n\nimport llnl.util.tty as tty\n\nimport spack\nimport spack.modules\n\n# Character limit for shebang line. Using Linux's 127 characters\n# here, as it is the shortest I could find on a modern OS.\nshebang_limit = 127\n\n\ndef shebang_too_long(path):\n \"\"\"Detects whether a file has a shebang line that is too long.\"\"\"\n with open(path, 'r') as script:\n bytes = script.read(2)\n if bytes != '#!':\n return False\n\n line = bytes + script.readline()\n return len(line) > shebang_limit\n\n\ndef filter_shebang(path):\n \"\"\"Adds a second shebang line, using sbang, at the beginning of a file.\"\"\"\n with open(path, 'r') as original_file:\n original = original_file.read()\n\n # This line will be prepended to file\n new_sbang_line = '#!/bin/bash %s/bin/sbang\\n' % spack.spack_root\n\n # Skip files that are already using sbang.\n if original.startswith(new_sbang_line):\n return\n\n # Use --! instead of #! on second line for lua.\n if re.search(r'^#!(/[^/]*)*lua\\b', original):\n original = re.sub(r'^#', '--', original)\n\n # Change non-writable files to be writable if needed.\n saved_mode = None\n if not os.access(path, os.W_OK):\n st = os.stat(path)\n saved_mode = st.st_mode\n os.chmod(path, saved_mode | stat.S_IWRITE)\n\n with open(path, 'w') as new_file:\n new_file.write(new_sbang_line)\n new_file.write(original)\n\n # Restore original permissions.\n if saved_mode is not None:\n os.chmod(path, saved_mode)\n\n tty.warn(\"Patched overlong shebang in %s\" % path)\n\n\ndef filter_shebangs_in_directory(directory, filenames=None):\n if filenames is None:\n filenames = os.listdir(directory)\n for file in filenames:\n path = os.path.join(directory, file)\n\n # only handle files\n if not os.path.isfile(path):\n continue\n\n # only handle links that resolve within THIS package's prefix.\n if os.path.islink(path):\n real_path = os.path.realpath(path)\n if not real_path.startswith(directory + os.sep):\n continue\n\n # test the file for a long shebang, and filter\n if shebang_too_long(path):\n filter_shebang(path)\n\n\ndef post_install(spec):\n \"\"\"This hook edits scripts so that they call /bin/bash\n $spack_prefix/bin/sbang instead of something longer than the\n shebang limit.\n \"\"\"\n if spec.external:\n tty.debug('SKIP: shebang filtering [external package]')\n return\n\n for directory, _, filenames in os.walk(spec.prefix):\n filter_shebangs_in_directory(directory, filenames)\n", "path": "lib/spack/spack/hooks/sbang.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nimport os\nimport stat\nimport re\n\nimport llnl.util.tty as tty\n\nimport spack\nimport spack.modules\n\n# Character limit for shebang line. Using Linux's 127 characters\n# here, as it is the shortest I could find on a modern OS.\nshebang_limit = 127\n\n\ndef shebang_too_long(path):\n \"\"\"Detects whether a file has a shebang line that is too long.\"\"\"\n if not os.path.isfile(path):\n return False\n\n with open(path, 'rb') as script:\n bytes = script.read(2)\n if bytes != b'#!':\n return False\n\n line = bytes + script.readline()\n return len(line) > shebang_limit\n\n\ndef filter_shebang(path):\n \"\"\"Adds a second shebang line, using sbang, at the beginning of a file.\"\"\"\n with open(path, 'r') as original_file:\n original = original_file.read()\n\n # This line will be prepended to file\n new_sbang_line = '#!/bin/bash %s/bin/sbang\\n' % spack.spack_root\n\n # Skip files that are already using sbang.\n if original.startswith(new_sbang_line):\n return\n\n # Use --! instead of #! on second line for lua.\n if re.search(r'^#!(/[^/]*)*lua\\b', original):\n original = re.sub(r'^#', '--', original)\n\n # Change non-writable files to be writable if needed.\n saved_mode = None\n if not os.access(path, os.W_OK):\n st = os.stat(path)\n saved_mode = st.st_mode\n os.chmod(path, saved_mode | stat.S_IWRITE)\n\n with open(path, 'w') as new_file:\n new_file.write(new_sbang_line)\n new_file.write(original)\n\n # Restore original permissions.\n if saved_mode is not None:\n os.chmod(path, saved_mode)\n\n tty.warn(\"Patched overlong shebang in %s\" % path)\n\n\ndef filter_shebangs_in_directory(directory, filenames=None):\n if filenames is None:\n filenames = os.listdir(directory)\n for file in filenames:\n path = os.path.join(directory, file)\n\n # only handle files\n if not os.path.isfile(path):\n continue\n\n # only handle links that resolve within THIS package's prefix.\n if os.path.islink(path):\n real_path = os.path.realpath(path)\n if not real_path.startswith(directory + os.sep):\n continue\n\n # test the file for a long shebang, and filter\n if shebang_too_long(path):\n filter_shebang(path)\n\n\ndef post_install(spec):\n \"\"\"This hook edits scripts so that they call /bin/bash\n $spack_prefix/bin/sbang instead of something longer than the\n shebang limit.\n \"\"\"\n if spec.external:\n tty.debug('SKIP: shebang filtering [external package]')\n return\n\n for directory, _, filenames in os.walk(spec.prefix):\n filter_shebangs_in_directory(directory, filenames)\n", "path": "lib/spack/spack/hooks/sbang.py"}]}
| 3,013 | 163 |
gh_patches_debug_7745
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1302
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TVCatchup plugin is not working - "This service is ending soon"
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
TVCatchup plugin is not working for some time. The problem is that plugin is able to connect to a stream without any errors but the stream is different comparing to the TVCatchup website's stream. It looks like streamlink gets a different type of stream deliberately prepared by the service provider to send the message: "This service is ending soon. Please download TVCatchup from the app store". Assuming that there is a real stream available on the website and mobile app, is it still possible to open it by streamlink?
Current stream for all of the channels:

Thanks
### Reproduction steps / Explicit stream URLs to test
streamlink http://tvcatchup.com/watch/bbctwo best
### Environment details
Operating system and version: Windows/Linux
Streamlink and Python version: Streamlink 0.8.1
[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/tvcatchup.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugin.api import http
5 from streamlink.stream import HLSStream
6
7 USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
8 _url_re = re.compile(r"http://(?:www\.)?tvcatchup.com/watch/\w+")
9 _stream_re = re.compile(r'''(?P<q>["'])(?P<stream_url>https?://.*m3u8\?.*clientKey=.*?)(?P=q)''')
10
11
12 class TVCatchup(Plugin):
13 @classmethod
14 def can_handle_url(cls, url):
15 return _url_re.match(url)
16
17 def _get_streams(self):
18 """
19 Finds the streams from tvcatchup.com.
20 """
21 http.headers.update({"User-Agent": USER_AGENT})
22 res = http.get(self.url)
23
24 match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)
25
26 if match:
27 stream_url = match.group("stream_url")
28
29 if stream_url:
30 if "_adp" in stream_url:
31 return HLSStream.parse_variant_playlist(self.session, stream_url)
32 else:
33 return {'576p': HLSStream(self.session, stream_url)}
34
35
36 __plugin__ = TVCatchup
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/tvcatchup.py b/src/streamlink/plugins/tvcatchup.py
--- a/src/streamlink/plugins/tvcatchup.py
+++ b/src/streamlink/plugins/tvcatchup.py
@@ -6,7 +6,7 @@
USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
_url_re = re.compile(r"http://(?:www\.)?tvcatchup.com/watch/\w+")
-_stream_re = re.compile(r'''(?P<q>["'])(?P<stream_url>https?://.*m3u8\?.*clientKey=.*?)(?P=q)''')
+_stream_re = re.compile(r'''source.*?(?P<q>["'])(?P<stream_url>https?://.*m3u8\?.*clientKey=.*?)(?P=q)''')
class TVCatchup(Plugin):
|
{"golden_diff": "diff --git a/src/streamlink/plugins/tvcatchup.py b/src/streamlink/plugins/tvcatchup.py\n--- a/src/streamlink/plugins/tvcatchup.py\n+++ b/src/streamlink/plugins/tvcatchup.py\n@@ -6,7 +6,7 @@\n \n USER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n _url_re = re.compile(r\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n-_stream_re = re.compile(r'''(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n+_stream_re = re.compile(r'''source.*?(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n \n \n class TVCatchup(Plugin):\n", "issue": "TVCatchup plugin is not working - \"This service is ending soon\"\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [x] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nTVCatchup plugin is not working for some time. The problem is that plugin is able to connect to a stream without any errors but the stream is different comparing to the TVCatchup website's stream. It looks like streamlink gets a different type of stream deliberately prepared by the service provider to send the message: \"This service is ending soon. Please download TVCatchup from the app store\". Assuming that there is a real stream available on the website and mobile app, is it still possible to open it by streamlink?\r\n\r\nCurrent stream for all of the channels:\r\n\r\n\r\n\r\nThanks\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\nstreamlink http://tvcatchup.com/watch/bbctwo best\r\n\r\n### Environment details\r\n\r\nOperating system and version: Windows/Linux\r\nStreamlink and Python version: Streamlink 0.8.1\r\n\r\n[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n_url_re = re.compile(r\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n_stream_re = re.compile(r'''(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n\n\nclass TVCatchup(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n \"\"\"\n Finds the streams from tvcatchup.com.\n \"\"\"\n http.headers.update({\"User-Agent\": USER_AGENT})\n res = http.get(self.url)\n\n match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)\n\n if match:\n stream_url = match.group(\"stream_url\")\n\n if stream_url:\n if \"_adp\" in stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n return {'576p': HLSStream(self.session, stream_url)}\n\n\n__plugin__ = TVCatchup\n", "path": "src/streamlink/plugins/tvcatchup.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nUSER_AGENT = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n_url_re = re.compile(r\"http://(?:www\\.)?tvcatchup.com/watch/\\w+\")\n_stream_re = re.compile(r'''source.*?(?P<q>[\"'])(?P<stream_url>https?://.*m3u8\\?.*clientKey=.*?)(?P=q)''')\n\n\nclass TVCatchup(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n \"\"\"\n Finds the streams from tvcatchup.com.\n \"\"\"\n http.headers.update({\"User-Agent\": USER_AGENT})\n res = http.get(self.url)\n\n match = _stream_re.search(res.text, re.IGNORECASE | re.MULTILINE)\n\n if match:\n stream_url = match.group(\"stream_url\")\n\n if stream_url:\n if \"_adp\" in stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n return {'576p': HLSStream(self.session, stream_url)}\n\n\n__plugin__ = TVCatchup\n", "path": "src/streamlink/plugins/tvcatchup.py"}]}
| 966 | 236 |
gh_patches_debug_19747
|
rasdani/github-patches
|
git_diff
|
ddionrails__ddionrails-715
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make foreign key to study in publications.Publication required
### Subject of the issue
The save() method of publications.Publication requires a related study. But the existence of a related study is not enforced.
Remove "null=True" and "blank=True".
https://github.com/ddionrails/ddionrails/blob/191e0f0fa95c81e58783a1b1317e0c705a19e51a/ddionrails/publications/models.py#L50-L57
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddionrails/publications/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """ Model definitions for ddionrails.publications app """
3
4 import uuid
5 from typing import Dict
6
7 from django.db import models
8 from django.urls import reverse
9
10 from config.helpers import render_markdown
11 from ddionrails.base.mixins import ModelMixin as DorMixin
12 from ddionrails.data.models import Dataset, Variable
13 from ddionrails.elastic.mixins import ModelMixin as ElasticMixin
14 from ddionrails.instruments.models import Instrument, Question
15 from ddionrails.studies.models import Study
16
17
18 class Publication(ElasticMixin, DorMixin, models.Model):
19 """
20 Stores a single publication, related to :model:`studies.Study`.
21 """
22
23 ##############
24 # attributes #
25 ##############
26 id = models.UUIDField( # pylint: disable=C0103
27 primary_key=True,
28 default=uuid.uuid4,
29 editable=True,
30 db_index=True,
31 help_text="UUID of the publication. Dependent on the associated study.",
32 )
33
34 name = models.CharField(
35 max_length=255, db_index=True, help_text="Name of the publication"
36 )
37 sub_type = models.CharField(
38 max_length=255,
39 blank=True,
40 help_text="Type of the publication (e.g., journal article or dissertation)",
41 )
42 title = models.TextField(blank=True, help_text="Title of the publication")
43 author = models.TextField(blank=True, help_text="Name(s) of the author(s)")
44 year = models.TextField(blank=True, help_text="Year of publication")
45 abstract = models.TextField(blank=True, help_text="Abstract of the publication")
46 cite = models.TextField(blank=True, help_text="Suggested citation of the publication")
47 url = models.TextField(
48 blank=True, verbose_name="URL", help_text="URL referencing the publication"
49 )
50 doi = models.TextField(
51 blank=True,
52 verbose_name="DOI",
53 help_text="DOI of the publication (DOI only, not the URL to the DOI)",
54 )
55 studies = models.TextField(
56 blank=True,
57 help_text="Description of studies/data sources used in the publication",
58 )
59
60 #############
61 # relations #
62 #############
63 study = models.ForeignKey(
64 Study,
65 blank=True,
66 null=True,
67 related_name="publications",
68 on_delete=models.CASCADE,
69 help_text="Foreign key to studies.Study",
70 )
71
72 # Used by ElasticMixin when indexed into Elasticsearch
73 DOC_TYPE = "publication"
74
75 def save(
76 self, force_insert=False, force_update=False, using=None, update_fields=None
77 ):
78 """"Set id and call parents save(). """
79 self.id = uuid.uuid5(self.study_id, self.name) # pylint: disable=C0103
80 super().save(
81 force_insert=force_insert,
82 force_update=force_update,
83 using=using,
84 update_fields=update_fields,
85 )
86
87 class Meta: # pylint: disable=missing-docstring,too-few-public-methods
88 unique_together = ("study", "name")
89
90 def __str__(self) -> str:
91 """ Returns a string representation using the "study" and "name" fields """
92 return f"{self.study}/publ/{self.name}"
93
94 def get_absolute_url(self) -> str:
95 """ Returns a canonical URL for the model using the "study" and "name" fields """
96 return reverse(
97 "publ:publication",
98 kwargs={"study_name": self.study.name, "publication_name": self.name},
99 )
100
101 def html_abstract(self) -> str:
102 """ Returns the "abstract" field as a string containing HTML markup """
103 return render_markdown(self.abstract)
104
105 def html_cite(self) -> str:
106 """ Returns the "cite" field as a string containing HTML markup """
107 return render_markdown(self.cite)
108
109 def to_elastic_dict(self) -> Dict[str, str]:
110 """ Returns a dictionary to be indexed by Elasticsearch """
111 try:
112 study_name = self.study.name
113 except AttributeError:
114 study_name = ""
115 return dict(
116 name=self.name,
117 sub_type=self.sub_type,
118 title=self.title,
119 author=self.author,
120 year=self.year,
121 period=self.year,
122 abstract=self.abstract,
123 cite=self.cite,
124 url=self.url,
125 doi=self.doi,
126 study=study_name,
127 )
128
129
130 class Attachment(models.Model):
131 """
132 Stores a single attachment, related to:
133 :model:`studies.Study`,
134 :model:`data.Dataset`,
135 :model:`data.Variable`,
136 :model:`instruments.Instrument` and
137 :model:`instruments.Question`
138
139 """
140
141 ##############
142 # attributes #
143 ##############
144 url = models.TextField(
145 blank=True, verbose_name="URL", help_text="Link (URL) to the attachment"
146 )
147 url_text = models.TextField(
148 blank=True, verbose_name="URL text", help_text="Text to be displayed for the link"
149 )
150
151 #############
152 # relations #
153 #############
154 context_study = models.ForeignKey(
155 Study,
156 related_name="related_attachments",
157 on_delete=models.CASCADE,
158 help_text="Foreign key to studies.Study",
159 )
160 study = models.ForeignKey(
161 Study,
162 blank=True,
163 null=True,
164 related_name="attachments",
165 on_delete=models.CASCADE,
166 help_text="Foreign key to studies.Study",
167 )
168 dataset = models.ForeignKey(
169 Dataset,
170 blank=True,
171 null=True,
172 related_name="attachments",
173 on_delete=models.CASCADE,
174 help_text="Foreign key to data.Dataset",
175 )
176 variable = models.ForeignKey(
177 Variable,
178 blank=True,
179 null=True,
180 related_name="attachments",
181 on_delete=models.CASCADE,
182 help_text="Foreign key to data.Variable",
183 )
184 instrument = models.ForeignKey(
185 Instrument,
186 blank=True,
187 null=True,
188 related_name="attachments",
189 on_delete=models.CASCADE,
190 help_text="Foreign key to instruments.Instrument",
191 )
192 question = models.ForeignKey(
193 Question,
194 blank=True,
195 null=True,
196 related_name="attachments",
197 on_delete=models.CASCADE,
198 help_text="Foreign key to instruments.Question",
199 )
200
```
Path: `ddionrails/publications/migrations/0001_initial.py`
Content:
```
1 # Generated by Django 2.2.2 on 2019-06-27 11:16
2
3 import uuid
4
5 import django.db.models.deletion
6 from django.db import migrations, models
7
8 import ddionrails.base.mixins
9 import ddionrails.elastic.mixins
10
11
12 class Migration(migrations.Migration):
13
14 initial = True
15
16 dependencies = [
17 ("instruments", "__first__"),
18 ("data", "0001_initial"),
19 ("studies", "0001_initial"),
20 ]
21
22 operations = [
23 migrations.CreateModel(
24 name="Attachment",
25 fields=[
26 (
27 "id",
28 models.AutoField(
29 auto_created=True,
30 primary_key=True,
31 serialize=False,
32 verbose_name="ID",
33 ),
34 ),
35 (
36 "url",
37 models.TextField(
38 blank=True,
39 help_text="Link (URL) to the attachment",
40 verbose_name="URL",
41 ),
42 ),
43 (
44 "url_text",
45 models.TextField(
46 blank=True,
47 help_text="Text to be displayed for the link",
48 verbose_name="URL text",
49 ),
50 ),
51 (
52 "context_study",
53 models.ForeignKey(
54 help_text="Foreign key to studies.Study",
55 on_delete=django.db.models.deletion.CASCADE,
56 related_name="related_attachments",
57 to="studies.Study",
58 ),
59 ),
60 (
61 "dataset",
62 models.ForeignKey(
63 blank=True,
64 help_text="Foreign key to data.Dataset",
65 null=True,
66 on_delete=django.db.models.deletion.CASCADE,
67 related_name="attachments",
68 to="data.Dataset",
69 ),
70 ),
71 (
72 "instrument",
73 models.ForeignKey(
74 blank=True,
75 help_text="Foreign key to instruments.Instrument",
76 null=True,
77 on_delete=django.db.models.deletion.CASCADE,
78 related_name="attachments",
79 to="instruments.Instrument",
80 ),
81 ),
82 (
83 "question",
84 models.ForeignKey(
85 blank=True,
86 help_text="Foreign key to instruments.Question",
87 null=True,
88 on_delete=django.db.models.deletion.CASCADE,
89 related_name="attachments",
90 to="instruments.Question",
91 ),
92 ),
93 (
94 "study",
95 models.ForeignKey(
96 blank=True,
97 help_text="Foreign key to studies.Study",
98 null=True,
99 on_delete=django.db.models.deletion.CASCADE,
100 related_name="attachments",
101 to="studies.Study",
102 ),
103 ),
104 (
105 "variable",
106 models.ForeignKey(
107 blank=True,
108 help_text="Foreign key to data.Variable",
109 null=True,
110 on_delete=django.db.models.deletion.CASCADE,
111 related_name="attachments",
112 to="data.Variable",
113 ),
114 ),
115 ],
116 ),
117 migrations.CreateModel(
118 name="Publication",
119 fields=[
120 (
121 "id",
122 models.UUIDField(
123 db_index=True,
124 default=uuid.uuid4,
125 help_text="UUID of the publication. Dependent on the associated study.",
126 primary_key=True,
127 serialize=False,
128 ),
129 ),
130 (
131 "name",
132 models.CharField(
133 db_index=True, help_text="Name of the publication", max_length=255
134 ),
135 ),
136 (
137 "sub_type",
138 models.CharField(
139 blank=True,
140 help_text="Type of the publication (e.g., journal article or dissertation)",
141 max_length=255,
142 ),
143 ),
144 (
145 "title",
146 models.TextField(blank=True, help_text="Title of the publication"),
147 ),
148 (
149 "author",
150 models.TextField(blank=True, help_text="Name(s) of the author(s)"),
151 ),
152 ("year", models.TextField(blank=True, help_text="Year of publication")),
153 (
154 "abstract",
155 models.TextField(blank=True, help_text="Abstract of the publication"),
156 ),
157 (
158 "cite",
159 models.TextField(
160 blank=True, help_text="Suggested citation of the publication"
161 ),
162 ),
163 (
164 "url",
165 models.TextField(
166 blank=True,
167 help_text="URL referencing the publication",
168 verbose_name="URL",
169 ),
170 ),
171 (
172 "doi",
173 models.TextField(
174 blank=True,
175 help_text="DOI of the publication (DOI only, not the URL to the DOI)",
176 verbose_name="DOI",
177 ),
178 ),
179 (
180 "studies",
181 models.TextField(
182 blank=True,
183 help_text="Description of studies/data sources used in the publication",
184 ),
185 ),
186 (
187 "study",
188 models.ForeignKey(
189 blank=True,
190 help_text="Foreign key to studies.Study",
191 null=True,
192 on_delete=django.db.models.deletion.CASCADE,
193 related_name="publications",
194 to="studies.Study",
195 ),
196 ),
197 ],
198 options={"unique_together": {("study", "name")}},
199 bases=(
200 ddionrails.elastic.mixins.ModelMixin,
201 ddionrails.base.mixins.ModelMixin,
202 models.Model,
203 ),
204 ),
205 ]
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddionrails/publications/migrations/0001_initial.py b/ddionrails/publications/migrations/0001_initial.py
--- a/ddionrails/publications/migrations/0001_initial.py
+++ b/ddionrails/publications/migrations/0001_initial.py
@@ -186,9 +186,7 @@
(
"study",
models.ForeignKey(
- blank=True,
help_text="Foreign key to studies.Study",
- null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="publications",
to="studies.Study",
diff --git a/ddionrails/publications/models.py b/ddionrails/publications/models.py
--- a/ddionrails/publications/models.py
+++ b/ddionrails/publications/models.py
@@ -62,8 +62,6 @@
#############
study = models.ForeignKey(
Study,
- blank=True,
- null=True,
related_name="publications",
on_delete=models.CASCADE,
help_text="Foreign key to studies.Study",
|
{"golden_diff": "diff --git a/ddionrails/publications/migrations/0001_initial.py b/ddionrails/publications/migrations/0001_initial.py\n--- a/ddionrails/publications/migrations/0001_initial.py\n+++ b/ddionrails/publications/migrations/0001_initial.py\n@@ -186,9 +186,7 @@\n (\n \"study\",\n models.ForeignKey(\n- blank=True,\n help_text=\"Foreign key to studies.Study\",\n- null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"publications\",\n to=\"studies.Study\",\ndiff --git a/ddionrails/publications/models.py b/ddionrails/publications/models.py\n--- a/ddionrails/publications/models.py\n+++ b/ddionrails/publications/models.py\n@@ -62,8 +62,6 @@\n #############\n study = models.ForeignKey(\n Study,\n- blank=True,\n- null=True,\n related_name=\"publications\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n", "issue": "Make foreign key to study in publications.Publication required\n### Subject of the issue\r\n\r\nThe save() method of publications.Publication requires a related study. But the existence of a related study is not enforced.\r\n\r\nRemove \"null=True\" and \"blank=True\".\r\n\r\nhttps://github.com/ddionrails/ddionrails/blob/191e0f0fa95c81e58783a1b1317e0c705a19e51a/ddionrails/publications/models.py#L50-L57\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Model definitions for ddionrails.publications app \"\"\"\n\nimport uuid\nfrom typing import Dict\n\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom config.helpers import render_markdown\nfrom ddionrails.base.mixins import ModelMixin as DorMixin\nfrom ddionrails.data.models import Dataset, Variable\nfrom ddionrails.elastic.mixins import ModelMixin as ElasticMixin\nfrom ddionrails.instruments.models import Instrument, Question\nfrom ddionrails.studies.models import Study\n\n\nclass Publication(ElasticMixin, DorMixin, models.Model):\n \"\"\"\n Stores a single publication, related to :model:`studies.Study`.\n \"\"\"\n\n ##############\n # attributes #\n ##############\n id = models.UUIDField( # pylint: disable=C0103\n primary_key=True,\n default=uuid.uuid4,\n editable=True,\n db_index=True,\n help_text=\"UUID of the publication. Dependent on the associated study.\",\n )\n\n name = models.CharField(\n max_length=255, db_index=True, help_text=\"Name of the publication\"\n )\n sub_type = models.CharField(\n max_length=255,\n blank=True,\n help_text=\"Type of the publication (e.g., journal article or dissertation)\",\n )\n title = models.TextField(blank=True, help_text=\"Title of the publication\")\n author = models.TextField(blank=True, help_text=\"Name(s) of the author(s)\")\n year = models.TextField(blank=True, help_text=\"Year of publication\")\n abstract = models.TextField(blank=True, help_text=\"Abstract of the publication\")\n cite = models.TextField(blank=True, help_text=\"Suggested citation of the publication\")\n url = models.TextField(\n blank=True, verbose_name=\"URL\", help_text=\"URL referencing the publication\"\n )\n doi = models.TextField(\n blank=True,\n verbose_name=\"DOI\",\n help_text=\"DOI of the publication (DOI only, not the URL to the DOI)\",\n )\n studies = models.TextField(\n blank=True,\n help_text=\"Description of studies/data sources used in the publication\",\n )\n\n #############\n # relations #\n #############\n study = models.ForeignKey(\n Study,\n blank=True,\n null=True,\n related_name=\"publications\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n\n # Used by ElasticMixin when indexed into Elasticsearch\n DOC_TYPE = \"publication\"\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n \"\"\"\"Set id and call parents save(). \"\"\"\n self.id = uuid.uuid5(self.study_id, self.name) # pylint: disable=C0103\n super().save(\n force_insert=force_insert,\n force_update=force_update,\n using=using,\n update_fields=update_fields,\n )\n\n class Meta: # pylint: disable=missing-docstring,too-few-public-methods\n unique_together = (\"study\", \"name\")\n\n def __str__(self) -> str:\n \"\"\" Returns a string representation using the \"study\" and \"name\" fields \"\"\"\n return f\"{self.study}/publ/{self.name}\"\n\n def get_absolute_url(self) -> str:\n \"\"\" Returns a canonical URL for the model using the \"study\" and \"name\" fields \"\"\"\n return reverse(\n \"publ:publication\",\n kwargs={\"study_name\": self.study.name, \"publication_name\": self.name},\n )\n\n def html_abstract(self) -> str:\n \"\"\" Returns the \"abstract\" field as a string containing HTML markup \"\"\"\n return render_markdown(self.abstract)\n\n def html_cite(self) -> str:\n \"\"\" Returns the \"cite\" field as a string containing HTML markup \"\"\"\n return render_markdown(self.cite)\n\n def to_elastic_dict(self) -> Dict[str, str]:\n \"\"\" Returns a dictionary to be indexed by Elasticsearch \"\"\"\n try:\n study_name = self.study.name\n except AttributeError:\n study_name = \"\"\n return dict(\n name=self.name,\n sub_type=self.sub_type,\n title=self.title,\n author=self.author,\n year=self.year,\n period=self.year,\n abstract=self.abstract,\n cite=self.cite,\n url=self.url,\n doi=self.doi,\n study=study_name,\n )\n\n\nclass Attachment(models.Model):\n \"\"\"\n Stores a single attachment, related to:\n :model:`studies.Study`,\n :model:`data.Dataset`,\n :model:`data.Variable`,\n :model:`instruments.Instrument` and\n :model:`instruments.Question`\n\n \"\"\"\n\n ##############\n # attributes #\n ##############\n url = models.TextField(\n blank=True, verbose_name=\"URL\", help_text=\"Link (URL) to the attachment\"\n )\n url_text = models.TextField(\n blank=True, verbose_name=\"URL text\", help_text=\"Text to be displayed for the link\"\n )\n\n #############\n # relations #\n #############\n context_study = models.ForeignKey(\n Study,\n related_name=\"related_attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n study = models.ForeignKey(\n Study,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n dataset = models.ForeignKey(\n Dataset,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to data.Dataset\",\n )\n variable = models.ForeignKey(\n Variable,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to data.Variable\",\n )\n instrument = models.ForeignKey(\n Instrument,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to instruments.Instrument\",\n )\n question = models.ForeignKey(\n Question,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to instruments.Question\",\n )\n", "path": "ddionrails/publications/models.py"}, {"content": "# Generated by Django 2.2.2 on 2019-06-27 11:16\n\nimport uuid\n\nimport django.db.models.deletion\nfrom django.db import migrations, models\n\nimport ddionrails.base.mixins\nimport ddionrails.elastic.mixins\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"instruments\", \"__first__\"),\n (\"data\", \"0001_initial\"),\n (\"studies\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Attachment\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"url\",\n models.TextField(\n blank=True,\n help_text=\"Link (URL) to the attachment\",\n verbose_name=\"URL\",\n ),\n ),\n (\n \"url_text\",\n models.TextField(\n blank=True,\n help_text=\"Text to be displayed for the link\",\n verbose_name=\"URL text\",\n ),\n ),\n (\n \"context_study\",\n models.ForeignKey(\n help_text=\"Foreign key to studies.Study\",\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"related_attachments\",\n to=\"studies.Study\",\n ),\n ),\n (\n \"dataset\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to data.Dataset\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"data.Dataset\",\n ),\n ),\n (\n \"instrument\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to instruments.Instrument\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"instruments.Instrument\",\n ),\n ),\n (\n \"question\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to instruments.Question\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"instruments.Question\",\n ),\n ),\n (\n \"study\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to studies.Study\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"studies.Study\",\n ),\n ),\n (\n \"variable\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to data.Variable\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"data.Variable\",\n ),\n ),\n ],\n ),\n migrations.CreateModel(\n name=\"Publication\",\n fields=[\n (\n \"id\",\n models.UUIDField(\n db_index=True,\n default=uuid.uuid4,\n help_text=\"UUID of the publication. Dependent on the associated study.\",\n primary_key=True,\n serialize=False,\n ),\n ),\n (\n \"name\",\n models.CharField(\n db_index=True, help_text=\"Name of the publication\", max_length=255\n ),\n ),\n (\n \"sub_type\",\n models.CharField(\n blank=True,\n help_text=\"Type of the publication (e.g., journal article or dissertation)\",\n max_length=255,\n ),\n ),\n (\n \"title\",\n models.TextField(blank=True, help_text=\"Title of the publication\"),\n ),\n (\n \"author\",\n models.TextField(blank=True, help_text=\"Name(s) of the author(s)\"),\n ),\n (\"year\", models.TextField(blank=True, help_text=\"Year of publication\")),\n (\n \"abstract\",\n models.TextField(blank=True, help_text=\"Abstract of the publication\"),\n ),\n (\n \"cite\",\n models.TextField(\n blank=True, help_text=\"Suggested citation of the publication\"\n ),\n ),\n (\n \"url\",\n models.TextField(\n blank=True,\n help_text=\"URL referencing the publication\",\n verbose_name=\"URL\",\n ),\n ),\n (\n \"doi\",\n models.TextField(\n blank=True,\n help_text=\"DOI of the publication (DOI only, not the URL to the DOI)\",\n verbose_name=\"DOI\",\n ),\n ),\n (\n \"studies\",\n models.TextField(\n blank=True,\n help_text=\"Description of studies/data sources used in the publication\",\n ),\n ),\n (\n \"study\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to studies.Study\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"publications\",\n to=\"studies.Study\",\n ),\n ),\n ],\n options={\"unique_together\": {(\"study\", \"name\")}},\n bases=(\n ddionrails.elastic.mixins.ModelMixin,\n ddionrails.base.mixins.ModelMixin,\n models.Model,\n ),\n ),\n ]\n", "path": "ddionrails/publications/migrations/0001_initial.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Model definitions for ddionrails.publications app \"\"\"\n\nimport uuid\nfrom typing import Dict\n\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom config.helpers import render_markdown\nfrom ddionrails.base.mixins import ModelMixin as DorMixin\nfrom ddionrails.data.models import Dataset, Variable\nfrom ddionrails.elastic.mixins import ModelMixin as ElasticMixin\nfrom ddionrails.instruments.models import Instrument, Question\nfrom ddionrails.studies.models import Study\n\n\nclass Publication(ElasticMixin, DorMixin, models.Model):\n \"\"\"\n Stores a single publication, related to :model:`studies.Study`.\n \"\"\"\n\n ##############\n # attributes #\n ##############\n id = models.UUIDField( # pylint: disable=C0103\n primary_key=True,\n default=uuid.uuid4,\n editable=True,\n db_index=True,\n help_text=\"UUID of the publication. Dependent on the associated study.\",\n )\n\n name = models.CharField(\n max_length=255, db_index=True, help_text=\"Name of the publication\"\n )\n sub_type = models.CharField(\n max_length=255,\n blank=True,\n help_text=\"Type of the publication (e.g., journal article or dissertation)\",\n )\n title = models.TextField(blank=True, help_text=\"Title of the publication\")\n author = models.TextField(blank=True, help_text=\"Name(s) of the author(s)\")\n year = models.TextField(blank=True, help_text=\"Year of publication\")\n abstract = models.TextField(blank=True, help_text=\"Abstract of the publication\")\n cite = models.TextField(blank=True, help_text=\"Suggested citation of the publication\")\n url = models.TextField(\n blank=True, verbose_name=\"URL\", help_text=\"URL referencing the publication\"\n )\n doi = models.TextField(\n blank=True,\n verbose_name=\"DOI\",\n help_text=\"DOI of the publication (DOI only, not the URL to the DOI)\",\n )\n studies = models.TextField(\n blank=True,\n help_text=\"Description of studies/data sources used in the publication\",\n )\n\n #############\n # relations #\n #############\n study = models.ForeignKey(\n Study,\n related_name=\"publications\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n\n # Used by ElasticMixin when indexed into Elasticsearch\n DOC_TYPE = \"publication\"\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n \"\"\"\"Set id and call parents save(). \"\"\"\n self.id = uuid.uuid5(self.study_id, self.name) # pylint: disable=C0103\n super().save(\n force_insert=force_insert,\n force_update=force_update,\n using=using,\n update_fields=update_fields,\n )\n\n class Meta: # pylint: disable=missing-docstring,too-few-public-methods\n unique_together = (\"study\", \"name\")\n\n def __str__(self) -> str:\n \"\"\" Returns a string representation using the \"study\" and \"name\" fields \"\"\"\n return f\"{self.study}/publ/{self.name}\"\n\n def get_absolute_url(self) -> str:\n \"\"\" Returns a canonical URL for the model using the \"study\" and \"name\" fields \"\"\"\n return reverse(\n \"publ:publication\",\n kwargs={\"study_name\": self.study.name, \"publication_name\": self.name},\n )\n\n def html_abstract(self) -> str:\n \"\"\" Returns the \"abstract\" field as a string containing HTML markup \"\"\"\n return render_markdown(self.abstract)\n\n def html_cite(self) -> str:\n \"\"\" Returns the \"cite\" field as a string containing HTML markup \"\"\"\n return render_markdown(self.cite)\n\n def to_elastic_dict(self) -> Dict[str, str]:\n \"\"\" Returns a dictionary to be indexed by Elasticsearch \"\"\"\n try:\n study_name = self.study.name\n except AttributeError:\n study_name = \"\"\n return dict(\n name=self.name,\n sub_type=self.sub_type,\n title=self.title,\n author=self.author,\n year=self.year,\n period=self.year,\n abstract=self.abstract,\n cite=self.cite,\n url=self.url,\n doi=self.doi,\n study=study_name,\n )\n\n\nclass Attachment(models.Model):\n \"\"\"\n Stores a single attachment, related to:\n :model:`studies.Study`,\n :model:`data.Dataset`,\n :model:`data.Variable`,\n :model:`instruments.Instrument` and\n :model:`instruments.Question`\n\n \"\"\"\n\n ##############\n # attributes #\n ##############\n url = models.TextField(\n blank=True, verbose_name=\"URL\", help_text=\"Link (URL) to the attachment\"\n )\n url_text = models.TextField(\n blank=True, verbose_name=\"URL text\", help_text=\"Text to be displayed for the link\"\n )\n\n #############\n # relations #\n #############\n context_study = models.ForeignKey(\n Study,\n related_name=\"related_attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n study = models.ForeignKey(\n Study,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to studies.Study\",\n )\n dataset = models.ForeignKey(\n Dataset,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to data.Dataset\",\n )\n variable = models.ForeignKey(\n Variable,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to data.Variable\",\n )\n instrument = models.ForeignKey(\n Instrument,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to instruments.Instrument\",\n )\n question = models.ForeignKey(\n Question,\n blank=True,\n null=True,\n related_name=\"attachments\",\n on_delete=models.CASCADE,\n help_text=\"Foreign key to instruments.Question\",\n )\n", "path": "ddionrails/publications/models.py"}, {"content": "# Generated by Django 2.2.2 on 2019-06-27 11:16\n\nimport uuid\n\nimport django.db.models.deletion\nfrom django.db import migrations, models\n\nimport ddionrails.base.mixins\nimport ddionrails.elastic.mixins\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"instruments\", \"__first__\"),\n (\"data\", \"0001_initial\"),\n (\"studies\", \"0001_initial\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Attachment\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"url\",\n models.TextField(\n blank=True,\n help_text=\"Link (URL) to the attachment\",\n verbose_name=\"URL\",\n ),\n ),\n (\n \"url_text\",\n models.TextField(\n blank=True,\n help_text=\"Text to be displayed for the link\",\n verbose_name=\"URL text\",\n ),\n ),\n (\n \"context_study\",\n models.ForeignKey(\n help_text=\"Foreign key to studies.Study\",\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"related_attachments\",\n to=\"studies.Study\",\n ),\n ),\n (\n \"dataset\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to data.Dataset\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"data.Dataset\",\n ),\n ),\n (\n \"instrument\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to instruments.Instrument\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"instruments.Instrument\",\n ),\n ),\n (\n \"question\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to instruments.Question\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"instruments.Question\",\n ),\n ),\n (\n \"study\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to studies.Study\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"studies.Study\",\n ),\n ),\n (\n \"variable\",\n models.ForeignKey(\n blank=True,\n help_text=\"Foreign key to data.Variable\",\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attachments\",\n to=\"data.Variable\",\n ),\n ),\n ],\n ),\n migrations.CreateModel(\n name=\"Publication\",\n fields=[\n (\n \"id\",\n models.UUIDField(\n db_index=True,\n default=uuid.uuid4,\n help_text=\"UUID of the publication. Dependent on the associated study.\",\n primary_key=True,\n serialize=False,\n ),\n ),\n (\n \"name\",\n models.CharField(\n db_index=True, help_text=\"Name of the publication\", max_length=255\n ),\n ),\n (\n \"sub_type\",\n models.CharField(\n blank=True,\n help_text=\"Type of the publication (e.g., journal article or dissertation)\",\n max_length=255,\n ),\n ),\n (\n \"title\",\n models.TextField(blank=True, help_text=\"Title of the publication\"),\n ),\n (\n \"author\",\n models.TextField(blank=True, help_text=\"Name(s) of the author(s)\"),\n ),\n (\"year\", models.TextField(blank=True, help_text=\"Year of publication\")),\n (\n \"abstract\",\n models.TextField(blank=True, help_text=\"Abstract of the publication\"),\n ),\n (\n \"cite\",\n models.TextField(\n blank=True, help_text=\"Suggested citation of the publication\"\n ),\n ),\n (\n \"url\",\n models.TextField(\n blank=True,\n help_text=\"URL referencing the publication\",\n verbose_name=\"URL\",\n ),\n ),\n (\n \"doi\",\n models.TextField(\n blank=True,\n help_text=\"DOI of the publication (DOI only, not the URL to the DOI)\",\n verbose_name=\"DOI\",\n ),\n ),\n (\n \"studies\",\n models.TextField(\n blank=True,\n help_text=\"Description of studies/data sources used in the publication\",\n ),\n ),\n (\n \"study\",\n models.ForeignKey(\n help_text=\"Foreign key to studies.Study\",\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"publications\",\n to=\"studies.Study\",\n ),\n ),\n ],\n options={\"unique_together\": {(\"study\", \"name\")}},\n bases=(\n ddionrails.elastic.mixins.ModelMixin,\n ddionrails.base.mixins.ModelMixin,\n models.Model,\n ),\n ),\n ]\n", "path": "ddionrails/publications/migrations/0001_initial.py"}]}
| 3,788 | 237 |
gh_patches_debug_2018
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1237
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.5 fails to compile on OS X 10.8
Full traceback: http://pastebin.com/raw.php?i=M9N6Fgzi
@reaperhulk has diagnosed, but this will require an 0.5.2 release to fix for supported platform.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/bindings/commoncrypto/secitem.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 INCLUDES = """
17 #include <Security/SecItem.h>
18 """
19
20 TYPES = """
21 const CFTypeRef kSecAttrKeyType;
22 const CFTypeRef kSecAttrKeySizeInBits;
23 const CFTypeRef kSecAttrIsPermanent;
24 const CFTypeRef kSecAttrKeyTypeRSA;
25 const CFTypeRef kSecAttrKeyTypeDSA;
26 const CFTypeRef kSecAttrKeyTypeEC;
27 const CFTypeRef kSecAttrKeyTypeEC;
28 const CFTypeRef kSecUseKeychain;
29 """
30
31 FUNCTIONS = """
32 """
33
34 MACROS = """
35 """
36
37 CUSTOMIZATIONS = """
38 """
39
40 CONDITIONAL_NAMES = {}
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cryptography/hazmat/bindings/commoncrypto/secitem.py b/cryptography/hazmat/bindings/commoncrypto/secitem.py
--- a/cryptography/hazmat/bindings/commoncrypto/secitem.py
+++ b/cryptography/hazmat/bindings/commoncrypto/secitem.py
@@ -23,8 +23,6 @@
const CFTypeRef kSecAttrIsPermanent;
const CFTypeRef kSecAttrKeyTypeRSA;
const CFTypeRef kSecAttrKeyTypeDSA;
-const CFTypeRef kSecAttrKeyTypeEC;
-const CFTypeRef kSecAttrKeyTypeEC;
const CFTypeRef kSecUseKeychain;
"""
|
{"golden_diff": "diff --git a/cryptography/hazmat/bindings/commoncrypto/secitem.py b/cryptography/hazmat/bindings/commoncrypto/secitem.py\n--- a/cryptography/hazmat/bindings/commoncrypto/secitem.py\n+++ b/cryptography/hazmat/bindings/commoncrypto/secitem.py\n@@ -23,8 +23,6 @@\n const CFTypeRef kSecAttrIsPermanent;\n const CFTypeRef kSecAttrKeyTypeRSA;\n const CFTypeRef kSecAttrKeyTypeDSA;\n-const CFTypeRef kSecAttrKeyTypeEC;\n-const CFTypeRef kSecAttrKeyTypeEC;\n const CFTypeRef kSecUseKeychain;\n \"\"\"\n", "issue": "0.5 fails to compile on OS X 10.8\nFull traceback: http://pastebin.com/raw.php?i=M9N6Fgzi\n\n@reaperhulk has diagnosed, but this will require an 0.5.2 release to fix for supported platform.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <Security/SecItem.h>\n\"\"\"\n\nTYPES = \"\"\"\nconst CFTypeRef kSecAttrKeyType;\nconst CFTypeRef kSecAttrKeySizeInBits;\nconst CFTypeRef kSecAttrIsPermanent;\nconst CFTypeRef kSecAttrKeyTypeRSA;\nconst CFTypeRef kSecAttrKeyTypeDSA;\nconst CFTypeRef kSecAttrKeyTypeEC;\nconst CFTypeRef kSecAttrKeyTypeEC;\nconst CFTypeRef kSecUseKeychain;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/commoncrypto/secitem.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <Security/SecItem.h>\n\"\"\"\n\nTYPES = \"\"\"\nconst CFTypeRef kSecAttrKeyType;\nconst CFTypeRef kSecAttrKeySizeInBits;\nconst CFTypeRef kSecAttrIsPermanent;\nconst CFTypeRef kSecAttrKeyTypeRSA;\nconst CFTypeRef kSecAttrKeyTypeDSA;\nconst CFTypeRef kSecUseKeychain;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/commoncrypto/secitem.py"}]}
| 660 | 135 |
gh_patches_debug_62850
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-789
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[question] Consistency in package and generator names
After the discussion in https://github.com/conan-io/conan/issues/6269#issuecomment-570182130, most packages got normalized to use the format discussed in this comment (https://github.com/conan-io/conan-center-index/pull/690 and related).
If I understand the intention correctly, in CMake the package should always be added with `CONAN_PKG::{name}` (for example `openssl`) and conan will then expand the correct `Find*`-macro using `self.cpp_info.names["cmake_find_package"]` or `self.cpp_info.names["cmake_find_package_multi"]` (in this case `OpenSSL`).
Some recipes now use an additonal `self.cpp_info.names['cmake']` (a cursory search found [libcurl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/libcurl/all/conanfile.py#L374) and [backward-cpp](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/backward-cpp/all/conanfile.py#L114)).
This leads to strange behavior:
- the `cmake` generator expects `CONAN_PKG::CURL` and will fail with `CONAN_PKG::libcurl`
- the `cmake_multi` works the opposite way, working with `CONAN_PKG::libcurl` and failing with uppercase `CURL`
In terms of consistency, I tend to say that the behavior of `cmake_multi` is the correct one, but either way, both CMake-variants should at least behave the same way.
I'm not sure if there are any side effects in removing the offending lines.
I didn't check if other generators have a similar behavior for different packages.
It might be a good idea to double-check all occurrences of superfluous or missing `cpp_info.names`, ([tcl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/tcl/8.6.10/conanfile.py#L198), for example is missing the _multi_ entry). Is there a specific reason to split `cmake` and `cmake_multi` in this case (maybe a question to move to the main conan repo)?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/backward-cpp/all/conanfile.py`
Content:
```
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5
6 class BackwardCppConan(ConanFile):
7 name = "backward-cpp"
8 description = "A beautiful stack trace pretty printer for C++"
9 homepage = "https://github.com/bombela/backward-cpp"
10 url = "https://github.com/conan-io/conan-center-index"
11 topics = ("conan", "backward-cpp", "stack-trace")
12 license = "MIT"
13 exports_sources = [ "CMakeLists.txt", "patches/backward-cpp-*.patch" ]
14 generators = "cmake"
15
16 settings = "os", "arch", "compiler", "build_type"
17 options = {
18 "stack_walking" : ["unwind", "backtrace"],
19 "stack_details" : ["dw", "bfd", "dwarf", "backtrace_symbol"],
20 "shared": [True, False],
21 "fPIC": [True, False]
22 }
23 default_options = {
24 "stack_walking": "unwind",
25 "stack_details": "dwarf",
26 "shared": True,
27 "fPIC": True
28 }
29
30 _source_subfolder = "source_subfolder"
31 _build_subfolder = "build_subfolder"
32
33 def _has_stack_walking(self, type):
34 return self.options.stack_walking == type
35
36 def _has_stack_details(self, type):
37 return self.options.stack_details == type
38
39 def configure(self):
40 if self.settings.os not in ["Linux", "Macos", "Android"]:
41 raise ConanInvalidConfiguration("upstream backward-cpp v{0} is not \
42 supported in {1}.".format(self.version, self.settings.os))
43 # windows implementation only available in upstream master branch
44
45 if self.settings.os == "Macos" and \
46 not self._has_stack_details("backtrace_symbol"):
47 raise ConanInvalidConfiguration("only stack_details=backtrace_symbol"
48 " is supported on Macos")
49
50 def requirements(self):
51 if self.settings.os in ["Linux", "Android"] and \
52 self._has_stack_details("dwarf"):
53 self.requires("libdwarf/20191104")
54
55 def system_requirements(self):
56 required_package = None
57 if self.settings.os == "Linux":
58 if self._has_stack_details("dw"):
59 if tools.os_info.linux_distro in ["ubuntu", "debian"]:
60 required_package = "libdw-dev"
61 elif tools.os_info.linux_distro in ["fedora", "centos"]:
62 required_package = "elfutils-libs"
63 elif tools.os_info.linux_distro == "opensuse":
64 required_package = "libdw-devel"
65 elif tools.os_info.linux_distro == "arch":
66 required_package = "libelf"
67
68 if self._has_stack_details("bfd"):
69 if tools.os_info.linux_distro in ["ubuntu", "debian"]:
70 required_package = "binutils-dev"
71 elif tools.os_info.linux_distro in ["fedora", "centos", "opensuse"]:
72 required_package = "binutils-devel"
73 elif tools.os_info.linux_distro == "arch":
74 required_package = "binutils"
75 elif tools.os_info.is_freebsd:
76 required_package = "libbfd"
77
78 if required_package != None:
79 installer = tools.SystemPackageTool()
80 if not installer.installed(required_package):
81 raise ConanInvalidConfiguration("backward-cpp requires {}.".format(required_package))
82
83 def source(self):
84 tools.get(**self.conan_data["sources"][self.version])
85 extracted_dir = self.name + "-" + self.version
86 os.rename(extracted_dir, self._source_subfolder)
87
88 def _configure_cmake(self):
89 cmake = CMake(self)
90 cmake.definitions['STACK_WALKING_UNWIND'] = self._has_stack_walking("unwind")
91 cmake.definitions['STACK_WALKING_BACKTRACE'] = self._has_stack_walking("backtrace")
92 cmake.definitions['STACK_DETAILS_AUTO_DETECT'] = False
93 cmake.definitions['STACK_DETAILS_BACKTRACE_SYMBOL'] = self._has_stack_details("backtrace_symbol")
94 cmake.definitions['STACK_DETAILS_DW'] = self._has_stack_details("dw")
95 cmake.definitions['STACK_DETAILS_BFD'] = self._has_stack_details("bfd")
96 cmake.definitions['STACK_DETAILS_DWARF'] = self._has_stack_details("dwarf")
97 cmake.definitions['BACKWARD_SHARED'] = self.options.shared
98 cmake.configure(build_folder=self._build_subfolder)
99 return cmake
100
101 def build(self):
102 for patch in self.conan_data["patches"][self.version]:
103 tools.patch(**patch)
104 cmake = self._configure_cmake()
105 cmake.build()
106
107 def package(self):
108 cmake = self._configure_cmake()
109 cmake.install()
110 self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
111 os.remove(os.path.join(self.package_folder, "lib", "backward", "BackwardConfig.cmake"))
112
113 def package_info(self):
114 self.cpp_info.names["cmake"] = "Backward"
115 self.cpp_info.names["cmake_find_package"] = "Backward"
116 self.cpp_info.names["cmake_find_package_multi"] = "Backward"
117
118 self.cpp_info.defines.append('BACKWARD_HAS_UNWIND={}'.format(int(self._has_stack_walking("unwind"))))
119 self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE={}'.format(int(self._has_stack_walking("backtrace"))))
120
121 self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE_SYMBOL={}'.format(int(self._has_stack_details("backtrace_symbol"))))
122 self.cpp_info.defines.append('BACKWARD_HAS_DW={}'.format(int(self._has_stack_details("dw"))))
123 self.cpp_info.defines.append('BACKWARD_HAS_BFD={}'.format(int(self._has_stack_details("bfd"))))
124 self.cpp_info.defines.append('BACKWARD_HAS_DWARF={}'.format(int(self._has_stack_details("dwarf"))))
125
126 self.cpp_info.libs = tools.collect_libs(self)
127 if self.settings.os == "Linux":
128 self.cpp_info.system_libs.extend(["dl"])
129 if self._has_stack_details("dw"):
130 self.cpp_info.system_libs.extend(["dw"])
131 if self._has_stack_details("bfd"):
132 self.cpp_info.system_libs.extend(["bfd"])
133
134
135
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/backward-cpp/all/conanfile.py b/recipes/backward-cpp/all/conanfile.py
--- a/recipes/backward-cpp/all/conanfile.py
+++ b/recipes/backward-cpp/all/conanfile.py
@@ -111,7 +111,6 @@
os.remove(os.path.join(self.package_folder, "lib", "backward", "BackwardConfig.cmake"))
def package_info(self):
- self.cpp_info.names["cmake"] = "Backward"
self.cpp_info.names["cmake_find_package"] = "Backward"
self.cpp_info.names["cmake_find_package_multi"] = "Backward"
|
{"golden_diff": "diff --git a/recipes/backward-cpp/all/conanfile.py b/recipes/backward-cpp/all/conanfile.py\n--- a/recipes/backward-cpp/all/conanfile.py\n+++ b/recipes/backward-cpp/all/conanfile.py\n@@ -111,7 +111,6 @@\n os.remove(os.path.join(self.package_folder, \"lib\", \"backward\", \"BackwardConfig.cmake\"))\n \n def package_info(self):\n- self.cpp_info.names[\"cmake\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Backward\"\n", "issue": "[question] Consistency in package and generator names\nAfter the discussion in https://github.com/conan-io/conan/issues/6269#issuecomment-570182130, most packages got normalized to use the format discussed in this comment (https://github.com/conan-io/conan-center-index/pull/690 and related).\r\n\r\nIf I understand the intention correctly, in CMake the package should always be added with `CONAN_PKG::{name}` (for example `openssl`) and conan will then expand the correct `Find*`-macro using `self.cpp_info.names[\"cmake_find_package\"]` or `self.cpp_info.names[\"cmake_find_package_multi\"]` (in this case `OpenSSL`).\r\n\r\nSome recipes now use an additonal `self.cpp_info.names['cmake']` (a cursory search found [libcurl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/libcurl/all/conanfile.py#L374) and [backward-cpp](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/backward-cpp/all/conanfile.py#L114)).\r\n\r\nThis leads to strange behavior:\r\n- the `cmake` generator expects `CONAN_PKG::CURL` and will fail with `CONAN_PKG::libcurl`\r\n- the `cmake_multi` works the opposite way, working with `CONAN_PKG::libcurl` and failing with uppercase `CURL`\r\n\r\nIn terms of consistency, I tend to say that the behavior of `cmake_multi` is the correct one, but either way, both CMake-variants should at least behave the same way.\r\n\r\nI'm not sure if there are any side effects in removing the offending lines.\r\n\r\nI didn't check if other generators have a similar behavior for different packages.\r\n\r\nIt might be a good idea to double-check all occurrences of superfluous or missing `cpp_info.names`, ([tcl](https://github.com/conan-io/conan-center-index/blob/ee20546/recipes/tcl/8.6.10/conanfile.py#L198), for example is missing the _multi_ entry). Is there a specific reason to split `cmake` and `cmake_multi` in this case (maybe a question to move to the main conan repo)?\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass BackwardCppConan(ConanFile):\n name = \"backward-cpp\"\n description = \"A beautiful stack trace pretty printer for C++\"\n homepage = \"https://github.com/bombela/backward-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"conan\", \"backward-cpp\", \"stack-trace\")\n license = \"MIT\"\n exports_sources = [ \"CMakeLists.txt\", \"patches/backward-cpp-*.patch\" ]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"stack_walking\" : [\"unwind\", \"backtrace\"],\n \"stack_details\" : [\"dw\", \"bfd\", \"dwarf\", \"backtrace_symbol\"],\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"stack_walking\": \"unwind\",\n \"stack_details\": \"dwarf\",\n \"shared\": True,\n \"fPIC\": True\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def _has_stack_walking(self, type):\n return self.options.stack_walking == type\n\n def _has_stack_details(self, type):\n return self.options.stack_details == type\n \n def configure(self):\n if self.settings.os not in [\"Linux\", \"Macos\", \"Android\"]:\n raise ConanInvalidConfiguration(\"upstream backward-cpp v{0} is not \\\n supported in {1}.\".format(self.version, self.settings.os))\n # windows implementation only available in upstream master branch\n\n if self.settings.os == \"Macos\" and \\\n not self._has_stack_details(\"backtrace_symbol\"):\n raise ConanInvalidConfiguration(\"only stack_details=backtrace_symbol\"\n \" is supported on Macos\")\n \n def requirements(self):\n if self.settings.os in [\"Linux\", \"Android\"] and \\\n self._has_stack_details(\"dwarf\"):\n self.requires(\"libdwarf/20191104\")\n \n def system_requirements(self):\n required_package = None\n if self.settings.os == \"Linux\":\n if self._has_stack_details(\"dw\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"libdw-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\"]:\n required_package = \"elfutils-libs\"\n elif tools.os_info.linux_distro == \"opensuse\":\n required_package = \"libdw-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"libelf\"\n\n if self._has_stack_details(\"bfd\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"binutils-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\", \"opensuse\"]:\n required_package = \"binutils-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"binutils\"\n elif tools.os_info.is_freebsd:\n required_package = \"libbfd\"\n \n if required_package != None:\n installer = tools.SystemPackageTool()\n if not installer.installed(required_package):\n raise ConanInvalidConfiguration(\"backward-cpp requires {}.\".format(required_package))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['STACK_WALKING_UNWIND'] = self._has_stack_walking(\"unwind\")\n cmake.definitions['STACK_WALKING_BACKTRACE'] = self._has_stack_walking(\"backtrace\")\n cmake.definitions['STACK_DETAILS_AUTO_DETECT'] = False\n cmake.definitions['STACK_DETAILS_BACKTRACE_SYMBOL'] = self._has_stack_details(\"backtrace_symbol\")\n cmake.definitions['STACK_DETAILS_DW'] = self._has_stack_details(\"dw\")\n cmake.definitions['STACK_DETAILS_BFD'] = self._has_stack_details(\"bfd\")\n cmake.definitions['STACK_DETAILS_DWARF'] = self._has_stack_details(\"dwarf\")\n cmake.definitions['BACKWARD_SHARED'] = self.options.shared\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n os.remove(os.path.join(self.package_folder, \"lib\", \"backward\", \"BackwardConfig.cmake\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Backward\"\n\n self.cpp_info.defines.append('BACKWARD_HAS_UNWIND={}'.format(int(self._has_stack_walking(\"unwind\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE={}'.format(int(self._has_stack_walking(\"backtrace\"))))\n \n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE_SYMBOL={}'.format(int(self._has_stack_details(\"backtrace_symbol\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DW={}'.format(int(self._has_stack_details(\"dw\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BFD={}'.format(int(self._has_stack_details(\"bfd\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DWARF={}'.format(int(self._has_stack_details(\"dwarf\"))))\n\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\"])\n if self._has_stack_details(\"dw\"):\n self.cpp_info.system_libs.extend([\"dw\"]) \n if self._has_stack_details(\"bfd\"):\n self.cpp_info.system_libs.extend([\"bfd\"])\n\n\n \n", "path": "recipes/backward-cpp/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass BackwardCppConan(ConanFile):\n name = \"backward-cpp\"\n description = \"A beautiful stack trace pretty printer for C++\"\n homepage = \"https://github.com/bombela/backward-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"conan\", \"backward-cpp\", \"stack-trace\")\n license = \"MIT\"\n exports_sources = [ \"CMakeLists.txt\", \"patches/backward-cpp-*.patch\" ]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"stack_walking\" : [\"unwind\", \"backtrace\"],\n \"stack_details\" : [\"dw\", \"bfd\", \"dwarf\", \"backtrace_symbol\"],\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"stack_walking\": \"unwind\",\n \"stack_details\": \"dwarf\",\n \"shared\": True,\n \"fPIC\": True\n }\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def _has_stack_walking(self, type):\n return self.options.stack_walking == type\n\n def _has_stack_details(self, type):\n return self.options.stack_details == type\n \n def configure(self):\n if self.settings.os not in [\"Linux\", \"Macos\", \"Android\"]:\n raise ConanInvalidConfiguration(\"upstream backward-cpp v{0} is not \\\n supported in {1}.\".format(self.version, self.settings.os))\n # windows implementation only available in upstream master branch\n\n if self.settings.os == \"Macos\" and \\\n not self._has_stack_details(\"backtrace_symbol\"):\n raise ConanInvalidConfiguration(\"only stack_details=backtrace_symbol\"\n \" is supported on Macos\")\n \n def requirements(self):\n if self.settings.os in [\"Linux\", \"Android\"] and \\\n self._has_stack_details(\"dwarf\"):\n self.requires(\"libdwarf/20191104\")\n \n def system_requirements(self):\n required_package = None\n if self.settings.os == \"Linux\":\n if self._has_stack_details(\"dw\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"libdw-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\"]:\n required_package = \"elfutils-libs\"\n elif tools.os_info.linux_distro == \"opensuse\":\n required_package = \"libdw-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"libelf\"\n\n if self._has_stack_details(\"bfd\"):\n if tools.os_info.linux_distro in [\"ubuntu\", \"debian\"]:\n required_package = \"binutils-dev\"\n elif tools.os_info.linux_distro in [\"fedora\", \"centos\", \"opensuse\"]:\n required_package = \"binutils-devel\"\n elif tools.os_info.linux_distro == \"arch\":\n required_package = \"binutils\"\n elif tools.os_info.is_freebsd:\n required_package = \"libbfd\"\n \n if required_package != None:\n installer = tools.SystemPackageTool()\n if not installer.installed(required_package):\n raise ConanInvalidConfiguration(\"backward-cpp requires {}.\".format(required_package))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions['STACK_WALKING_UNWIND'] = self._has_stack_walking(\"unwind\")\n cmake.definitions['STACK_WALKING_BACKTRACE'] = self._has_stack_walking(\"backtrace\")\n cmake.definitions['STACK_DETAILS_AUTO_DETECT'] = False\n cmake.definitions['STACK_DETAILS_BACKTRACE_SYMBOL'] = self._has_stack_details(\"backtrace_symbol\")\n cmake.definitions['STACK_DETAILS_DW'] = self._has_stack_details(\"dw\")\n cmake.definitions['STACK_DETAILS_BFD'] = self._has_stack_details(\"bfd\")\n cmake.definitions['STACK_DETAILS_DWARF'] = self._has_stack_details(\"dwarf\")\n cmake.definitions['BACKWARD_SHARED'] = self.options.shared\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n os.remove(os.path.join(self.package_folder, \"lib\", \"backward\", \"BackwardConfig.cmake\"))\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"Backward\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Backward\"\n\n self.cpp_info.defines.append('BACKWARD_HAS_UNWIND={}'.format(int(self._has_stack_walking(\"unwind\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE={}'.format(int(self._has_stack_walking(\"backtrace\"))))\n \n self.cpp_info.defines.append('BACKWARD_HAS_BACKTRACE_SYMBOL={}'.format(int(self._has_stack_details(\"backtrace_symbol\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DW={}'.format(int(self._has_stack_details(\"dw\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_BFD={}'.format(int(self._has_stack_details(\"bfd\"))))\n self.cpp_info.defines.append('BACKWARD_HAS_DWARF={}'.format(int(self._has_stack_details(\"dwarf\"))))\n\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"dl\"])\n if self._has_stack_details(\"dw\"):\n self.cpp_info.system_libs.extend([\"dw\"]) \n if self._has_stack_details(\"bfd\"):\n self.cpp_info.system_libs.extend([\"bfd\"])\n\n\n \n", "path": "recipes/backward-cpp/all/conanfile.py"}]}
| 2,458 | 148 |
gh_patches_debug_29741
|
rasdani/github-patches
|
git_diff
|
getmoto__moto-2143
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Secrets Manager list-secrets
Implement support for `aws secretsmanager list-secrets`
Note: this API call is paginated.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/secretsmanager/responses.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from moto.core.responses import BaseResponse
4
5 from .models import secretsmanager_backends
6
7
8 class SecretsManagerResponse(BaseResponse):
9
10 def get_secret_value(self):
11 secret_id = self._get_param('SecretId')
12 version_id = self._get_param('VersionId')
13 version_stage = self._get_param('VersionStage')
14 return secretsmanager_backends[self.region].get_secret_value(
15 secret_id=secret_id,
16 version_id=version_id,
17 version_stage=version_stage)
18
19 def create_secret(self):
20 name = self._get_param('Name')
21 secret_string = self._get_param('SecretString')
22 tags = self._get_param('Tags', if_none=[])
23 return secretsmanager_backends[self.region].create_secret(
24 name=name,
25 secret_string=secret_string,
26 tags=tags
27 )
28
29 def get_random_password(self):
30 password_length = self._get_param('PasswordLength', if_none=32)
31 exclude_characters = self._get_param('ExcludeCharacters', if_none='')
32 exclude_numbers = self._get_param('ExcludeNumbers', if_none=False)
33 exclude_punctuation = self._get_param('ExcludePunctuation', if_none=False)
34 exclude_uppercase = self._get_param('ExcludeUppercase', if_none=False)
35 exclude_lowercase = self._get_param('ExcludeLowercase', if_none=False)
36 include_space = self._get_param('IncludeSpace', if_none=False)
37 require_each_included_type = self._get_param(
38 'RequireEachIncludedType', if_none=True)
39 return secretsmanager_backends[self.region].get_random_password(
40 password_length=password_length,
41 exclude_characters=exclude_characters,
42 exclude_numbers=exclude_numbers,
43 exclude_punctuation=exclude_punctuation,
44 exclude_uppercase=exclude_uppercase,
45 exclude_lowercase=exclude_lowercase,
46 include_space=include_space,
47 require_each_included_type=require_each_included_type
48 )
49
50 def describe_secret(self):
51 secret_id = self._get_param('SecretId')
52 return secretsmanager_backends[self.region].describe_secret(
53 secret_id=secret_id
54 )
55
56 def rotate_secret(self):
57 client_request_token = self._get_param('ClientRequestToken')
58 rotation_lambda_arn = self._get_param('RotationLambdaARN')
59 rotation_rules = self._get_param('RotationRules')
60 secret_id = self._get_param('SecretId')
61 return secretsmanager_backends[self.region].rotate_secret(
62 secret_id=secret_id,
63 client_request_token=client_request_token,
64 rotation_lambda_arn=rotation_lambda_arn,
65 rotation_rules=rotation_rules
66 )
67
```
Path: `moto/secretsmanager/models.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import time
4 import json
5 import uuid
6
7 import boto3
8
9 from moto.core import BaseBackend, BaseModel
10 from .exceptions import (
11 ResourceNotFoundException,
12 InvalidParameterException,
13 ClientError
14 )
15 from .utils import random_password, secret_arn
16
17
18 class SecretsManager(BaseModel):
19
20 def __init__(self, region_name, **kwargs):
21 self.region = region_name
22
23
24 class SecretsManagerBackend(BaseBackend):
25
26 def __init__(self, region_name=None, **kwargs):
27 super(SecretsManagerBackend, self).__init__()
28 self.region = region_name
29 self.secrets = {}
30
31 def reset(self):
32 region_name = self.region
33 self.__dict__ = {}
34 self.__init__(region_name)
35
36 def _is_valid_identifier(self, identifier):
37 return identifier in self.secrets
38
39 def get_secret_value(self, secret_id, version_id, version_stage):
40
41 if not self._is_valid_identifier(secret_id):
42 raise ResourceNotFoundException()
43
44 secret = self.secrets[secret_id]
45
46 response = json.dumps({
47 "ARN": secret_arn(self.region, secret['secret_id']),
48 "Name": secret['name'],
49 "VersionId": secret['version_id'],
50 "SecretString": secret['secret_string'],
51 "VersionStages": [
52 "AWSCURRENT",
53 ],
54 "CreatedDate": secret['createdate']
55 })
56
57 return response
58
59 def create_secret(self, name, secret_string, tags, **kwargs):
60
61 generated_version_id = str(uuid.uuid4())
62
63 secret = {
64 'secret_string': secret_string,
65 'secret_id': name,
66 'name': name,
67 'createdate': int(time.time()),
68 'rotation_enabled': False,
69 'rotation_lambda_arn': '',
70 'auto_rotate_after_days': 0,
71 'version_id': generated_version_id,
72 'tags': tags
73 }
74
75 self.secrets[name] = secret
76
77 response = json.dumps({
78 "ARN": secret_arn(self.region, name),
79 "Name": name,
80 "VersionId": generated_version_id,
81 })
82
83 return response
84
85 def describe_secret(self, secret_id):
86 if not self._is_valid_identifier(secret_id):
87 raise ResourceNotFoundException
88
89 secret = self.secrets[secret_id]
90
91 response = json.dumps({
92 "ARN": secret_arn(self.region, secret['secret_id']),
93 "Name": secret['name'],
94 "Description": "",
95 "KmsKeyId": "",
96 "RotationEnabled": secret['rotation_enabled'],
97 "RotationLambdaARN": secret['rotation_lambda_arn'],
98 "RotationRules": {
99 "AutomaticallyAfterDays": secret['auto_rotate_after_days']
100 },
101 "LastRotatedDate": None,
102 "LastChangedDate": None,
103 "LastAccessedDate": None,
104 "DeletedDate": None,
105 "Tags": secret['tags']
106 })
107
108 return response
109
110 def rotate_secret(self, secret_id, client_request_token=None,
111 rotation_lambda_arn=None, rotation_rules=None):
112
113 rotation_days = 'AutomaticallyAfterDays'
114
115 if not self._is_valid_identifier(secret_id):
116 raise ResourceNotFoundException
117
118 if client_request_token:
119 token_length = len(client_request_token)
120 if token_length < 32 or token_length > 64:
121 msg = (
122 'ClientRequestToken '
123 'must be 32-64 characters long.'
124 )
125 raise InvalidParameterException(msg)
126
127 if rotation_lambda_arn:
128 if len(rotation_lambda_arn) > 2048:
129 msg = (
130 'RotationLambdaARN '
131 'must <= 2048 characters long.'
132 )
133 raise InvalidParameterException(msg)
134
135 if rotation_rules:
136 if rotation_days in rotation_rules:
137 rotation_period = rotation_rules[rotation_days]
138 if rotation_period < 1 or rotation_period > 1000:
139 msg = (
140 'RotationRules.AutomaticallyAfterDays '
141 'must be within 1-1000.'
142 )
143 raise InvalidParameterException(msg)
144
145 secret = self.secrets[secret_id]
146
147 secret['version_id'] = client_request_token or ''
148 secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
149 if rotation_rules:
150 secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
151 if secret['auto_rotate_after_days'] > 0:
152 secret['rotation_enabled'] = True
153
154 response = json.dumps({
155 "ARN": secret_arn(self.region, secret['secret_id']),
156 "Name": secret['name'],
157 "VersionId": secret['version_id']
158 })
159
160 return response
161
162 def get_random_password(self, password_length,
163 exclude_characters, exclude_numbers,
164 exclude_punctuation, exclude_uppercase,
165 exclude_lowercase, include_space,
166 require_each_included_type):
167 # password size must have value less than or equal to 4096
168 if password_length > 4096:
169 raise ClientError(
170 "ClientError: An error occurred (ValidationException) \
171 when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \
172 failed to satisfy constraint: Member must have value less than or equal to 4096".format(password_length))
173 if password_length < 4:
174 raise InvalidParameterException(
175 "InvalidParameterException: An error occurred (InvalidParameterException) \
176 when calling the GetRandomPassword operation: Password length is too short based on the required types.")
177
178 response = json.dumps({
179 "RandomPassword": random_password(password_length,
180 exclude_characters,
181 exclude_numbers,
182 exclude_punctuation,
183 exclude_uppercase,
184 exclude_lowercase,
185 include_space,
186 require_each_included_type)
187 })
188
189 return response
190
191
192 available_regions = (
193 boto3.session.Session().get_available_regions("secretsmanager")
194 )
195 secretsmanager_backends = {region: SecretsManagerBackend(region_name=region)
196 for region in available_regions}
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py
--- a/moto/secretsmanager/models.py
+++ b/moto/secretsmanager/models.py
@@ -188,6 +188,31 @@
return response
+ def list_secrets(self, max_results, next_token):
+ # TODO implement pagination and limits
+
+ secret_list = [{
+ "ARN": secret_arn(self.region, secret['secret_id']),
+ "DeletedDate": None,
+ "Description": "",
+ "KmsKeyId": "",
+ "LastAccessedDate": None,
+ "LastChangedDate": None,
+ "LastRotatedDate": None,
+ "Name": secret['name'],
+ "RotationEnabled": secret['rotation_enabled'],
+ "RotationLambdaARN": secret['rotation_lambda_arn'],
+ "RotationRules": {
+ "AutomaticallyAfterDays": secret['auto_rotate_after_days']
+ },
+ "SecretVersionsToStages": {
+ secret['version_id']: ["AWSCURRENT"]
+ },
+ "Tags": secret['tags']
+ } for secret in self.secrets.values()]
+
+ return secret_list, None
+
available_regions = (
boto3.session.Session().get_available_regions("secretsmanager")
diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py
--- a/moto/secretsmanager/responses.py
+++ b/moto/secretsmanager/responses.py
@@ -4,6 +4,8 @@
from .models import secretsmanager_backends
+import json
+
class SecretsManagerResponse(BaseResponse):
@@ -64,3 +66,12 @@
rotation_lambda_arn=rotation_lambda_arn,
rotation_rules=rotation_rules
)
+
+ def list_secrets(self):
+ max_results = self._get_int_param("MaxResults")
+ next_token = self._get_param("NextToken")
+ secret_list, next_token = secretsmanager_backends[self.region].list_secrets(
+ max_results=max_results,
+ next_token=next_token,
+ )
+ return json.dumps(dict(SecretList=secret_list, NextToken=next_token))
|
{"golden_diff": "diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py\n--- a/moto/secretsmanager/models.py\n+++ b/moto/secretsmanager/models.py\n@@ -188,6 +188,31 @@\n \n return response\n \n+ def list_secrets(self, max_results, next_token):\n+ # TODO implement pagination and limits\n+\n+ secret_list = [{\n+ \"ARN\": secret_arn(self.region, secret['secret_id']),\n+ \"DeletedDate\": None,\n+ \"Description\": \"\",\n+ \"KmsKeyId\": \"\",\n+ \"LastAccessedDate\": None,\n+ \"LastChangedDate\": None,\n+ \"LastRotatedDate\": None,\n+ \"Name\": secret['name'],\n+ \"RotationEnabled\": secret['rotation_enabled'],\n+ \"RotationLambdaARN\": secret['rotation_lambda_arn'],\n+ \"RotationRules\": {\n+ \"AutomaticallyAfterDays\": secret['auto_rotate_after_days']\n+ },\n+ \"SecretVersionsToStages\": {\n+ secret['version_id']: [\"AWSCURRENT\"]\n+ },\n+ \"Tags\": secret['tags']\n+ } for secret in self.secrets.values()]\n+\n+ return secret_list, None\n+\n \n available_regions = (\n boto3.session.Session().get_available_regions(\"secretsmanager\")\ndiff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py\n--- a/moto/secretsmanager/responses.py\n+++ b/moto/secretsmanager/responses.py\n@@ -4,6 +4,8 @@\n \n from .models import secretsmanager_backends\n \n+import json\n+\n \n class SecretsManagerResponse(BaseResponse):\n \n@@ -64,3 +66,12 @@\n rotation_lambda_arn=rotation_lambda_arn,\n rotation_rules=rotation_rules\n )\n+\n+ def list_secrets(self):\n+ max_results = self._get_int_param(\"MaxResults\")\n+ next_token = self._get_param(\"NextToken\")\n+ secret_list, next_token = secretsmanager_backends[self.region].list_secrets(\n+ max_results=max_results,\n+ next_token=next_token,\n+ )\n+ return json.dumps(dict(SecretList=secret_list, NextToken=next_token))\n", "issue": "Support Secrets Manager list-secrets\nImplement support for `aws secretsmanager list-secrets`\r\n\r\nNote: this API call is paginated.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom moto.core.responses import BaseResponse\n\nfrom .models import secretsmanager_backends\n\n\nclass SecretsManagerResponse(BaseResponse):\n\n def get_secret_value(self):\n secret_id = self._get_param('SecretId')\n version_id = self._get_param('VersionId')\n version_stage = self._get_param('VersionStage')\n return secretsmanager_backends[self.region].get_secret_value(\n secret_id=secret_id,\n version_id=version_id,\n version_stage=version_stage)\n\n def create_secret(self):\n name = self._get_param('Name')\n secret_string = self._get_param('SecretString')\n tags = self._get_param('Tags', if_none=[])\n return secretsmanager_backends[self.region].create_secret(\n name=name,\n secret_string=secret_string,\n tags=tags\n )\n\n def get_random_password(self):\n password_length = self._get_param('PasswordLength', if_none=32)\n exclude_characters = self._get_param('ExcludeCharacters', if_none='')\n exclude_numbers = self._get_param('ExcludeNumbers', if_none=False)\n exclude_punctuation = self._get_param('ExcludePunctuation', if_none=False)\n exclude_uppercase = self._get_param('ExcludeUppercase', if_none=False)\n exclude_lowercase = self._get_param('ExcludeLowercase', if_none=False)\n include_space = self._get_param('IncludeSpace', if_none=False)\n require_each_included_type = self._get_param(\n 'RequireEachIncludedType', if_none=True)\n return secretsmanager_backends[self.region].get_random_password(\n password_length=password_length,\n exclude_characters=exclude_characters,\n exclude_numbers=exclude_numbers,\n exclude_punctuation=exclude_punctuation,\n exclude_uppercase=exclude_uppercase,\n exclude_lowercase=exclude_lowercase,\n include_space=include_space,\n require_each_included_type=require_each_included_type\n )\n\n def describe_secret(self):\n secret_id = self._get_param('SecretId')\n return secretsmanager_backends[self.region].describe_secret(\n secret_id=secret_id\n )\n\n def rotate_secret(self):\n client_request_token = self._get_param('ClientRequestToken')\n rotation_lambda_arn = self._get_param('RotationLambdaARN')\n rotation_rules = self._get_param('RotationRules')\n secret_id = self._get_param('SecretId')\n return secretsmanager_backends[self.region].rotate_secret(\n secret_id=secret_id,\n client_request_token=client_request_token,\n rotation_lambda_arn=rotation_lambda_arn,\n rotation_rules=rotation_rules\n )\n", "path": "moto/secretsmanager/responses.py"}, {"content": "from __future__ import unicode_literals\n\nimport time\nimport json\nimport uuid\n\nimport boto3\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import (\n ResourceNotFoundException,\n InvalidParameterException,\n ClientError\n)\nfrom .utils import random_password, secret_arn\n\n\nclass SecretsManager(BaseModel):\n\n def __init__(self, region_name, **kwargs):\n self.region = region_name\n\n\nclass SecretsManagerBackend(BaseBackend):\n\n def __init__(self, region_name=None, **kwargs):\n super(SecretsManagerBackend, self).__init__()\n self.region = region_name\n self.secrets = {}\n\n def reset(self):\n region_name = self.region\n self.__dict__ = {}\n self.__init__(region_name)\n\n def _is_valid_identifier(self, identifier):\n return identifier in self.secrets\n\n def get_secret_value(self, secret_id, version_id, version_stage):\n\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException()\n\n secret = self.secrets[secret_id]\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"VersionId\": secret['version_id'],\n \"SecretString\": secret['secret_string'],\n \"VersionStages\": [\n \"AWSCURRENT\",\n ],\n \"CreatedDate\": secret['createdate']\n })\n\n return response\n\n def create_secret(self, name, secret_string, tags, **kwargs):\n\n generated_version_id = str(uuid.uuid4())\n\n secret = {\n 'secret_string': secret_string,\n 'secret_id': name,\n 'name': name,\n 'createdate': int(time.time()),\n 'rotation_enabled': False,\n 'rotation_lambda_arn': '',\n 'auto_rotate_after_days': 0,\n 'version_id': generated_version_id,\n 'tags': tags\n }\n\n self.secrets[name] = secret\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, name),\n \"Name\": name,\n \"VersionId\": generated_version_id,\n })\n\n return response\n\n def describe_secret(self, secret_id):\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException\n\n secret = self.secrets[secret_id]\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"Description\": \"\",\n \"KmsKeyId\": \"\",\n \"RotationEnabled\": secret['rotation_enabled'],\n \"RotationLambdaARN\": secret['rotation_lambda_arn'],\n \"RotationRules\": {\n \"AutomaticallyAfterDays\": secret['auto_rotate_after_days']\n },\n \"LastRotatedDate\": None,\n \"LastChangedDate\": None,\n \"LastAccessedDate\": None,\n \"DeletedDate\": None,\n \"Tags\": secret['tags']\n })\n\n return response\n\n def rotate_secret(self, secret_id, client_request_token=None,\n rotation_lambda_arn=None, rotation_rules=None):\n\n rotation_days = 'AutomaticallyAfterDays'\n\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException\n\n if client_request_token:\n token_length = len(client_request_token)\n if token_length < 32 or token_length > 64:\n msg = (\n 'ClientRequestToken '\n 'must be 32-64 characters long.'\n )\n raise InvalidParameterException(msg)\n\n if rotation_lambda_arn:\n if len(rotation_lambda_arn) > 2048:\n msg = (\n 'RotationLambdaARN '\n 'must <= 2048 characters long.'\n )\n raise InvalidParameterException(msg)\n\n if rotation_rules:\n if rotation_days in rotation_rules:\n rotation_period = rotation_rules[rotation_days]\n if rotation_period < 1 or rotation_period > 1000:\n msg = (\n 'RotationRules.AutomaticallyAfterDays '\n 'must be within 1-1000.'\n )\n raise InvalidParameterException(msg)\n\n secret = self.secrets[secret_id]\n\n secret['version_id'] = client_request_token or ''\n secret['rotation_lambda_arn'] = rotation_lambda_arn or ''\n if rotation_rules:\n secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)\n if secret['auto_rotate_after_days'] > 0:\n secret['rotation_enabled'] = True\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"VersionId\": secret['version_id']\n })\n\n return response\n\n def get_random_password(self, password_length,\n exclude_characters, exclude_numbers,\n exclude_punctuation, exclude_uppercase,\n exclude_lowercase, include_space,\n require_each_included_type):\n # password size must have value less than or equal to 4096\n if password_length > 4096:\n raise ClientError(\n \"ClientError: An error occurred (ValidationException) \\\n when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \\\n failed to satisfy constraint: Member must have value less than or equal to 4096\".format(password_length))\n if password_length < 4:\n raise InvalidParameterException(\n \"InvalidParameterException: An error occurred (InvalidParameterException) \\\n when calling the GetRandomPassword operation: Password length is too short based on the required types.\")\n\n response = json.dumps({\n \"RandomPassword\": random_password(password_length,\n exclude_characters,\n exclude_numbers,\n exclude_punctuation,\n exclude_uppercase,\n exclude_lowercase,\n include_space,\n require_each_included_type)\n })\n\n return response\n\n\navailable_regions = (\n boto3.session.Session().get_available_regions(\"secretsmanager\")\n)\nsecretsmanager_backends = {region: SecretsManagerBackend(region_name=region)\n for region in available_regions}\n", "path": "moto/secretsmanager/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom moto.core.responses import BaseResponse\n\nfrom .models import secretsmanager_backends\n\nimport json\n\n\nclass SecretsManagerResponse(BaseResponse):\n\n def get_secret_value(self):\n secret_id = self._get_param('SecretId')\n version_id = self._get_param('VersionId')\n version_stage = self._get_param('VersionStage')\n return secretsmanager_backends[self.region].get_secret_value(\n secret_id=secret_id,\n version_id=version_id,\n version_stage=version_stage)\n\n def create_secret(self):\n name = self._get_param('Name')\n secret_string = self._get_param('SecretString')\n tags = self._get_param('Tags', if_none=[])\n return secretsmanager_backends[self.region].create_secret(\n name=name,\n secret_string=secret_string,\n tags=tags\n )\n\n def get_random_password(self):\n password_length = self._get_param('PasswordLength', if_none=32)\n exclude_characters = self._get_param('ExcludeCharacters', if_none='')\n exclude_numbers = self._get_param('ExcludeNumbers', if_none=False)\n exclude_punctuation = self._get_param('ExcludePunctuation', if_none=False)\n exclude_uppercase = self._get_param('ExcludeUppercase', if_none=False)\n exclude_lowercase = self._get_param('ExcludeLowercase', if_none=False)\n include_space = self._get_param('IncludeSpace', if_none=False)\n require_each_included_type = self._get_param(\n 'RequireEachIncludedType', if_none=True)\n return secretsmanager_backends[self.region].get_random_password(\n password_length=password_length,\n exclude_characters=exclude_characters,\n exclude_numbers=exclude_numbers,\n exclude_punctuation=exclude_punctuation,\n exclude_uppercase=exclude_uppercase,\n exclude_lowercase=exclude_lowercase,\n include_space=include_space,\n require_each_included_type=require_each_included_type\n )\n\n def describe_secret(self):\n secret_id = self._get_param('SecretId')\n return secretsmanager_backends[self.region].describe_secret(\n secret_id=secret_id\n )\n\n def rotate_secret(self):\n client_request_token = self._get_param('ClientRequestToken')\n rotation_lambda_arn = self._get_param('RotationLambdaARN')\n rotation_rules = self._get_param('RotationRules')\n secret_id = self._get_param('SecretId')\n return secretsmanager_backends[self.region].rotate_secret(\n secret_id=secret_id,\n client_request_token=client_request_token,\n rotation_lambda_arn=rotation_lambda_arn,\n rotation_rules=rotation_rules\n )\n\n def list_secrets(self):\n max_results = self._get_int_param(\"MaxResults\")\n next_token = self._get_param(\"NextToken\")\n secret_list, next_token = secretsmanager_backends[self.region].list_secrets(\n max_results=max_results,\n next_token=next_token,\n )\n return json.dumps(dict(SecretList=secret_list, NextToken=next_token))\n", "path": "moto/secretsmanager/responses.py"}, {"content": "from __future__ import unicode_literals\n\nimport time\nimport json\nimport uuid\n\nimport boto3\n\nfrom moto.core import BaseBackend, BaseModel\nfrom .exceptions import (\n ResourceNotFoundException,\n InvalidParameterException,\n ClientError\n)\nfrom .utils import random_password, secret_arn\n\n\nclass SecretsManager(BaseModel):\n\n def __init__(self, region_name, **kwargs):\n self.region = region_name\n\n\nclass SecretsManagerBackend(BaseBackend):\n\n def __init__(self, region_name=None, **kwargs):\n super(SecretsManagerBackend, self).__init__()\n self.region = region_name\n self.secrets = {}\n\n def reset(self):\n region_name = self.region\n self.__dict__ = {}\n self.__init__(region_name)\n\n def _is_valid_identifier(self, identifier):\n return identifier in self.secrets\n\n def get_secret_value(self, secret_id, version_id, version_stage):\n\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException()\n\n secret = self.secrets[secret_id]\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"VersionId\": secret['version_id'],\n \"SecretString\": secret['secret_string'],\n \"VersionStages\": [\n \"AWSCURRENT\",\n ],\n \"CreatedDate\": secret['createdate']\n })\n\n return response\n\n def create_secret(self, name, secret_string, tags, **kwargs):\n\n generated_version_id = str(uuid.uuid4())\n\n secret = {\n 'secret_string': secret_string,\n 'secret_id': name,\n 'name': name,\n 'createdate': int(time.time()),\n 'rotation_enabled': False,\n 'rotation_lambda_arn': '',\n 'auto_rotate_after_days': 0,\n 'version_id': generated_version_id,\n 'tags': tags\n }\n\n self.secrets[name] = secret\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, name),\n \"Name\": name,\n \"VersionId\": generated_version_id,\n })\n\n return response\n\n def describe_secret(self, secret_id):\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException\n\n secret = self.secrets[secret_id]\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"Description\": \"\",\n \"KmsKeyId\": \"\",\n \"RotationEnabled\": secret['rotation_enabled'],\n \"RotationLambdaARN\": secret['rotation_lambda_arn'],\n \"RotationRules\": {\n \"AutomaticallyAfterDays\": secret['auto_rotate_after_days']\n },\n \"LastRotatedDate\": None,\n \"LastChangedDate\": None,\n \"LastAccessedDate\": None,\n \"DeletedDate\": None,\n \"Tags\": secret['tags']\n })\n\n return response\n\n def rotate_secret(self, secret_id, client_request_token=None,\n rotation_lambda_arn=None, rotation_rules=None):\n\n rotation_days = 'AutomaticallyAfterDays'\n\n if not self._is_valid_identifier(secret_id):\n raise ResourceNotFoundException\n\n if client_request_token:\n token_length = len(client_request_token)\n if token_length < 32 or token_length > 64:\n msg = (\n 'ClientRequestToken '\n 'must be 32-64 characters long.'\n )\n raise InvalidParameterException(msg)\n\n if rotation_lambda_arn:\n if len(rotation_lambda_arn) > 2048:\n msg = (\n 'RotationLambdaARN '\n 'must <= 2048 characters long.'\n )\n raise InvalidParameterException(msg)\n\n if rotation_rules:\n if rotation_days in rotation_rules:\n rotation_period = rotation_rules[rotation_days]\n if rotation_period < 1 or rotation_period > 1000:\n msg = (\n 'RotationRules.AutomaticallyAfterDays '\n 'must be within 1-1000.'\n )\n raise InvalidParameterException(msg)\n\n secret = self.secrets[secret_id]\n\n secret['version_id'] = client_request_token or ''\n secret['rotation_lambda_arn'] = rotation_lambda_arn or ''\n if rotation_rules:\n secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)\n if secret['auto_rotate_after_days'] > 0:\n secret['rotation_enabled'] = True\n\n response = json.dumps({\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"Name\": secret['name'],\n \"VersionId\": secret['version_id']\n })\n\n return response\n\n def get_random_password(self, password_length,\n exclude_characters, exclude_numbers,\n exclude_punctuation, exclude_uppercase,\n exclude_lowercase, include_space,\n require_each_included_type):\n # password size must have value less than or equal to 4096\n if password_length > 4096:\n raise ClientError(\n \"ClientError: An error occurred (ValidationException) \\\n when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \\\n failed to satisfy constraint: Member must have value less than or equal to 4096\".format(password_length))\n if password_length < 4:\n raise InvalidParameterException(\n \"InvalidParameterException: An error occurred (InvalidParameterException) \\\n when calling the GetRandomPassword operation: Password length is too short based on the required types.\")\n\n response = json.dumps({\n \"RandomPassword\": random_password(password_length,\n exclude_characters,\n exclude_numbers,\n exclude_punctuation,\n exclude_uppercase,\n exclude_lowercase,\n include_space,\n require_each_included_type)\n })\n\n return response\n\n def list_secrets(self, max_results, next_token):\n # TODO implement pagination and limits\n\n secret_list = [{\n \"ARN\": secret_arn(self.region, secret['secret_id']),\n \"DeletedDate\": None,\n \"Description\": \"\",\n \"KmsKeyId\": \"\",\n \"LastAccessedDate\": None,\n \"LastChangedDate\": None,\n \"LastRotatedDate\": None,\n \"Name\": secret['name'],\n \"RotationEnabled\": secret['rotation_enabled'],\n \"RotationLambdaARN\": secret['rotation_lambda_arn'],\n \"RotationRules\": {\n \"AutomaticallyAfterDays\": secret['auto_rotate_after_days']\n },\n \"SecretVersionsToStages\": {\n secret['version_id']: [\"AWSCURRENT\"]\n },\n \"Tags\": secret['tags']\n } for secret in self.secrets.values()]\n\n return secret_list, None\n\n\navailable_regions = (\n boto3.session.Session().get_available_regions(\"secretsmanager\")\n)\nsecretsmanager_backends = {region: SecretsManagerBackend(region_name=region)\n for region in available_regions}\n", "path": "moto/secretsmanager/models.py"}]}
| 2,790 | 493 |
gh_patches_debug_7452
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-521
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--private-ip-address option at run-instances does not work
I think --private-ip-address option can accept private ip address to assign.
but when I ran the command as follows,
> aws ec2 run-instances \
> --image-id ami-b1fe9bb0 \
> --subnet-id subnet-00xxxxxx \
> --key-name keypair1 \
> --security-group-ids sg-yyyyyy \
> --region ap-northeast-1 \
> --instance-type t1.micro \
> --associate-public-ip-address \
> --private-ip-address 10.0.0.222 \
> --debug
Error message was show as follows.
> ClientError: A client error (InvalidParameterCombination) occurred: Network interfaces and an instance-level private IP address may not be specified on the same request
On the other hand, when executed same command by ec2-api-tools, it worked fine.
Bellow is the differences in API request between awscli and ec2-api-tools
ec2-api-tools
> NetworkInterface.1.PrivateIpAddresses.1.Primary=true&
> NetworkInterface.1.PrivateIpAddresses.1.PrivateIpAddress=10.0.0.222&
awscli
> PrivateIpAddress=10.0.0.222&
This seems to be same as #504.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/ec2runinstances.py`
Content:
```
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds two new parameters to the ``ec2 run-instance``
15 command. The first, ``--secondary-private-ip-addresses`` allows a list
16 of IP addresses within the specified subnet to be associated with the
17 new instance. The second, ``--secondary-ip-address-count`` allows you
18 to specify how many additional IP addresses you want but the actual
19 address will be assigned for you.
20
21 This functionality (and much more) is also available using the
22 ``--network-interfaces`` complex argument. This just makes two of
23 the most commonly used features available more easily.
24 """
25 from awscli.arguments import CustomArgument
26
27 # --secondary-private-ip-address
28 SECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (
29 '[EC2-VPC] A secondary private IP address for the network interface '
30 'or instance. You can specify this multiple times to assign multiple '
31 'secondary IP addresses. If you want additional private IP addresses '
32 'but do not need a specific address, use the '
33 '--secondary-private-ip-address-count option.')
34
35 # --secondary-private-ip-address-count
36 SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (
37 '[EC2-VPC] The number of secondary IP addresses to assign to '
38 'the network interface or instance.')
39
40 # --associate-public-ip-address
41 ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (
42 '[EC2-VPC] If specified a public IP address will be assigned '
43 'to the new instance in a VPC.')
44
45 def _add_params(argument_table, operation, **kwargs):
46 arg = SecondaryPrivateIpAddressesArgument(
47 name='secondary-private-ip-addresses',
48 help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)
49 argument_table['secondary-private-ip-addresses'] = arg
50 arg = SecondaryPrivateIpAddressCountArgument(
51 name='secondary-private-ip-address-count',
52 help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)
53 argument_table['secondary-private-ip-address-count'] = arg
54 arg = AssociatePublicIpAddressArgument(
55 name='associate-public-ip-address',
56 help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
57 action='store_true', group_name='associate_public_ip')
58 argument_table['associate-public-ip-address'] = arg
59 arg = NoAssociatePublicIpAddressArgument(
60 name='no-associate-public-ip-address',
61 help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
62 action='store_false', group_name='associate_public_ip')
63 argument_table['no-associate-public-ip-address'] = arg
64
65
66 def _check_args(parsed_args, **kwargs):
67 # This function checks the parsed args. If the user specified
68 # the --network-interfaces option with any of the scalar options we
69 # raise an error.
70 arg_dict = vars(parsed_args)
71 if arg_dict['network_interfaces']:
72 for key in ('secondary_private_ip_addresses',
73 'secondary_private_ip_address_count',
74 'associate_public_ip_address'):
75 if arg_dict[key]:
76 msg = ('Mixing the --network-interfaces option '
77 'with the simple, scalar options is '
78 'not supported.')
79 raise ValueError(msg)
80
81
82 def _fix_args(operation, endpoint, params, **kwargs):
83 # The RunInstances request provides some parameters
84 # such as --subnet-id and --security-group-id that can be specified
85 # as separate options only if the request DOES NOT include a
86 # NetworkInterfaces structure. In those cases, the values for
87 # these parameters must be specified inside the NetworkInterfaces
88 # structure. This function checks for those parameters
89 # and fixes them if necessary.
90 # NOTE: If the user is a default VPC customer, RunInstances
91 # allows them to specify the security group by name or by id.
92 # However, in this scenario we can only support id because
93 # we can't place a group name in the NetworkInterfaces structure.
94 if 'network_interfaces' in params:
95 ni = params['network_interfaces']
96 if 'AssociatePublicIpAddress' in ni[0]:
97 if 'subnet_id' in params:
98 ni[0]['SubnetId'] = params['subnet_id']
99 del params['subnet_id']
100 if 'security_group_ids' in params:
101 ni[0]['Groups'] = params['security_group_ids']
102 del params['security_group_ids']
103
104
105 EVENTS = [
106 ('building-argument-table.ec2.run-instances', _add_params),
107 ('operation-args-parsed.ec2.run-instances', _check_args),
108 ('before-parameter-build.ec2.RunInstances', _fix_args),
109 ]
110
111
112 def register_runinstances(event_handler):
113 # Register all of the events for customizing BundleInstance
114 for event, handler in EVENTS:
115 event_handler.register(event, handler)
116
117
118 def _build_network_interfaces(params, key, value):
119 # Build up the NetworkInterfaces data structure
120 if 'network_interfaces' not in params:
121 params['network_interfaces'] = [{'DeviceIndex': 0}]
122
123 if key == 'PrivateIpAddresses':
124 if 'PrivateIpAddresses' not in params['network_interfaces'][0]:
125 params['network_interfaces'][0]['PrivateIpAddresses'] = value
126 else:
127 params['network_interfaces'][0][key] = value
128
129
130 class SecondaryPrivateIpAddressesArgument(CustomArgument):
131
132 def add_to_parser(self, parser, cli_name=None):
133 parser.add_argument(self.cli_name, dest=self.py_name,
134 default=self._default, nargs='*')
135
136 def add_to_params(self, parameters, value):
137 if value:
138 value = [{'PrivateIpAddress': v, 'Primary': False} for
139 v in value]
140 _build_network_interfaces(parameters,
141 'PrivateIpAddresses',
142 value)
143
144
145 class SecondaryPrivateIpAddressCountArgument(CustomArgument):
146
147 def add_to_parser(self, parser, cli_name=None):
148 parser.add_argument(self.cli_name, dest=self.py_name,
149 default=self._default, type=int)
150
151 def add_to_params(self, parameters, value):
152 if value:
153 _build_network_interfaces(parameters,
154 'SecondaryPrivateIpAddressCount',
155 value)
156
157
158 class AssociatePublicIpAddressArgument(CustomArgument):
159
160 def add_to_params(self, parameters, value):
161 if value is True:
162 _build_network_interfaces(parameters,
163 'AssociatePublicIpAddress',
164 value)
165
166
167 class NoAssociatePublicIpAddressArgument(CustomArgument):
168
169 def add_to_params(self, parameters, value):
170 if value is False:
171 _build_network_interfaces(parameters,
172 'AssociatePublicIpAddress',
173 value)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py
--- a/awscli/customizations/ec2runinstances.py
+++ b/awscli/customizations/ec2runinstances.py
@@ -100,6 +100,11 @@
if 'security_group_ids' in params:
ni[0]['Groups'] = params['security_group_ids']
del params['security_group_ids']
+ if 'private_ip_address' in params:
+ ip_addr = {'PrivateIpAddress': params['private_ip_address'],
+ 'Primary': True}
+ ni[0]['PrivateIpAddresses'] = [ip_addr]
+ del params['private_ip_address']
EVENTS = [
|
{"golden_diff": "diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py\n--- a/awscli/customizations/ec2runinstances.py\n+++ b/awscli/customizations/ec2runinstances.py\n@@ -100,6 +100,11 @@\n if 'security_group_ids' in params:\n ni[0]['Groups'] = params['security_group_ids']\n del params['security_group_ids']\n+ if 'private_ip_address' in params:\n+ ip_addr = {'PrivateIpAddress': params['private_ip_address'],\n+ 'Primary': True}\n+ ni[0]['PrivateIpAddresses'] = [ip_addr]\n+ del params['private_ip_address']\n \n \n EVENTS = [\n", "issue": "--private-ip-address option at run-instances does not work \nI think --private-ip-address option can accept private ip address to assign.\nbut when I ran the command as follows, \n\n> aws ec2 run-instances \\\n> --image-id ami-b1fe9bb0 \\\n> --subnet-id subnet-00xxxxxx \\\n> --key-name keypair1 \\\n> --security-group-ids sg-yyyyyy \\\n> --region ap-northeast-1 \\\n> --instance-type t1.micro \\\n> --associate-public-ip-address \\\n> --private-ip-address 10.0.0.222 \\\n> --debug\n\nError message was show as follows.\n\n> ClientError: A client error (InvalidParameterCombination) occurred: Network interfaces and an instance-level private IP address may not be specified on the same request\n\nOn the other hand, when executed same command by ec2-api-tools, it worked fine.\n\nBellow is the differences in API request between awscli and ec2-api-tools\n\nec2-api-tools\n\n> NetworkInterface.1.PrivateIpAddresses.1.Primary=true&\n> NetworkInterface.1.PrivateIpAddresses.1.PrivateIpAddress=10.0.0.222&\n\nawscli\n\n> PrivateIpAddress=10.0.0.222&\n\nThis seems to be same as #504.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds two new parameters to the ``ec2 run-instance``\ncommand. The first, ``--secondary-private-ip-addresses`` allows a list\nof IP addresses within the specified subnet to be associated with the\nnew instance. The second, ``--secondary-ip-address-count`` allows you\nto specify how many additional IP addresses you want but the actual\naddress will be assigned for you.\n\nThis functionality (and much more) is also available using the\n``--network-interfaces`` complex argument. This just makes two of\nthe most commonly used features available more easily.\n\"\"\"\nfrom awscli.arguments import CustomArgument\n\n# --secondary-private-ip-address\nSECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (\n '[EC2-VPC] A secondary private IP address for the network interface '\n 'or instance. You can specify this multiple times to assign multiple '\n 'secondary IP addresses. If you want additional private IP addresses '\n 'but do not need a specific address, use the '\n '--secondary-private-ip-address-count option.')\n\n# --secondary-private-ip-address-count\nSECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n\n# --associate-public-ip-address\nASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (\n '[EC2-VPC] If specified a public IP address will be assigned '\n 'to the new instance in a VPC.')\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n name='secondary-private-ip-addresses',\n help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)\n argument_table['secondary-private-ip-addresses'] = arg\n arg = SecondaryPrivateIpAddressCountArgument(\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n arg = AssociatePublicIpAddressArgument(\n name='associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_true', group_name='associate_public_ip')\n argument_table['associate-public-ip-address'] = arg\n arg = NoAssociatePublicIpAddressArgument(\n name='no-associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_false', group_name='associate_public_ip')\n argument_table['no-associate-public-ip-address'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. If the user specified\n # the --network-interfaces option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n 'secondary_private_ip_address_count',\n 'associate_public_ip_address'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n 'not supported.')\n raise ValueError(msg)\n\n\ndef _fix_args(operation, endpoint, params, **kwargs):\n # The RunInstances request provides some parameters\n # such as --subnet-id and --security-group-id that can be specified\n # as separate options only if the request DOES NOT include a\n # NetworkInterfaces structure. In those cases, the values for\n # these parameters must be specified inside the NetworkInterfaces\n # structure. This function checks for those parameters\n # and fixes them if necessary.\n # NOTE: If the user is a default VPC customer, RunInstances\n # allows them to specify the security group by name or by id.\n # However, in this scenario we can only support id because\n # we can't place a group name in the NetworkInterfaces structure.\n if 'network_interfaces' in params:\n ni = params['network_interfaces']\n if 'AssociatePublicIpAddress' in ni[0]:\n if 'subnet_id' in params:\n ni[0]['SubnetId'] = params['subnet_id']\n del params['subnet_id']\n if 'security_group_ids' in params:\n ni[0]['Groups'] = params['security_group_ids']\n del params['security_group_ids']\n\n\nEVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n ('before-parameter-build.ec2.RunInstances', _fix_args),\n ]\n\n\ndef register_runinstances(event_handler):\n # Register all of the events for customizing BundleInstance\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_network_interfaces(params, key, value):\n # Build up the NetworkInterfaces data structure\n if 'network_interfaces' not in params:\n params['network_interfaces'] = [{'DeviceIndex': 0}]\n\n if key == 'PrivateIpAddresses':\n if 'PrivateIpAddresses' not in params['network_interfaces'][0]:\n params['network_interfaces'][0]['PrivateIpAddresses'] = value\n else:\n params['network_interfaces'][0][key] = value\n\n\nclass SecondaryPrivateIpAddressesArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, nargs='*')\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'PrivateIpAddress': v, 'Primary': False} for\n v in value]\n _build_network_interfaces(parameters,\n 'PrivateIpAddresses',\n value)\n\n\nclass SecondaryPrivateIpAddressCountArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, type=int)\n\n def add_to_params(self, parameters, value):\n if value:\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n\n\nclass AssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is True:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n\n\nclass NoAssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is False:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n", "path": "awscli/customizations/ec2runinstances.py"}], "after_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds two new parameters to the ``ec2 run-instance``\ncommand. The first, ``--secondary-private-ip-addresses`` allows a list\nof IP addresses within the specified subnet to be associated with the\nnew instance. The second, ``--secondary-ip-address-count`` allows you\nto specify how many additional IP addresses you want but the actual\naddress will be assigned for you.\n\nThis functionality (and much more) is also available using the\n``--network-interfaces`` complex argument. This just makes two of\nthe most commonly used features available more easily.\n\"\"\"\nfrom awscli.arguments import CustomArgument\n\n# --secondary-private-ip-address\nSECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (\n '[EC2-VPC] A secondary private IP address for the network interface '\n 'or instance. You can specify this multiple times to assign multiple '\n 'secondary IP addresses. If you want additional private IP addresses '\n 'but do not need a specific address, use the '\n '--secondary-private-ip-address-count option.')\n\n# --secondary-private-ip-address-count\nSECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n\n# --associate-public-ip-address\nASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (\n '[EC2-VPC] If specified a public IP address will be assigned '\n 'to the new instance in a VPC.')\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n name='secondary-private-ip-addresses',\n help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)\n argument_table['secondary-private-ip-addresses'] = arg\n arg = SecondaryPrivateIpAddressCountArgument(\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n arg = AssociatePublicIpAddressArgument(\n name='associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_true', group_name='associate_public_ip')\n argument_table['associate-public-ip-address'] = arg\n arg = NoAssociatePublicIpAddressArgument(\n name='no-associate-public-ip-address',\n help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n action='store_false', group_name='associate_public_ip')\n argument_table['no-associate-public-ip-address'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. If the user specified\n # the --network-interfaces option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n 'secondary_private_ip_address_count',\n 'associate_public_ip_address'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n 'not supported.')\n raise ValueError(msg)\n\n\ndef _fix_args(operation, endpoint, params, **kwargs):\n # The RunInstances request provides some parameters\n # such as --subnet-id and --security-group-id that can be specified\n # as separate options only if the request DOES NOT include a\n # NetworkInterfaces structure. In those cases, the values for\n # these parameters must be specified inside the NetworkInterfaces\n # structure. This function checks for those parameters\n # and fixes them if necessary.\n # NOTE: If the user is a default VPC customer, RunInstances\n # allows them to specify the security group by name or by id.\n # However, in this scenario we can only support id because\n # we can't place a group name in the NetworkInterfaces structure.\n if 'network_interfaces' in params:\n ni = params['network_interfaces']\n if 'AssociatePublicIpAddress' in ni[0]:\n if 'subnet_id' in params:\n ni[0]['SubnetId'] = params['subnet_id']\n del params['subnet_id']\n if 'security_group_ids' in params:\n ni[0]['Groups'] = params['security_group_ids']\n del params['security_group_ids']\n if 'private_ip_address' in params:\n ip_addr = {'PrivateIpAddress': params['private_ip_address'],\n 'Primary': True}\n ni[0]['PrivateIpAddresses'] = [ip_addr]\n del params['private_ip_address']\n\n\nEVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n ('before-parameter-build.ec2.RunInstances', _fix_args),\n ]\n\n\ndef register_runinstances(event_handler):\n # Register all of the events for customizing BundleInstance\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_network_interfaces(params, key, value):\n # Build up the NetworkInterfaces data structure\n if 'network_interfaces' not in params:\n params['network_interfaces'] = [{'DeviceIndex': 0}]\n\n if key == 'PrivateIpAddresses':\n if 'PrivateIpAddresses' not in params['network_interfaces'][0]:\n params['network_interfaces'][0]['PrivateIpAddresses'] = value\n else:\n params['network_interfaces'][0][key] = value\n\n\nclass SecondaryPrivateIpAddressesArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, nargs='*')\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'PrivateIpAddress': v, 'Primary': False} for\n v in value]\n _build_network_interfaces(parameters,\n 'PrivateIpAddresses',\n value)\n\n\nclass SecondaryPrivateIpAddressCountArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, type=int)\n\n def add_to_params(self, parameters, value):\n if value:\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n\n\nclass AssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is True:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n\n\nclass NoAssociatePublicIpAddressArgument(CustomArgument):\n\n def add_to_params(self, parameters, value):\n if value is False:\n _build_network_interfaces(parameters,\n 'AssociatePublicIpAddress',\n value)\n", "path": "awscli/customizations/ec2runinstances.py"}]}
| 2,483 | 156 |
gh_patches_debug_3327
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-5135
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Presubmit failure
If you look into the test it said
```
Traceback (most recent call last):
File "<string>", line 3, in <module>
File "/usr/local/lib/python3.6/site-packages/kfp/__init__.py", line 24, in <module>
from ._client import Client
File "/usr/local/lib/python3.6/site-packages/kfp/_client.py", line 31, in <module>
from kfp.compiler import compiler
File "/usr/local/lib/python3.6/site-packages/kfp/compiler/__init__.py", line 17, in <module>
from ..containers._component_builder import build_python_component, build_docker_image, VersionedDependency
File "/usr/local/lib/python3.6/site-packages/kfp/containers/_component_builder.py", line 32, in <module>
from kfp.containers import entrypoint
File "/usr/local/lib/python3.6/site-packages/kfp/containers/entrypoint.py", line 23, in <module>
from kfp.containers import entrypoint_utils
File "/usr/local/lib/python3.6/site-packages/kfp/containers/entrypoint_utils.py", line 23, in <module>
from kfp.pipeline_spec import pipeline_spec_pb2
File "/usr/local/lib/python3.6/site-packages/kfp/pipeline_spec/pipeline_spec_pb2.py", line 23, in <module>
create_key=_descriptor._internal_create_key,
AttributeError: module 'google.protobuf.descriptor' has no attribute '_internal_create_key'
```
Looks like the `protobuf` version is not matching in this case. @Bobgy are you aware of this error? Thanks.
_Originally posted by @Tomcli in https://github.com/kubeflow/pipelines/pull/5059#issuecomment-777656530_
/cc @numerology @chensun @Ark-kun
Can you take a look at this issue? I have seen multiple reports, this error seems to fail consistently.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/v2alpha1/python/setup.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import subprocess
17 import sys
18 import setuptools
19 try:
20 from distutils.spawn import find_executable
21 except ImportError:
22 from shutil import which as find_executable
23
24 NAME = "kfp-pipeline-spec"
25 VERSION = "0.1.5"
26
27 PROTO_DIR = os.path.realpath(
28 os.path.join(os.path.dirname(__file__), os.pardir))
29
30 PKG_DIR = os.path.realpath(
31 os.path.join(os.path.dirname(__file__), "kfp", "pipeline_spec"))
32
33 # Find the Protocol Compiler. (Taken from protobuf/python/setup.py)
34 if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
35 PROTOC = os.environ["PROTOC"]
36 else:
37 PROTOC = find_executable("protoc")
38
39
40 def GenerateProto(source):
41 """Generate a _pb2.py from a .proto file.
42
43 Invokes the Protocol Compiler to generate a _pb2.py from the given
44 .proto file. Does nothing if the output already exists and is newer than
45 the input.
46
47 Args:
48 source: The source proto file that needs to be compiled.
49 """
50
51 output = source.replace(".proto", "_pb2.py")
52
53 if not os.path.exists(output) or (
54 os.path.exists(source) and
55 os.path.getmtime(source) > os.path.getmtime(output)):
56 print("Generating %s..." % output)
57
58 if not os.path.exists(source):
59 sys.stderr.write("Can't find required file: %s\n" % source)
60 sys.exit(-1)
61
62 if PROTOC is None:
63 sys.stderr.write("protoc is not found. Please compile it "
64 "or install the binary package.\n")
65 sys.exit(-1)
66
67 protoc_command = [
68 PROTOC, "-I%s" % PROTO_DIR,
69 "--python_out=%s" % PKG_DIR, source
70 ]
71 if subprocess.call(protoc_command) != 0:
72 sys.exit(-1)
73
74
75 # Generate the protobuf files that we depend on.
76 GenerateProto(os.path.join(PROTO_DIR, "pipeline_spec.proto"))
77
78 setuptools.setup(
79 name=NAME,
80 version=VERSION,
81 description="Kubeflow Pipelines pipeline spec",
82 author="google",
83 author_email="[email protected]",
84 url="https://github.com/kubeflow/pipelines",
85 packages=setuptools.find_namespace_packages(include=['kfp.*']),
86 python_requires=">=3.5.3",
87 include_package_data=True,
88 license="Apache 2.0",
89 )
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/v2alpha1/python/setup.py b/api/v2alpha1/python/setup.py
--- a/api/v2alpha1/python/setup.py
+++ b/api/v2alpha1/python/setup.py
@@ -84,6 +84,7 @@
url="https://github.com/kubeflow/pipelines",
packages=setuptools.find_namespace_packages(include=['kfp.*']),
python_requires=">=3.5.3",
+ install_requires=["protobuf>=3.13.0,<4"],
include_package_data=True,
license="Apache 2.0",
)
|
{"golden_diff": "diff --git a/api/v2alpha1/python/setup.py b/api/v2alpha1/python/setup.py\n--- a/api/v2alpha1/python/setup.py\n+++ b/api/v2alpha1/python/setup.py\n@@ -84,6 +84,7 @@\n url=\"https://github.com/kubeflow/pipelines\",\n packages=setuptools.find_namespace_packages(include=['kfp.*']),\n python_requires=\">=3.5.3\",\n+ install_requires=[\"protobuf>=3.13.0,<4\"],\n include_package_data=True,\n license=\"Apache 2.0\",\n )\n", "issue": "Presubmit failure\nIf you look into the test it said\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 3, in <module>\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/__init__.py\", line 24, in <module>\r\n from ._client import Client\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/_client.py\", line 31, in <module>\r\n from kfp.compiler import compiler\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/compiler/__init__.py\", line 17, in <module>\r\n from ..containers._component_builder import build_python_component, build_docker_image, VersionedDependency\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/containers/_component_builder.py\", line 32, in <module>\r\n from kfp.containers import entrypoint\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/containers/entrypoint.py\", line 23, in <module>\r\n from kfp.containers import entrypoint_utils\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/containers/entrypoint_utils.py\", line 23, in <module>\r\n from kfp.pipeline_spec import pipeline_spec_pb2\r\n File \"/usr/local/lib/python3.6/site-packages/kfp/pipeline_spec/pipeline_spec_pb2.py\", line 23, in <module>\r\n create_key=_descriptor._internal_create_key,\r\nAttributeError: module 'google.protobuf.descriptor' has no attribute '_internal_create_key'\r\n```\r\n\r\nLooks like the `protobuf` version is not matching in this case. @Bobgy are you aware of this error? Thanks.\n\n_Originally posted by @Tomcli in https://github.com/kubeflow/pipelines/pull/5059#issuecomment-777656530_\n\n/cc @numerology @chensun @Ark-kun \nCan you take a look at this issue? I have seen multiple reports, this error seems to fail consistently.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport sys\nimport setuptools\ntry:\n from distutils.spawn import find_executable\nexcept ImportError:\n from shutil import which as find_executable\n\nNAME = \"kfp-pipeline-spec\"\nVERSION = \"0.1.5\"\n\nPROTO_DIR = os.path.realpath(\n os.path.join(os.path.dirname(__file__), os.pardir))\n\nPKG_DIR = os.path.realpath(\n os.path.join(os.path.dirname(__file__), \"kfp\", \"pipeline_spec\"))\n\n# Find the Protocol Compiler. (Taken from protobuf/python/setup.py)\nif \"PROTOC\" in os.environ and os.path.exists(os.environ[\"PROTOC\"]):\n PROTOC = os.environ[\"PROTOC\"]\nelse:\n PROTOC = find_executable(\"protoc\")\n\n\ndef GenerateProto(source):\n \"\"\"Generate a _pb2.py from a .proto file.\n\n Invokes the Protocol Compiler to generate a _pb2.py from the given\n .proto file. Does nothing if the output already exists and is newer than\n the input.\n\n Args:\n source: The source proto file that needs to be compiled.\n \"\"\"\n\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.exists(source) and\n os.path.getmtime(source) > os.path.getmtime(output)):\n print(\"Generating %s...\" % output)\n\n if not os.path.exists(source):\n sys.stderr.write(\"Can't find required file: %s\\n\" % source)\n sys.exit(-1)\n\n if PROTOC is None:\n sys.stderr.write(\"protoc is not found. Please compile it \"\n \"or install the binary package.\\n\")\n sys.exit(-1)\n\n protoc_command = [\n PROTOC, \"-I%s\" % PROTO_DIR,\n \"--python_out=%s\" % PKG_DIR, source\n ]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)\n\n\n# Generate the protobuf files that we depend on.\nGenerateProto(os.path.join(PROTO_DIR, \"pipeline_spec.proto\"))\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=\"Kubeflow Pipelines pipeline spec\",\n author=\"google\",\n author_email=\"[email protected]\",\n url=\"https://github.com/kubeflow/pipelines\",\n packages=setuptools.find_namespace_packages(include=['kfp.*']),\n python_requires=\">=3.5.3\",\n include_package_data=True,\n license=\"Apache 2.0\",\n)\n", "path": "api/v2alpha1/python/setup.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport sys\nimport setuptools\ntry:\n from distutils.spawn import find_executable\nexcept ImportError:\n from shutil import which as find_executable\n\nNAME = \"kfp-pipeline-spec\"\nVERSION = \"0.1.5\"\n\nPROTO_DIR = os.path.realpath(\n os.path.join(os.path.dirname(__file__), os.pardir))\n\nPKG_DIR = os.path.realpath(\n os.path.join(os.path.dirname(__file__), \"kfp\", \"pipeline_spec\"))\n\n# Find the Protocol Compiler. (Taken from protobuf/python/setup.py)\nif \"PROTOC\" in os.environ and os.path.exists(os.environ[\"PROTOC\"]):\n PROTOC = os.environ[\"PROTOC\"]\nelse:\n PROTOC = find_executable(\"protoc\")\n\n\ndef GenerateProto(source):\n \"\"\"Generate a _pb2.py from a .proto file.\n\n Invokes the Protocol Compiler to generate a _pb2.py from the given\n .proto file. Does nothing if the output already exists and is newer than\n the input.\n\n Args:\n source: The source proto file that needs to be compiled.\n \"\"\"\n\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.exists(source) and\n os.path.getmtime(source) > os.path.getmtime(output)):\n print(\"Generating %s...\" % output)\n\n if not os.path.exists(source):\n sys.stderr.write(\"Can't find required file: %s\\n\" % source)\n sys.exit(-1)\n\n if PROTOC is None:\n sys.stderr.write(\"protoc is not found. Please compile it \"\n \"or install the binary package.\\n\")\n sys.exit(-1)\n\n protoc_command = [\n PROTOC, \"-I%s\" % PROTO_DIR,\n \"--python_out=%s\" % PKG_DIR, source\n ]\n if subprocess.call(protoc_command) != 0:\n sys.exit(-1)\n\n\n# Generate the protobuf files that we depend on.\nGenerateProto(os.path.join(PROTO_DIR, \"pipeline_spec.proto\"))\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=\"Kubeflow Pipelines pipeline spec\",\n author=\"google\",\n author_email=\"[email protected]\",\n url=\"https://github.com/kubeflow/pipelines\",\n packages=setuptools.find_namespace_packages(include=['kfp.*']),\n python_requires=\">=3.5.3\",\n install_requires=[\"protobuf>=3.13.0,<4\"],\n include_package_data=True,\n license=\"Apache 2.0\",\n)\n", "path": "api/v2alpha1/python/setup.py"}]}
| 1,565 | 126 |
gh_patches_debug_24758
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-9627
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Dropbox] "No video formats found!" on some pages
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Provide a description that is worded well enough to be understood
For some videos hosted on Dropbox yt-dlp gives an error "No video formats found!" Others are downloaded without problems. The example of a problematic URL is https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0 (Warning: NSFW!)
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-F', 'https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0', '-vU']
[debug] User config "/home/tkzv/yt-dlp.conf": ['--alias', 'tg', '--write-info-json -o "%(channel)s[%(id)s].%(ext)s"']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [e5d4f1110] (pip)
[debug] Python 3.11.8 (CPython x86_64 64bit) - Linux-5.15.32-gentoo-r1-x86_64-AMD_FX-tm-8300_Eight-Core_Processor-with-glibc2.38 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.38)
[debug] exe versions: ffmpeg 6.0.1 (fdk,setts), ffprobe 6.0.1, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.19.0, brotli-1.1.0, certifi-2023.07.22, mutagen-1.47.0, requests-2.31.0, sqlite3-3.45.1, urllib3-2.1.0, websockets-12.0
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1807 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: [email protected] from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)
[Dropbox] Extracting URL: https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0
[Dropbox] h4nny8z62nnhezj: Downloading webpage
ERROR: [Dropbox] h4nny8z62nnhezj: No video formats found!; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
Traceback (most recent call last):
File "/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py", line 1605, in wrapper
return func(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py", line 1761, in __extract_info
return self.process_ie_result(ie_result, download, extra_info)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py", line 1820, in process_ie_result
ie_result = self.process_video_result(ie_result, download=download)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py", line 2808, in process_video_result
self.raise_no_formats(info_dict)
File "/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py", line 1103, in raise_no_formats
raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
yt_dlp.utils.ExtractorError: [Dropbox] h4nny8z62nnhezj: No video formats found!; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/dropbox.py`
Content:
```
1 import base64
2 import os.path
3 import re
4
5 from .common import InfoExtractor
6 from ..compat import compat_urllib_parse_unquote
7 from ..utils import (
8 ExtractorError,
9 update_url_query,
10 url_basename,
11 )
12
13
14 class DropboxIE(InfoExtractor):
15 _VALID_URL = r'https?://(?:www\.)?dropbox\.com/(?:(?:e/)?scl/fi|sh?)/(?P<id>\w+)'
16 _TESTS = [
17 {
18 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
19 'info_dict': {
20 'id': 'nelirfsxnmcfbfh',
21 'ext': 'mp4',
22 'title': 'youtube-dl test video \'ä"BaW_jenozKc'
23 }
24 }, {
25 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh',
26 'only_matching': True,
27 }, {
28 'url': 'https://www.dropbox.com/sh/2mgpiuq7kv8nqdf/AABy-fW4dkydT4GmWi2mdOUDa?dl=0&preview=Drone+Shot.mp4',
29 'only_matching': True,
30 }, {
31 'url': 'https://www.dropbox.com/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',
32 'only_matching': True,
33 }, {
34 'url': 'https://www.dropbox.com/e/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',
35 'only_matching': True,
36 },
37 ]
38
39 def _real_extract(self, url):
40 mobj = self._match_valid_url(url)
41 video_id = mobj.group('id')
42 webpage = self._download_webpage(url, video_id)
43 fn = compat_urllib_parse_unquote(url_basename(url))
44 title = os.path.splitext(fn)[0]
45
46 password = self.get_param('videopassword')
47 if (self._og_search_title(webpage) == 'Dropbox - Password Required'
48 or 'Enter the password for this link' in webpage):
49
50 if password:
51 content_id = self._search_regex(r'content_id=(.*?)["\']', webpage, 'content_id')
52 payload = f'is_xhr=true&t={self._get_cookies("https://www.dropbox.com").get("t").value}&content_id={content_id}&password={password}&url={url}'
53 response = self._download_json(
54 'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password', data=payload.encode('UTF-8'),
55 headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})
56
57 if response.get('status') != 'authed':
58 raise ExtractorError('Authentication failed!', expected=True)
59 webpage = self._download_webpage(url, video_id)
60 elif self._get_cookies('https://dropbox.com').get('sm_auth'):
61 webpage = self._download_webpage(url, video_id)
62 else:
63 raise ExtractorError('Password protected video, use --video-password <password>', expected=True)
64
65 formats, subtitles, has_anonymous_download = [], {}, False
66 for encoded in reversed(re.findall(r'registerStreamedPrefetch\s*\(\s*"[\w/+=]+"\s*,\s*"([\w/+=]+)"', webpage)):
67 decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')
68 transcode_url = self._search_regex(
69 r'\n.(https://[^\x03\x08\x12\n]+\.m3u8)', decoded, 'transcode url', default=None)
70 if not transcode_url:
71 continue
72 formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')
73 has_anonymous_download = self._search_regex(r'(anonymous:\tanonymous)', decoded, 'anonymous', default=False)
74 break
75
76 # downloads enabled we can get the original file
77 if has_anonymous_download:
78 formats.append({
79 'url': update_url_query(url, {'dl': '1'}),
80 'format_id': 'original',
81 'format_note': 'Original',
82 'quality': 1
83 })
84
85 return {
86 'id': video_id,
87 'title': title,
88 'formats': formats,
89 'subtitles': subtitles
90 }
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/dropbox.py b/yt_dlp/extractor/dropbox.py
--- a/yt_dlp/extractor/dropbox.py
+++ b/yt_dlp/extractor/dropbox.py
@@ -65,12 +65,14 @@
formats, subtitles, has_anonymous_download = [], {}, False
for encoded in reversed(re.findall(r'registerStreamedPrefetch\s*\(\s*"[\w/+=]+"\s*,\s*"([\w/+=]+)"', webpage)):
decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')
+ if not has_anonymous_download:
+ has_anonymous_download = self._search_regex(
+ r'(anonymous:\tanonymous)', decoded, 'anonymous', default=False)
transcode_url = self._search_regex(
r'\n.(https://[^\x03\x08\x12\n]+\.m3u8)', decoded, 'transcode url', default=None)
if not transcode_url:
continue
formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')
- has_anonymous_download = self._search_regex(r'(anonymous:\tanonymous)', decoded, 'anonymous', default=False)
break
# downloads enabled we can get the original file
|
{"golden_diff": "diff --git a/yt_dlp/extractor/dropbox.py b/yt_dlp/extractor/dropbox.py\n--- a/yt_dlp/extractor/dropbox.py\n+++ b/yt_dlp/extractor/dropbox.py\n@@ -65,12 +65,14 @@\n formats, subtitles, has_anonymous_download = [], {}, False\n for encoded in reversed(re.findall(r'registerStreamedPrefetch\\s*\\(\\s*\"[\\w/+=]+\"\\s*,\\s*\"([\\w/+=]+)\"', webpage)):\n decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')\n+ if not has_anonymous_download:\n+ has_anonymous_download = self._search_regex(\n+ r'(anonymous:\\tanonymous)', decoded, 'anonymous', default=False)\n transcode_url = self._search_regex(\n r'\\n.(https://[^\\x03\\x08\\x12\\n]+\\.m3u8)', decoded, 'transcode url', default=None)\n if not transcode_url:\n continue\n formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')\n- has_anonymous_download = self._search_regex(r'(anonymous:\\tanonymous)', decoded, 'anonymous', default=False)\n break\n \n # downloads enabled we can get the original file\n", "issue": "[Dropbox] \"No video formats found!\" on some pages\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\n_No response_\n\n### Provide a description that is worded well enough to be understood\n\nFor some videos hosted on Dropbox yt-dlp gives an error \"No video formats found!\" Others are downloaded without problems. The example of a problematic URL is https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0 (Warning: NSFW!)\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['-F', 'https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0', '-vU']\r\n[debug] User config \"/home/tkzv/yt-dlp.conf\": ['--alias', 'tg', '--write-info-json -o \"%(channel)s[%(id)s].%(ext)s\"']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp-nightly-builds [e5d4f1110] (pip)\r\n[debug] Python 3.11.8 (CPython x86_64 64bit) - Linux-5.15.32-gentoo-r1-x86_64-AMD_FX-tm-8300_Eight-Core_Processor-with-glibc2.38 (OpenSSL 3.0.13 30 Jan 2024, glibc 2.38)\r\n[debug] exe versions: ffmpeg 6.0.1 (fdk,setts), ffprobe 6.0.1, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.19.0, brotli-1.1.0, certifi-2023.07.22, mutagen-1.47.0, requests-2.31.0, sqlite3-3.45.1, urllib3-2.1.0, websockets-12.0\r\n[debug] Proxy map: {}\r\n[debug] Request Handlers: urllib, requests, websockets\r\n[debug] Loaded 1807 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest\r\nLatest version: [email protected] from yt-dlp/yt-dlp-nightly-builds\r\nyt-dlp is up to date ([email protected] from yt-dlp/yt-dlp-nightly-builds)\r\n[Dropbox] Extracting URL: https://www.dropbox.com/s/h4nny8z62nnhezj/Side.Milf.Milk.mp4?dl=0\r\n[Dropbox] h4nny8z62nnhezj: Downloading webpage\r\nERROR: [Dropbox] h4nny8z62nnhezj: No video formats found!; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U\r\nTraceback (most recent call last):\r\n File \"/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py\", line 1605, in wrapper\r\n return func(self, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py\", line 1761, in __extract_info\r\n return self.process_ie_result(ie_result, download, extra_info)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py\", line 1820, in process_ie_result\r\n ie_result = self.process_video_result(ie_result, download=download)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py\", line 2808, in process_video_result\r\n self.raise_no_formats(info_dict)\r\n File \"/home/tkzv/.local/lib/python3.11/site-packages/yt_dlp/YoutubeDL.py\", line 1103, in raise_no_formats\r\n raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],\r\nyt_dlp.utils.ExtractorError: [Dropbox] h4nny8z62nnhezj: No video formats found!; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U\n```\n\n", "before_files": [{"content": "import base64\nimport os.path\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_urllib_parse_unquote\nfrom ..utils import (\n ExtractorError,\n update_url_query,\n url_basename,\n)\n\n\nclass DropboxIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?dropbox\\.com/(?:(?:e/)?scl/fi|sh?)/(?P<id>\\w+)'\n _TESTS = [\n {\n 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',\n 'info_dict': {\n 'id': 'nelirfsxnmcfbfh',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video \\'\u00e4\"BaW_jenozKc'\n }\n }, {\n 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/sh/2mgpiuq7kv8nqdf/AABy-fW4dkydT4GmWi2mdOUDa?dl=0&preview=Drone+Shot.mp4',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/e/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',\n 'only_matching': True,\n },\n ]\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n fn = compat_urllib_parse_unquote(url_basename(url))\n title = os.path.splitext(fn)[0]\n\n password = self.get_param('videopassword')\n if (self._og_search_title(webpage) == 'Dropbox - Password Required'\n or 'Enter the password for this link' in webpage):\n\n if password:\n content_id = self._search_regex(r'content_id=(.*?)[\"\\']', webpage, 'content_id')\n payload = f'is_xhr=true&t={self._get_cookies(\"https://www.dropbox.com\").get(\"t\").value}&content_id={content_id}&password={password}&url={url}'\n response = self._download_json(\n 'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password', data=payload.encode('UTF-8'),\n headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})\n\n if response.get('status') != 'authed':\n raise ExtractorError('Authentication failed!', expected=True)\n webpage = self._download_webpage(url, video_id)\n elif self._get_cookies('https://dropbox.com').get('sm_auth'):\n webpage = self._download_webpage(url, video_id)\n else:\n raise ExtractorError('Password protected video, use --video-password <password>', expected=True)\n\n formats, subtitles, has_anonymous_download = [], {}, False\n for encoded in reversed(re.findall(r'registerStreamedPrefetch\\s*\\(\\s*\"[\\w/+=]+\"\\s*,\\s*\"([\\w/+=]+)\"', webpage)):\n decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')\n transcode_url = self._search_regex(\n r'\\n.(https://[^\\x03\\x08\\x12\\n]+\\.m3u8)', decoded, 'transcode url', default=None)\n if not transcode_url:\n continue\n formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')\n has_anonymous_download = self._search_regex(r'(anonymous:\\tanonymous)', decoded, 'anonymous', default=False)\n break\n\n # downloads enabled we can get the original file\n if has_anonymous_download:\n formats.append({\n 'url': update_url_query(url, {'dl': '1'}),\n 'format_id': 'original',\n 'format_note': 'Original',\n 'quality': 1\n })\n\n return {\n 'id': video_id,\n 'title': title,\n 'formats': formats,\n 'subtitles': subtitles\n }\n", "path": "yt_dlp/extractor/dropbox.py"}], "after_files": [{"content": "import base64\nimport os.path\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_urllib_parse_unquote\nfrom ..utils import (\n ExtractorError,\n update_url_query,\n url_basename,\n)\n\n\nclass DropboxIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?dropbox\\.com/(?:(?:e/)?scl/fi|sh?)/(?P<id>\\w+)'\n _TESTS = [\n {\n 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',\n 'info_dict': {\n 'id': 'nelirfsxnmcfbfh',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video \\'\u00e4\"BaW_jenozKc'\n }\n }, {\n 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/sh/2mgpiuq7kv8nqdf/AABy-fW4dkydT4GmWi2mdOUDa?dl=0&preview=Drone+Shot.mp4',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',\n 'only_matching': True,\n }, {\n 'url': 'https://www.dropbox.com/e/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h',\n 'only_matching': True,\n },\n ]\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n video_id = mobj.group('id')\n webpage = self._download_webpage(url, video_id)\n fn = compat_urllib_parse_unquote(url_basename(url))\n title = os.path.splitext(fn)[0]\n\n password = self.get_param('videopassword')\n if (self._og_search_title(webpage) == 'Dropbox - Password Required'\n or 'Enter the password for this link' in webpage):\n\n if password:\n content_id = self._search_regex(r'content_id=(.*?)[\"\\']', webpage, 'content_id')\n payload = f'is_xhr=true&t={self._get_cookies(\"https://www.dropbox.com\").get(\"t\").value}&content_id={content_id}&password={password}&url={url}'\n response = self._download_json(\n 'https://www.dropbox.com/sm/auth', video_id, 'POSTing video password', data=payload.encode('UTF-8'),\n headers={'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})\n\n if response.get('status') != 'authed':\n raise ExtractorError('Authentication failed!', expected=True)\n webpage = self._download_webpage(url, video_id)\n elif self._get_cookies('https://dropbox.com').get('sm_auth'):\n webpage = self._download_webpage(url, video_id)\n else:\n raise ExtractorError('Password protected video, use --video-password <password>', expected=True)\n\n formats, subtitles, has_anonymous_download = [], {}, False\n for encoded in reversed(re.findall(r'registerStreamedPrefetch\\s*\\(\\s*\"[\\w/+=]+\"\\s*,\\s*\"([\\w/+=]+)\"', webpage)):\n decoded = base64.b64decode(encoded).decode('utf-8', 'ignore')\n if not has_anonymous_download:\n has_anonymous_download = self._search_regex(\n r'(anonymous:\\tanonymous)', decoded, 'anonymous', default=False)\n transcode_url = self._search_regex(\n r'\\n.(https://[^\\x03\\x08\\x12\\n]+\\.m3u8)', decoded, 'transcode url', default=None)\n if not transcode_url:\n continue\n formats, subtitles = self._extract_m3u8_formats_and_subtitles(transcode_url, video_id, 'mp4')\n break\n\n # downloads enabled we can get the original file\n if has_anonymous_download:\n formats.append({\n 'url': update_url_query(url, {'dl': '1'}),\n 'format_id': 'original',\n 'format_note': 'Original',\n 'quality': 1\n })\n\n return {\n 'id': video_id,\n 'title': title,\n 'formats': formats,\n 'subtitles': subtitles\n }\n", "path": "yt_dlp/extractor/dropbox.py"}]}
| 3,150 | 306 |
gh_patches_debug_26964
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-4274
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI sends emails with unicode in header
**Describe the bug**
<!-- A clear and concise description the bug -->
I created a new account on pypi.org, but I am not receiving any email verification link by email - not after accouunt creation and also not after resending the verification link (even if pypi.org claims "Verification email for [email protected] resent").
test.pypi.org works for me, using the same account name and email address. I deleted and recreated the account on both pypi.org and test.pypi.org - same behaviour (well, the initial email did sometimes not arrive on test.pypi.org, but the the verification resend always arrived right away).
**Expected behavior**
<!-- A clear and concise description of what you expected to happen -->
I should get an email token after registering, and also when I resend the verification mail from the account page. I know it might take a while, but I am experiencing this since Friday.
**To Reproduce**
<!-- Steps to reproduce the bug, or a link to PyPI where the bug is visible -->
Register on pypi.org, wait for email. Resend the verification email from the account settings page, wait again.
**My Platform**
<!--
Any details about your specific platform:
* If the problem is in the browser, what browser, version, and OS?
* If the problem is with a command-line tool, what version of that tool?
* If the problem is with connecting to PyPI, include some details about
your network, including SSL/TLS implementation in use, internet service
provider, and if there are any firewalls or proxies in use.
-->
Firefox 52
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/email/services.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from email.headerregistry import Address
14 from email.utils import parseaddr
15
16 from pyramid_mailer import get_mailer
17 from pyramid_mailer.message import Message
18 from zope.interface import implementer
19
20 from warehouse.email.interfaces import IEmailSender
21 from warehouse.email.ses.models import EmailMessage
22
23
24 def _format_sender(sitename, sender):
25 if sender is not None:
26 return str(Address(sitename, addr_spec=sender))
27
28
29 @implementer(IEmailSender)
30 class SMTPEmailSender:
31 def __init__(self, mailer, sender=None):
32 self.mailer = mailer
33 self.sender = sender
34
35 @classmethod
36 def create_service(cls, context, request):
37 sitename = request.registry.settings["site.name"]
38 sender = _format_sender(sitename, request.registry.settings.get("mail.sender"))
39 return cls(get_mailer(request), sender=sender)
40
41 def send(self, subject, body, *, recipient):
42 message = Message(
43 subject=subject, body=body, recipients=[recipient], sender=self.sender
44 )
45 self.mailer.send_immediately(message)
46
47
48 @implementer(IEmailSender)
49 class SESEmailSender:
50 def __init__(self, client, *, sender=None, db):
51 self._client = client
52 self._sender = sender
53 self._db = db
54
55 @classmethod
56 def create_service(cls, context, request):
57 sitename = request.registry.settings["site.name"]
58 sender = _format_sender(sitename, request.registry.settings.get("mail.sender"))
59
60 aws_session = request.find_service(name="aws.session")
61
62 return cls(
63 aws_session.client(
64 "ses", region_name=request.registry.settings.get("mail.region")
65 ),
66 sender=sender,
67 db=request.db,
68 )
69
70 def send(self, subject, body, *, recipient):
71 resp = self._client.send_email(
72 Source=self._sender,
73 Destination={"ToAddresses": [recipient]},
74 Message={
75 "Subject": {"Data": subject, "Charset": "UTF-8"},
76 "Body": {"Text": {"Data": body, "Charset": "UTF-8"}},
77 },
78 )
79
80 self._db.add(
81 EmailMessage(
82 message_id=resp["MessageId"],
83 from_=parseaddr(self._sender)[1],
84 to=parseaddr(recipient)[1],
85 subject=subject,
86 )
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/email/services.py b/warehouse/email/services.py
--- a/warehouse/email/services.py
+++ b/warehouse/email/services.py
@@ -11,7 +11,9 @@
# limitations under the License.
from email.headerregistry import Address
-from email.utils import parseaddr
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.utils import parseaddr, formataddr
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
@@ -68,13 +70,25 @@
)
def send(self, subject, body, *, recipient):
- resp = self._client.send_email(
+ message = MIMEMultipart("mixed")
+ message["Subject"] = subject
+ message["From"] = self._sender
+
+ # The following is necessary to support friendly names with Unicode characters,
+ # otherwise the entire value will get encoded and will not be accepted by SES:
+ #
+ # >>> parseaddr("Fööbar <[email protected]>")
+ # ('Fööbar', '[email protected]')
+ # >>> formataddr(_)
+ # '=?utf-8?b?RsO2w7ZiYXI=?= <[email protected]>'
+ message["To"] = formataddr(parseaddr(recipient))
+
+ message.attach(MIMEText(body, "plain", "utf-8"))
+
+ resp = self._client.send_raw_email(
Source=self._sender,
- Destination={"ToAddresses": [recipient]},
- Message={
- "Subject": {"Data": subject, "Charset": "UTF-8"},
- "Body": {"Text": {"Data": body, "Charset": "UTF-8"}},
- },
+ Destinations=[recipient],
+ RawMessage={"Data": message.as_string()},
)
self._db.add(
|
{"golden_diff": "diff --git a/warehouse/email/services.py b/warehouse/email/services.py\n--- a/warehouse/email/services.py\n+++ b/warehouse/email/services.py\n@@ -11,7 +11,9 @@\n # limitations under the License.\n \n from email.headerregistry import Address\n-from email.utils import parseaddr\n+from email.mime.multipart import MIMEMultipart\n+from email.mime.text import MIMEText\n+from email.utils import parseaddr, formataddr\n \n from pyramid_mailer import get_mailer\n from pyramid_mailer.message import Message\n@@ -68,13 +70,25 @@\n )\n \n def send(self, subject, body, *, recipient):\n- resp = self._client.send_email(\n+ message = MIMEMultipart(\"mixed\")\n+ message[\"Subject\"] = subject\n+ message[\"From\"] = self._sender\n+\n+ # The following is necessary to support friendly names with Unicode characters,\n+ # otherwise the entire value will get encoded and will not be accepted by SES:\n+ #\n+ # >>> parseaddr(\"F\u00f6\u00f6bar <[email protected]>\")\n+ # ('F\u00f6\u00f6bar', '[email protected]')\n+ # >>> formataddr(_)\n+ # '=?utf-8?b?RsO2w7ZiYXI=?= <[email protected]>'\n+ message[\"To\"] = formataddr(parseaddr(recipient))\n+\n+ message.attach(MIMEText(body, \"plain\", \"utf-8\"))\n+\n+ resp = self._client.send_raw_email(\n Source=self._sender,\n- Destination={\"ToAddresses\": [recipient]},\n- Message={\n- \"Subject\": {\"Data\": subject, \"Charset\": \"UTF-8\"},\n- \"Body\": {\"Text\": {\"Data\": body, \"Charset\": \"UTF-8\"}},\n- },\n+ Destinations=[recipient],\n+ RawMessage={\"Data\": message.as_string()},\n )\n \n self._db.add(\n", "issue": "PyPI sends emails with unicode in header\n**Describe the bug**\r\n<!-- A clear and concise description the bug -->\r\nI created a new account on pypi.org, but I am not receiving any email verification link by email - not after accouunt creation and also not after resending the verification link (even if pypi.org claims \"Verification email for [email protected] resent\").\r\n\r\ntest.pypi.org works for me, using the same account name and email address. I deleted and recreated the account on both pypi.org and test.pypi.org - same behaviour (well, the initial email did sometimes not arrive on test.pypi.org, but the the verification resend always arrived right away).\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen -->\r\nI should get an email token after registering, and also when I resend the verification mail from the account page. I know it might take a while, but I am experiencing this since Friday.\r\n\r\n**To Reproduce**\r\n<!-- Steps to reproduce the bug, or a link to PyPI where the bug is visible -->\r\nRegister on pypi.org, wait for email. Resend the verification email from the account settings page, wait again.\r\n\r\n**My Platform**\r\n<!--\r\n Any details about your specific platform:\r\n * If the problem is in the browser, what browser, version, and OS?\r\n * If the problem is with a command-line tool, what version of that tool?\r\n * If the problem is with connecting to PyPI, include some details about\r\n your network, including SSL/TLS implementation in use, internet service\r\n provider, and if there are any firewalls or proxies in use.\r\n-->\r\nFirefox 52\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom email.headerregistry import Address\nfrom email.utils import parseaddr\n\nfrom pyramid_mailer import get_mailer\nfrom pyramid_mailer.message import Message\nfrom zope.interface import implementer\n\nfrom warehouse.email.interfaces import IEmailSender\nfrom warehouse.email.ses.models import EmailMessage\n\n\ndef _format_sender(sitename, sender):\n if sender is not None:\n return str(Address(sitename, addr_spec=sender))\n\n\n@implementer(IEmailSender)\nclass SMTPEmailSender:\n def __init__(self, mailer, sender=None):\n self.mailer = mailer\n self.sender = sender\n\n @classmethod\n def create_service(cls, context, request):\n sitename = request.registry.settings[\"site.name\"]\n sender = _format_sender(sitename, request.registry.settings.get(\"mail.sender\"))\n return cls(get_mailer(request), sender=sender)\n\n def send(self, subject, body, *, recipient):\n message = Message(\n subject=subject, body=body, recipients=[recipient], sender=self.sender\n )\n self.mailer.send_immediately(message)\n\n\n@implementer(IEmailSender)\nclass SESEmailSender:\n def __init__(self, client, *, sender=None, db):\n self._client = client\n self._sender = sender\n self._db = db\n\n @classmethod\n def create_service(cls, context, request):\n sitename = request.registry.settings[\"site.name\"]\n sender = _format_sender(sitename, request.registry.settings.get(\"mail.sender\"))\n\n aws_session = request.find_service(name=\"aws.session\")\n\n return cls(\n aws_session.client(\n \"ses\", region_name=request.registry.settings.get(\"mail.region\")\n ),\n sender=sender,\n db=request.db,\n )\n\n def send(self, subject, body, *, recipient):\n resp = self._client.send_email(\n Source=self._sender,\n Destination={\"ToAddresses\": [recipient]},\n Message={\n \"Subject\": {\"Data\": subject, \"Charset\": \"UTF-8\"},\n \"Body\": {\"Text\": {\"Data\": body, \"Charset\": \"UTF-8\"}},\n },\n )\n\n self._db.add(\n EmailMessage(\n message_id=resp[\"MessageId\"],\n from_=parseaddr(self._sender)[1],\n to=parseaddr(recipient)[1],\n subject=subject,\n )\n )\n", "path": "warehouse/email/services.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom email.headerregistry import Address\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\n\nfrom pyramid_mailer import get_mailer\nfrom pyramid_mailer.message import Message\nfrom zope.interface import implementer\n\nfrom warehouse.email.interfaces import IEmailSender\nfrom warehouse.email.ses.models import EmailMessage\n\n\ndef _format_sender(sitename, sender):\n if sender is not None:\n return str(Address(sitename, addr_spec=sender))\n\n\n@implementer(IEmailSender)\nclass SMTPEmailSender:\n def __init__(self, mailer, sender=None):\n self.mailer = mailer\n self.sender = sender\n\n @classmethod\n def create_service(cls, context, request):\n sitename = request.registry.settings[\"site.name\"]\n sender = _format_sender(sitename, request.registry.settings.get(\"mail.sender\"))\n return cls(get_mailer(request), sender=sender)\n\n def send(self, subject, body, *, recipient):\n message = Message(\n subject=subject, body=body, recipients=[recipient], sender=self.sender\n )\n self.mailer.send_immediately(message)\n\n\n@implementer(IEmailSender)\nclass SESEmailSender:\n def __init__(self, client, *, sender=None, db):\n self._client = client\n self._sender = sender\n self._db = db\n\n @classmethod\n def create_service(cls, context, request):\n sitename = request.registry.settings[\"site.name\"]\n sender = _format_sender(sitename, request.registry.settings.get(\"mail.sender\"))\n\n aws_session = request.find_service(name=\"aws.session\")\n\n return cls(\n aws_session.client(\n \"ses\", region_name=request.registry.settings.get(\"mail.region\")\n ),\n sender=sender,\n db=request.db,\n )\n\n def send(self, subject, body, *, recipient):\n message = MIMEMultipart(\"mixed\")\n message[\"Subject\"] = subject\n message[\"From\"] = self._sender\n\n # The following is necessary to support friendly names with Unicode characters,\n # otherwise the entire value will get encoded and will not be accepted by SES:\n #\n # >>> parseaddr(\"F\u00f6\u00f6bar <[email protected]>\")\n # ('F\u00f6\u00f6bar', '[email protected]')\n # >>> formataddr(_)\n # '=?utf-8?b?RsO2w7ZiYXI=?= <[email protected]>'\n message[\"To\"] = formataddr(parseaddr(recipient))\n\n message.attach(MIMEText(body, \"plain\", \"utf-8\"))\n\n resp = self._client.send_raw_email(\n Source=self._sender,\n Destinations=[recipient],\n RawMessage={\"Data\": message.as_string()},\n )\n\n self._db.add(\n EmailMessage(\n message_id=resp[\"MessageId\"],\n from_=parseaddr(self._sender)[1],\n to=parseaddr(recipient)[1],\n subject=subject,\n )\n )\n", "path": "warehouse/email/services.py"}]}
| 1,408 | 425 |
gh_patches_debug_12352
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-3677
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Search / Criteria"Exact" overwrites 'Add" button
The search is a great feature (quick to fill, powerfull - with advanced criterias - and results are fast and reliable) but I have noticed an issue in the layout : "Exact" (which seems to be an untranslatable string yet) overwrite the "Add" button (at least in french, the full french label is "Ajouter"); see: 
page https://hosted.weblate.org/projects/weblate/master/#search
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/utils/search.py`
Content:
```
1 #
2 # Copyright © 2012 - 2020 Michal Čihař <[email protected]>
3 #
4 # This file is part of Weblate <https://weblate.org/>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <https://www.gnu.org/licenses/>.
18 #
19
20
21 import re
22 from functools import lru_cache, reduce
23
24 import whoosh.qparser
25 import whoosh.qparser.dateparse
26 import whoosh.query
27 from django.db.models import Q
28 from django.utils import timezone
29 from django.utils.translation import gettext as _
30 from jellyfish import damerau_levenshtein_distance
31 from whoosh.fields import BOOLEAN, DATETIME, NUMERIC, TEXT, Schema
32
33 from weblate.trans.util import PLURAL_SEPARATOR
34 from weblate.utils.state import (
35 STATE_APPROVED,
36 STATE_NAMES,
37 STATE_READONLY,
38 STATE_TRANSLATED,
39 )
40
41
42 class Comparer:
43 """String comparer abstraction.
44
45 The reason is to be able to change implementation.
46 """
47
48 def similarity(self, first, second):
49 """Returns string similarity in range 0 - 100%."""
50 try:
51 distance = damerau_levenshtein_distance(first, second)
52 return int(
53 100 * (1.0 - (float(distance) / max(len(first), len(second), 1)))
54 )
55 except MemoryError:
56 # Too long string, mark them as not much similar
57 return 50
58
59
60 class QuotePlugin(whoosh.qparser.SingleQuotePlugin):
61 """Single and double quotes to specify a term."""
62
63 expr = r"(^|(?<=\W))['\"](?P<text>.*?)['\"](?=\s|\]|[)}]|$)"
64
65
66 class Exact(whoosh.query.Term):
67 """Class for queries with exact operator."""
68
69 pass
70
71
72 class ExactPlugin(QuotePlugin):
73 """Exact match plugin with quotes to specify an exact term."""
74
75 class ExactNode(whoosh.qparser.syntax.TextNode):
76 qclass = Exact
77
78 def r(self):
79 return "Exact %r" % self.text
80
81 expr = r"\=(^|(?<=\W))['\"](?P<text>.*?)['\"](?=\s|\]|[)}]|$)"
82 nodetype = ExactNode
83
84
85 class GtLtPlugin(whoosh.qparser.GtLtPlugin):
86 """GtLt plugin taggin only after ":"."""
87
88 def match(self, parser, text, pos):
89 if pos == 0 or text[pos - 1] != ":":
90 return None
91 return super().match(parser, text, pos)
92
93
94 class DateParser(whoosh.qparser.dateparse.English):
95 def setup(self):
96 super().setup()
97 # We prefer simple parser prior to datetime
98 # This might not be necessary after following issue is fixed:
99 # https://github.com/whoosh-community/whoosh/issues/552
100 self.bundle.elements = (self.plusdate, self.simple, self.datetime)
101
102
103 class NumberField(NUMERIC):
104 def to_bytes(self, x, shift=0):
105 return int(x)
106
107
108 class StateField(NumberField):
109 def parse_query(self, fieldname, qstring, boost=1.0):
110 return super().parse_query(fieldname, state_to_int(qstring), boost)
111
112 def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):
113 return super().parse_range(
114 fieldname, state_to_int(start), state_to_int(end), startexcl, endexcl, boost
115 )
116
117
118 def state_to_int(text):
119 if text is None:
120 return None
121 try:
122 return STATE_NAMES[text]
123 except KeyError:
124 raise ValueError(_("Unsupported state: {}").format(text))
125
126
127 class QueryParser(whoosh.qparser.QueryParser):
128 """Weblate query parser, differences to Whoosh default.
129
130 - no phrase plugin
131 - <> operators support
132 - double and single quotes behave identical
133 - multifield lookup for unspecified terms
134 """
135
136 def __init__(self):
137 # Define fields for parsing
138 fields = {
139 # Unit fields
140 "source": TEXT,
141 "target": TEXT,
142 "context": TEXT,
143 "note": TEXT,
144 "location": TEXT,
145 "priority": NumberField,
146 "added": DATETIME,
147 "state": StateField,
148 "pending": BOOLEAN,
149 "has": TEXT,
150 "is": TEXT,
151 # Language
152 "language": TEXT,
153 # Change fields
154 "changed": DATETIME,
155 "changed_by": TEXT,
156 # Unit data
157 "check": TEXT,
158 "ignored_check": TEXT,
159 "suggestion": TEXT,
160 "suggestion_author": TEXT,
161 "comment": TEXT,
162 "comment_author": TEXT,
163 "label": TEXT,
164 }
165 schema = Schema(**fields)
166
167 # List of plugins
168 plugins = [
169 whoosh.qparser.WhitespacePlugin(),
170 QuotePlugin(),
171 whoosh.qparser.FieldsPlugin(),
172 whoosh.qparser.RangePlugin(),
173 GtLtPlugin(),
174 ExactPlugin(),
175 whoosh.qparser.RegexPlugin(),
176 whoosh.qparser.GroupPlugin(),
177 whoosh.qparser.OperatorsPlugin(),
178 whoosh.qparser.dateparse.DateParserPlugin(dateparser=DateParser()),
179 whoosh.qparser.MultifieldPlugin(["source", "target", "context"]),
180 ]
181 super().__init__(None, schema, plugins=plugins)
182
183 def term_query(
184 self, fieldname, text, termclass, boost=1.0, tokenize=True, removestops=True
185 ):
186 if self.schema and fieldname in self.schema:
187 if isinstance(self.schema[fieldname], TEXT):
188 return termclass(fieldname, text, boost=boost)
189 return super().term_query(
190 fieldname, text, termclass, boost, tokenize, removestops
191 )
192
193
194 PARSER = QueryParser()
195
196 PLAIN_FIELDS = ("source", "target", "context", "note", "location")
197 FIELD_MAP = {"changed": "change__timestamp", "added": "timestamp"}
198 STRING_FIELD_MAP = {"suggestion": "suggestion__target", "comment": "comment__comment"}
199 EXACT_FIELD_MAP = {
200 "check": "check__check",
201 "ignored_check": "check__check",
202 "language": "translation__language__code",
203 "changed_by": "change__author__username",
204 "suggestion_author": "suggestion__user__username",
205 "comment_author": "comment__user__username",
206 "label": "labels__name",
207 }
208
209
210 def field_name(field, suffix="substring"):
211 if field in FIELD_MAP:
212 return FIELD_MAP[field]
213 if field in PLAIN_FIELDS:
214 return "{}__{}".format(field, suffix)
215 if field in STRING_FIELD_MAP:
216 return "{}__{}".format(STRING_FIELD_MAP[field], suffix)
217 if field in EXACT_FIELD_MAP:
218 # Change contains to exact, do not change other (for example regex)
219 if suffix == "substring":
220 suffix = "iexact"
221 return "{}__{}".format(EXACT_FIELD_MAP[field], suffix)
222 return field
223
224
225 def field_extra(field, query):
226 from weblate.trans.models import Change
227
228 if field in {"changed", "changed_by"}:
229 return query & Q(change__action__in=Change.ACTIONS_CONTENT)
230 if field == "check":
231 return query & Q(check__ignore=False)
232 if field == "ignored_check":
233 return query & Q(check__ignore=True)
234 return query
235
236
237 def range_sql(field, start, end, startexcl, endexcl, conv=int):
238 def range_lookup(field, op, value):
239 return {"{}__{}".format(field_name(field), op): conv(value)}
240
241 gte = "gt" if startexcl else "gte"
242 lte = "lt" if endexcl else "lte"
243
244 if start is not None and end is not None:
245 return Q(**range_lookup(field, gte, start)) & Q(**range_lookup(field, lte, end))
246 if start is not None:
247 return Q(**range_lookup(field, gte, start))
248 return Q(**range_lookup(field, lte, end))
249
250
251 def has_sql(text):
252 if text == "plural":
253 return Q(source__contains=PLURAL_SEPARATOR)
254 if text == "suggestion":
255 return Q(has_suggestion=True)
256 if text == "comment":
257 return Q(has_comment=True)
258 if text in ("check", "failing-check", "failing_check"):
259 return Q(has_failing_check=True)
260 if text in ("ignored-check", "ignored_check"):
261 return Q(check__ignore=True)
262 if text == "translation":
263 return Q(state__gte=STATE_TRANSLATED)
264 if text == "shaping":
265 return Q(shaping__isnull=False)
266 if text == "label":
267 return Q(labels__isnull=False)
268
269 raise ValueError("Unsupported has lookup: {}".format(text))
270
271
272 def is_sql(text):
273 if text in ("read-only", "readonly"):
274 return Q(state=STATE_READONLY)
275 if text == "approved":
276 return Q(state=STATE_APPROVED)
277 if text == "translated":
278 return Q(state__gte=STATE_TRANSLATED)
279 if text == "untranslated":
280 return Q(state__lt=STATE_TRANSLATED)
281 if text == "pending":
282 return Q(pending=True)
283
284 raise ValueError("Unsupported is lookup: {}".format(text))
285
286
287 def exact_sql(field, text):
288 return Q(**{field_name(field, "iexact"): text})
289
290
291 def query_sql(obj):
292 if isinstance(obj, whoosh.query.And):
293 return reduce(
294 lambda x, y: x & y,
295 (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),
296 )
297 if isinstance(obj, whoosh.query.Or):
298 return reduce(
299 lambda x, y: x | y,
300 (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),
301 )
302 if isinstance(obj, whoosh.query.Not):
303 return ~query_sql(obj.query)
304 if isinstance(obj, Exact):
305 return exact_sql(obj.fieldname, obj.text)
306 if isinstance(obj, whoosh.query.Term):
307 if obj.fieldname == "has":
308 return has_sql(obj.text)
309 if obj.fieldname == "is":
310 return is_sql(obj.text)
311 return field_extra(obj.fieldname, Q(**{field_name(obj.fieldname): obj.text}))
312 if isinstance(obj, whoosh.query.DateRange):
313 return field_extra(
314 obj.fieldname,
315 range_sql(
316 obj.fieldname,
317 obj.startdate,
318 obj.enddate,
319 obj.startexcl,
320 obj.endexcl,
321 timezone.make_aware,
322 ),
323 )
324 if isinstance(obj, whoosh.query.NumericRange):
325 return range_sql(obj.fieldname, obj.start, obj.end, obj.startexcl, obj.endexcl)
326 if isinstance(obj, whoosh.query.Regex):
327 try:
328 re.compile(obj.text)
329 return Q(**{field_name(obj.fieldname, "regex"): obj.text})
330 except re.error as error:
331 raise ValueError(_("Invalid regular expression: {}").format(error))
332
333 if obj == whoosh.query.NullQuery:
334 return Q()
335 raise ValueError("Unsupported: {!r}".format(obj))
336
337
338 @lru_cache(maxsize=512)
339 def parse_query(text):
340 return query_sql(PARSER.parse(text))
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/weblate/utils/search.py b/weblate/utils/search.py
--- a/weblate/utils/search.py
+++ b/weblate/utils/search.py
@@ -69,8 +69,8 @@
pass
-class ExactPlugin(QuotePlugin):
- """Exact match plugin with quotes to specify an exact term."""
+class ExactPlugin(whoosh.qparser.TaggingPlugin):
+ """Exact match plugin to specify an exact term."""
class ExactNode(whoosh.qparser.syntax.TextNode):
qclass = Exact
@@ -78,7 +78,7 @@
def r(self):
return "Exact %r" % self.text
- expr = r"\=(^|(?<=\W))['\"](?P<text>.*?)['\"](?=\s|\]|[)}]|$)"
+ expr = r"\=(^|(?<=\W))(['\"]?)(?P<text>.*?)\2(?=\s|\]|[)}]|$)"
nodetype = ExactNode
|
{"golden_diff": "diff --git a/weblate/utils/search.py b/weblate/utils/search.py\n--- a/weblate/utils/search.py\n+++ b/weblate/utils/search.py\n@@ -69,8 +69,8 @@\n pass\n \n \n-class ExactPlugin(QuotePlugin):\n- \"\"\"Exact match plugin with quotes to specify an exact term.\"\"\"\n+class ExactPlugin(whoosh.qparser.TaggingPlugin):\n+ \"\"\"Exact match plugin to specify an exact term.\"\"\"\n \n class ExactNode(whoosh.qparser.syntax.TextNode):\n qclass = Exact\n@@ -78,7 +78,7 @@\n def r(self):\n return \"Exact %r\" % self.text\n \n- expr = r\"\\=(^|(?<=\\W))['\\\"](?P<text>.*?)['\\\"](?=\\s|\\]|[)}]|$)\"\n+ expr = r\"\\=(^|(?<=\\W))(['\\\"]?)(?P<text>.*?)\\2(?=\\s|\\]|[)}]|$)\"\n nodetype = ExactNode\n", "issue": "Search / Criteria\"Exact\" overwrites 'Add\" button\nThe search is a great feature (quick to fill, powerfull - with advanced criterias - and results are fast and reliable) but I have noticed an issue in the layout : \"Exact\" (which seems to be an untranslatable string yet) overwrite the \"Add\" button (at least in french, the full french label is \"Ajouter\"); see: \r\n\r\npage https://hosted.weblate.org/projects/weblate/master/#search \r\n\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012 - 2020 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\n\nimport re\nfrom functools import lru_cache, reduce\n\nimport whoosh.qparser\nimport whoosh.qparser.dateparse\nimport whoosh.query\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom jellyfish import damerau_levenshtein_distance\nfrom whoosh.fields import BOOLEAN, DATETIME, NUMERIC, TEXT, Schema\n\nfrom weblate.trans.util import PLURAL_SEPARATOR\nfrom weblate.utils.state import (\n STATE_APPROVED,\n STATE_NAMES,\n STATE_READONLY,\n STATE_TRANSLATED,\n)\n\n\nclass Comparer:\n \"\"\"String comparer abstraction.\n\n The reason is to be able to change implementation.\n \"\"\"\n\n def similarity(self, first, second):\n \"\"\"Returns string similarity in range 0 - 100%.\"\"\"\n try:\n distance = damerau_levenshtein_distance(first, second)\n return int(\n 100 * (1.0 - (float(distance) / max(len(first), len(second), 1)))\n )\n except MemoryError:\n # Too long string, mark them as not much similar\n return 50\n\n\nclass QuotePlugin(whoosh.qparser.SingleQuotePlugin):\n \"\"\"Single and double quotes to specify a term.\"\"\"\n\n expr = r\"(^|(?<=\\W))['\\\"](?P<text>.*?)['\\\"](?=\\s|\\]|[)}]|$)\"\n\n\nclass Exact(whoosh.query.Term):\n \"\"\"Class for queries with exact operator.\"\"\"\n\n pass\n\n\nclass ExactPlugin(QuotePlugin):\n \"\"\"Exact match plugin with quotes to specify an exact term.\"\"\"\n\n class ExactNode(whoosh.qparser.syntax.TextNode):\n qclass = Exact\n\n def r(self):\n return \"Exact %r\" % self.text\n\n expr = r\"\\=(^|(?<=\\W))['\\\"](?P<text>.*?)['\\\"](?=\\s|\\]|[)}]|$)\"\n nodetype = ExactNode\n\n\nclass GtLtPlugin(whoosh.qparser.GtLtPlugin):\n \"\"\"GtLt plugin taggin only after \":\".\"\"\"\n\n def match(self, parser, text, pos):\n if pos == 0 or text[pos - 1] != \":\":\n return None\n return super().match(parser, text, pos)\n\n\nclass DateParser(whoosh.qparser.dateparse.English):\n def setup(self):\n super().setup()\n # We prefer simple parser prior to datetime\n # This might not be necessary after following issue is fixed:\n # https://github.com/whoosh-community/whoosh/issues/552\n self.bundle.elements = (self.plusdate, self.simple, self.datetime)\n\n\nclass NumberField(NUMERIC):\n def to_bytes(self, x, shift=0):\n return int(x)\n\n\nclass StateField(NumberField):\n def parse_query(self, fieldname, qstring, boost=1.0):\n return super().parse_query(fieldname, state_to_int(qstring), boost)\n\n def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):\n return super().parse_range(\n fieldname, state_to_int(start), state_to_int(end), startexcl, endexcl, boost\n )\n\n\ndef state_to_int(text):\n if text is None:\n return None\n try:\n return STATE_NAMES[text]\n except KeyError:\n raise ValueError(_(\"Unsupported state: {}\").format(text))\n\n\nclass QueryParser(whoosh.qparser.QueryParser):\n \"\"\"Weblate query parser, differences to Whoosh default.\n\n - no phrase plugin\n - <> operators support\n - double and single quotes behave identical\n - multifield lookup for unspecified terms\n \"\"\"\n\n def __init__(self):\n # Define fields for parsing\n fields = {\n # Unit fields\n \"source\": TEXT,\n \"target\": TEXT,\n \"context\": TEXT,\n \"note\": TEXT,\n \"location\": TEXT,\n \"priority\": NumberField,\n \"added\": DATETIME,\n \"state\": StateField,\n \"pending\": BOOLEAN,\n \"has\": TEXT,\n \"is\": TEXT,\n # Language\n \"language\": TEXT,\n # Change fields\n \"changed\": DATETIME,\n \"changed_by\": TEXT,\n # Unit data\n \"check\": TEXT,\n \"ignored_check\": TEXT,\n \"suggestion\": TEXT,\n \"suggestion_author\": TEXT,\n \"comment\": TEXT,\n \"comment_author\": TEXT,\n \"label\": TEXT,\n }\n schema = Schema(**fields)\n\n # List of plugins\n plugins = [\n whoosh.qparser.WhitespacePlugin(),\n QuotePlugin(),\n whoosh.qparser.FieldsPlugin(),\n whoosh.qparser.RangePlugin(),\n GtLtPlugin(),\n ExactPlugin(),\n whoosh.qparser.RegexPlugin(),\n whoosh.qparser.GroupPlugin(),\n whoosh.qparser.OperatorsPlugin(),\n whoosh.qparser.dateparse.DateParserPlugin(dateparser=DateParser()),\n whoosh.qparser.MultifieldPlugin([\"source\", \"target\", \"context\"]),\n ]\n super().__init__(None, schema, plugins=plugins)\n\n def term_query(\n self, fieldname, text, termclass, boost=1.0, tokenize=True, removestops=True\n ):\n if self.schema and fieldname in self.schema:\n if isinstance(self.schema[fieldname], TEXT):\n return termclass(fieldname, text, boost=boost)\n return super().term_query(\n fieldname, text, termclass, boost, tokenize, removestops\n )\n\n\nPARSER = QueryParser()\n\nPLAIN_FIELDS = (\"source\", \"target\", \"context\", \"note\", \"location\")\nFIELD_MAP = {\"changed\": \"change__timestamp\", \"added\": \"timestamp\"}\nSTRING_FIELD_MAP = {\"suggestion\": \"suggestion__target\", \"comment\": \"comment__comment\"}\nEXACT_FIELD_MAP = {\n \"check\": \"check__check\",\n \"ignored_check\": \"check__check\",\n \"language\": \"translation__language__code\",\n \"changed_by\": \"change__author__username\",\n \"suggestion_author\": \"suggestion__user__username\",\n \"comment_author\": \"comment__user__username\",\n \"label\": \"labels__name\",\n}\n\n\ndef field_name(field, suffix=\"substring\"):\n if field in FIELD_MAP:\n return FIELD_MAP[field]\n if field in PLAIN_FIELDS:\n return \"{}__{}\".format(field, suffix)\n if field in STRING_FIELD_MAP:\n return \"{}__{}\".format(STRING_FIELD_MAP[field], suffix)\n if field in EXACT_FIELD_MAP:\n # Change contains to exact, do not change other (for example regex)\n if suffix == \"substring\":\n suffix = \"iexact\"\n return \"{}__{}\".format(EXACT_FIELD_MAP[field], suffix)\n return field\n\n\ndef field_extra(field, query):\n from weblate.trans.models import Change\n\n if field in {\"changed\", \"changed_by\"}:\n return query & Q(change__action__in=Change.ACTIONS_CONTENT)\n if field == \"check\":\n return query & Q(check__ignore=False)\n if field == \"ignored_check\":\n return query & Q(check__ignore=True)\n return query\n\n\ndef range_sql(field, start, end, startexcl, endexcl, conv=int):\n def range_lookup(field, op, value):\n return {\"{}__{}\".format(field_name(field), op): conv(value)}\n\n gte = \"gt\" if startexcl else \"gte\"\n lte = \"lt\" if endexcl else \"lte\"\n\n if start is not None and end is not None:\n return Q(**range_lookup(field, gte, start)) & Q(**range_lookup(field, lte, end))\n if start is not None:\n return Q(**range_lookup(field, gte, start))\n return Q(**range_lookup(field, lte, end))\n\n\ndef has_sql(text):\n if text == \"plural\":\n return Q(source__contains=PLURAL_SEPARATOR)\n if text == \"suggestion\":\n return Q(has_suggestion=True)\n if text == \"comment\":\n return Q(has_comment=True)\n if text in (\"check\", \"failing-check\", \"failing_check\"):\n return Q(has_failing_check=True)\n if text in (\"ignored-check\", \"ignored_check\"):\n return Q(check__ignore=True)\n if text == \"translation\":\n return Q(state__gte=STATE_TRANSLATED)\n if text == \"shaping\":\n return Q(shaping__isnull=False)\n if text == \"label\":\n return Q(labels__isnull=False)\n\n raise ValueError(\"Unsupported has lookup: {}\".format(text))\n\n\ndef is_sql(text):\n if text in (\"read-only\", \"readonly\"):\n return Q(state=STATE_READONLY)\n if text == \"approved\":\n return Q(state=STATE_APPROVED)\n if text == \"translated\":\n return Q(state__gte=STATE_TRANSLATED)\n if text == \"untranslated\":\n return Q(state__lt=STATE_TRANSLATED)\n if text == \"pending\":\n return Q(pending=True)\n\n raise ValueError(\"Unsupported is lookup: {}\".format(text))\n\n\ndef exact_sql(field, text):\n return Q(**{field_name(field, \"iexact\"): text})\n\n\ndef query_sql(obj):\n if isinstance(obj, whoosh.query.And):\n return reduce(\n lambda x, y: x & y,\n (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),\n )\n if isinstance(obj, whoosh.query.Or):\n return reduce(\n lambda x, y: x | y,\n (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),\n )\n if isinstance(obj, whoosh.query.Not):\n return ~query_sql(obj.query)\n if isinstance(obj, Exact):\n return exact_sql(obj.fieldname, obj.text)\n if isinstance(obj, whoosh.query.Term):\n if obj.fieldname == \"has\":\n return has_sql(obj.text)\n if obj.fieldname == \"is\":\n return is_sql(obj.text)\n return field_extra(obj.fieldname, Q(**{field_name(obj.fieldname): obj.text}))\n if isinstance(obj, whoosh.query.DateRange):\n return field_extra(\n obj.fieldname,\n range_sql(\n obj.fieldname,\n obj.startdate,\n obj.enddate,\n obj.startexcl,\n obj.endexcl,\n timezone.make_aware,\n ),\n )\n if isinstance(obj, whoosh.query.NumericRange):\n return range_sql(obj.fieldname, obj.start, obj.end, obj.startexcl, obj.endexcl)\n if isinstance(obj, whoosh.query.Regex):\n try:\n re.compile(obj.text)\n return Q(**{field_name(obj.fieldname, \"regex\"): obj.text})\n except re.error as error:\n raise ValueError(_(\"Invalid regular expression: {}\").format(error))\n\n if obj == whoosh.query.NullQuery:\n return Q()\n raise ValueError(\"Unsupported: {!r}\".format(obj))\n\n\n@lru_cache(maxsize=512)\ndef parse_query(text):\n return query_sql(PARSER.parse(text))\n", "path": "weblate/utils/search.py"}], "after_files": [{"content": "#\n# Copyright \u00a9 2012 - 2020 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\n\nimport re\nfrom functools import lru_cache, reduce\n\nimport whoosh.qparser\nimport whoosh.qparser.dateparse\nimport whoosh.query\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom jellyfish import damerau_levenshtein_distance\nfrom whoosh.fields import BOOLEAN, DATETIME, NUMERIC, TEXT, Schema\n\nfrom weblate.trans.util import PLURAL_SEPARATOR\nfrom weblate.utils.state import (\n STATE_APPROVED,\n STATE_NAMES,\n STATE_READONLY,\n STATE_TRANSLATED,\n)\n\n\nclass Comparer:\n \"\"\"String comparer abstraction.\n\n The reason is to be able to change implementation.\n \"\"\"\n\n def similarity(self, first, second):\n \"\"\"Returns string similarity in range 0 - 100%.\"\"\"\n try:\n distance = damerau_levenshtein_distance(first, second)\n return int(\n 100 * (1.0 - (float(distance) / max(len(first), len(second), 1)))\n )\n except MemoryError:\n # Too long string, mark them as not much similar\n return 50\n\n\nclass QuotePlugin(whoosh.qparser.SingleQuotePlugin):\n \"\"\"Single and double quotes to specify a term.\"\"\"\n\n expr = r\"(^|(?<=\\W))['\\\"](?P<text>.*?)['\\\"](?=\\s|\\]|[)}]|$)\"\n\n\nclass Exact(whoosh.query.Term):\n \"\"\"Class for queries with exact operator.\"\"\"\n\n pass\n\n\nclass ExactPlugin(whoosh.qparser.TaggingPlugin):\n \"\"\"Exact match plugin to specify an exact term.\"\"\"\n\n class ExactNode(whoosh.qparser.syntax.TextNode):\n qclass = Exact\n\n def r(self):\n return \"Exact %r\" % self.text\n\n expr = r\"\\=(^|(?<=\\W))(['\\\"]?)(?P<text>.*?)\\2(?=\\s|\\]|[)}]|$)\"\n nodetype = ExactNode\n\n\nclass GtLtPlugin(whoosh.qparser.GtLtPlugin):\n \"\"\"GtLt plugin taggin only after \":\".\"\"\"\n\n def match(self, parser, text, pos):\n if pos == 0 or text[pos - 1] != \":\":\n return None\n return super().match(parser, text, pos)\n\n\nclass DateParser(whoosh.qparser.dateparse.English):\n def setup(self):\n super().setup()\n # We prefer simple parser prior to datetime\n # This might not be necessary after following issue is fixed:\n # https://github.com/whoosh-community/whoosh/issues/552\n self.bundle.elements = (self.plusdate, self.simple, self.datetime)\n\n\nclass NumberField(NUMERIC):\n def to_bytes(self, x, shift=0):\n return int(x)\n\n\nclass StateField(NumberField):\n def parse_query(self, fieldname, qstring, boost=1.0):\n return super().parse_query(fieldname, state_to_int(qstring), boost)\n\n def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):\n return super().parse_range(\n fieldname, state_to_int(start), state_to_int(end), startexcl, endexcl, boost\n )\n\n\ndef state_to_int(text):\n if text is None:\n return None\n try:\n return STATE_NAMES[text]\n except KeyError:\n raise ValueError(_(\"Unsupported state: {}\").format(text))\n\n\nclass QueryParser(whoosh.qparser.QueryParser):\n \"\"\"Weblate query parser, differences to Whoosh default.\n\n - no phrase plugin\n - <> operators support\n - double and single quotes behave identical\n - multifield lookup for unspecified terms\n \"\"\"\n\n def __init__(self):\n # Define fields for parsing\n fields = {\n # Unit fields\n \"source\": TEXT,\n \"target\": TEXT,\n \"context\": TEXT,\n \"note\": TEXT,\n \"location\": TEXT,\n \"priority\": NumberField,\n \"added\": DATETIME,\n \"state\": StateField,\n \"pending\": BOOLEAN,\n \"has\": TEXT,\n \"is\": TEXT,\n # Language\n \"language\": TEXT,\n # Change fields\n \"changed\": DATETIME,\n \"changed_by\": TEXT,\n # Unit data\n \"check\": TEXT,\n \"ignored_check\": TEXT,\n \"suggestion\": TEXT,\n \"suggestion_author\": TEXT,\n \"comment\": TEXT,\n \"comment_author\": TEXT,\n \"label\": TEXT,\n }\n schema = Schema(**fields)\n\n # List of plugins\n plugins = [\n whoosh.qparser.WhitespacePlugin(),\n QuotePlugin(),\n whoosh.qparser.FieldsPlugin(),\n whoosh.qparser.RangePlugin(),\n GtLtPlugin(),\n ExactPlugin(),\n whoosh.qparser.RegexPlugin(),\n whoosh.qparser.GroupPlugin(),\n whoosh.qparser.OperatorsPlugin(),\n whoosh.qparser.dateparse.DateParserPlugin(dateparser=DateParser()),\n whoosh.qparser.MultifieldPlugin([\"source\", \"target\", \"context\"]),\n ]\n super().__init__(None, schema, plugins=plugins)\n\n def term_query(\n self, fieldname, text, termclass, boost=1.0, tokenize=True, removestops=True\n ):\n if self.schema and fieldname in self.schema:\n if isinstance(self.schema[fieldname], TEXT):\n return termclass(fieldname, text, boost=boost)\n return super().term_query(\n fieldname, text, termclass, boost, tokenize, removestops\n )\n\n\nPARSER = QueryParser()\n\nPLAIN_FIELDS = (\"source\", \"target\", \"context\", \"note\", \"location\")\nFIELD_MAP = {\"changed\": \"change__timestamp\", \"added\": \"timestamp\"}\nSTRING_FIELD_MAP = {\"suggestion\": \"suggestion__target\", \"comment\": \"comment__comment\"}\nEXACT_FIELD_MAP = {\n \"check\": \"check__check\",\n \"ignored_check\": \"check__check\",\n \"language\": \"translation__language__code\",\n \"changed_by\": \"change__author__username\",\n \"suggestion_author\": \"suggestion__user__username\",\n \"comment_author\": \"comment__user__username\",\n \"label\": \"labels__name\",\n}\n\n\ndef field_name(field, suffix=\"substring\"):\n if field in FIELD_MAP:\n return FIELD_MAP[field]\n if field in PLAIN_FIELDS:\n return \"{}__{}\".format(field, suffix)\n if field in STRING_FIELD_MAP:\n return \"{}__{}\".format(STRING_FIELD_MAP[field], suffix)\n if field in EXACT_FIELD_MAP:\n # Change contains to exact, do not change other (for example regex)\n if suffix == \"substring\":\n suffix = \"iexact\"\n return \"{}__{}\".format(EXACT_FIELD_MAP[field], suffix)\n return field\n\n\ndef field_extra(field, query):\n from weblate.trans.models import Change\n\n if field in {\"changed\", \"changed_by\"}:\n return query & Q(change__action__in=Change.ACTIONS_CONTENT)\n if field == \"check\":\n return query & Q(check__ignore=False)\n if field == \"ignored_check\":\n return query & Q(check__ignore=True)\n return query\n\n\ndef range_sql(field, start, end, startexcl, endexcl, conv=int):\n def range_lookup(field, op, value):\n return {\"{}__{}\".format(field_name(field), op): conv(value)}\n\n gte = \"gt\" if startexcl else \"gte\"\n lte = \"lt\" if endexcl else \"lte\"\n\n if start is not None and end is not None:\n return Q(**range_lookup(field, gte, start)) & Q(**range_lookup(field, lte, end))\n if start is not None:\n return Q(**range_lookup(field, gte, start))\n return Q(**range_lookup(field, lte, end))\n\n\ndef has_sql(text):\n if text == \"plural\":\n return Q(source__contains=PLURAL_SEPARATOR)\n if text == \"suggestion\":\n return Q(has_suggestion=True)\n if text == \"comment\":\n return Q(has_comment=True)\n if text in (\"check\", \"failing-check\", \"failing_check\"):\n return Q(has_failing_check=True)\n if text in (\"ignored-check\", \"ignored_check\"):\n return Q(check__ignore=True)\n if text == \"translation\":\n return Q(state__gte=STATE_TRANSLATED)\n if text == \"shaping\":\n return Q(shaping__isnull=False)\n if text == \"label\":\n return Q(labels__isnull=False)\n\n raise ValueError(\"Unsupported has lookup: {}\".format(text))\n\n\ndef is_sql(text):\n if text in (\"read-only\", \"readonly\"):\n return Q(state=STATE_READONLY)\n if text == \"approved\":\n return Q(state=STATE_APPROVED)\n if text == \"translated\":\n return Q(state__gte=STATE_TRANSLATED)\n if text == \"untranslated\":\n return Q(state__lt=STATE_TRANSLATED)\n if text == \"pending\":\n return Q(pending=True)\n\n raise ValueError(\"Unsupported is lookup: {}\".format(text))\n\n\ndef exact_sql(field, text):\n return Q(**{field_name(field, \"iexact\"): text})\n\n\ndef query_sql(obj):\n if isinstance(obj, whoosh.query.And):\n return reduce(\n lambda x, y: x & y,\n (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),\n )\n if isinstance(obj, whoosh.query.Or):\n return reduce(\n lambda x, y: x | y,\n (query_sql(q) for q in obj.subqueries if q != whoosh.query.NullQuery),\n )\n if isinstance(obj, whoosh.query.Not):\n return ~query_sql(obj.query)\n if isinstance(obj, Exact):\n return exact_sql(obj.fieldname, obj.text)\n if isinstance(obj, whoosh.query.Term):\n if obj.fieldname == \"has\":\n return has_sql(obj.text)\n if obj.fieldname == \"is\":\n return is_sql(obj.text)\n return field_extra(obj.fieldname, Q(**{field_name(obj.fieldname): obj.text}))\n if isinstance(obj, whoosh.query.DateRange):\n return field_extra(\n obj.fieldname,\n range_sql(\n obj.fieldname,\n obj.startdate,\n obj.enddate,\n obj.startexcl,\n obj.endexcl,\n timezone.make_aware,\n ),\n )\n if isinstance(obj, whoosh.query.NumericRange):\n return range_sql(obj.fieldname, obj.start, obj.end, obj.startexcl, obj.endexcl)\n if isinstance(obj, whoosh.query.Regex):\n try:\n re.compile(obj.text)\n return Q(**{field_name(obj.fieldname, \"regex\"): obj.text})\n except re.error as error:\n raise ValueError(_(\"Invalid regular expression: {}\").format(error))\n\n if obj == whoosh.query.NullQuery:\n return Q()\n raise ValueError(\"Unsupported: {!r}\".format(obj))\n\n\n@lru_cache(maxsize=512)\ndef parse_query(text):\n return query_sql(PARSER.parse(text))\n", "path": "weblate/utils/search.py"}]}
| 3,967 | 221 |
gh_patches_debug_36225
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1454
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parser regex.py does not support identically named group in regex_matcher
# Description
When using the same group name in **match_regex** decorator **regex.py** issue an error because the python re module used to parse messages does not support identical named group
## Steps to Reproduce
- Create a skill like (the goal is to have a multiple patterns using the same group name)
```python
from opsdroid.matchers import match_regex
from opsdroid.skill import Skill
PATTERNS = '|'.join([
"say (?P<word>\w+)",
"repeat (?P<word>\w+)",
"say",
"repeat"
])
class SaySkill(Skill):
@match_regex(PATTERNS, case_sensitive=False, matching_condition='match')
async def say(self, message):
word = message.entities['word']['value'] or None
if word is None:
word = 'No word to say'
await message.respond(word)
```
- Add it to configuration file
- using opsdroid shell or opsdroid desktop test it
- Issue occurs
```
DEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/usr/lib64/python3.6/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/connector/websocket/__init__.py", line 101, in websocket_handler
await self.opsdroid.parse(message)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py", line 498, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py", line 421, in get_ranked_skills
ranked_skills += await parse_regex(self, skills, message)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py", line 40, in parse_regex
regex = await match_regex(message.text, opts)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py", line 29, in match_regex
regex = re.match(opts["expression"], text, is_case_sensitive())
File "/usr/lib64/python3.6/re.py", line 172, in match
return _compile(pattern, flags).match(string)
File "/usr/lib64/python3.6/re.py", line 301, in _compile
p = sre_compile.compile(pattern, flags)
File "/usr/lib64/python3.6/sre_compile.py", line 562, in compile
p = sre_parse.parse(p, flags)
File "/usr/lib64/python3.6/sre_parse.py", line 855, in parse
p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
File "/usr/lib64/python3.6/sre_parse.py", line 416, in _parse_sub
not nested and not items))
File "/usr/lib64/python3.6/sre_parse.py", line 759, in _parse
raise source.error(err.msg, len(name) + 1) from None
sre_constants.error: redefinition of group name 'city' as group 2; was group 1 at position 42 <-------
```
## Expected Functionality
Handling multiple patterns with the same name group
```
DEBUG opsdroid.parsers.crontab: Running crontab skills at Fri May 1 19:03:00 2020.
DEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.
DEBUG opsdroid.core: Processing parsers...
DEBUG opsdroid.connector.websocket: Responding with: 'hello' in target 8d9c8b96-8bcd-11ea-b043-0050568f7a82 <----------
```
## Experienced Functionality
debug logs :
```
DEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.
ERROR aiohttp.server: Error handling request
Traceback (most recent call last):
File "/usr/lib64/python3.6/site-packages/aiohttp/web_protocol.py", line 418, in start
resp = await task
File "/usr/lib64/python3.6/site-packages/aiohttp/web_app.py", line 458, in _handle
resp = await handler(request)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/connector/websocket/__init__.py", line 101, in websocket_handler
await self.opsdroid.parse(message)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py", line 498, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py", line 421, in get_ranked_skills
ranked_skills += await parse_regex(self, skills, message)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py", line 40, in parse_regex
regex = await match_regex(message.text, opts)
File "/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py", line 29, in match_regex
regex = re.match(opts["expression"], text, is_case_sensitive())
File "/usr/lib64/python3.6/re.py", line 172, in match
return _compile(pattern, flags).match(string)
File "/usr/lib64/python3.6/re.py", line 301, in _compile
p = sre_compile.compile(pattern, flags)
File "/usr/lib64/python3.6/sre_compile.py", line 562, in compile
p = sre_parse.parse(p, flags)
File "/usr/lib64/python3.6/sre_parse.py", line 855, in parse
p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
File "/usr/lib64/python3.6/sre_parse.py", line 416, in _parse_sub
not nested and not items))
File "/usr/lib64/python3.6/sre_parse.py", line 759, in _parse
raise source.error(err.msg, len(name) + 1) from None
sre_constants.error: redefinition of group name 'city' as group 2; was group 1 at position 42 <-------
```
## Versions
- **Opsdroid version:*v0.18.0*
- **Python version:*3.6.7*
<!-- Love opsdroid? Please consider supporting our collective:
+👉 https://opencollective.com/opsdroid/donate -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/parsers/regex.py`
Content:
```
1 """A helper function for parsing and executing regex skills."""
2
3 import logging
4 import re
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 async def calculate_score(regex, score_factor):
10 """Calculate the score of a regex."""
11 # The score asymptotically approaches the max score
12 # based on the length of the expression.
13 return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor
14
15
16 async def match_regex(text, opts):
17 """Return False if matching does not need to be case sensitive."""
18
19 def is_case_sensitive():
20 if opts["case_sensitive"]:
21 return False
22 return re.IGNORECASE
23
24 if opts["matching_condition"].lower() == "search":
25 regex = re.search(opts["expression"], text, is_case_sensitive())
26 elif opts["matching_condition"].lower() == "fullmatch":
27 regex = re.fullmatch(opts["expression"], text, is_case_sensitive())
28 else:
29 regex = re.match(opts["expression"], text, is_case_sensitive())
30 return regex
31
32
33 async def parse_regex(opsdroid, skills, message):
34 """Parse a message against all regex skills."""
35 matched_skills = []
36 for skill in skills:
37 for matcher in skill.matchers:
38 if "regex" in matcher:
39 opts = matcher["regex"]
40 regex = await match_regex(message.text, opts)
41 if regex:
42 message.regex = regex
43 for regroup, value in regex.groupdict().items():
44 message.update_entity(regroup, value, None)
45 matched_skills.append(
46 {
47 "score": await calculate_score(
48 opts["expression"], opts["score_factor"]
49 ),
50 "skill": skill,
51 "config": skill.config,
52 "message": message,
53 }
54 )
55 return matched_skills
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py
--- a/opsdroid/parsers/regex.py
+++ b/opsdroid/parsers/regex.py
@@ -1,16 +1,16 @@
"""A helper function for parsing and executing regex skills."""
import logging
-import re
+import regex
_LOGGER = logging.getLogger(__name__)
-async def calculate_score(regex, score_factor):
+async def calculate_score(expression, score_factor):
"""Calculate the score of a regex."""
# The score asymptotically approaches the max score
# based on the length of the expression.
- return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor
+ return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor
async def match_regex(text, opts):
@@ -19,15 +19,15 @@
def is_case_sensitive():
if opts["case_sensitive"]:
return False
- return re.IGNORECASE
+ return regex.IGNORECASE
if opts["matching_condition"].lower() == "search":
- regex = re.search(opts["expression"], text, is_case_sensitive())
+ matched_regex = regex.search(opts["expression"], text, is_case_sensitive())
elif opts["matching_condition"].lower() == "fullmatch":
- regex = re.fullmatch(opts["expression"], text, is_case_sensitive())
+ matched_regex = regex.fullmatch(opts["expression"], text, is_case_sensitive())
else:
- regex = re.match(opts["expression"], text, is_case_sensitive())
- return regex
+ matched_regex = regex.match(opts["expression"], text, is_case_sensitive())
+ return matched_regex
async def parse_regex(opsdroid, skills, message):
@@ -37,10 +37,10 @@
for matcher in skill.matchers:
if "regex" in matcher:
opts = matcher["regex"]
- regex = await match_regex(message.text, opts)
- if regex:
- message.regex = regex
- for regroup, value in regex.groupdict().items():
+ matched_regex = await match_regex(message.text, opts)
+ if matched_regex:
+ message.regex = matched_regex
+ for regroup, value in matched_regex.groupdict().items():
message.update_entity(regroup, value, None)
matched_skills.append(
{
|
{"golden_diff": "diff --git a/opsdroid/parsers/regex.py b/opsdroid/parsers/regex.py\n--- a/opsdroid/parsers/regex.py\n+++ b/opsdroid/parsers/regex.py\n@@ -1,16 +1,16 @@\n \"\"\"A helper function for parsing and executing regex skills.\"\"\"\n \n import logging\n-import re\n+import regex\n \n _LOGGER = logging.getLogger(__name__)\n \n \n-async def calculate_score(regex, score_factor):\n+async def calculate_score(expression, score_factor):\n \"\"\"Calculate the score of a regex.\"\"\"\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n- return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor\n+ return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor\n \n \n async def match_regex(text, opts):\n@@ -19,15 +19,15 @@\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n- return re.IGNORECASE\n+ return regex.IGNORECASE\n \n if opts[\"matching_condition\"].lower() == \"search\":\n- regex = re.search(opts[\"expression\"], text, is_case_sensitive())\n+ matched_regex = regex.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n- regex = re.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n+ matched_regex = regex.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n- regex = re.match(opts[\"expression\"], text, is_case_sensitive())\n- return regex\n+ matched_regex = regex.match(opts[\"expression\"], text, is_case_sensitive())\n+ return matched_regex\n \n \n async def parse_regex(opsdroid, skills, message):\n@@ -37,10 +37,10 @@\n for matcher in skill.matchers:\n if \"regex\" in matcher:\n opts = matcher[\"regex\"]\n- regex = await match_regex(message.text, opts)\n- if regex:\n- message.regex = regex\n- for regroup, value in regex.groupdict().items():\n+ matched_regex = await match_regex(message.text, opts)\n+ if matched_regex:\n+ message.regex = matched_regex\n+ for regroup, value in matched_regex.groupdict().items():\n message.update_entity(regroup, value, None)\n matched_skills.append(\n {\n", "issue": "Parser regex.py does not support identically named group in regex_matcher\n# Description\r\n\r\nWhen using the same group name in **match_regex** decorator **regex.py** issue an error because the python re module used to parse messages does not support identical named group\r\n\r\n\r\n## Steps to Reproduce\r\n- Create a skill like (the goal is to have a multiple patterns using the same group name)\r\n\r\n```python\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid.skill import Skill\r\n\r\nPATTERNS = '|'.join([\r\n \"say (?P<word>\\w+)\",\r\n \"repeat (?P<word>\\w+)\",\r\n \"say\",\r\n \"repeat\"\r\n])\r\n\r\n\r\nclass SaySkill(Skill):\r\n\r\n @match_regex(PATTERNS, case_sensitive=False, matching_condition='match')\r\n async def say(self, message):\r\n word = message.entities['word']['value'] or None\r\n if word is None:\r\n word = 'No word to say'\r\n\r\n await message.respond(word)\r\n\r\n```\r\n\r\n- Add it to configuration file\r\n- using opsdroid shell or opsdroid desktop test it \r\n- Issue occurs\r\n\r\n```\r\nDEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.\r\nERROR aiohttp.server: Error handling request\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/site-packages/aiohttp/web_protocol.py\", line 418, in start\r\n resp = await task\r\n File \"/usr/lib64/python3.6/site-packages/aiohttp/web_app.py\", line 458, in _handle\r\n resp = await handler(request)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/connector/websocket/__init__.py\", line 101, in websocket_handler\r\n await self.opsdroid.parse(message)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py\", line 498, in parse\r\n ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py\", line 421, in get_ranked_skills\r\n ranked_skills += await parse_regex(self, skills, message)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py\", line 40, in parse_regex\r\n regex = await match_regex(message.text, opts)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py\", line 29, in match_regex\r\n regex = re.match(opts[\"expression\"], text, is_case_sensitive())\r\n File \"/usr/lib64/python3.6/re.py\", line 172, in match\r\n return _compile(pattern, flags).match(string)\r\n File \"/usr/lib64/python3.6/re.py\", line 301, in _compile\r\n p = sre_compile.compile(pattern, flags)\r\n File \"/usr/lib64/python3.6/sre_compile.py\", line 562, in compile\r\n p = sre_parse.parse(p, flags)\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 855, in parse\r\n p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 416, in _parse_sub\r\n not nested and not items))\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 759, in _parse\r\n raise source.error(err.msg, len(name) + 1) from None\r\nsre_constants.error: redefinition of group name 'city' as group 2; was group 1 at position 42 <-------\r\n```\r\n\r\n## Expected Functionality\r\nHandling multiple patterns with the same name group\r\n\r\n```\r\nDEBUG opsdroid.parsers.crontab: Running crontab skills at Fri May 1 19:03:00 2020.\r\nDEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.\r\nDEBUG opsdroid.core: Processing parsers...\r\nDEBUG opsdroid.connector.websocket: Responding with: 'hello' in target 8d9c8b96-8bcd-11ea-b043-0050568f7a82 <----------\r\n```\r\n## Experienced Functionality\r\n\r\ndebug logs :\r\n\r\n```\r\nDEBUG opsdroid.core: Parsing input: <opsdroid.events.Message(text=say hello)>.\r\nERROR aiohttp.server: Error handling request\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/site-packages/aiohttp/web_protocol.py\", line 418, in start\r\n resp = await task\r\n File \"/usr/lib64/python3.6/site-packages/aiohttp/web_app.py\", line 458, in _handle\r\n resp = await handler(request)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/connector/websocket/__init__.py\", line 101, in websocket_handler\r\n await self.opsdroid.parse(message)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py\", line 498, in parse\r\n ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/core.py\", line 421, in get_ranked_skills\r\n ranked_skills += await parse_regex(self, skills, message)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py\", line 40, in parse_regex\r\n regex = await match_regex(message.text, opts)\r\n File \"/home/ibot/ibot-opsdroid/opsdroid/opsdroid/parsers/regex.py\", line 29, in match_regex\r\n regex = re.match(opts[\"expression\"], text, is_case_sensitive())\r\n File \"/usr/lib64/python3.6/re.py\", line 172, in match\r\n return _compile(pattern, flags).match(string)\r\n File \"/usr/lib64/python3.6/re.py\", line 301, in _compile\r\n p = sre_compile.compile(pattern, flags)\r\n File \"/usr/lib64/python3.6/sre_compile.py\", line 562, in compile\r\n p = sre_parse.parse(p, flags)\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 855, in parse\r\n p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 416, in _parse_sub\r\n not nested and not items))\r\n File \"/usr/lib64/python3.6/sre_parse.py\", line 759, in _parse\r\n raise source.error(err.msg, len(name) + 1) from None\r\nsre_constants.error: redefinition of group name 'city' as group 2; was group 1 at position 42 <-------\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:*v0.18.0*\r\n- **Python version:*3.6.7*\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing regex skills.\"\"\"\n\nimport logging\nimport re\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def calculate_score(regex, score_factor):\n \"\"\"Calculate the score of a regex.\"\"\"\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(regex) + 1) ** 2))) * score_factor\n\n\nasync def match_regex(text, opts):\n \"\"\"Return False if matching does not need to be case sensitive.\"\"\"\n\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n return re.IGNORECASE\n\n if opts[\"matching_condition\"].lower() == \"search\":\n regex = re.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n regex = re.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n regex = re.match(opts[\"expression\"], text, is_case_sensitive())\n return regex\n\n\nasync def parse_regex(opsdroid, skills, message):\n \"\"\"Parse a message against all regex skills.\"\"\"\n matched_skills = []\n for skill in skills:\n for matcher in skill.matchers:\n if \"regex\" in matcher:\n opts = matcher[\"regex\"]\n regex = await match_regex(message.text, opts)\n if regex:\n message.regex = regex\n for regroup, value in regex.groupdict().items():\n message.update_entity(regroup, value, None)\n matched_skills.append(\n {\n \"score\": await calculate_score(\n opts[\"expression\"], opts[\"score_factor\"]\n ),\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills\n", "path": "opsdroid/parsers/regex.py"}], "after_files": [{"content": "\"\"\"A helper function for parsing and executing regex skills.\"\"\"\n\nimport logging\nimport regex\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def calculate_score(expression, score_factor):\n \"\"\"Calculate the score of a regex.\"\"\"\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor\n\n\nasync def match_regex(text, opts):\n \"\"\"Return False if matching does not need to be case sensitive.\"\"\"\n\n def is_case_sensitive():\n if opts[\"case_sensitive\"]:\n return False\n return regex.IGNORECASE\n\n if opts[\"matching_condition\"].lower() == \"search\":\n matched_regex = regex.search(opts[\"expression\"], text, is_case_sensitive())\n elif opts[\"matching_condition\"].lower() == \"fullmatch\":\n matched_regex = regex.fullmatch(opts[\"expression\"], text, is_case_sensitive())\n else:\n matched_regex = regex.match(opts[\"expression\"], text, is_case_sensitive())\n return matched_regex\n\n\nasync def parse_regex(opsdroid, skills, message):\n \"\"\"Parse a message against all regex skills.\"\"\"\n matched_skills = []\n for skill in skills:\n for matcher in skill.matchers:\n if \"regex\" in matcher:\n opts = matcher[\"regex\"]\n matched_regex = await match_regex(message.text, opts)\n if matched_regex:\n message.regex = matched_regex\n for regroup, value in matched_regex.groupdict().items():\n message.update_entity(regroup, value, None)\n matched_skills.append(\n {\n \"score\": await calculate_score(\n opts[\"expression\"], opts[\"score_factor\"]\n ),\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills\n", "path": "opsdroid/parsers/regex.py"}]}
| 2,431 | 537 |
gh_patches_debug_40549
|
rasdani/github-patches
|
git_diff
|
spotify__luigi-883
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
specifying ssh port for RemoteTarget
It would be nice to be able to specify a port number when establishing a RemoteTarget.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/contrib/ssh.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Light-weight remote execution library and utilities.
19
20 There are some examples in the unittest, but
21 I added another more luigi-specific in the examples directory (examples/ssh_remote_execution.py
22
23 :class:`RemoteContext` is meant to provide functionality similar to that of the standard library subprocess module,
24 but where the commands executed are run on a remote machine instead,
25 without the user having to think about prefixing everything with "ssh" and credentials etc.
26
27 Using this mini library (which is just a convenience wrapper for subprocess),
28 :class:`RemoteTarget` is created to let you stream data from a remotely stored file using
29 the luigi :class:`~luigi.target.FileSystemTarget` semantics.
30
31 As a bonus, :class:`RemoteContext` also provides a really cool feature that let's you
32 set up ssh tunnels super easily using a python context manager (there is an example
33 in the integration part of unittests).
34
35 This can be super convenient when you want secure communication using a non-secure
36 protocol or circumvent firewalls (as long as they are open for ssh traffic).
37 """
38
39 import contextlib
40 import os
41 import random
42 import subprocess
43
44 import luigi
45 import luigi.format
46 import luigi.target
47
48
49 class RemoteContext(object):
50
51 def __init__(self, host, username=None, key_file=None, connect_timeout=None):
52 self.host = host
53 self.username = username
54 self.key_file = key_file
55 self.connect_timeout = connect_timeout
56
57 def __repr__(self):
58 return '%s(%r, %r, %r, %r)' % (
59 type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout)
60
61 def __eq__(self, other):
62 return repr(self) == repr(other)
63
64 def __hash__(self):
65 return hash(repr(self))
66
67 def _host_ref(self):
68 if self.username:
69 return "{0}@{1}".format(self.username, self.host)
70 else:
71 return self.host
72
73 def _prepare_cmd(self, cmd):
74 connection_cmd = ["ssh", self._host_ref(),
75 "-S", "none", # disable ControlMaster since it causes all sorts of weird behaviour with subprocesses...
76 "-o", "BatchMode=yes", # no password prompts etc
77 ]
78
79 if self.connect_timeout is not None:
80 connection_cmd += ['-o', 'ConnectTimeout=%d' % self.connect_timeout]
81
82 if self.key_file:
83 connection_cmd.extend(["-i", self.key_file])
84 return connection_cmd + cmd
85
86 def Popen(self, cmd, **kwargs):
87 """
88 Remote Popen.
89 """
90 prefixed_cmd = self._prepare_cmd(cmd)
91 return subprocess.Popen(prefixed_cmd, **kwargs)
92
93 def check_output(self, cmd):
94 """
95 Execute a shell command remotely and return the output.
96
97 Simplified version of Popen when you only want the output as a string and detect any errors.
98 """
99 p = self.Popen(cmd, stdout=subprocess.PIPE)
100 output, _ = p.communicate()
101 if p.returncode != 0:
102 raise subprocess.CalledProcessError(p.returncode, cmd)
103 return output
104
105 @contextlib.contextmanager
106 def tunnel(self, local_port, remote_port=None, remote_host="localhost"):
107 """
108 Open a tunnel between localhost:local_port and remote_host:remote_port via the host specified by this context.
109
110 Remember to close() the returned "tunnel" object in order to clean up
111 after yourself when you are done with the tunnel.
112 """
113 tunnel_host = "{0}:{1}:{2}".format(local_port, remote_host, remote_port)
114 proc = self.Popen(
115 # cat so we can shut down gracefully by closing stdin
116 ["-L", tunnel_host, "echo -n ready && cat"],
117 stdin=subprocess.PIPE,
118 stdout=subprocess.PIPE,
119 )
120 # make sure to get the data so we know the connection is established
121 ready = proc.stdout.read(5)
122 assert ready == b"ready", "Didn't get ready from remote echo"
123 yield # user code executed here
124 proc.communicate()
125 assert proc.returncode == 0, "Tunnel process did an unclean exit (returncode %s)" % (proc.returncode,)
126
127
128 class RemoteFileSystem(luigi.target.FileSystem):
129
130 def __init__(self, host, username=None, key_file=None):
131 self.remote_context = RemoteContext(host, username, key_file)
132
133 def exists(self, path):
134 """
135 Return `True` if file or directory at `path` exist, False otherwise.
136 """
137 try:
138 self.remote_context.check_output(["test", "-e", path])
139 except subprocess.CalledProcessError as e:
140 if e.returncode == 1:
141 return False
142 else:
143 raise
144 return True
145
146 def remove(self, path, recursive=True):
147 """
148 Remove file or directory at location `path`.
149 """
150 if recursive:
151 cmd = ["rm", "-r", path]
152 else:
153 cmd = ["rm", path]
154
155 self.remote_context.check_output(cmd)
156
157 def _scp(self, src, dest):
158 cmd = ["scp", "-q", "-B", "-C", "-o", "ControlMaster=no"]
159 if self.remote_context.key_file:
160 cmd.extend(["-i", self.remote_context.key_file])
161 cmd.extend([src, dest])
162 p = subprocess.Popen(cmd)
163 output, _ = p.communicate()
164 if p.returncode != 0:
165 raise subprocess.CalledProcessError(p.returncode, cmd)
166
167 def put(self, local_path, path):
168 # create parent folder if not exists
169 normpath = os.path.normpath(path)
170 folder = os.path.dirname(normpath)
171 if folder and not self.exists(folder):
172 self.remote_context.check_output(['mkdir', '-p', folder])
173
174 tmp_path = path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
175 self._scp(local_path, "%s:%s" % (self.remote_context._host_ref(), tmp_path))
176 self.remote_context.check_output(['mv', tmp_path, path])
177
178 def get(self, path, local_path):
179 # Create folder if it does not exist
180 normpath = os.path.normpath(local_path)
181 folder = os.path.dirname(normpath)
182 if folder and not os.path.exists(folder):
183 os.makedirs(folder)
184
185 tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
186 self._scp("%s:%s" % (self.remote_context._host_ref(), path), tmp_local_path)
187 os.rename(tmp_local_path, local_path)
188
189
190 class AtomicRemoteFileWriter(luigi.format.OutputPipeProcessWrapper):
191
192 def __init__(self, fs, path):
193 self._fs = fs
194 self.path = path
195
196 # create parent folder if not exists
197 normpath = os.path.normpath(self.path)
198 folder = os.path.dirname(normpath)
199 if folder and not self.fs.exists(folder):
200 self.fs.remote_context.check_output(['mkdir', '-p', folder])
201
202 self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)
203 super(AtomicRemoteFileWriter, self).__init__(
204 self.fs.remote_context._prepare_cmd(['cat', '>', self.__tmp_path]))
205
206 def __del__(self):
207 super(AtomicRemoteFileWriter, self).__del__()
208 if self.fs.exists(self.__tmp_path):
209 self.fs.remote_context.check_output(['rm', self.__tmp_path])
210
211 def close(self):
212 super(AtomicRemoteFileWriter, self).close()
213 self.fs.remote_context.check_output(['mv', self.__tmp_path, self.path])
214
215 @property
216 def tmp_path(self):
217 return self.__tmp_path
218
219 @property
220 def fs(self):
221 return self._fs
222
223
224 class RemoteTarget(luigi.target.FileSystemTarget):
225 """
226 Target used for reading from remote files.
227
228 The target is implemented using ssh commands streaming data over the network.
229 """
230
231 def __init__(self, path, host, format=None, username=None, key_file=None):
232 super(RemoteTarget, self).__init__(path)
233 if format is None:
234 format = luigi.format.get_default_format()
235 self.format = format
236 self._fs = RemoteFileSystem(host, username, key_file)
237
238 @property
239 def fs(self):
240 return self._fs
241
242 def open(self, mode='r'):
243 if mode == 'w':
244 file_writer = AtomicRemoteFileWriter(self.fs, self.path)
245 if self.format:
246 return self.format.pipe_writer(file_writer)
247 else:
248 return file_writer
249 elif mode == 'r':
250 file_reader = luigi.format.InputPipeProcessWrapper(
251 self.fs.remote_context._prepare_cmd(["cat", self.path]))
252 if self.format:
253 return self.format.pipe_reader(file_reader)
254 else:
255 return file_reader
256 else:
257 raise Exception("mode must be r/w")
258
259 def put(self, local_path):
260 self.fs.put(local_path, self.path)
261
262 def get(self, local_path):
263 self.fs.get(self.path, local_path)
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/luigi/contrib/ssh.py b/luigi/contrib/ssh.py
--- a/luigi/contrib/ssh.py
+++ b/luigi/contrib/ssh.py
@@ -48,15 +48,16 @@
class RemoteContext(object):
- def __init__(self, host, username=None, key_file=None, connect_timeout=None):
+ def __init__(self, host, username=None, key_file=None, connect_timeout=None, port=None):
self.host = host
self.username = username
self.key_file = key_file
self.connect_timeout = connect_timeout
+ self.port = port
def __repr__(self):
- return '%s(%r, %r, %r, %r)' % (
- type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout)
+ return '%s(%r, %r, %r, %r, %r)' % (
+ type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout, self.port)
def __eq__(self, other):
return repr(self) == repr(other)
@@ -75,6 +76,8 @@
"-S", "none", # disable ControlMaster since it causes all sorts of weird behaviour with subprocesses...
"-o", "BatchMode=yes", # no password prompts etc
]
+ if self.port:
+ connection_cmd.extend(["-p", self.port])
if self.connect_timeout is not None:
connection_cmd += ['-o', 'ConnectTimeout=%d' % self.connect_timeout]
@@ -127,8 +130,8 @@
class RemoteFileSystem(luigi.target.FileSystem):
- def __init__(self, host, username=None, key_file=None):
- self.remote_context = RemoteContext(host, username, key_file)
+ def __init__(self, host, username=None, key_file=None, port=None):
+ self.remote_context = RemoteContext(host, username, key_file, port)
def exists(self, path):
"""
@@ -158,6 +161,8 @@
cmd = ["scp", "-q", "-B", "-C", "-o", "ControlMaster=no"]
if self.remote_context.key_file:
cmd.extend(["-i", self.remote_context.key_file])
+ if self.remote_context.port:
+ cmd.extend(["-P", self.remote_context.port])
cmd.extend([src, dest])
p = subprocess.Popen(cmd)
output, _ = p.communicate()
@@ -228,12 +233,12 @@
The target is implemented using ssh commands streaming data over the network.
"""
- def __init__(self, path, host, format=None, username=None, key_file=None):
+ def __init__(self, path, host, format=None, username=None, key_file=None, port=None):
super(RemoteTarget, self).__init__(path)
if format is None:
format = luigi.format.get_default_format()
self.format = format
- self._fs = RemoteFileSystem(host, username, key_file)
+ self._fs = RemoteFileSystem(host, username, key_file, port)
@property
def fs(self):
|
{"golden_diff": "diff --git a/luigi/contrib/ssh.py b/luigi/contrib/ssh.py\n--- a/luigi/contrib/ssh.py\n+++ b/luigi/contrib/ssh.py\n@@ -48,15 +48,16 @@\n \n class RemoteContext(object):\n \n- def __init__(self, host, username=None, key_file=None, connect_timeout=None):\n+ def __init__(self, host, username=None, key_file=None, connect_timeout=None, port=None):\n self.host = host\n self.username = username\n self.key_file = key_file\n self.connect_timeout = connect_timeout\n+ self.port = port\n \n def __repr__(self):\n- return '%s(%r, %r, %r, %r)' % (\n- type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout)\n+ return '%s(%r, %r, %r, %r, %r)' % (\n+ type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout, self.port)\n \n def __eq__(self, other):\n return repr(self) == repr(other)\n@@ -75,6 +76,8 @@\n \"-S\", \"none\", # disable ControlMaster since it causes all sorts of weird behaviour with subprocesses...\n \"-o\", \"BatchMode=yes\", # no password prompts etc\n ]\n+ if self.port:\n+ connection_cmd.extend([\"-p\", self.port])\n \n if self.connect_timeout is not None:\n connection_cmd += ['-o', 'ConnectTimeout=%d' % self.connect_timeout]\n@@ -127,8 +130,8 @@\n \n class RemoteFileSystem(luigi.target.FileSystem):\n \n- def __init__(self, host, username=None, key_file=None):\n- self.remote_context = RemoteContext(host, username, key_file)\n+ def __init__(self, host, username=None, key_file=None, port=None):\n+ self.remote_context = RemoteContext(host, username, key_file, port)\n \n def exists(self, path):\n \"\"\"\n@@ -158,6 +161,8 @@\n cmd = [\"scp\", \"-q\", \"-B\", \"-C\", \"-o\", \"ControlMaster=no\"]\n if self.remote_context.key_file:\n cmd.extend([\"-i\", self.remote_context.key_file])\n+ if self.remote_context.port:\n+ cmd.extend([\"-P\", self.remote_context.port])\n cmd.extend([src, dest])\n p = subprocess.Popen(cmd)\n output, _ = p.communicate()\n@@ -228,12 +233,12 @@\n The target is implemented using ssh commands streaming data over the network.\n \"\"\"\n \n- def __init__(self, path, host, format=None, username=None, key_file=None):\n+ def __init__(self, path, host, format=None, username=None, key_file=None, port=None):\n super(RemoteTarget, self).__init__(path)\n if format is None:\n format = luigi.format.get_default_format()\n self.format = format\n- self._fs = RemoteFileSystem(host, username, key_file)\n+ self._fs = RemoteFileSystem(host, username, key_file, port)\n \n @property\n def fs(self):\n", "issue": "specifying ssh port for RemoteTarget\nIt would be nice to be able to specify a port number when establishing a RemoteTarget.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLight-weight remote execution library and utilities.\n\nThere are some examples in the unittest, but\nI added another more luigi-specific in the examples directory (examples/ssh_remote_execution.py\n\n:class:`RemoteContext` is meant to provide functionality similar to that of the standard library subprocess module,\nbut where the commands executed are run on a remote machine instead,\nwithout the user having to think about prefixing everything with \"ssh\" and credentials etc.\n\nUsing this mini library (which is just a convenience wrapper for subprocess),\n:class:`RemoteTarget` is created to let you stream data from a remotely stored file using\nthe luigi :class:`~luigi.target.FileSystemTarget` semantics.\n\nAs a bonus, :class:`RemoteContext` also provides a really cool feature that let's you\nset up ssh tunnels super easily using a python context manager (there is an example\nin the integration part of unittests).\n\nThis can be super convenient when you want secure communication using a non-secure\nprotocol or circumvent firewalls (as long as they are open for ssh traffic).\n\"\"\"\n\nimport contextlib\nimport os\nimport random\nimport subprocess\n\nimport luigi\nimport luigi.format\nimport luigi.target\n\n\nclass RemoteContext(object):\n\n def __init__(self, host, username=None, key_file=None, connect_timeout=None):\n self.host = host\n self.username = username\n self.key_file = key_file\n self.connect_timeout = connect_timeout\n\n def __repr__(self):\n return '%s(%r, %r, %r, %r)' % (\n type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout)\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __hash__(self):\n return hash(repr(self))\n\n def _host_ref(self):\n if self.username:\n return \"{0}@{1}\".format(self.username, self.host)\n else:\n return self.host\n\n def _prepare_cmd(self, cmd):\n connection_cmd = [\"ssh\", self._host_ref(),\n \"-S\", \"none\", # disable ControlMaster since it causes all sorts of weird behaviour with subprocesses...\n \"-o\", \"BatchMode=yes\", # no password prompts etc\n ]\n\n if self.connect_timeout is not None:\n connection_cmd += ['-o', 'ConnectTimeout=%d' % self.connect_timeout]\n\n if self.key_file:\n connection_cmd.extend([\"-i\", self.key_file])\n return connection_cmd + cmd\n\n def Popen(self, cmd, **kwargs):\n \"\"\"\n Remote Popen.\n \"\"\"\n prefixed_cmd = self._prepare_cmd(cmd)\n return subprocess.Popen(prefixed_cmd, **kwargs)\n\n def check_output(self, cmd):\n \"\"\"\n Execute a shell command remotely and return the output.\n\n Simplified version of Popen when you only want the output as a string and detect any errors.\n \"\"\"\n p = self.Popen(cmd, stdout=subprocess.PIPE)\n output, _ = p.communicate()\n if p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, cmd)\n return output\n\n @contextlib.contextmanager\n def tunnel(self, local_port, remote_port=None, remote_host=\"localhost\"):\n \"\"\"\n Open a tunnel between localhost:local_port and remote_host:remote_port via the host specified by this context.\n\n Remember to close() the returned \"tunnel\" object in order to clean up\n after yourself when you are done with the tunnel.\n \"\"\"\n tunnel_host = \"{0}:{1}:{2}\".format(local_port, remote_host, remote_port)\n proc = self.Popen(\n # cat so we can shut down gracefully by closing stdin\n [\"-L\", tunnel_host, \"echo -n ready && cat\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n # make sure to get the data so we know the connection is established\n ready = proc.stdout.read(5)\n assert ready == b\"ready\", \"Didn't get ready from remote echo\"\n yield # user code executed here\n proc.communicate()\n assert proc.returncode == 0, \"Tunnel process did an unclean exit (returncode %s)\" % (proc.returncode,)\n\n\nclass RemoteFileSystem(luigi.target.FileSystem):\n\n def __init__(self, host, username=None, key_file=None):\n self.remote_context = RemoteContext(host, username, key_file)\n\n def exists(self, path):\n \"\"\"\n Return `True` if file or directory at `path` exist, False otherwise.\n \"\"\"\n try:\n self.remote_context.check_output([\"test\", \"-e\", path])\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n return False\n else:\n raise\n return True\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove file or directory at location `path`.\n \"\"\"\n if recursive:\n cmd = [\"rm\", \"-r\", path]\n else:\n cmd = [\"rm\", path]\n\n self.remote_context.check_output(cmd)\n\n def _scp(self, src, dest):\n cmd = [\"scp\", \"-q\", \"-B\", \"-C\", \"-o\", \"ControlMaster=no\"]\n if self.remote_context.key_file:\n cmd.extend([\"-i\", self.remote_context.key_file])\n cmd.extend([src, dest])\n p = subprocess.Popen(cmd)\n output, _ = p.communicate()\n if p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, cmd)\n\n def put(self, local_path, path):\n # create parent folder if not exists\n normpath = os.path.normpath(path)\n folder = os.path.dirname(normpath)\n if folder and not self.exists(folder):\n self.remote_context.check_output(['mkdir', '-p', folder])\n\n tmp_path = path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n self._scp(local_path, \"%s:%s\" % (self.remote_context._host_ref(), tmp_path))\n self.remote_context.check_output(['mv', tmp_path, path])\n\n def get(self, path, local_path):\n # Create folder if it does not exist\n normpath = os.path.normpath(local_path)\n folder = os.path.dirname(normpath)\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n self._scp(\"%s:%s\" % (self.remote_context._host_ref(), path), tmp_local_path)\n os.rename(tmp_local_path, local_path)\n\n\nclass AtomicRemoteFileWriter(luigi.format.OutputPipeProcessWrapper):\n\n def __init__(self, fs, path):\n self._fs = fs\n self.path = path\n\n # create parent folder if not exists\n normpath = os.path.normpath(self.path)\n folder = os.path.dirname(normpath)\n if folder and not self.fs.exists(folder):\n self.fs.remote_context.check_output(['mkdir', '-p', folder])\n\n self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n super(AtomicRemoteFileWriter, self).__init__(\n self.fs.remote_context._prepare_cmd(['cat', '>', self.__tmp_path]))\n\n def __del__(self):\n super(AtomicRemoteFileWriter, self).__del__()\n if self.fs.exists(self.__tmp_path):\n self.fs.remote_context.check_output(['rm', self.__tmp_path])\n\n def close(self):\n super(AtomicRemoteFileWriter, self).close()\n self.fs.remote_context.check_output(['mv', self.__tmp_path, self.path])\n\n @property\n def tmp_path(self):\n return self.__tmp_path\n\n @property\n def fs(self):\n return self._fs\n\n\nclass RemoteTarget(luigi.target.FileSystemTarget):\n \"\"\"\n Target used for reading from remote files.\n\n The target is implemented using ssh commands streaming data over the network.\n \"\"\"\n\n def __init__(self, path, host, format=None, username=None, key_file=None):\n super(RemoteTarget, self).__init__(path)\n if format is None:\n format = luigi.format.get_default_format()\n self.format = format\n self._fs = RemoteFileSystem(host, username, key_file)\n\n @property\n def fs(self):\n return self._fs\n\n def open(self, mode='r'):\n if mode == 'w':\n file_writer = AtomicRemoteFileWriter(self.fs, self.path)\n if self.format:\n return self.format.pipe_writer(file_writer)\n else:\n return file_writer\n elif mode == 'r':\n file_reader = luigi.format.InputPipeProcessWrapper(\n self.fs.remote_context._prepare_cmd([\"cat\", self.path]))\n if self.format:\n return self.format.pipe_reader(file_reader)\n else:\n return file_reader\n else:\n raise Exception(\"mode must be r/w\")\n\n def put(self, local_path):\n self.fs.put(local_path, self.path)\n\n def get(self, local_path):\n self.fs.get(self.path, local_path)\n", "path": "luigi/contrib/ssh.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLight-weight remote execution library and utilities.\n\nThere are some examples in the unittest, but\nI added another more luigi-specific in the examples directory (examples/ssh_remote_execution.py\n\n:class:`RemoteContext` is meant to provide functionality similar to that of the standard library subprocess module,\nbut where the commands executed are run on a remote machine instead,\nwithout the user having to think about prefixing everything with \"ssh\" and credentials etc.\n\nUsing this mini library (which is just a convenience wrapper for subprocess),\n:class:`RemoteTarget` is created to let you stream data from a remotely stored file using\nthe luigi :class:`~luigi.target.FileSystemTarget` semantics.\n\nAs a bonus, :class:`RemoteContext` also provides a really cool feature that let's you\nset up ssh tunnels super easily using a python context manager (there is an example\nin the integration part of unittests).\n\nThis can be super convenient when you want secure communication using a non-secure\nprotocol or circumvent firewalls (as long as they are open for ssh traffic).\n\"\"\"\n\nimport contextlib\nimport os\nimport random\nimport subprocess\n\nimport luigi\nimport luigi.format\nimport luigi.target\n\n\nclass RemoteContext(object):\n\n def __init__(self, host, username=None, key_file=None, connect_timeout=None, port=None):\n self.host = host\n self.username = username\n self.key_file = key_file\n self.connect_timeout = connect_timeout\n self.port = port\n\n def __repr__(self):\n return '%s(%r, %r, %r, %r, %r)' % (\n type(self).__name__, self.host, self.username, self.key_file, self.connect_timeout, self.port)\n\n def __eq__(self, other):\n return repr(self) == repr(other)\n\n def __hash__(self):\n return hash(repr(self))\n\n def _host_ref(self):\n if self.username:\n return \"{0}@{1}\".format(self.username, self.host)\n else:\n return self.host\n\n def _prepare_cmd(self, cmd):\n connection_cmd = [\"ssh\", self._host_ref(),\n \"-S\", \"none\", # disable ControlMaster since it causes all sorts of weird behaviour with subprocesses...\n \"-o\", \"BatchMode=yes\", # no password prompts etc\n ]\n if self.port:\n connection_cmd.extend([\"-p\", self.port])\n\n if self.connect_timeout is not None:\n connection_cmd += ['-o', 'ConnectTimeout=%d' % self.connect_timeout]\n\n if self.key_file:\n connection_cmd.extend([\"-i\", self.key_file])\n return connection_cmd + cmd\n\n def Popen(self, cmd, **kwargs):\n \"\"\"\n Remote Popen.\n \"\"\"\n prefixed_cmd = self._prepare_cmd(cmd)\n return subprocess.Popen(prefixed_cmd, **kwargs)\n\n def check_output(self, cmd):\n \"\"\"\n Execute a shell command remotely and return the output.\n\n Simplified version of Popen when you only want the output as a string and detect any errors.\n \"\"\"\n p = self.Popen(cmd, stdout=subprocess.PIPE)\n output, _ = p.communicate()\n if p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, cmd)\n return output\n\n @contextlib.contextmanager\n def tunnel(self, local_port, remote_port=None, remote_host=\"localhost\"):\n \"\"\"\n Open a tunnel between localhost:local_port and remote_host:remote_port via the host specified by this context.\n\n Remember to close() the returned \"tunnel\" object in order to clean up\n after yourself when you are done with the tunnel.\n \"\"\"\n tunnel_host = \"{0}:{1}:{2}\".format(local_port, remote_host, remote_port)\n proc = self.Popen(\n # cat so we can shut down gracefully by closing stdin\n [\"-L\", tunnel_host, \"echo -n ready && cat\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n )\n # make sure to get the data so we know the connection is established\n ready = proc.stdout.read(5)\n assert ready == b\"ready\", \"Didn't get ready from remote echo\"\n yield # user code executed here\n proc.communicate()\n assert proc.returncode == 0, \"Tunnel process did an unclean exit (returncode %s)\" % (proc.returncode,)\n\n\nclass RemoteFileSystem(luigi.target.FileSystem):\n\n def __init__(self, host, username=None, key_file=None, port=None):\n self.remote_context = RemoteContext(host, username, key_file, port)\n\n def exists(self, path):\n \"\"\"\n Return `True` if file or directory at `path` exist, False otherwise.\n \"\"\"\n try:\n self.remote_context.check_output([\"test\", \"-e\", path])\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n return False\n else:\n raise\n return True\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove file or directory at location `path`.\n \"\"\"\n if recursive:\n cmd = [\"rm\", \"-r\", path]\n else:\n cmd = [\"rm\", path]\n\n self.remote_context.check_output(cmd)\n\n def _scp(self, src, dest):\n cmd = [\"scp\", \"-q\", \"-B\", \"-C\", \"-o\", \"ControlMaster=no\"]\n if self.remote_context.key_file:\n cmd.extend([\"-i\", self.remote_context.key_file])\n if self.remote_context.port:\n cmd.extend([\"-P\", self.remote_context.port])\n cmd.extend([src, dest])\n p = subprocess.Popen(cmd)\n output, _ = p.communicate()\n if p.returncode != 0:\n raise subprocess.CalledProcessError(p.returncode, cmd)\n\n def put(self, local_path, path):\n # create parent folder if not exists\n normpath = os.path.normpath(path)\n folder = os.path.dirname(normpath)\n if folder and not self.exists(folder):\n self.remote_context.check_output(['mkdir', '-p', folder])\n\n tmp_path = path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n self._scp(local_path, \"%s:%s\" % (self.remote_context._host_ref(), tmp_path))\n self.remote_context.check_output(['mv', tmp_path, path])\n\n def get(self, path, local_path):\n # Create folder if it does not exist\n normpath = os.path.normpath(local_path)\n folder = os.path.dirname(normpath)\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n tmp_local_path = local_path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n self._scp(\"%s:%s\" % (self.remote_context._host_ref(), path), tmp_local_path)\n os.rename(tmp_local_path, local_path)\n\n\nclass AtomicRemoteFileWriter(luigi.format.OutputPipeProcessWrapper):\n\n def __init__(self, fs, path):\n self._fs = fs\n self.path = path\n\n # create parent folder if not exists\n normpath = os.path.normpath(self.path)\n folder = os.path.dirname(normpath)\n if folder and not self.fs.exists(folder):\n self.fs.remote_context.check_output(['mkdir', '-p', folder])\n\n self.__tmp_path = self.path + '-luigi-tmp-%09d' % random.randrange(0, 1e10)\n super(AtomicRemoteFileWriter, self).__init__(\n self.fs.remote_context._prepare_cmd(['cat', '>', self.__tmp_path]))\n\n def __del__(self):\n super(AtomicRemoteFileWriter, self).__del__()\n if self.fs.exists(self.__tmp_path):\n self.fs.remote_context.check_output(['rm', self.__tmp_path])\n\n def close(self):\n super(AtomicRemoteFileWriter, self).close()\n self.fs.remote_context.check_output(['mv', self.__tmp_path, self.path])\n\n @property\n def tmp_path(self):\n return self.__tmp_path\n\n @property\n def fs(self):\n return self._fs\n\n\nclass RemoteTarget(luigi.target.FileSystemTarget):\n \"\"\"\n Target used for reading from remote files.\n\n The target is implemented using ssh commands streaming data over the network.\n \"\"\"\n\n def __init__(self, path, host, format=None, username=None, key_file=None, port=None):\n super(RemoteTarget, self).__init__(path)\n if format is None:\n format = luigi.format.get_default_format()\n self.format = format\n self._fs = RemoteFileSystem(host, username, key_file, port)\n\n @property\n def fs(self):\n return self._fs\n\n def open(self, mode='r'):\n if mode == 'w':\n file_writer = AtomicRemoteFileWriter(self.fs, self.path)\n if self.format:\n return self.format.pipe_writer(file_writer)\n else:\n return file_writer\n elif mode == 'r':\n file_reader = luigi.format.InputPipeProcessWrapper(\n self.fs.remote_context._prepare_cmd([\"cat\", self.path]))\n if self.format:\n return self.format.pipe_reader(file_reader)\n else:\n return file_reader\n else:\n raise Exception(\"mode must be r/w\")\n\n def put(self, local_path):\n self.fs.put(local_path, self.path)\n\n def get(self, local_path):\n self.fs.get(self.path, local_path)\n", "path": "luigi/contrib/ssh.py"}]}
| 3,119 | 721 |
gh_patches_debug_4268
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-685
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove unnecessary warning when using `download`
When running `retriever download` an unnecessary warning displays
```
ethan@gandalf:~$ retriever download Clark2006
Creating database Clark2006...
Couldn't create database ('NoneType' object has no attribute 'execute').
...
```
The command executes correctly, but this warning isn't really correct (there is no database to create since we're just downloading the raw data files) and could be distracting to users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `engines/download_only.py`
Content:
```
1 from __future__ import print_function
2 from builtins import object
3 import os
4 import platform
5 import shutil
6 import inspect
7
8 from retriever.lib.engine import filename_from_url
9 from retriever.lib.models import Engine, no_cleanup
10 from retriever import DATA_DIR, HOME_DIR
11
12
13 class DummyConnection(object):
14
15 def cursor(self):
16 pass
17
18 def commit(self):
19 pass
20
21 def rollback(self):
22 pass
23
24 def close(self):
25 pass
26
27
28 class DummyCursor(DummyConnection):
29 pass
30
31
32 class engine(Engine):
33 """Engine instance for writing data to a CSV file."""
34 name = "Download Only"
35 abbreviation = "download"
36 required_opts = [("path",
37 "File path to copy data files",
38 "./"),
39 ("subdir",
40 "Keep the subdirectories for archived files",
41 False)
42 ]
43
44 def table_exists(self, dbname, tablename):
45 """Checks if the file to be downloaded already exists"""
46 try:
47 tablename = self.table_name(name=tablename, dbname=dbname)
48 return os.path.exists(tablename)
49 except:
50 return False
51
52 def get_connection(self):
53 """Gets the db connection."""
54 self.get_input()
55 return DummyConnection()
56
57 def final_cleanup(self):
58 """Copies downloaded files to desired directory
59
60 Copies the downloaded files into the chosen directory unless files with the same
61 name already exist in the directory.
62
63 """
64 if hasattr(self, "all_files"):
65 for file_name in self.all_files:
66 file_path, file_name_nopath = os.path.split(file_name)
67 subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''
68 dest_path = os.path.join(self.opts['path'], subdir)
69 if os.path.isfile(os.path.join(dest_path, file_name_nopath)):
70 print ("File already exists at specified location")
71 elif os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):
72 print ("%s is already in the working directory" %
73 file_name_nopath)
74 print("Keeping existing copy.")
75 else:
76 print("Copying %s from %s" % (file_name_nopath, file_path))
77 if os.path.isdir(dest_path):
78 try:
79 shutil.copy(file_name, dest_path)
80 except:
81 print("Couldn't copy file to %s" % dest_path)
82 else:
83 try:
84 print("Creating directory %s" % dest_path)
85 os.makedirs(dest_path)
86 shutil.copy(file_name, dest_path)
87 except:
88 print("Couldn't create directory %s" % dest_path)
89 self.all_files = set()
90
91 def auto_create_table(self, table, url=None, filename=None, pk=None):
92 """Download the file if it doesn't exist"""
93 if url and not filename:
94 filename = filename_from_url(url)
95
96 if url and not self.find_file(filename):
97 # If the file doesn't exist, download it
98 self.download_file(url, filename)
99
100 def insert_data_from_url(self, url):
101 """Insert data from a web resource"""
102 filename = filename_from_url(url)
103 find = self.find_file(filename)
104 if not find:
105 self.create_raw_data_dir()
106 self.download_file(url, filename)
107
108 def find_file(self, filename):
109 """Checks for the given file and adds it to the list of all files"""
110 result = Engine.find_file(self, filename)
111 if not hasattr(self, "all_files"):
112 self.all_files = set()
113 if result:
114 self.all_files.add(result)
115 return result
116
117 def register_files(self, filenames):
118 """Identify a list of files to be moved by the download
119
120 When downloading archives with multiple files the engine needs to be
121 informed of all of the file names so that it can move them.
122
123 """
124 full_filenames = {self.find_file(filename) for filename in filenames
125 if self.find_file(filename)}
126 self.all_files = self.all_files.union(full_filenames)
127
128
129 # replace all other methods with a function that does nothing
130 def dummy_method(self, *args, **kwargs):
131 pass
132
133
134 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
135 keep_methods = {'table_exists',
136 'get_connection',
137 'final_cleanup',
138 'auto_create_table',
139 'insert_data_from_url',
140 }
141 remove_methods = ['insert_data_from_file']
142 for name, method in methods:
143 if (name not in keep_methods and
144 'download' not in name and
145 'file' not in name and
146 'dir' not in name):
147 setattr(engine, name, dummy_method)
148 for name in remove_methods:
149 setattr(engine, name, dummy_method)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -138,7 +138,7 @@
'auto_create_table',
'insert_data_from_url',
}
-remove_methods = ['insert_data_from_file']
+remove_methods = ['insert_data_from_file', 'create_db']
for name, method in methods:
if (name not in keep_methods and
'download' not in name and
|
{"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -138,7 +138,7 @@\n 'auto_create_table',\n 'insert_data_from_url',\n }\n-remove_methods = ['insert_data_from_file']\n+remove_methods = ['insert_data_from_file', 'create_db']\n for name, method in methods:\n if (name not in keep_methods and\n 'download' not in name and\n", "issue": "Remove unnecessary warning when using `download`\nWhen running `retriever download` an unnecessary warning displays\n\n```\nethan@gandalf:~$ retriever download Clark2006\nCreating database Clark2006...\nCouldn't create database ('NoneType' object has no attribute 'execute').\n...\n```\n\nThe command executes correctly, but this warning isn't really correct (there is no database to create since we're just downloading the raw data files) and could be distracting to users.\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom builtins import object\nimport os\nimport platform\nimport shutil\nimport inspect\n\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\n\nclass DummyConnection(object):\n\n def cursor(self):\n pass\n\n def commit(self):\n pass\n\n def rollback(self):\n pass\n\n def close(self):\n pass\n\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n (\"subdir\",\n \"Keep the subdirectories for archived files\",\n False)\n ]\n\n def table_exists(self, dbname, tablename):\n \"\"\"Checks if the file to be downloaded already exists\"\"\"\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n \"\"\"Copies downloaded files to desired directory\n\n Copies the downloaded files into the chosen directory unless files with the same\n name already exist in the directory.\n\n \"\"\"\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''\n dest_path = os.path.join(self.opts['path'], subdir)\n if os.path.isfile(os.path.join(dest_path, file_name_nopath)):\n print (\"File already exists at specified location\")\n elif os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):\n print (\"%s is already in the working directory\" %\n file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(dest_path):\n try:\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't copy file to %s\" % dest_path)\n else:\n try:\n print(\"Creating directory %s\" % dest_path)\n os.makedirs(dest_path)\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't create directory %s\" % dest_path)\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n \"\"\"Download the file if it doesn't exist\"\"\"\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n \"\"\"Insert data from a web resource\"\"\"\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n \"\"\"Checks for the given file and adds it to the list of all files\"\"\"\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"):\n self.all_files = set()\n if result:\n self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames\n if self.find_file(filename)}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\n\n\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (name not in keep_methods and\n 'download' not in name and\n 'file' not in name and\n 'dir' not in name):\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom builtins import object\nimport os\nimport platform\nimport shutil\nimport inspect\n\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\n\nclass DummyConnection(object):\n\n def cursor(self):\n pass\n\n def commit(self):\n pass\n\n def rollback(self):\n pass\n\n def close(self):\n pass\n\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n (\"subdir\",\n \"Keep the subdirectories for archived files\",\n False)\n ]\n\n def table_exists(self, dbname, tablename):\n \"\"\"Checks if the file to be downloaded already exists\"\"\"\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n \"\"\"Copies downloaded files to desired directory\n\n Copies the downloaded files into the chosen directory unless files with the same\n name already exist in the directory.\n\n \"\"\"\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n subdir = os.path.split(file_path)[1] if self.opts['subdir'] else ''\n dest_path = os.path.join(self.opts['path'], subdir)\n if os.path.isfile(os.path.join(dest_path, file_name_nopath)):\n print (\"File already exists at specified location\")\n elif os.path.abspath(file_path) == os.path.abspath(os.path.join(DATA_DIR, subdir)):\n print (\"%s is already in the working directory\" %\n file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n if os.path.isdir(dest_path):\n try:\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't copy file to %s\" % dest_path)\n else:\n try:\n print(\"Creating directory %s\" % dest_path)\n os.makedirs(dest_path)\n shutil.copy(file_name, dest_path)\n except:\n print(\"Couldn't create directory %s\" % dest_path)\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n \"\"\"Download the file if it doesn't exist\"\"\"\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n \"\"\"Insert data from a web resource\"\"\"\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n \"\"\"Checks for the given file and adds it to the list of all files\"\"\"\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"):\n self.all_files = set()\n if result:\n self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames\n if self.find_file(filename)}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\n\n\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file', 'create_db']\nfor name, method in methods:\n if (name not in keep_methods and\n 'download' not in name and\n 'file' not in name and\n 'dir' not in name):\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]}
| 1,707 | 114 |
gh_patches_debug_7225
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-3065
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`SlurmProvider` is missing a docstring for `init_blocks`
**Describe the bug**
The `SlurmProvider` is missing the docstring for its `init_blocks` keyword argument [here](https://github.com/Parsl/parsl/blob/72df44c105a294b90d1b7f9022ed24309fc41885/parsl/providers/slurm/slurm.py#L37).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/providers/slurm/slurm.py`
Content:
```
1 import os
2 import math
3 import time
4 import logging
5 import re
6 import typeguard
7
8 from typing import Optional
9
10 from parsl.channels import LocalChannel
11 from parsl.channels.base import Channel
12 from parsl.jobs.states import JobState, JobStatus
13 from parsl.launchers import SingleNodeLauncher
14 from parsl.launchers.base import Launcher
15 from parsl.providers.cluster_provider import ClusterProvider
16 from parsl.providers.errors import SubmitException
17 from parsl.providers.slurm.template import template_string
18 from parsl.utils import RepresentationMixin, wtime_to_minutes
19
20 logger = logging.getLogger(__name__)
21
22 translate_table = {
23 'PD': JobState.PENDING,
24 'R': JobState.RUNNING,
25 'CA': JobState.CANCELLED,
26 'CF': JobState.PENDING, # (configuring),
27 'CG': JobState.RUNNING, # (completing),
28 'CD': JobState.COMPLETED,
29 'F': JobState.FAILED, # (failed),
30 'TO': JobState.TIMEOUT, # (timeout),
31 'NF': JobState.FAILED, # (node failure),
32 'RV': JobState.FAILED, # (revoked) and
33 'SE': JobState.FAILED # (special exit state)
34 }
35
36
37 class SlurmProvider(ClusterProvider, RepresentationMixin):
38 """Slurm Execution Provider
39
40 This provider uses sbatch to submit, squeue for status and scancel to cancel
41 jobs. The sbatch script to be used is created from a template file in this
42 same module.
43
44 Parameters
45 ----------
46 partition : str
47 Slurm partition to request blocks from. If unspecified or ``None``, no partition slurm directive will be specified.
48 account : str
49 Slurm account to which to charge resources used by the job. If unspecified or ``None``, the job will use the
50 user's default account.
51 qos : str
52 Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.
53 constraint : str
54 Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.
55 channel : Channel
56 Channel for accessing this provider. Possible channels include
57 :class:`~parsl.channels.LocalChannel` (the default),
58 :class:`~parsl.channels.SSHChannel`, or
59 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
60 nodes_per_block : int
61 Nodes to provision per block.
62 cores_per_node : int
63 Specify the number of cores to provision per node. If set to None, executors
64 will assume all cores on the node are available for computation. Default is None.
65 mem_per_node : int
66 Specify the real memory to provision per node in GB. If set to None, no
67 explicit request to the scheduler will be made. Default is None.
68 min_blocks : int
69 Minimum number of blocks to maintain.
70 max_blocks : int
71 Maximum number of blocks to maintain.
72 parallelism : float
73 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
74 scaling where as many resources as possible are used; parallelism close to 0 represents
75 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
76 walltime : str
77 Walltime requested per block in HH:MM:SS.
78 scheduler_options : str
79 String to prepend to the #SBATCH blocks in the submit script to the scheduler.
80 regex_job_id : str
81 The regular expression used to extract the job ID from the ``sbatch`` standard output.
82 The default is ``r"Submitted batch job (?P<id>\\S*)"``, where ``id`` is the regular expression
83 symbolic group for the job ID.
84 worker_init : str
85 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
86 exclusive : bool (Default = True)
87 Requests nodes which are not shared with other running jobs.
88 launcher : Launcher
89 Launcher for this provider. Possible launchers include
90 :class:`~parsl.launchers.SingleNodeLauncher` (the default),
91 :class:`~parsl.launchers.SrunLauncher`, or
92 :class:`~parsl.launchers.AprunLauncher`
93 move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.
94 """
95
96 @typeguard.typechecked
97 def __init__(self,
98 partition: Optional[str] = None,
99 account: Optional[str] = None,
100 qos: Optional[str] = None,
101 constraint: Optional[str] = None,
102 channel: Channel = LocalChannel(),
103 nodes_per_block: int = 1,
104 cores_per_node: Optional[int] = None,
105 mem_per_node: Optional[int] = None,
106 init_blocks: int = 1,
107 min_blocks: int = 0,
108 max_blocks: int = 1,
109 parallelism: float = 1,
110 walltime: str = "00:10:00",
111 scheduler_options: str = '',
112 regex_job_id: str = r"Submitted batch job (?P<id>\S*)",
113 worker_init: str = '',
114 cmd_timeout: int = 10,
115 exclusive: bool = True,
116 move_files: bool = True,
117 launcher: Launcher = SingleNodeLauncher()):
118 label = 'slurm'
119 super().__init__(label,
120 channel,
121 nodes_per_block,
122 init_blocks,
123 min_blocks,
124 max_blocks,
125 parallelism,
126 walltime,
127 cmd_timeout=cmd_timeout,
128 launcher=launcher)
129
130 self.partition = partition
131 self.cores_per_node = cores_per_node
132 self.mem_per_node = mem_per_node
133 self.exclusive = exclusive
134 self.move_files = move_files
135 self.account = account
136 self.qos = qos
137 self.constraint = constraint
138 self.scheduler_options = scheduler_options + '\n'
139 if exclusive:
140 self.scheduler_options += "#SBATCH --exclusive\n"
141 if partition:
142 self.scheduler_options += "#SBATCH --partition={}\n".format(partition)
143 if account:
144 self.scheduler_options += "#SBATCH --account={}\n".format(account)
145 if qos:
146 self.scheduler_options += "#SBATCH --qos={}\n".format(qos)
147 if constraint:
148 self.scheduler_options += "#SBATCH --constraint={}\n".format(constraint)
149
150 self.regex_job_id = regex_job_id
151 self.worker_init = worker_init + '\n'
152
153 def _status(self):
154 '''Returns the status list for a list of job_ids
155
156 Args:
157 self
158
159 Returns:
160 [status...] : Status list of all jobs
161 '''
162 job_id_list = ','.join(
163 [jid for jid, job in self.resources.items() if not job['status'].terminal]
164 )
165 if not job_id_list:
166 logger.debug('No active jobs, skipping status update')
167 return
168
169 cmd = "squeue --noheader --format='%i %t' --job '{0}'".format(job_id_list)
170 logger.debug("Executing %s", cmd)
171 retcode, stdout, stderr = self.execute_wait(cmd)
172 logger.debug("squeue returned %s %s", stdout, stderr)
173
174 # Execute_wait failed. Do no update
175 if retcode != 0:
176 logger.warning("squeue failed with non-zero exit code {}".format(retcode))
177 return
178
179 jobs_missing = set(self.resources.keys())
180 for line in stdout.split('\n'):
181 if not line:
182 # Blank line
183 continue
184 job_id, slurm_state = line.split()
185 if slurm_state not in translate_table:
186 logger.warning(f"Slurm status {slurm_state} is not recognized")
187 status = translate_table.get(slurm_state, JobState.UNKNOWN)
188 logger.debug("Updating job {} with slurm status {} to parsl state {!s}".format(job_id, slurm_state, status))
189 self.resources[job_id]['status'] = JobStatus(status)
190 jobs_missing.remove(job_id)
191
192 # squeue does not report on jobs that are not running. So we are filling in the
193 # blanks for missing jobs, we might lose some information about why the jobs failed.
194 for missing_job in jobs_missing:
195 logger.debug("Updating missing job {} to completed status".format(missing_job))
196 self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)
197
198 def submit(self, command: str, tasks_per_node: int, job_name="parsl.slurm") -> str:
199 """Submit the command as a slurm job.
200
201 Parameters
202 ----------
203 command : str
204 Command to be made on the remote side.
205 tasks_per_node : int
206 Command invocations to be launched per node
207 job_name : str
208 Name for the job
209 Returns
210 -------
211 job id : str
212 A string identifier for the job
213 """
214
215 scheduler_options = self.scheduler_options
216 worker_init = self.worker_init
217 if self.mem_per_node is not None:
218 scheduler_options += '#SBATCH --mem={}g\n'.format(self.mem_per_node)
219 worker_init += 'export PARSL_MEMORY_GB={}\n'.format(self.mem_per_node)
220 if self.cores_per_node is not None:
221 cpus_per_task = math.floor(self.cores_per_node / tasks_per_node)
222 scheduler_options += '#SBATCH --cpus-per-task={}'.format(cpus_per_task)
223 worker_init += 'export PARSL_CORES={}\n'.format(cpus_per_task)
224
225 job_name = "{0}.{1}".format(job_name, time.time())
226
227 script_path = "{0}/{1}.submit".format(self.script_dir, job_name)
228 script_path = os.path.abspath(script_path)
229
230 logger.debug("Requesting one block with {} nodes".format(self.nodes_per_block))
231
232 job_config = {}
233 job_config["submit_script_dir"] = self.channel.script_dir
234 job_config["nodes"] = self.nodes_per_block
235 job_config["tasks_per_node"] = tasks_per_node
236 job_config["walltime"] = wtime_to_minutes(self.walltime)
237 job_config["scheduler_options"] = scheduler_options
238 job_config["worker_init"] = worker_init
239 job_config["user_script"] = command
240
241 # Wrap the command
242 job_config["user_script"] = self.launcher(command,
243 tasks_per_node,
244 self.nodes_per_block)
245
246 logger.debug("Writing submit script")
247 self._write_submit_script(template_string, script_path, job_name, job_config)
248
249 if self.move_files:
250 logger.debug("moving files")
251 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
252 else:
253 logger.debug("not moving files")
254 channel_script_path = script_path
255
256 retcode, stdout, stderr = self.execute_wait("sbatch {0}".format(channel_script_path))
257
258 if retcode == 0:
259 for line in stdout.split('\n'):
260 match = re.match(self.regex_job_id, line)
261 if match:
262 job_id = match.group("id")
263 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
264 return job_id
265 else:
266 logger.error("Could not read job ID from submit command standard output.")
267 logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
268 raise SubmitException(job_name, "Could not read job ID from submit command standard output", stdout=stdout, stderr=stderr, retcode=retcode)
269 else:
270 logger.error("Submit command failed")
271 logger.error("Retcode:%s STDOUT:%s STDERR:%s", retcode, stdout.strip(), stderr.strip())
272 raise SubmitException(job_name, "Could not read job ID from submit command standard output", stdout=stdout, stderr=stderr, retcode=retcode)
273
274 def cancel(self, job_ids):
275 ''' Cancels the jobs specified by a list of job ids
276
277 Args:
278 job_ids : [<job_id> ...]
279
280 Returns :
281 [True/False...] : If the cancel operation fails the entire list will be False.
282 '''
283
284 job_id_list = ' '.join(job_ids)
285 retcode, stdout, stderr = self.execute_wait("scancel {0}".format(job_id_list))
286 rets = None
287 if retcode == 0:
288 for jid in job_ids:
289 self.resources[jid]['status'] = JobStatus(JobState.CANCELLED) # Setting state to cancelled
290 rets = [True for i in job_ids]
291 else:
292 rets = [False for i in job_ids]
293
294 return rets
295
296 @property
297 def status_polling_interval(self):
298 return 60
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/providers/slurm/slurm.py b/parsl/providers/slurm/slurm.py
--- a/parsl/providers/slurm/slurm.py
+++ b/parsl/providers/slurm/slurm.py
@@ -65,6 +65,8 @@
mem_per_node : int
Specify the real memory to provision per node in GB. If set to None, no
explicit request to the scheduler will be made. Default is None.
+ init_blocks : int
+ Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
|
{"golden_diff": "diff --git a/parsl/providers/slurm/slurm.py b/parsl/providers/slurm/slurm.py\n--- a/parsl/providers/slurm/slurm.py\n+++ b/parsl/providers/slurm/slurm.py\n@@ -65,6 +65,8 @@\n mem_per_node : int\n Specify the real memory to provision per node in GB. If set to None, no\n explicit request to the scheduler will be made. Default is None.\n+ init_blocks : int\n+ Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain.\n max_blocks : int\n", "issue": "`SlurmProvider` is missing a docstring for `init_blocks`\n**Describe the bug**\r\nThe `SlurmProvider` is missing the docstring for its `init_blocks` keyword argument [here](https://github.com/Parsl/parsl/blob/72df44c105a294b90d1b7f9022ed24309fc41885/parsl/providers/slurm/slurm.py#L37).\r\n\n", "before_files": [{"content": "import os\nimport math\nimport time\nimport logging\nimport re\nimport typeguard\n\nfrom typing import Optional\n\nfrom parsl.channels import LocalChannel\nfrom parsl.channels.base import Channel\nfrom parsl.jobs.states import JobState, JobStatus\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.launchers.base import Launcher\nfrom parsl.providers.cluster_provider import ClusterProvider\nfrom parsl.providers.errors import SubmitException\nfrom parsl.providers.slurm.template import template_string\nfrom parsl.utils import RepresentationMixin, wtime_to_minutes\n\nlogger = logging.getLogger(__name__)\n\ntranslate_table = {\n 'PD': JobState.PENDING,\n 'R': JobState.RUNNING,\n 'CA': JobState.CANCELLED,\n 'CF': JobState.PENDING, # (configuring),\n 'CG': JobState.RUNNING, # (completing),\n 'CD': JobState.COMPLETED,\n 'F': JobState.FAILED, # (failed),\n 'TO': JobState.TIMEOUT, # (timeout),\n 'NF': JobState.FAILED, # (node failure),\n 'RV': JobState.FAILED, # (revoked) and\n 'SE': JobState.FAILED # (special exit state)\n}\n\n\nclass SlurmProvider(ClusterProvider, RepresentationMixin):\n \"\"\"Slurm Execution Provider\n\n This provider uses sbatch to submit, squeue for status and scancel to cancel\n jobs. The sbatch script to be used is created from a template file in this\n same module.\n\n Parameters\n ----------\n partition : str\n Slurm partition to request blocks from. If unspecified or ``None``, no partition slurm directive will be specified.\n account : str\n Slurm account to which to charge resources used by the job. If unspecified or ``None``, the job will use the\n user's default account.\n qos : str\n Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.\n constraint : str\n Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n nodes_per_block : int\n Nodes to provision per block.\n cores_per_node : int\n Specify the number of cores to provision per node. If set to None, executors\n will assume all cores on the node are available for computation. Default is None.\n mem_per_node : int\n Specify the real memory to provision per node in GB. If set to None, no\n explicit request to the scheduler will be made. Default is None.\n min_blocks : int\n Minimum number of blocks to maintain.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #SBATCH blocks in the submit script to the scheduler.\n regex_job_id : str\n The regular expression used to extract the job ID from the ``sbatch`` standard output.\n The default is ``r\"Submitted batch job (?P<id>\\\\S*)\"``, where ``id`` is the regular expression\n symbolic group for the job ID.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n exclusive : bool (Default = True)\n Requests nodes which are not shared with other running jobs.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n :class:`~parsl.launchers.SrunLauncher`, or\n :class:`~parsl.launchers.AprunLauncher`\n move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.\n \"\"\"\n\n @typeguard.typechecked\n def __init__(self,\n partition: Optional[str] = None,\n account: Optional[str] = None,\n qos: Optional[str] = None,\n constraint: Optional[str] = None,\n channel: Channel = LocalChannel(),\n nodes_per_block: int = 1,\n cores_per_node: Optional[int] = None,\n mem_per_node: Optional[int] = None,\n init_blocks: int = 1,\n min_blocks: int = 0,\n max_blocks: int = 1,\n parallelism: float = 1,\n walltime: str = \"00:10:00\",\n scheduler_options: str = '',\n regex_job_id: str = r\"Submitted batch job (?P<id>\\S*)\",\n worker_init: str = '',\n cmd_timeout: int = 10,\n exclusive: bool = True,\n move_files: bool = True,\n launcher: Launcher = SingleNodeLauncher()):\n label = 'slurm'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n cmd_timeout=cmd_timeout,\n launcher=launcher)\n\n self.partition = partition\n self.cores_per_node = cores_per_node\n self.mem_per_node = mem_per_node\n self.exclusive = exclusive\n self.move_files = move_files\n self.account = account\n self.qos = qos\n self.constraint = constraint\n self.scheduler_options = scheduler_options + '\\n'\n if exclusive:\n self.scheduler_options += \"#SBATCH --exclusive\\n\"\n if partition:\n self.scheduler_options += \"#SBATCH --partition={}\\n\".format(partition)\n if account:\n self.scheduler_options += \"#SBATCH --account={}\\n\".format(account)\n if qos:\n self.scheduler_options += \"#SBATCH --qos={}\\n\".format(qos)\n if constraint:\n self.scheduler_options += \"#SBATCH --constraint={}\\n\".format(constraint)\n\n self.regex_job_id = regex_job_id\n self.worker_init = worker_init + '\\n'\n\n def _status(self):\n '''Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs\n '''\n job_id_list = ','.join(\n [jid for jid, job in self.resources.items() if not job['status'].terminal]\n )\n if not job_id_list:\n logger.debug('No active jobs, skipping status update')\n return\n\n cmd = \"squeue --noheader --format='%i %t' --job '{0}'\".format(job_id_list)\n logger.debug(\"Executing %s\", cmd)\n retcode, stdout, stderr = self.execute_wait(cmd)\n logger.debug(\"squeue returned %s %s\", stdout, stderr)\n\n # Execute_wait failed. Do no update\n if retcode != 0:\n logger.warning(\"squeue failed with non-zero exit code {}\".format(retcode))\n return\n\n jobs_missing = set(self.resources.keys())\n for line in stdout.split('\\n'):\n if not line:\n # Blank line\n continue\n job_id, slurm_state = line.split()\n if slurm_state not in translate_table:\n logger.warning(f\"Slurm status {slurm_state} is not recognized\")\n status = translate_table.get(slurm_state, JobState.UNKNOWN)\n logger.debug(\"Updating job {} with slurm status {} to parsl state {!s}\".format(job_id, slurm_state, status))\n self.resources[job_id]['status'] = JobStatus(status)\n jobs_missing.remove(job_id)\n\n # squeue does not report on jobs that are not running. So we are filling in the\n # blanks for missing jobs, we might lose some information about why the jobs failed.\n for missing_job in jobs_missing:\n logger.debug(\"Updating missing job {} to completed status\".format(missing_job))\n self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)\n\n def submit(self, command: str, tasks_per_node: int, job_name=\"parsl.slurm\") -> str:\n \"\"\"Submit the command as a slurm job.\n\n Parameters\n ----------\n command : str\n Command to be made on the remote side.\n tasks_per_node : int\n Command invocations to be launched per node\n job_name : str\n Name for the job\n Returns\n -------\n job id : str\n A string identifier for the job\n \"\"\"\n\n scheduler_options = self.scheduler_options\n worker_init = self.worker_init\n if self.mem_per_node is not None:\n scheduler_options += '#SBATCH --mem={}g\\n'.format(self.mem_per_node)\n worker_init += 'export PARSL_MEMORY_GB={}\\n'.format(self.mem_per_node)\n if self.cores_per_node is not None:\n cpus_per_task = math.floor(self.cores_per_node / tasks_per_node)\n scheduler_options += '#SBATCH --cpus-per-task={}'.format(cpus_per_task)\n worker_init += 'export PARSL_CORES={}\\n'.format(cpus_per_task)\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n\n logger.debug(\"Requesting one block with {} nodes\".format(self.nodes_per_block))\n\n job_config = {}\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"walltime\"] = wtime_to_minutes(self.walltime)\n job_config[\"scheduler_options\"] = scheduler_options\n job_config[\"worker_init\"] = worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(template_string, script_path, job_name, job_config)\n\n if self.move_files:\n logger.debug(\"moving files\")\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n else:\n logger.debug(\"not moving files\")\n channel_script_path = script_path\n\n retcode, stdout, stderr = self.execute_wait(\"sbatch {0}\".format(channel_script_path))\n\n if retcode == 0:\n for line in stdout.split('\\n'):\n match = re.match(self.regex_job_id, line)\n if match:\n job_id = match.group(\"id\")\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n return job_id\n else:\n logger.error(\"Could not read job ID from submit command standard output.\")\n logger.error(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n raise SubmitException(job_name, \"Could not read job ID from submit command standard output\", stdout=stdout, stderr=stderr, retcode=retcode)\n else:\n logger.error(\"Submit command failed\")\n logger.error(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n raise SubmitException(job_name, \"Could not read job ID from submit command standard output\", stdout=stdout, stderr=stderr, retcode=retcode)\n\n def cancel(self, job_ids):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [<job_id> ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.\n '''\n\n job_id_list = ' '.join(job_ids)\n retcode, stdout, stderr = self.execute_wait(\"scancel {0}\".format(job_id_list))\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = JobStatus(JobState.CANCELLED) # Setting state to cancelled\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def status_polling_interval(self):\n return 60\n", "path": "parsl/providers/slurm/slurm.py"}], "after_files": [{"content": "import os\nimport math\nimport time\nimport logging\nimport re\nimport typeguard\n\nfrom typing import Optional\n\nfrom parsl.channels import LocalChannel\nfrom parsl.channels.base import Channel\nfrom parsl.jobs.states import JobState, JobStatus\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.launchers.base import Launcher\nfrom parsl.providers.cluster_provider import ClusterProvider\nfrom parsl.providers.errors import SubmitException\nfrom parsl.providers.slurm.template import template_string\nfrom parsl.utils import RepresentationMixin, wtime_to_minutes\n\nlogger = logging.getLogger(__name__)\n\ntranslate_table = {\n 'PD': JobState.PENDING,\n 'R': JobState.RUNNING,\n 'CA': JobState.CANCELLED,\n 'CF': JobState.PENDING, # (configuring),\n 'CG': JobState.RUNNING, # (completing),\n 'CD': JobState.COMPLETED,\n 'F': JobState.FAILED, # (failed),\n 'TO': JobState.TIMEOUT, # (timeout),\n 'NF': JobState.FAILED, # (node failure),\n 'RV': JobState.FAILED, # (revoked) and\n 'SE': JobState.FAILED # (special exit state)\n}\n\n\nclass SlurmProvider(ClusterProvider, RepresentationMixin):\n \"\"\"Slurm Execution Provider\n\n This provider uses sbatch to submit, squeue for status and scancel to cancel\n jobs. The sbatch script to be used is created from a template file in this\n same module.\n\n Parameters\n ----------\n partition : str\n Slurm partition to request blocks from. If unspecified or ``None``, no partition slurm directive will be specified.\n account : str\n Slurm account to which to charge resources used by the job. If unspecified or ``None``, the job will use the\n user's default account.\n qos : str\n Slurm queue to place job in. If unspecified or ``None``, no queue slurm directive will be specified.\n constraint : str\n Slurm job constraint, often used to choose cpu or gpu type. If unspecified or ``None``, no constraint slurm directive will be added.\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n nodes_per_block : int\n Nodes to provision per block.\n cores_per_node : int\n Specify the number of cores to provision per node. If set to None, executors\n will assume all cores on the node are available for computation. Default is None.\n mem_per_node : int\n Specify the real memory to provision per node in GB. If set to None, no\n explicit request to the scheduler will be made. Default is None.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #SBATCH blocks in the submit script to the scheduler.\n regex_job_id : str\n The regular expression used to extract the job ID from the ``sbatch`` standard output.\n The default is ``r\"Submitted batch job (?P<id>\\\\S*)\"``, where ``id`` is the regular expression\n symbolic group for the job ID.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n exclusive : bool (Default = True)\n Requests nodes which are not shared with other running jobs.\n launcher : Launcher\n Launcher for this provider. Possible launchers include\n :class:`~parsl.launchers.SingleNodeLauncher` (the default),\n :class:`~parsl.launchers.SrunLauncher`, or\n :class:`~parsl.launchers.AprunLauncher`\n move_files : Optional[Bool]: should files be moved? by default, Parsl will try to move files.\n \"\"\"\n\n @typeguard.typechecked\n def __init__(self,\n partition: Optional[str] = None,\n account: Optional[str] = None,\n qos: Optional[str] = None,\n constraint: Optional[str] = None,\n channel: Channel = LocalChannel(),\n nodes_per_block: int = 1,\n cores_per_node: Optional[int] = None,\n mem_per_node: Optional[int] = None,\n init_blocks: int = 1,\n min_blocks: int = 0,\n max_blocks: int = 1,\n parallelism: float = 1,\n walltime: str = \"00:10:00\",\n scheduler_options: str = '',\n regex_job_id: str = r\"Submitted batch job (?P<id>\\S*)\",\n worker_init: str = '',\n cmd_timeout: int = 10,\n exclusive: bool = True,\n move_files: bool = True,\n launcher: Launcher = SingleNodeLauncher()):\n label = 'slurm'\n super().__init__(label,\n channel,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n walltime,\n cmd_timeout=cmd_timeout,\n launcher=launcher)\n\n self.partition = partition\n self.cores_per_node = cores_per_node\n self.mem_per_node = mem_per_node\n self.exclusive = exclusive\n self.move_files = move_files\n self.account = account\n self.qos = qos\n self.constraint = constraint\n self.scheduler_options = scheduler_options + '\\n'\n if exclusive:\n self.scheduler_options += \"#SBATCH --exclusive\\n\"\n if partition:\n self.scheduler_options += \"#SBATCH --partition={}\\n\".format(partition)\n if account:\n self.scheduler_options += \"#SBATCH --account={}\\n\".format(account)\n if qos:\n self.scheduler_options += \"#SBATCH --qos={}\\n\".format(qos)\n if constraint:\n self.scheduler_options += \"#SBATCH --constraint={}\\n\".format(constraint)\n\n self.regex_job_id = regex_job_id\n self.worker_init = worker_init + '\\n'\n\n def _status(self):\n '''Returns the status list for a list of job_ids\n\n Args:\n self\n\n Returns:\n [status...] : Status list of all jobs\n '''\n job_id_list = ','.join(\n [jid for jid, job in self.resources.items() if not job['status'].terminal]\n )\n if not job_id_list:\n logger.debug('No active jobs, skipping status update')\n return\n\n cmd = \"squeue --noheader --format='%i %t' --job '{0}'\".format(job_id_list)\n logger.debug(\"Executing %s\", cmd)\n retcode, stdout, stderr = self.execute_wait(cmd)\n logger.debug(\"squeue returned %s %s\", stdout, stderr)\n\n # Execute_wait failed. Do no update\n if retcode != 0:\n logger.warning(\"squeue failed with non-zero exit code {}\".format(retcode))\n return\n\n jobs_missing = set(self.resources.keys())\n for line in stdout.split('\\n'):\n if not line:\n # Blank line\n continue\n job_id, slurm_state = line.split()\n if slurm_state not in translate_table:\n logger.warning(f\"Slurm status {slurm_state} is not recognized\")\n status = translate_table.get(slurm_state, JobState.UNKNOWN)\n logger.debug(\"Updating job {} with slurm status {} to parsl state {!s}\".format(job_id, slurm_state, status))\n self.resources[job_id]['status'] = JobStatus(status)\n jobs_missing.remove(job_id)\n\n # squeue does not report on jobs that are not running. So we are filling in the\n # blanks for missing jobs, we might lose some information about why the jobs failed.\n for missing_job in jobs_missing:\n logger.debug(\"Updating missing job {} to completed status\".format(missing_job))\n self.resources[missing_job]['status'] = JobStatus(JobState.COMPLETED)\n\n def submit(self, command: str, tasks_per_node: int, job_name=\"parsl.slurm\") -> str:\n \"\"\"Submit the command as a slurm job.\n\n Parameters\n ----------\n command : str\n Command to be made on the remote side.\n tasks_per_node : int\n Command invocations to be launched per node\n job_name : str\n Name for the job\n Returns\n -------\n job id : str\n A string identifier for the job\n \"\"\"\n\n scheduler_options = self.scheduler_options\n worker_init = self.worker_init\n if self.mem_per_node is not None:\n scheduler_options += '#SBATCH --mem={}g\\n'.format(self.mem_per_node)\n worker_init += 'export PARSL_MEMORY_GB={}\\n'.format(self.mem_per_node)\n if self.cores_per_node is not None:\n cpus_per_task = math.floor(self.cores_per_node / tasks_per_node)\n scheduler_options += '#SBATCH --cpus-per-task={}'.format(cpus_per_task)\n worker_init += 'export PARSL_CORES={}\\n'.format(cpus_per_task)\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n script_path = \"{0}/{1}.submit\".format(self.script_dir, job_name)\n script_path = os.path.abspath(script_path)\n\n logger.debug(\"Requesting one block with {} nodes\".format(self.nodes_per_block))\n\n job_config = {}\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes\"] = self.nodes_per_block\n job_config[\"tasks_per_node\"] = tasks_per_node\n job_config[\"walltime\"] = wtime_to_minutes(self.walltime)\n job_config[\"scheduler_options\"] = scheduler_options\n job_config[\"worker_init\"] = worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(template_string, script_path, job_name, job_config)\n\n if self.move_files:\n logger.debug(\"moving files\")\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n else:\n logger.debug(\"not moving files\")\n channel_script_path = script_path\n\n retcode, stdout, stderr = self.execute_wait(\"sbatch {0}\".format(channel_script_path))\n\n if retcode == 0:\n for line in stdout.split('\\n'):\n match = re.match(self.regex_job_id, line)\n if match:\n job_id = match.group(\"id\")\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n return job_id\n else:\n logger.error(\"Could not read job ID from submit command standard output.\")\n logger.error(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n raise SubmitException(job_name, \"Could not read job ID from submit command standard output\", stdout=stdout, stderr=stderr, retcode=retcode)\n else:\n logger.error(\"Submit command failed\")\n logger.error(\"Retcode:%s STDOUT:%s STDERR:%s\", retcode, stdout.strip(), stderr.strip())\n raise SubmitException(job_name, \"Could not read job ID from submit command standard output\", stdout=stdout, stderr=stderr, retcode=retcode)\n\n def cancel(self, job_ids):\n ''' Cancels the jobs specified by a list of job ids\n\n Args:\n job_ids : [<job_id> ...]\n\n Returns :\n [True/False...] : If the cancel operation fails the entire list will be False.\n '''\n\n job_id_list = ' '.join(job_ids)\n retcode, stdout, stderr = self.execute_wait(\"scancel {0}\".format(job_id_list))\n rets = None\n if retcode == 0:\n for jid in job_ids:\n self.resources[jid]['status'] = JobStatus(JobState.CANCELLED) # Setting state to cancelled\n rets = [True for i in job_ids]\n else:\n rets = [False for i in job_ids]\n\n return rets\n\n @property\n def status_polling_interval(self):\n return 60\n", "path": "parsl/providers/slurm/slurm.py"}]}
| 3,941 | 146 |
gh_patches_debug_938
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-1735
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError in robotstxt middleware
I'm getting these errors in robots.txt middleware:
```
2016-01-27 16:18:21 [scrapy.core.scraper] ERROR: Error downloading <GET http://yellowpages.co.th>
Traceback (most recent call last):
File "/Users/kmike/envs/scraping/lib/python2.7/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred
result = f(*args, **kw)
File "/Users/kmike/svn/scrapy/scrapy/downloadermiddlewares/robotstxt.py", line 65, in robot_parser
if isinstance(self._parsers[netloc], Deferred):
KeyError: 'yellowpages.co.th'
```
It looks like https://github.com/scrapy/scrapy/pull/1473 caused it (I can't get this issue in Scrapy 1.0.4, but it present in Scrapy master). It happens when page failed to download and HTTP cache is enabled. I haven't debugged it further.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/robotstxt.py`
Content:
```
1 """
2 This is a middleware to respect robots.txt policies. To activate it you must
3 enable this middleware and enable the ROBOTSTXT_OBEY setting.
4
5 """
6
7 import logging
8
9 from six.moves.urllib import robotparser
10
11 from twisted.internet.defer import Deferred, maybeDeferred
12 from scrapy.exceptions import NotConfigured, IgnoreRequest
13 from scrapy.http import Request
14 from scrapy.utils.httpobj import urlparse_cached
15 from scrapy.utils.log import failure_to_exc_info
16
17 logger = logging.getLogger(__name__)
18
19
20 class RobotsTxtMiddleware(object):
21 DOWNLOAD_PRIORITY = 1000
22
23 def __init__(self, crawler):
24 if not crawler.settings.getbool('ROBOTSTXT_OBEY'):
25 raise NotConfigured
26
27 self.crawler = crawler
28 self._useragent = crawler.settings.get('USER_AGENT')
29 self._parsers = {}
30
31 @classmethod
32 def from_crawler(cls, crawler):
33 return cls(crawler)
34
35 def process_request(self, request, spider):
36 if request.meta.get('dont_obey_robotstxt'):
37 return
38 d = maybeDeferred(self.robot_parser, request, spider)
39 d.addCallback(self.process_request_2, request, spider)
40 return d
41
42 def process_request_2(self, rp, request, spider):
43 if rp is not None and not rp.can_fetch(self._useragent, request.url):
44 logger.debug("Forbidden by robots.txt: %(request)s",
45 {'request': request}, extra={'spider': spider})
46 raise IgnoreRequest()
47
48 def robot_parser(self, request, spider):
49 url = urlparse_cached(request)
50 netloc = url.netloc
51
52 if netloc not in self._parsers:
53 self._parsers[netloc] = Deferred()
54 robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc)
55 robotsreq = Request(
56 robotsurl,
57 priority=self.DOWNLOAD_PRIORITY,
58 meta={'dont_obey_robotstxt': True}
59 )
60 dfd = self.crawler.engine.download(robotsreq, spider)
61 dfd.addCallback(self._parse_robots, netloc)
62 dfd.addErrback(self._logerror, robotsreq, spider)
63 dfd.addErrback(self._robots_error, netloc)
64
65 if isinstance(self._parsers[netloc], Deferred):
66 d = Deferred()
67 def cb(result):
68 d.callback(result)
69 return result
70 self._parsers[netloc].addCallback(cb)
71 return d
72 else:
73 return self._parsers[netloc]
74
75 def _logerror(self, failure, request, spider):
76 if failure.type is not IgnoreRequest:
77 logger.error("Error downloading %(request)s: %(f_exception)s",
78 {'request': request, 'f_exception': failure.value},
79 exc_info=failure_to_exc_info(failure),
80 extra={'spider': spider})
81 return failure
82
83 def _parse_robots(self, response, netloc):
84 rp = robotparser.RobotFileParser(response.url)
85 body = ''
86 if hasattr(response, 'text'):
87 body = response.text
88 else: # last effort try
89 try:
90 body = response.body.decode('utf-8')
91 except UnicodeDecodeError:
92 # If we found garbage, disregard it:,
93 # but keep the lookup cached (in self._parsers)
94 # Running rp.parse() will set rp state from
95 # 'disallow all' to 'allow any'.
96 pass
97 rp.parse(body.splitlines())
98
99 rp_dfd = self._parsers[netloc]
100 self._parsers[netloc] = rp
101 rp_dfd.callback(rp)
102
103 def _robots_error(self, failure, netloc):
104 self._parsers.pop(netloc).callback(None)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/downloadermiddlewares/robotstxt.py b/scrapy/downloadermiddlewares/robotstxt.py
--- a/scrapy/downloadermiddlewares/robotstxt.py
+++ b/scrapy/downloadermiddlewares/robotstxt.py
@@ -101,4 +101,6 @@
rp_dfd.callback(rp)
def _robots_error(self, failure, netloc):
- self._parsers.pop(netloc).callback(None)
+ rp_dfd = self._parsers[netloc]
+ self._parsers[netloc] = None
+ rp_dfd.callback(None)
|
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/robotstxt.py b/scrapy/downloadermiddlewares/robotstxt.py\n--- a/scrapy/downloadermiddlewares/robotstxt.py\n+++ b/scrapy/downloadermiddlewares/robotstxt.py\n@@ -101,4 +101,6 @@\n rp_dfd.callback(rp)\n \n def _robots_error(self, failure, netloc):\n- self._parsers.pop(netloc).callback(None)\n+ rp_dfd = self._parsers[netloc]\n+ self._parsers[netloc] = None\n+ rp_dfd.callback(None)\n", "issue": "KeyError in robotstxt middleware\nI'm getting these errors in robots.txt middleware:\n\n```\n2016-01-27 16:18:21 [scrapy.core.scraper] ERROR: Error downloading <GET http://yellowpages.co.th>\nTraceback (most recent call last):\n File \"/Users/kmike/envs/scraping/lib/python2.7/site-packages/twisted/internet/defer.py\", line 150, in maybeDeferred\n result = f(*args, **kw)\n File \"/Users/kmike/svn/scrapy/scrapy/downloadermiddlewares/robotstxt.py\", line 65, in robot_parser\n if isinstance(self._parsers[netloc], Deferred):\nKeyError: 'yellowpages.co.th'\n```\n\nIt looks like https://github.com/scrapy/scrapy/pull/1473 caused it (I can't get this issue in Scrapy 1.0.4, but it present in Scrapy master). It happens when page failed to download and HTTP cache is enabled. I haven't debugged it further.\n\n", "before_files": [{"content": "\"\"\"\nThis is a middleware to respect robots.txt policies. To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nimport logging\n\nfrom six.moves.urllib import robotparser\n\nfrom twisted.internet.defer import Deferred, maybeDeferred\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.log import failure_to_exc_info\n\nlogger = logging.getLogger(__name__)\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n d = maybeDeferred(self.robot_parser, request, spider)\n d.addCallback(self.process_request_2, request, spider)\n return d\n\n def process_request_2(self, rp, request, spider):\n if rp is not None and not rp.can_fetch(self._useragent, request.url):\n logger.debug(\"Forbidden by robots.txt: %(request)s\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest()\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n\n if netloc not in self._parsers:\n self._parsers[netloc] = Deferred()\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots, netloc)\n dfd.addErrback(self._logerror, robotsreq, spider)\n dfd.addErrback(self._robots_error, netloc)\n\n if isinstance(self._parsers[netloc], Deferred):\n d = Deferred()\n def cb(result):\n d.callback(result)\n return result\n self._parsers[netloc].addCallback(cb)\n return d\n else:\n return self._parsers[netloc]\n\n def _logerror(self, failure, request, spider):\n if failure.type is not IgnoreRequest:\n logger.error(\"Error downloading %(request)s: %(f_exception)s\",\n {'request': request, 'f_exception': failure.value},\n exc_info=failure_to_exc_info(failure),\n extra={'spider': spider})\n return failure\n\n def _parse_robots(self, response, netloc):\n rp = robotparser.RobotFileParser(response.url)\n body = ''\n if hasattr(response, 'text'):\n body = response.text\n else: # last effort try\n try:\n body = response.body.decode('utf-8')\n except UnicodeDecodeError:\n # If we found garbage, disregard it:,\n # but keep the lookup cached (in self._parsers)\n # Running rp.parse() will set rp state from\n # 'disallow all' to 'allow any'.\n pass\n rp.parse(body.splitlines())\n\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = rp\n rp_dfd.callback(rp)\n\n def _robots_error(self, failure, netloc):\n self._parsers.pop(netloc).callback(None)\n", "path": "scrapy/downloadermiddlewares/robotstxt.py"}], "after_files": [{"content": "\"\"\"\nThis is a middleware to respect robots.txt policies. To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nimport logging\n\nfrom six.moves.urllib import robotparser\n\nfrom twisted.internet.defer import Deferred, maybeDeferred\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\nfrom scrapy.utils.log import failure_to_exc_info\n\nlogger = logging.getLogger(__name__)\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n d = maybeDeferred(self.robot_parser, request, spider)\n d.addCallback(self.process_request_2, request, spider)\n return d\n\n def process_request_2(self, rp, request, spider):\n if rp is not None and not rp.can_fetch(self._useragent, request.url):\n logger.debug(\"Forbidden by robots.txt: %(request)s\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest()\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n\n if netloc not in self._parsers:\n self._parsers[netloc] = Deferred()\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots, netloc)\n dfd.addErrback(self._logerror, robotsreq, spider)\n dfd.addErrback(self._robots_error, netloc)\n\n if isinstance(self._parsers[netloc], Deferred):\n d = Deferred()\n def cb(result):\n d.callback(result)\n return result\n self._parsers[netloc].addCallback(cb)\n return d\n else:\n return self._parsers[netloc]\n\n def _logerror(self, failure, request, spider):\n if failure.type is not IgnoreRequest:\n logger.error(\"Error downloading %(request)s: %(f_exception)s\",\n {'request': request, 'f_exception': failure.value},\n exc_info=failure_to_exc_info(failure),\n extra={'spider': spider})\n return failure\n\n def _parse_robots(self, response, netloc):\n rp = robotparser.RobotFileParser(response.url)\n body = ''\n if hasattr(response, 'body_as_unicode'):\n body = response.body_as_unicode()\n else: # last effort try\n try:\n body = response.body.decode('utf-8')\n except UnicodeDecodeError:\n # If we found garbage, disregard it:,\n # but keep the lookup cached (in self._parsers)\n # Running rp.parse() will set rp state from\n # 'disallow all' to 'allow any'.\n pass\n rp.parse(body.splitlines())\n\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = rp\n rp_dfd.callback(rp)\n\n def _robots_error(self, failure, netloc):\n rp_dfd = self._parsers[netloc]\n self._parsers[netloc] = None\n rp_dfd.callback(None)\n", "path": "scrapy/downloadermiddlewares/robotstxt.py"}]}
| 1,507 | 131 |
gh_patches_debug_5689
|
rasdani/github-patches
|
git_diff
|
instadeepai__Mava-808
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[MAINTAIN] Set environment factory function based on executor or evaluator.
### Please describe what needs to be maintained?
Set `evaluation` variable in environment factory based on if an executor or evaluator is in use.
### Describe the outcome you'd like
Set `evaluation` variable in environment factory based on `store.executor_environment`.
### How do we know when maintenance is complete?
`evaluation` variable in environment factory will be set based on if an executor or evaluator is us use.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mava/components/building/environments.py`
Content:
```
1 # python3
2 # Copyright 2021 InstaDeep Ltd. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Execution components for system builders"""
17 import abc
18 import os
19 from dataclasses import dataclass
20 from typing import Callable, List, Optional, Tuple, Type, Union
21
22 import acme
23
24 from mava import specs
25 from mava.callbacks import Callback
26 from mava.components import Component
27 from mava.components.building.loggers import Logger
28 from mava.core_jax import SystemBuilder
29 from mava.environment_loop import ParallelEnvironmentLoop
30 from mava.utils.sort_utils import sort_str_num
31 from mava.wrappers.environment_loop_wrappers import (
32 DetailedPerAgentStatistics,
33 EnvironmentLoopStatisticsBase,
34 MonitorParallelEnvironmentLoop,
35 )
36
37
38 @dataclass
39 class EnvironmentSpecConfig:
40 environment_factory: Optional[Callable[[bool], acme.core.Worker]] = None
41
42
43 class EnvironmentSpec(Component):
44 def __init__(self, config: EnvironmentSpecConfig = EnvironmentSpecConfig()):
45 """Component creates a multi-agent environment spec.
46
47 Args:
48 config: EnvironmentSpecConfig.
49 """
50 self.config = config
51
52 def on_building_init_start(self, builder: SystemBuilder) -> None:
53 """Using the env factory in config, create and store the env spec and agents.
54
55 Args:
56 builder: SystemBuilder.
57
58 Returns:
59 None.
60 """
61 builder.store.manager_pid = os.getpid()
62 builder.store.ma_environment_spec = specs.MAEnvironmentSpec(
63 self.config.environment_factory()
64 )
65
66 builder.store.agents = sort_str_num(
67 builder.store.ma_environment_spec.get_agent_ids()
68 )
69 builder.store.extras_spec = {}
70
71 @staticmethod
72 def name() -> str:
73 """Static method that returns component name."""
74 return "environment_spec"
75
76 @staticmethod
77 def required_components() -> List[Type[Callback]]:
78 """List of other Components required in the system for this Component to function.
79
80 None required.
81
82 Returns:
83 List of required component classes.
84 """
85 return []
86
87
88 @dataclass
89 class ExecutorEnvironmentLoopConfig:
90 should_update: bool = True
91 executor_stats_wrapper_class: Optional[
92 Type[EnvironmentLoopStatisticsBase]
93 ] = DetailedPerAgentStatistics
94
95
96 class ExecutorEnvironmentLoop(Component):
97 def __init__(
98 self, config: ExecutorEnvironmentLoopConfig = ExecutorEnvironmentLoopConfig()
99 ):
100 """Component creates an executor environment loop.
101
102 Args:
103 config: ExecutorEnvironmentLoopConfig.
104 """
105 self.config = config
106
107 def on_building_executor_environment(self, builder: SystemBuilder) -> None:
108 """Create and store the executor environment from the factory in config.
109
110 Args:
111 builder: SystemBuilder.
112
113 Returns:
114 None.
115 """
116 # Global config set by EnvironmentSpec component
117 builder.store.executor_environment = (
118 builder.store.global_config.environment_factory(evaluation=False)
119 ) # type: ignore
120
121 @abc.abstractmethod
122 def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:
123 """Abstract method for overriding: should create executor environment loop.
124
125 Args:
126 builder: SystemBuilder.
127
128 Returns:
129 None.
130 """
131
132 @staticmethod
133 def name() -> str:
134 """Static method that returns component name."""
135 return "executor_environment_loop"
136
137 @staticmethod
138 def required_components() -> List[Type[Callback]]:
139 """List of other Components required in the system for this Component to function.
140
141 Logger required to set up builder.store.executor_logger.
142 EnvironmentSpec required for config environment_factory.
143
144 Returns:
145 List of required component classes.
146 """
147 return [Logger, EnvironmentSpec]
148
149
150 class ParallelExecutorEnvironmentLoop(ExecutorEnvironmentLoop):
151 def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:
152 """Create and store a parallel environment loop.
153
154 Args:
155 builder: SystemBuilder.
156
157 Returns:
158 None.
159 """
160 executor_environment_loop = ParallelEnvironmentLoop(
161 environment=builder.store.executor_environment,
162 executor=builder.store.executor, # Set up by builder
163 logger=builder.store.executor_logger,
164 should_update=self.config.should_update,
165 )
166 del builder.store.executor_logger
167
168 if self.config.executor_stats_wrapper_class:
169 executor_environment_loop = self.config.executor_stats_wrapper_class(
170 executor_environment_loop
171 )
172 builder.store.system_executor = executor_environment_loop
173
174
175 @dataclass
176 class MonitorExecutorEnvironmentLoopConfig(ExecutorEnvironmentLoopConfig):
177 filename: str = "agents"
178 label: str = "parallel_environment_loop"
179 record_every: int = 1000
180 fps: int = 15
181 counter_str: str = "evaluator_episodes"
182 format: str = "video"
183 figsize: Union[float, Tuple[int, int]] = (360, 640)
184
185
186 class MonitorExecutorEnvironmentLoop(ExecutorEnvironmentLoop):
187 def __init__(
188 self,
189 config: MonitorExecutorEnvironmentLoopConfig = MonitorExecutorEnvironmentLoopConfig(), # noqa
190 ):
191 """Component for visualising environment progress."""
192 super().__init__(config=config)
193 self.config = config
194
195 def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:
196 """Monitors environments and produces videos of episodes.
197
198 Builds a `ParallelEnvironmentLoop` on the evaluator and a
199 `MonitorParallelEnvironmentLoop` on all executors and stores it
200 in the `builder.store.system_executor`.
201
202 Args:
203 builder: SystemBuilder
204 """
205 if builder.store.is_evaluator:
206 executor_environment_loop = MonitorParallelEnvironmentLoop(
207 environment=builder.store.executor_environment,
208 executor=builder.store.executor,
209 logger=builder.store.executor_logger,
210 should_update=self.config.should_update,
211 filename=self.config.filename,
212 label=self.config.label,
213 record_every=self.config.record_every,
214 path=builder.store.global_config.experiment_path,
215 fps=self.config.fps,
216 counter_str=self.config.counter_str,
217 format=self.config.format,
218 figsize=self.config.figsize,
219 )
220 else:
221 executor_environment_loop = ParallelEnvironmentLoop(
222 environment=builder.store.executor_environment,
223 executor=builder.store.executor,
224 logger=builder.store.executor_logger,
225 should_update=self.config.should_update,
226 )
227
228 del builder.store.executor_logger
229
230 if self.config.executor_stats_wrapper_class:
231 executor_environment_loop = self.config.executor_stats_wrapper_class(
232 executor_environment_loop
233 )
234
235 builder.store.system_executor = executor_environment_loop
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mava/components/building/environments.py b/mava/components/building/environments.py
--- a/mava/components/building/environments.py
+++ b/mava/components/building/environments.py
@@ -115,7 +115,9 @@
"""
# Global config set by EnvironmentSpec component
builder.store.executor_environment = (
- builder.store.global_config.environment_factory(evaluation=False)
+ builder.store.global_config.environment_factory(
+ evaluation=builder.store.is_evaluator
+ )
) # type: ignore
@abc.abstractmethod
|
{"golden_diff": "diff --git a/mava/components/building/environments.py b/mava/components/building/environments.py\n--- a/mava/components/building/environments.py\n+++ b/mava/components/building/environments.py\n@@ -115,7 +115,9 @@\n \"\"\"\n # Global config set by EnvironmentSpec component\n builder.store.executor_environment = (\n- builder.store.global_config.environment_factory(evaluation=False)\n+ builder.store.global_config.environment_factory(\n+ evaluation=builder.store.is_evaluator\n+ )\n ) # type: ignore\n \n @abc.abstractmethod\n", "issue": "[MAINTAIN] Set environment factory function based on executor or evaluator.\n### Please describe what needs to be maintained?\r\nSet `evaluation` variable in environment factory based on if an executor or evaluator is in use.\r\n\r\n### Describe the outcome you'd like\r\nSet `evaluation` variable in environment factory based on `store.executor_environment`. \r\n\r\n### How do we know when maintenance is complete?\r\n`evaluation` variable in environment factory will be set based on if an executor or evaluator is us use.\r\n\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Execution components for system builders\"\"\"\nimport abc\nimport os\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Optional, Tuple, Type, Union\n\nimport acme\n\nfrom mava import specs\nfrom mava.callbacks import Callback\nfrom mava.components import Component\nfrom mava.components.building.loggers import Logger\nfrom mava.core_jax import SystemBuilder\nfrom mava.environment_loop import ParallelEnvironmentLoop\nfrom mava.utils.sort_utils import sort_str_num\nfrom mava.wrappers.environment_loop_wrappers import (\n DetailedPerAgentStatistics,\n EnvironmentLoopStatisticsBase,\n MonitorParallelEnvironmentLoop,\n)\n\n\n@dataclass\nclass EnvironmentSpecConfig:\n environment_factory: Optional[Callable[[bool], acme.core.Worker]] = None\n\n\nclass EnvironmentSpec(Component):\n def __init__(self, config: EnvironmentSpecConfig = EnvironmentSpecConfig()):\n \"\"\"Component creates a multi-agent environment spec.\n\n Args:\n config: EnvironmentSpecConfig.\n \"\"\"\n self.config = config\n\n def on_building_init_start(self, builder: SystemBuilder) -> None:\n \"\"\"Using the env factory in config, create and store the env spec and agents.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n builder.store.manager_pid = os.getpid()\n builder.store.ma_environment_spec = specs.MAEnvironmentSpec(\n self.config.environment_factory()\n )\n\n builder.store.agents = sort_str_num(\n builder.store.ma_environment_spec.get_agent_ids()\n )\n builder.store.extras_spec = {}\n\n @staticmethod\n def name() -> str:\n \"\"\"Static method that returns component name.\"\"\"\n return \"environment_spec\"\n\n @staticmethod\n def required_components() -> List[Type[Callback]]:\n \"\"\"List of other Components required in the system for this Component to function.\n\n None required.\n\n Returns:\n List of required component classes.\n \"\"\"\n return []\n\n\n@dataclass\nclass ExecutorEnvironmentLoopConfig:\n should_update: bool = True\n executor_stats_wrapper_class: Optional[\n Type[EnvironmentLoopStatisticsBase]\n ] = DetailedPerAgentStatistics\n\n\nclass ExecutorEnvironmentLoop(Component):\n def __init__(\n self, config: ExecutorEnvironmentLoopConfig = ExecutorEnvironmentLoopConfig()\n ):\n \"\"\"Component creates an executor environment loop.\n\n Args:\n config: ExecutorEnvironmentLoopConfig.\n \"\"\"\n self.config = config\n\n def on_building_executor_environment(self, builder: SystemBuilder) -> None:\n \"\"\"Create and store the executor environment from the factory in config.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n # Global config set by EnvironmentSpec component\n builder.store.executor_environment = (\n builder.store.global_config.environment_factory(evaluation=False)\n ) # type: ignore\n\n @abc.abstractmethod\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Abstract method for overriding: should create executor environment loop.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n\n @staticmethod\n def name() -> str:\n \"\"\"Static method that returns component name.\"\"\"\n return \"executor_environment_loop\"\n\n @staticmethod\n def required_components() -> List[Type[Callback]]:\n \"\"\"List of other Components required in the system for this Component to function.\n\n Logger required to set up builder.store.executor_logger.\n EnvironmentSpec required for config environment_factory.\n\n Returns:\n List of required component classes.\n \"\"\"\n return [Logger, EnvironmentSpec]\n\n\nclass ParallelExecutorEnvironmentLoop(ExecutorEnvironmentLoop):\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Create and store a parallel environment loop.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n executor_environment_loop = ParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor, # Set up by builder\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n )\n del builder.store.executor_logger\n\n if self.config.executor_stats_wrapper_class:\n executor_environment_loop = self.config.executor_stats_wrapper_class(\n executor_environment_loop\n )\n builder.store.system_executor = executor_environment_loop\n\n\n@dataclass\nclass MonitorExecutorEnvironmentLoopConfig(ExecutorEnvironmentLoopConfig):\n filename: str = \"agents\"\n label: str = \"parallel_environment_loop\"\n record_every: int = 1000\n fps: int = 15\n counter_str: str = \"evaluator_episodes\"\n format: str = \"video\"\n figsize: Union[float, Tuple[int, int]] = (360, 640)\n\n\nclass MonitorExecutorEnvironmentLoop(ExecutorEnvironmentLoop):\n def __init__(\n self,\n config: MonitorExecutorEnvironmentLoopConfig = MonitorExecutorEnvironmentLoopConfig(), # noqa\n ):\n \"\"\"Component for visualising environment progress.\"\"\"\n super().__init__(config=config)\n self.config = config\n\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Monitors environments and produces videos of episodes.\n\n Builds a `ParallelEnvironmentLoop` on the evaluator and a\n `MonitorParallelEnvironmentLoop` on all executors and stores it\n in the `builder.store.system_executor`.\n\n Args:\n builder: SystemBuilder\n \"\"\"\n if builder.store.is_evaluator:\n executor_environment_loop = MonitorParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor,\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n filename=self.config.filename,\n label=self.config.label,\n record_every=self.config.record_every,\n path=builder.store.global_config.experiment_path,\n fps=self.config.fps,\n counter_str=self.config.counter_str,\n format=self.config.format,\n figsize=self.config.figsize,\n )\n else:\n executor_environment_loop = ParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor,\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n )\n\n del builder.store.executor_logger\n\n if self.config.executor_stats_wrapper_class:\n executor_environment_loop = self.config.executor_stats_wrapper_class(\n executor_environment_loop\n )\n\n builder.store.system_executor = executor_environment_loop\n", "path": "mava/components/building/environments.py"}], "after_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Execution components for system builders\"\"\"\nimport abc\nimport os\nfrom dataclasses import dataclass\nfrom typing import Callable, List, Optional, Tuple, Type, Union\n\nimport acme\n\nfrom mava import specs\nfrom mava.callbacks import Callback\nfrom mava.components import Component\nfrom mava.components.building.loggers import Logger\nfrom mava.core_jax import SystemBuilder\nfrom mava.environment_loop import ParallelEnvironmentLoop\nfrom mava.utils.sort_utils import sort_str_num\nfrom mava.wrappers.environment_loop_wrappers import (\n DetailedPerAgentStatistics,\n EnvironmentLoopStatisticsBase,\n MonitorParallelEnvironmentLoop,\n)\n\n\n@dataclass\nclass EnvironmentSpecConfig:\n environment_factory: Optional[Callable[[bool], acme.core.Worker]] = None\n\n\nclass EnvironmentSpec(Component):\n def __init__(self, config: EnvironmentSpecConfig = EnvironmentSpecConfig()):\n \"\"\"Component creates a multi-agent environment spec.\n\n Args:\n config: EnvironmentSpecConfig.\n \"\"\"\n self.config = config\n\n def on_building_init_start(self, builder: SystemBuilder) -> None:\n \"\"\"Using the env factory in config, create and store the env spec and agents.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n builder.store.manager_pid = os.getpid()\n builder.store.ma_environment_spec = specs.MAEnvironmentSpec(\n self.config.environment_factory()\n )\n\n builder.store.agents = sort_str_num(\n builder.store.ma_environment_spec.get_agent_ids()\n )\n builder.store.extras_spec = {}\n\n @staticmethod\n def name() -> str:\n \"\"\"Static method that returns component name.\"\"\"\n return \"environment_spec\"\n\n @staticmethod\n def required_components() -> List[Type[Callback]]:\n \"\"\"List of other Components required in the system for this Component to function.\n\n None required.\n\n Returns:\n List of required component classes.\n \"\"\"\n return []\n\n\n@dataclass\nclass ExecutorEnvironmentLoopConfig:\n should_update: bool = True\n executor_stats_wrapper_class: Optional[\n Type[EnvironmentLoopStatisticsBase]\n ] = DetailedPerAgentStatistics\n\n\nclass ExecutorEnvironmentLoop(Component):\n def __init__(\n self, config: ExecutorEnvironmentLoopConfig = ExecutorEnvironmentLoopConfig()\n ):\n \"\"\"Component creates an executor environment loop.\n\n Args:\n config: ExecutorEnvironmentLoopConfig.\n \"\"\"\n self.config = config\n\n def on_building_executor_environment(self, builder: SystemBuilder) -> None:\n \"\"\"Create and store the executor environment from the factory in config.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n # Global config set by EnvironmentSpec component\n builder.store.executor_environment = (\n builder.store.global_config.environment_factory(\n evaluation=builder.store.is_evaluator\n )\n ) # type: ignore\n\n @abc.abstractmethod\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Abstract method for overriding: should create executor environment loop.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n\n @staticmethod\n def name() -> str:\n \"\"\"Static method that returns component name.\"\"\"\n return \"executor_environment_loop\"\n\n @staticmethod\n def required_components() -> List[Type[Callback]]:\n \"\"\"List of other Components required in the system for this Component to function.\n\n Logger required to set up builder.store.executor_logger.\n EnvironmentSpec required for config environment_factory.\n\n Returns:\n List of required component classes.\n \"\"\"\n return [Logger, EnvironmentSpec]\n\n\nclass ParallelExecutorEnvironmentLoop(ExecutorEnvironmentLoop):\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Create and store a parallel environment loop.\n\n Args:\n builder: SystemBuilder.\n\n Returns:\n None.\n \"\"\"\n executor_environment_loop = ParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor, # Set up by builder\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n )\n del builder.store.executor_logger\n\n if self.config.executor_stats_wrapper_class:\n executor_environment_loop = self.config.executor_stats_wrapper_class(\n executor_environment_loop\n )\n builder.store.system_executor = executor_environment_loop\n\n\n@dataclass\nclass MonitorExecutorEnvironmentLoopConfig(ExecutorEnvironmentLoopConfig):\n filename: str = \"agents\"\n label: str = \"parallel_environment_loop\"\n record_every: int = 1000\n fps: int = 15\n counter_str: str = \"evaluator_episodes\"\n format: str = \"video\"\n figsize: Union[float, Tuple[int, int]] = (360, 640)\n\n\nclass MonitorExecutorEnvironmentLoop(ExecutorEnvironmentLoop):\n def __init__(\n self,\n config: MonitorExecutorEnvironmentLoopConfig = MonitorExecutorEnvironmentLoopConfig(), # noqa\n ):\n \"\"\"Component for visualising environment progress.\"\"\"\n super().__init__(config=config)\n self.config = config\n\n def on_building_executor_environment_loop(self, builder: SystemBuilder) -> None:\n \"\"\"Monitors environments and produces videos of episodes.\n\n Builds a `ParallelEnvironmentLoop` on the evaluator and a\n `MonitorParallelEnvironmentLoop` on all executors and stores it\n in the `builder.store.system_executor`.\n\n Args:\n builder: SystemBuilder\n \"\"\"\n if builder.store.is_evaluator:\n executor_environment_loop = MonitorParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor,\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n filename=self.config.filename,\n label=self.config.label,\n record_every=self.config.record_every,\n path=builder.store.global_config.experiment_path,\n fps=self.config.fps,\n counter_str=self.config.counter_str,\n format=self.config.format,\n figsize=self.config.figsize,\n )\n else:\n executor_environment_loop = ParallelEnvironmentLoop(\n environment=builder.store.executor_environment,\n executor=builder.store.executor,\n logger=builder.store.executor_logger,\n should_update=self.config.should_update,\n )\n\n del builder.store.executor_logger\n\n if self.config.executor_stats_wrapper_class:\n executor_environment_loop = self.config.executor_stats_wrapper_class(\n executor_environment_loop\n )\n\n builder.store.system_executor = executor_environment_loop\n", "path": "mava/components/building/environments.py"}]}
| 2,462 | 125 |
gh_patches_debug_8639
|
rasdani/github-patches
|
git_diff
|
pallets__werkzeug-2727
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`FileStorage.content_length` raises `TypeError` if no length was provided
I'm trying to get a file size using property `content_length` of class `werkzeug.datastructures.FileStorage`, but getting an error
```
File ".../services/file.py", line 18, in save_web_file
if file.content_length > (2**8) * (2**10) * 100: # 100KB
File ".../lib/python3.10/site-packages/werkzeug/datastructures/file_storage.py", line 71, in content_length
return _plain_int(self.headers.get("content-length") or 0)
File ".../lib/python3.10/site-packages/werkzeug/_internal.py", line 326, in _plain_int
if _plain_int_re.fullmatch(value) is None:
TypeError: expected string or bytes-like object
```
To reproduce this, you need to POST file form without `content_length`
To fix it, i think we need to change 0 as int to '0', because _plain_int expects value as str, not int, but when we haven't content-length header or have empty field, 0 as int will passed.
```python
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
try:
return _plain_int(self.headers.get("content-length") or 0) # here 0 -> '0'
except ValueError:
return 0
```
Environment:
- Python version: 3.10
- Flask version: 2.3.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/datastructures/file_storage.py`
Content:
```
1 from __future__ import annotations
2
3 import mimetypes
4 from io import BytesIO
5 from os import fsdecode
6 from os import fspath
7
8 from .._internal import _plain_int
9 from .structures import MultiDict
10
11
12 class FileStorage:
13 """The :class:`FileStorage` class is a thin wrapper over incoming files.
14 It is used by the request object to represent uploaded files. All the
15 attributes of the wrapper stream are proxied by the file storage so
16 it's possible to do ``storage.read()`` instead of the long form
17 ``storage.stream.read()``.
18 """
19
20 def __init__(
21 self,
22 stream=None,
23 filename=None,
24 name=None,
25 content_type=None,
26 content_length=None,
27 headers=None,
28 ):
29 self.name = name
30 self.stream = stream or BytesIO()
31
32 # If no filename is provided, attempt to get the filename from
33 # the stream object. Python names special streams like
34 # ``<stderr>`` with angular brackets, skip these streams.
35 if filename is None:
36 filename = getattr(stream, "name", None)
37
38 if filename is not None:
39 filename = fsdecode(filename)
40
41 if filename and filename[0] == "<" and filename[-1] == ">":
42 filename = None
43 else:
44 filename = fsdecode(filename)
45
46 self.filename = filename
47
48 if headers is None:
49 from .headers import Headers
50
51 headers = Headers()
52 self.headers = headers
53 if content_type is not None:
54 headers["Content-Type"] = content_type
55 if content_length is not None:
56 headers["Content-Length"] = str(content_length)
57
58 def _parse_content_type(self):
59 if not hasattr(self, "_parsed_content_type"):
60 self._parsed_content_type = http.parse_options_header(self.content_type)
61
62 @property
63 def content_type(self):
64 """The content-type sent in the header. Usually not available"""
65 return self.headers.get("content-type")
66
67 @property
68 def content_length(self):
69 """The content-length sent in the header. Usually not available"""
70 try:
71 return _plain_int(self.headers.get("content-length") or 0)
72 except ValueError:
73 return 0
74
75 @property
76 def mimetype(self):
77 """Like :attr:`content_type`, but without parameters (eg, without
78 charset, type etc.) and always lowercase. For example if the content
79 type is ``text/HTML; charset=utf-8`` the mimetype would be
80 ``'text/html'``.
81
82 .. versionadded:: 0.7
83 """
84 self._parse_content_type()
85 return self._parsed_content_type[0].lower()
86
87 @property
88 def mimetype_params(self):
89 """The mimetype parameters as dict. For example if the content
90 type is ``text/html; charset=utf-8`` the params would be
91 ``{'charset': 'utf-8'}``.
92
93 .. versionadded:: 0.7
94 """
95 self._parse_content_type()
96 return self._parsed_content_type[1]
97
98 def save(self, dst, buffer_size=16384):
99 """Save the file to a destination path or file object. If the
100 destination is a file object you have to close it yourself after the
101 call. The buffer size is the number of bytes held in memory during
102 the copy process. It defaults to 16KB.
103
104 For secure file saving also have a look at :func:`secure_filename`.
105
106 :param dst: a filename, :class:`os.PathLike`, or open file
107 object to write to.
108 :param buffer_size: Passed as the ``length`` parameter of
109 :func:`shutil.copyfileobj`.
110
111 .. versionchanged:: 1.0
112 Supports :mod:`pathlib`.
113 """
114 from shutil import copyfileobj
115
116 close_dst = False
117
118 if hasattr(dst, "__fspath__"):
119 dst = fspath(dst)
120
121 if isinstance(dst, str):
122 dst = open(dst, "wb")
123 close_dst = True
124
125 try:
126 copyfileobj(self.stream, dst, buffer_size)
127 finally:
128 if close_dst:
129 dst.close()
130
131 def close(self):
132 """Close the underlying file if possible."""
133 try:
134 self.stream.close()
135 except Exception:
136 pass
137
138 def __bool__(self):
139 return bool(self.filename)
140
141 def __getattr__(self, name):
142 try:
143 return getattr(self.stream, name)
144 except AttributeError:
145 # SpooledTemporaryFile doesn't implement IOBase, get the
146 # attribute from its backing file instead.
147 # https://github.com/python/cpython/pull/3249
148 if hasattr(self.stream, "_file"):
149 return getattr(self.stream._file, name)
150 raise
151
152 def __iter__(self):
153 return iter(self.stream)
154
155 def __repr__(self):
156 return f"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>"
157
158
159 class FileMultiDict(MultiDict):
160 """A special :class:`MultiDict` that has convenience methods to add
161 files to it. This is used for :class:`EnvironBuilder` and generally
162 useful for unittesting.
163
164 .. versionadded:: 0.5
165 """
166
167 def add_file(self, name, file, filename=None, content_type=None):
168 """Adds a new file to the dict. `file` can be a file name or
169 a :class:`file`-like or a :class:`FileStorage` object.
170
171 :param name: the name of the field.
172 :param file: a filename or :class:`file`-like object
173 :param filename: an optional filename
174 :param content_type: an optional content type
175 """
176 if isinstance(file, FileStorage):
177 value = file
178 else:
179 if isinstance(file, str):
180 if filename is None:
181 filename = file
182 file = open(file, "rb")
183 if filename and content_type is None:
184 content_type = (
185 mimetypes.guess_type(filename)[0] or "application/octet-stream"
186 )
187 value = FileStorage(file, filename, name, content_type)
188
189 self.add(name, value)
190
191
192 # circular dependencies
193 from .. import http
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/werkzeug/datastructures/file_storage.py b/src/werkzeug/datastructures/file_storage.py
--- a/src/werkzeug/datastructures/file_storage.py
+++ b/src/werkzeug/datastructures/file_storage.py
@@ -67,10 +67,13 @@
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
- try:
- return _plain_int(self.headers.get("content-length") or 0)
- except ValueError:
- return 0
+ if "content-length" in self.headers:
+ try:
+ return _plain_int(self.headers["content-length"])
+ except ValueError:
+ pass
+
+ return 0
@property
def mimetype(self):
|
{"golden_diff": "diff --git a/src/werkzeug/datastructures/file_storage.py b/src/werkzeug/datastructures/file_storage.py\n--- a/src/werkzeug/datastructures/file_storage.py\n+++ b/src/werkzeug/datastructures/file_storage.py\n@@ -67,10 +67,13 @@\n @property\n def content_length(self):\n \"\"\"The content-length sent in the header. Usually not available\"\"\"\n- try:\n- return _plain_int(self.headers.get(\"content-length\") or 0)\n- except ValueError:\n- return 0\n+ if \"content-length\" in self.headers:\n+ try:\n+ return _plain_int(self.headers[\"content-length\"])\n+ except ValueError:\n+ pass\n+\n+ return 0\n \n @property\n def mimetype(self):\n", "issue": "`FileStorage.content_length` raises `TypeError` if no length was provided\nI'm trying to get a file size using property `content_length` of class `werkzeug.datastructures.FileStorage`, but getting an error\r\n```\r\nFile \".../services/file.py\", line 18, in save_web_file\r\n if file.content_length > (2**8) * (2**10) * 100: # 100KB\r\n File \".../lib/python3.10/site-packages/werkzeug/datastructures/file_storage.py\", line 71, in content_length\r\n return _plain_int(self.headers.get(\"content-length\") or 0)\r\n File \".../lib/python3.10/site-packages/werkzeug/_internal.py\", line 326, in _plain_int\r\n if _plain_int_re.fullmatch(value) is None:\r\nTypeError: expected string or bytes-like object\r\n```\r\nTo reproduce this, you need to POST file form without `content_length` \r\n\r\nTo fix it, i think we need to change 0 as int to '0', because _plain_int expects value as str, not int, but when we haven't content-length header or have empty field, 0 as int will passed.\r\n```python\r\n@property\r\ndef content_length(self):\r\n \"\"\"The content-length sent in the header. Usually not available\"\"\"\r\n try:\r\n return _plain_int(self.headers.get(\"content-length\") or 0) # here 0 -> '0'\r\n except ValueError:\r\n return 0\r\n ```\r\n\r\nEnvironment:\r\n\r\n- Python version: 3.10\r\n- Flask version: 2.3.2\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport mimetypes\nfrom io import BytesIO\nfrom os import fsdecode\nfrom os import fspath\n\nfrom .._internal import _plain_int\nfrom .structures import MultiDict\n\n\nclass FileStorage:\n \"\"\"The :class:`FileStorage` class is a thin wrapper over incoming files.\n It is used by the request object to represent uploaded files. All the\n attributes of the wrapper stream are proxied by the file storage so\n it's possible to do ``storage.read()`` instead of the long form\n ``storage.stream.read()``.\n \"\"\"\n\n def __init__(\n self,\n stream=None,\n filename=None,\n name=None,\n content_type=None,\n content_length=None,\n headers=None,\n ):\n self.name = name\n self.stream = stream or BytesIO()\n\n # If no filename is provided, attempt to get the filename from\n # the stream object. Python names special streams like\n # ``<stderr>`` with angular brackets, skip these streams.\n if filename is None:\n filename = getattr(stream, \"name\", None)\n\n if filename is not None:\n filename = fsdecode(filename)\n\n if filename and filename[0] == \"<\" and filename[-1] == \">\":\n filename = None\n else:\n filename = fsdecode(filename)\n\n self.filename = filename\n\n if headers is None:\n from .headers import Headers\n\n headers = Headers()\n self.headers = headers\n if content_type is not None:\n headers[\"Content-Type\"] = content_type\n if content_length is not None:\n headers[\"Content-Length\"] = str(content_length)\n\n def _parse_content_type(self):\n if not hasattr(self, \"_parsed_content_type\"):\n self._parsed_content_type = http.parse_options_header(self.content_type)\n\n @property\n def content_type(self):\n \"\"\"The content-type sent in the header. Usually not available\"\"\"\n return self.headers.get(\"content-type\")\n\n @property\n def content_length(self):\n \"\"\"The content-length sent in the header. Usually not available\"\"\"\n try:\n return _plain_int(self.headers.get(\"content-length\") or 0)\n except ValueError:\n return 0\n\n @property\n def mimetype(self):\n \"\"\"Like :attr:`content_type`, but without parameters (eg, without\n charset, type etc.) and always lowercase. For example if the content\n type is ``text/HTML; charset=utf-8`` the mimetype would be\n ``'text/html'``.\n\n .. versionadded:: 0.7\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[0].lower()\n\n @property\n def mimetype_params(self):\n \"\"\"The mimetype parameters as dict. For example if the content\n type is ``text/html; charset=utf-8`` the params would be\n ``{'charset': 'utf-8'}``.\n\n .. versionadded:: 0.7\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[1]\n\n def save(self, dst, buffer_size=16384):\n \"\"\"Save the file to a destination path or file object. If the\n destination is a file object you have to close it yourself after the\n call. The buffer size is the number of bytes held in memory during\n the copy process. It defaults to 16KB.\n\n For secure file saving also have a look at :func:`secure_filename`.\n\n :param dst: a filename, :class:`os.PathLike`, or open file\n object to write to.\n :param buffer_size: Passed as the ``length`` parameter of\n :func:`shutil.copyfileobj`.\n\n .. versionchanged:: 1.0\n Supports :mod:`pathlib`.\n \"\"\"\n from shutil import copyfileobj\n\n close_dst = False\n\n if hasattr(dst, \"__fspath__\"):\n dst = fspath(dst)\n\n if isinstance(dst, str):\n dst = open(dst, \"wb\")\n close_dst = True\n\n try:\n copyfileobj(self.stream, dst, buffer_size)\n finally:\n if close_dst:\n dst.close()\n\n def close(self):\n \"\"\"Close the underlying file if possible.\"\"\"\n try:\n self.stream.close()\n except Exception:\n pass\n\n def __bool__(self):\n return bool(self.filename)\n\n def __getattr__(self, name):\n try:\n return getattr(self.stream, name)\n except AttributeError:\n # SpooledTemporaryFile doesn't implement IOBase, get the\n # attribute from its backing file instead.\n # https://github.com/python/cpython/pull/3249\n if hasattr(self.stream, \"_file\"):\n return getattr(self.stream._file, name)\n raise\n\n def __iter__(self):\n return iter(self.stream)\n\n def __repr__(self):\n return f\"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>\"\n\n\nclass FileMultiDict(MultiDict):\n \"\"\"A special :class:`MultiDict` that has convenience methods to add\n files to it. This is used for :class:`EnvironBuilder` and generally\n useful for unittesting.\n\n .. versionadded:: 0.5\n \"\"\"\n\n def add_file(self, name, file, filename=None, content_type=None):\n \"\"\"Adds a new file to the dict. `file` can be a file name or\n a :class:`file`-like or a :class:`FileStorage` object.\n\n :param name: the name of the field.\n :param file: a filename or :class:`file`-like object\n :param filename: an optional filename\n :param content_type: an optional content type\n \"\"\"\n if isinstance(file, FileStorage):\n value = file\n else:\n if isinstance(file, str):\n if filename is None:\n filename = file\n file = open(file, \"rb\")\n if filename and content_type is None:\n content_type = (\n mimetypes.guess_type(filename)[0] or \"application/octet-stream\"\n )\n value = FileStorage(file, filename, name, content_type)\n\n self.add(name, value)\n\n\n# circular dependencies\nfrom .. import http\n", "path": "src/werkzeug/datastructures/file_storage.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport mimetypes\nfrom io import BytesIO\nfrom os import fsdecode\nfrom os import fspath\n\nfrom .._internal import _plain_int\nfrom .structures import MultiDict\n\n\nclass FileStorage:\n \"\"\"The :class:`FileStorage` class is a thin wrapper over incoming files.\n It is used by the request object to represent uploaded files. All the\n attributes of the wrapper stream are proxied by the file storage so\n it's possible to do ``storage.read()`` instead of the long form\n ``storage.stream.read()``.\n \"\"\"\n\n def __init__(\n self,\n stream=None,\n filename=None,\n name=None,\n content_type=None,\n content_length=None,\n headers=None,\n ):\n self.name = name\n self.stream = stream or BytesIO()\n\n # If no filename is provided, attempt to get the filename from\n # the stream object. Python names special streams like\n # ``<stderr>`` with angular brackets, skip these streams.\n if filename is None:\n filename = getattr(stream, \"name\", None)\n\n if filename is not None:\n filename = fsdecode(filename)\n\n if filename and filename[0] == \"<\" and filename[-1] == \">\":\n filename = None\n else:\n filename = fsdecode(filename)\n\n self.filename = filename\n\n if headers is None:\n from .headers import Headers\n\n headers = Headers()\n self.headers = headers\n if content_type is not None:\n headers[\"Content-Type\"] = content_type\n if content_length is not None:\n headers[\"Content-Length\"] = str(content_length)\n\n def _parse_content_type(self):\n if not hasattr(self, \"_parsed_content_type\"):\n self._parsed_content_type = http.parse_options_header(self.content_type)\n\n @property\n def content_type(self):\n \"\"\"The content-type sent in the header. Usually not available\"\"\"\n return self.headers.get(\"content-type\")\n\n @property\n def content_length(self):\n \"\"\"The content-length sent in the header. Usually not available\"\"\"\n if \"content-length\" in self.headers:\n try:\n return _plain_int(self.headers[\"content-length\"])\n except ValueError:\n pass\n\n return 0\n\n @property\n def mimetype(self):\n \"\"\"Like :attr:`content_type`, but without parameters (eg, without\n charset, type etc.) and always lowercase. For example if the content\n type is ``text/HTML; charset=utf-8`` the mimetype would be\n ``'text/html'``.\n\n .. versionadded:: 0.7\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[0].lower()\n\n @property\n def mimetype_params(self):\n \"\"\"The mimetype parameters as dict. For example if the content\n type is ``text/html; charset=utf-8`` the params would be\n ``{'charset': 'utf-8'}``.\n\n .. versionadded:: 0.7\n \"\"\"\n self._parse_content_type()\n return self._parsed_content_type[1]\n\n def save(self, dst, buffer_size=16384):\n \"\"\"Save the file to a destination path or file object. If the\n destination is a file object you have to close it yourself after the\n call. The buffer size is the number of bytes held in memory during\n the copy process. It defaults to 16KB.\n\n For secure file saving also have a look at :func:`secure_filename`.\n\n :param dst: a filename, :class:`os.PathLike`, or open file\n object to write to.\n :param buffer_size: Passed as the ``length`` parameter of\n :func:`shutil.copyfileobj`.\n\n .. versionchanged:: 1.0\n Supports :mod:`pathlib`.\n \"\"\"\n from shutil import copyfileobj\n\n close_dst = False\n\n if hasattr(dst, \"__fspath__\"):\n dst = fspath(dst)\n\n if isinstance(dst, str):\n dst = open(dst, \"wb\")\n close_dst = True\n\n try:\n copyfileobj(self.stream, dst, buffer_size)\n finally:\n if close_dst:\n dst.close()\n\n def close(self):\n \"\"\"Close the underlying file if possible.\"\"\"\n try:\n self.stream.close()\n except Exception:\n pass\n\n def __bool__(self):\n return bool(self.filename)\n\n def __getattr__(self, name):\n try:\n return getattr(self.stream, name)\n except AttributeError:\n # SpooledTemporaryFile doesn't implement IOBase, get the\n # attribute from its backing file instead.\n # https://github.com/python/cpython/pull/3249\n if hasattr(self.stream, \"_file\"):\n return getattr(self.stream._file, name)\n raise\n\n def __iter__(self):\n return iter(self.stream)\n\n def __repr__(self):\n return f\"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>\"\n\n\nclass FileMultiDict(MultiDict):\n \"\"\"A special :class:`MultiDict` that has convenience methods to add\n files to it. This is used for :class:`EnvironBuilder` and generally\n useful for unittesting.\n\n .. versionadded:: 0.5\n \"\"\"\n\n def add_file(self, name, file, filename=None, content_type=None):\n \"\"\"Adds a new file to the dict. `file` can be a file name or\n a :class:`file`-like or a :class:`FileStorage` object.\n\n :param name: the name of the field.\n :param file: a filename or :class:`file`-like object\n :param filename: an optional filename\n :param content_type: an optional content type\n \"\"\"\n if isinstance(file, FileStorage):\n value = file\n else:\n if isinstance(file, str):\n if filename is None:\n filename = file\n file = open(file, \"rb\")\n if filename and content_type is None:\n content_type = (\n mimetypes.guess_type(filename)[0] or \"application/octet-stream\"\n )\n value = FileStorage(file, filename, name, content_type)\n\n self.add(name, value)\n\n\n# circular dependencies\nfrom .. import http\n", "path": "src/werkzeug/datastructures/file_storage.py"}]}
| 2,474 | 171 |
gh_patches_debug_16374
|
rasdani/github-patches
|
git_diff
|
tinygrad__tinygrad-1480
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tensor.where() bug on LLVM
when either `input_` or `other` is of `dtypes.bool`, llvmir attempts to turn bool into float: `%".16" = fpext i1 %".15" to float` but this raises
```
RuntimeError: LLVM IR parsing error
<string>:19:18: error: invalid cast opcode for cast from 'i1' to 'float'
```
to reproduce:
```
from tinygrad.lazy import Device
from tinygrad.tensor import Tensor
from tinygrad.helpers import dtypes
Device.DEFAULT = "LLVM"
cond = Tensor([0,1]) == Tensor([1,1]);
cond.where(Tensor([1,1], dtype=dtypes.bool), 0).realize()
```
prg:
```
; ModuleID = "/Users/zibo/fun/tinygrad/tinygrad/renderer/llvmir.py"
target triple = "unknown-unknown-unknown"
target datalayout = ""
define void @"E_2n1"(float* %".1", float* %".2", float* %".3", float* %".4", i1* %".5") "no-nans-fp-math"="true"
{
entry:
br label %"loop_body_gidx0"
loop_body_gidx0:
%"gidx0" = phi i64 [0, %"entry"], [%".23", %"loop_body_gidx0"]
%".8" = getelementptr inbounds float, float* %".2", i64 %"gidx0"
%".9" = load float, float* %".8"
%".10" = getelementptr inbounds float, float* %".3", i64 %"gidx0"
%".11" = load float, float* %".10"
%".12" = getelementptr inbounds float, float* %".4", i64 0
%".13" = load float, float* %".12"
%".14" = getelementptr inbounds i1, i1* %".5", i64 %"gidx0"
%".15" = load i1, i1* %".14"
%".16" = fpext i1 %".15" to float # HERE IT IS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
%".17" = fcmp fast oeq float %".9", %".11"
%".18" = uitofp i1 %".17" to float
%".19" = fcmp fast une float %".18", 0x0
%".20" = select fast i1 %".19", float %".13", float %".16"
%".21" = getelementptr inbounds float, float* %".1", i64 %"gidx0"
store float %".20", float* %".21"
%".23" = add i64 %"gidx0", 1
%".24" = icmp eq i64 %".23", 2
br i1 %".24", label %"loop_exit_gidx0", label %"loop_body_gidx0"
loop_exit_gidx0:
ret void
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tinygrad/renderer/llvmir.py`
Content:
```
1 from typing import Final, Dict, Callable, Any, List, Optional, Tuple
2 import functools
3 from llvmlite import ir # type: ignore
4 from tinygrad.codegen.linearizer import UOps, UOp, Token, MemOp, ConstOp
5 from tinygrad.helpers import dtypes
6 from tinygrad.ops import Op, UnaryOps, BinaryOps, TernaryOps
7
8 from tinygrad.shape.symbolic import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode
9 def int_const(x): return ir.Constant(ir.IntType(64), x)
10 render_llvm = {
11 NumNode: lambda self,ops,ctx: int_const(self.b),
12 MulNode: lambda self,ops,ctx: ctx.mul(self.a.render(ops,ctx), int_const(self.b)),
13 DivNode: lambda self,ops,ctx: ctx.sdiv(self.a.render(ops,ctx), int_const(self.b)),
14 ModNode: lambda self,ops,ctx: ctx.srem(self.a.render(ops,ctx), int_const(self.b)),
15 LtNode: lambda self,ops,ctx: ctx.icmp_signed("<", self.a.render(ops,ctx), int_const(self.b)),
16 SumNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.add(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx)),
17 AndNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.and_(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx))
18 }
19
20 code_for_op: Final[Dict[Op, Callable]] = {
21 UnaryOps.EXP2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.exp2', [ir.FloatType()]), [x], fastmath=('fast',)),
22 UnaryOps.LOG2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.log2', [ir.FloatType()]), [x], fastmath=('fast',)),
23 UnaryOps.SIN: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sin', [ir.FloatType()]), [x], fastmath=('fast',)),
24 UnaryOps.SQRT: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sqrt', [ir.FloatType()]), [x], fastmath=('fast',)),
25 BinaryOps.ADD: lambda builder,x,y: builder.fadd(x,y, flags=('fast',)),
26 BinaryOps.SUB: lambda builder,x,y: builder.fsub(x,y, flags=('fast',)),
27 BinaryOps.MUL: lambda builder,x,y: builder.fmul(x,y, flags=('fast',)),
28 BinaryOps.DIV: lambda builder,x,y: builder.fdiv(x,y, flags=('fast',)),
29 BinaryOps.CMPEQ: lambda builder,x,y: builder.uitofp(builder.fcmp_ordered("==", x, y, flags=('fast',)), ir.FloatType()),
30 BinaryOps.MAX: lambda builder,x,y: builder.select(builder.fcmp_unordered(">", x, y, flags=('fast',)), x, y, flags=('fast',)),
31 TernaryOps.MULACC: lambda builder,x,y,z: builder.fadd(builder.fmul(x,y, flags=('fast',)), z, flags=('fast',)),
32 TernaryOps.WHERE: lambda builder,x,y,z: builder.select(builder.fcmp_unordered("!=", x, ir.Constant(ir.FloatType(), 0), flags=('fast',)), y, z, flags=('fast',)),
33 }
34
35 def uops_to_llvm_ir(function_name:str, uops:List[UOp]) -> Tuple[str, Optional[List[int]], Optional[List[int]]]:
36 # all llvm stuff goes into a module
37 module = ir.Module(name=__file__)
38
39 # extract global buffers
40 buf_to_dtype = {args[0]:args[1] for uop,_,_,args in uops if uop == UOps.DEFINE_GLOBAL}
41 buf_index = {x:i for i,x in enumerate(buf_to_dtype.keys())}
42
43 # create llvm function
44 dtype_to_llvm_dtype = {dtypes.float16:ir.HalfType(), dtypes.bfloat16:ir.IntType(16), dtypes.float32:ir.FloatType(), dtypes.int8:ir.IntType(8), dtypes.uint8:ir.IntType(8), dtypes.bool: ir.IntType(1), dtypes.int64: ir.IntType(64), dtypes.int32: ir.IntType(32)}
45 func_dtypes = [dtype_to_llvm_dtype[dtype] for dtype in buf_to_dtype.values()]
46 func = ir.Function(module, ir.FunctionType(ir.VoidType(), [x.as_pointer() for x in func_dtypes]), name=function_name)
47
48 # force llvmlite to allow us to add function attribute then add the attribute
49 func.attributes._known = func.attributes._known.union(frozenset(['"no-nans-fp-math"="true"']))
50 func.attributes.add('"no-nans-fp-math"="true"')
51
52 bb = [ir.IRBuilder(func.append_basic_block("entry"))]
53 loop_blocks = []
54 reduce_phis: List = []
55 # TODO: newvar probably shouldn't be optional
56 lvars: Dict[Optional[Token], Any] = {} # this Any is an llvm type
57 render_llvm[Variable] = lambda self,ops,ctx: lvars[self.expr]
58
59 for uop,newvar,vin,args in uops:
60 if uop == UOps.LOOP:
61 for var in args[0]:
62 if isinstance(var, NumNode): continue
63 bb.append(ir.IRBuilder(func.append_basic_block(f"loop_body_{var.expr}")))
64 bb[-2].branch(bb[-1]._block)
65
66 phis = []
67 for rp in reduce_phis:
68 incoming = lvars[rp]
69 lvars[rp] = bb[-1].phi(ir.FloatType())
70 lvars[rp].add_incoming(incoming, bb[-2]._block)
71 phis.append((rp, lvars[rp]))
72 loop_blocks.append((bb[-1], phis))
73
74 lvars[var.expr] = bb[-1].phi(ir.IntType(64), name=var.expr)
75 lvars[var.expr].add_incoming(int_const(var.min), bb[-2]._block)
76 if uop == UOps.ENDLOOP:
77 for var in args[0][::-1]:
78 if isinstance(var, NumNode): continue
79 block, phis = loop_blocks.pop()
80 idx_p1 = bb[-1].add(lvars[var.expr], int_const(1))
81 lvars[var.expr].add_incoming(idx_p1, bb[-1]._block)
82 for n,phi in phis: phi.add_incoming(lvars[n], bb[-1]._block)
83 bb.append(ir.IRBuilder(func.append_basic_block(f"loop_exit_{var.expr}")))
84 bb[-2].cbranch(bb[-2].icmp_unsigned("==", idx_p1, int_const(var.max+1)), bb[-1]._block, block._block)
85 if uop == UOps.LOAD:
86 assert newvar is not None and isinstance(args, (MemOp, ConstOp))
87 assert newvar.dtype == dtypes.float, "newvar must be float"
88 valid = args.valid.render(render_llvm, bb[-1])
89 if isinstance(args, ConstOp):
90 if args.valid.min == 0 and args.valid.max == 1:
91 val = bb[-1].select(valid, ir.Constant(ir.FloatType(), args.value), ir.Constant(ir.FloatType(), args.invalid_value))
92 else:
93 val = ir.Constant(ir.FloatType(), args.value if args.valid.min == 1 else args.invalid_value)
94 # TODO: this is a hack. it shouldn't be const that signals this
95 reduce_phis.append(newvar)
96 else:
97 idx = args.idx.render(render_llvm, bb[-1])
98 if args.valid.max == 0:
99 val = ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value)
100 elif args.valid.min == 0:
101 aug_idx = bb[-1].select(valid, idx, int_const(0))
102 val = bb[-1].select(valid, bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [aug_idx], inbounds=True)), ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value))
103 else:
104 val = bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))
105
106 if args.memory_dtype != newvar.dtype:
107 if dtypes.is_int(args.memory_dtype):
108 val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].sitofp(val, ir.FloatType())
109 elif args.memory_dtype == dtypes.bfloat16:
110 val = bb[-1].sext(val, ir.IntType(32))
111 val = bb[-1].shl(val, ir.Constant(ir.IntType(32), 16))
112 val = bb[-1].bitcast(val, ir.FloatType())
113 else:
114 val = bb[-1].fpext(val, ir.FloatType())
115 lvars[newvar] = val
116 if uop == UOps.STORE:
117 assert args.valid.min == 1 and isinstance(args, MemOp), "store must be valid and to memory"
118 idx = args.idx.render(render_llvm, bb[-1])
119 element = lvars[vin[0]]
120 if args.memory_dtype != vin[0].dtype:
121 if dtypes.is_int(args.memory_dtype):
122 element = bb[-1].fptoui(element, dtype_to_llvm_dtype[args.memory_dtype]) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].fptosi(element, dtype_to_llvm_dtype[args.memory_dtype])
123 elif args.memory_dtype == dtypes.bfloat16:
124 element = bb[-1].bitcast(element, ir.IntType(32))
125 element = bb[-1].lshr(element, ir.Constant(ir.IntType(32), 16))
126 element = bb[-1].trunc(element, ir.IntType(16))
127 else:
128 element = bb[-1].fptrunc(element, dtype_to_llvm_dtype[args.memory_dtype])
129 bb[-1].store(element, bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))
130 if uop == UOps.ALU:
131 lvars[newvar] = code_for_op[args](bb[-1], *[lvars[x] for x in vin])
132
133 bb[-1].ret_void()
134 return str(module), None, None
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tinygrad/renderer/llvmir.py b/tinygrad/renderer/llvmir.py
--- a/tinygrad/renderer/llvmir.py
+++ b/tinygrad/renderer/llvmir.py
@@ -104,8 +104,8 @@
val = bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))
if args.memory_dtype != newvar.dtype:
- if dtypes.is_int(args.memory_dtype):
- val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].sitofp(val, ir.FloatType())
+ if dtypes.is_int(args.memory_dtype) or args.memory_dtype == dtypes.bool:
+ val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) or args.memory_dtype == dtypes.bool else bb[-1].sitofp(val, ir.FloatType())
elif args.memory_dtype == dtypes.bfloat16:
val = bb[-1].sext(val, ir.IntType(32))
val = bb[-1].shl(val, ir.Constant(ir.IntType(32), 16))
|
{"golden_diff": "diff --git a/tinygrad/renderer/llvmir.py b/tinygrad/renderer/llvmir.py\n--- a/tinygrad/renderer/llvmir.py\n+++ b/tinygrad/renderer/llvmir.py\n@@ -104,8 +104,8 @@\n val = bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))\n \n if args.memory_dtype != newvar.dtype:\n- if dtypes.is_int(args.memory_dtype):\n- val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].sitofp(val, ir.FloatType())\n+ if dtypes.is_int(args.memory_dtype) or args.memory_dtype == dtypes.bool:\n+ val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) or args.memory_dtype == dtypes.bool else bb[-1].sitofp(val, ir.FloatType())\n elif args.memory_dtype == dtypes.bfloat16:\n val = bb[-1].sext(val, ir.IntType(32))\n val = bb[-1].shl(val, ir.Constant(ir.IntType(32), 16))\n", "issue": "Tensor.where() bug on LLVM\nwhen either `input_` or `other` is of `dtypes.bool`, llvmir attempts to turn bool into float: `%\".16\" = fpext i1 %\".15\" to float` but this raises \r\n```\r\nRuntimeError: LLVM IR parsing error\r\n<string>:19:18: error: invalid cast opcode for cast from 'i1' to 'float'\r\n```\r\n\r\nto reproduce:\r\n```\r\nfrom tinygrad.lazy import Device\r\nfrom tinygrad.tensor import Tensor\r\nfrom tinygrad.helpers import dtypes\r\nDevice.DEFAULT = \"LLVM\"\r\ncond = Tensor([0,1]) == Tensor([1,1]);\r\ncond.where(Tensor([1,1], dtype=dtypes.bool), 0).realize()\r\n```\r\n\r\nprg:\r\n```\r\n; ModuleID = \"/Users/zibo/fun/tinygrad/tinygrad/renderer/llvmir.py\" \r\ntarget triple = \"unknown-unknown-unknown\"\r\ntarget datalayout = \"\" \r\n define void @\"E_2n1\"(float* %\".1\", float* %\".2\", float* %\".3\", float* %\".4\", i1* %\".5\") \"no-nans-fp-math\"=\"true\"\r\n{ \r\nentry:\r\n br label %\"loop_body_gidx0\" \r\nloop_body_gidx0: \r\n %\"gidx0\" = phi i64 [0, %\"entry\"], [%\".23\", %\"loop_body_gidx0\"] \r\n %\".8\" = getelementptr inbounds float, float* %\".2\", i64 %\"gidx0\" \r\n %\".9\" = load float, float* %\".8\" \r\n %\".10\" = getelementptr inbounds float, float* %\".3\", i64 %\"gidx0\"\r\n %\".11\" = load float, float* %\".10\" \r\n %\".12\" = getelementptr inbounds float, float* %\".4\", i64 0 \r\n %\".13\" = load float, float* %\".12\" \r\n %\".14\" = getelementptr inbounds i1, i1* %\".5\", i64 %\"gidx0\"\r\n %\".15\" = load i1, i1* %\".14\" \r\n %\".16\" = fpext i1 %\".15\" to float # HERE IT IS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n %\".17\" = fcmp fast oeq float %\".9\", %\".11\" \r\n %\".18\" = uitofp i1 %\".17\" to float \r\n %\".19\" = fcmp fast une float %\".18\", 0x0 \r\n %\".20\" = select fast i1 %\".19\", float %\".13\", float %\".16\" \r\n %\".21\" = getelementptr inbounds float, float* %\".1\", i64 %\"gidx0\" \r\n store float %\".20\", float* %\".21\" \r\n %\".23\" = add i64 %\"gidx0\", 1\r\n %\".24\" = icmp eq i64 %\".23\", 2 \r\n br i1 %\".24\", label %\"loop_exit_gidx0\", label %\"loop_body_gidx0\"\r\nloop_exit_gidx0: \r\n ret void \r\n}\r\n```\r\n\n", "before_files": [{"content": "from typing import Final, Dict, Callable, Any, List, Optional, Tuple\nimport functools\nfrom llvmlite import ir # type: ignore\nfrom tinygrad.codegen.linearizer import UOps, UOp, Token, MemOp, ConstOp\nfrom tinygrad.helpers import dtypes\nfrom tinygrad.ops import Op, UnaryOps, BinaryOps, TernaryOps\n\nfrom tinygrad.shape.symbolic import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode\ndef int_const(x): return ir.Constant(ir.IntType(64), x)\nrender_llvm = {\n NumNode: lambda self,ops,ctx: int_const(self.b),\n MulNode: lambda self,ops,ctx: ctx.mul(self.a.render(ops,ctx), int_const(self.b)),\n DivNode: lambda self,ops,ctx: ctx.sdiv(self.a.render(ops,ctx), int_const(self.b)),\n ModNode: lambda self,ops,ctx: ctx.srem(self.a.render(ops,ctx), int_const(self.b)),\n LtNode: lambda self,ops,ctx: ctx.icmp_signed(\"<\", self.a.render(ops,ctx), int_const(self.b)),\n SumNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.add(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx)),\n AndNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.and_(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx))\n}\n\ncode_for_op: Final[Dict[Op, Callable]] = {\n UnaryOps.EXP2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.exp2', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.LOG2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.log2', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.SIN: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sin', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.SQRT: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sqrt', [ir.FloatType()]), [x], fastmath=('fast',)),\n BinaryOps.ADD: lambda builder,x,y: builder.fadd(x,y, flags=('fast',)),\n BinaryOps.SUB: lambda builder,x,y: builder.fsub(x,y, flags=('fast',)),\n BinaryOps.MUL: lambda builder,x,y: builder.fmul(x,y, flags=('fast',)),\n BinaryOps.DIV: lambda builder,x,y: builder.fdiv(x,y, flags=('fast',)),\n BinaryOps.CMPEQ: lambda builder,x,y: builder.uitofp(builder.fcmp_ordered(\"==\", x, y, flags=('fast',)), ir.FloatType()),\n BinaryOps.MAX: lambda builder,x,y: builder.select(builder.fcmp_unordered(\">\", x, y, flags=('fast',)), x, y, flags=('fast',)),\n TernaryOps.MULACC: lambda builder,x,y,z: builder.fadd(builder.fmul(x,y, flags=('fast',)), z, flags=('fast',)),\n TernaryOps.WHERE: lambda builder,x,y,z: builder.select(builder.fcmp_unordered(\"!=\", x, ir.Constant(ir.FloatType(), 0), flags=('fast',)), y, z, flags=('fast',)),\n}\n\ndef uops_to_llvm_ir(function_name:str, uops:List[UOp]) -> Tuple[str, Optional[List[int]], Optional[List[int]]]:\n # all llvm stuff goes into a module\n module = ir.Module(name=__file__)\n\n # extract global buffers\n buf_to_dtype = {args[0]:args[1] for uop,_,_,args in uops if uop == UOps.DEFINE_GLOBAL}\n buf_index = {x:i for i,x in enumerate(buf_to_dtype.keys())}\n\n # create llvm function\n dtype_to_llvm_dtype = {dtypes.float16:ir.HalfType(), dtypes.bfloat16:ir.IntType(16), dtypes.float32:ir.FloatType(), dtypes.int8:ir.IntType(8), dtypes.uint8:ir.IntType(8), dtypes.bool: ir.IntType(1), dtypes.int64: ir.IntType(64), dtypes.int32: ir.IntType(32)}\n func_dtypes = [dtype_to_llvm_dtype[dtype] for dtype in buf_to_dtype.values()]\n func = ir.Function(module, ir.FunctionType(ir.VoidType(), [x.as_pointer() for x in func_dtypes]), name=function_name)\n\n # force llvmlite to allow us to add function attribute then add the attribute\n func.attributes._known = func.attributes._known.union(frozenset(['\"no-nans-fp-math\"=\"true\"']))\n func.attributes.add('\"no-nans-fp-math\"=\"true\"')\n\n bb = [ir.IRBuilder(func.append_basic_block(\"entry\"))]\n loop_blocks = []\n reduce_phis: List = []\n # TODO: newvar probably shouldn't be optional\n lvars: Dict[Optional[Token], Any] = {} # this Any is an llvm type\n render_llvm[Variable] = lambda self,ops,ctx: lvars[self.expr]\n\n for uop,newvar,vin,args in uops:\n if uop == UOps.LOOP:\n for var in args[0]:\n if isinstance(var, NumNode): continue\n bb.append(ir.IRBuilder(func.append_basic_block(f\"loop_body_{var.expr}\")))\n bb[-2].branch(bb[-1]._block)\n\n phis = []\n for rp in reduce_phis:\n incoming = lvars[rp]\n lvars[rp] = bb[-1].phi(ir.FloatType())\n lvars[rp].add_incoming(incoming, bb[-2]._block)\n phis.append((rp, lvars[rp]))\n loop_blocks.append((bb[-1], phis))\n\n lvars[var.expr] = bb[-1].phi(ir.IntType(64), name=var.expr)\n lvars[var.expr].add_incoming(int_const(var.min), bb[-2]._block)\n if uop == UOps.ENDLOOP:\n for var in args[0][::-1]:\n if isinstance(var, NumNode): continue\n block, phis = loop_blocks.pop()\n idx_p1 = bb[-1].add(lvars[var.expr], int_const(1))\n lvars[var.expr].add_incoming(idx_p1, bb[-1]._block)\n for n,phi in phis: phi.add_incoming(lvars[n], bb[-1]._block)\n bb.append(ir.IRBuilder(func.append_basic_block(f\"loop_exit_{var.expr}\")))\n bb[-2].cbranch(bb[-2].icmp_unsigned(\"==\", idx_p1, int_const(var.max+1)), bb[-1]._block, block._block)\n if uop == UOps.LOAD:\n assert newvar is not None and isinstance(args, (MemOp, ConstOp))\n assert newvar.dtype == dtypes.float, \"newvar must be float\"\n valid = args.valid.render(render_llvm, bb[-1])\n if isinstance(args, ConstOp):\n if args.valid.min == 0 and args.valid.max == 1:\n val = bb[-1].select(valid, ir.Constant(ir.FloatType(), args.value), ir.Constant(ir.FloatType(), args.invalid_value))\n else:\n val = ir.Constant(ir.FloatType(), args.value if args.valid.min == 1 else args.invalid_value)\n # TODO: this is a hack. it shouldn't be const that signals this\n reduce_phis.append(newvar)\n else:\n idx = args.idx.render(render_llvm, bb[-1])\n if args.valid.max == 0:\n val = ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value)\n elif args.valid.min == 0:\n aug_idx = bb[-1].select(valid, idx, int_const(0))\n val = bb[-1].select(valid, bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [aug_idx], inbounds=True)), ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value))\n else:\n val = bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))\n\n if args.memory_dtype != newvar.dtype:\n if dtypes.is_int(args.memory_dtype):\n val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].sitofp(val, ir.FloatType())\n elif args.memory_dtype == dtypes.bfloat16:\n val = bb[-1].sext(val, ir.IntType(32))\n val = bb[-1].shl(val, ir.Constant(ir.IntType(32), 16))\n val = bb[-1].bitcast(val, ir.FloatType())\n else:\n val = bb[-1].fpext(val, ir.FloatType())\n lvars[newvar] = val\n if uop == UOps.STORE:\n assert args.valid.min == 1 and isinstance(args, MemOp), \"store must be valid and to memory\"\n idx = args.idx.render(render_llvm, bb[-1])\n element = lvars[vin[0]]\n if args.memory_dtype != vin[0].dtype:\n if dtypes.is_int(args.memory_dtype):\n element = bb[-1].fptoui(element, dtype_to_llvm_dtype[args.memory_dtype]) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].fptosi(element, dtype_to_llvm_dtype[args.memory_dtype])\n elif args.memory_dtype == dtypes.bfloat16:\n element = bb[-1].bitcast(element, ir.IntType(32))\n element = bb[-1].lshr(element, ir.Constant(ir.IntType(32), 16))\n element = bb[-1].trunc(element, ir.IntType(16))\n else:\n element = bb[-1].fptrunc(element, dtype_to_llvm_dtype[args.memory_dtype])\n bb[-1].store(element, bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))\n if uop == UOps.ALU:\n lvars[newvar] = code_for_op[args](bb[-1], *[lvars[x] for x in vin])\n\n bb[-1].ret_void()\n return str(module), None, None\n", "path": "tinygrad/renderer/llvmir.py"}], "after_files": [{"content": "from typing import Final, Dict, Callable, Any, List, Optional, Tuple\nimport functools\nfrom llvmlite import ir # type: ignore\nfrom tinygrad.codegen.linearizer import UOps, UOp, Token, MemOp, ConstOp\nfrom tinygrad.helpers import dtypes\nfrom tinygrad.ops import Op, UnaryOps, BinaryOps, TernaryOps\n\nfrom tinygrad.shape.symbolic import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode\ndef int_const(x): return ir.Constant(ir.IntType(64), x)\nrender_llvm = {\n NumNode: lambda self,ops,ctx: int_const(self.b),\n MulNode: lambda self,ops,ctx: ctx.mul(self.a.render(ops,ctx), int_const(self.b)),\n DivNode: lambda self,ops,ctx: ctx.sdiv(self.a.render(ops,ctx), int_const(self.b)),\n ModNode: lambda self,ops,ctx: ctx.srem(self.a.render(ops,ctx), int_const(self.b)),\n LtNode: lambda self,ops,ctx: ctx.icmp_signed(\"<\", self.a.render(ops,ctx), int_const(self.b)),\n SumNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.add(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx)),\n AndNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.and_(a,b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx))\n}\n\ncode_for_op: Final[Dict[Op, Callable]] = {\n UnaryOps.EXP2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.exp2', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.LOG2: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.log2', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.SIN: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sin', [ir.FloatType()]), [x], fastmath=('fast',)),\n UnaryOps.SQRT: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sqrt', [ir.FloatType()]), [x], fastmath=('fast',)),\n BinaryOps.ADD: lambda builder,x,y: builder.fadd(x,y, flags=('fast',)),\n BinaryOps.SUB: lambda builder,x,y: builder.fsub(x,y, flags=('fast',)),\n BinaryOps.MUL: lambda builder,x,y: builder.fmul(x,y, flags=('fast',)),\n BinaryOps.DIV: lambda builder,x,y: builder.fdiv(x,y, flags=('fast',)),\n BinaryOps.CMPEQ: lambda builder,x,y: builder.uitofp(builder.fcmp_ordered(\"==\", x, y, flags=('fast',)), ir.FloatType()),\n BinaryOps.MAX: lambda builder,x,y: builder.select(builder.fcmp_unordered(\">\", x, y, flags=('fast',)), x, y, flags=('fast',)),\n TernaryOps.MULACC: lambda builder,x,y,z: builder.fadd(builder.fmul(x,y, flags=('fast',)), z, flags=('fast',)),\n TernaryOps.WHERE: lambda builder,x,y,z: builder.select(builder.fcmp_unordered(\"!=\", x, ir.Constant(ir.FloatType(), 0), flags=('fast',)), y, z, flags=('fast',)),\n}\n\ndef uops_to_llvm_ir(function_name:str, uops:List[UOp]) -> Tuple[str, Optional[List[int]], Optional[List[int]]]:\n # all llvm stuff goes into a module\n module = ir.Module(name=__file__)\n\n # extract global buffers\n buf_to_dtype = {args[0]:args[1] for uop,_,_,args in uops if uop == UOps.DEFINE_GLOBAL}\n buf_index = {x:i for i,x in enumerate(buf_to_dtype.keys())}\n\n # create llvm function\n dtype_to_llvm_dtype = {dtypes.float16:ir.HalfType(), dtypes.bfloat16:ir.IntType(16), dtypes.float32:ir.FloatType(), dtypes.int8:ir.IntType(8), dtypes.uint8:ir.IntType(8), dtypes.bool: ir.IntType(1), dtypes.int64: ir.IntType(64), dtypes.int32: ir.IntType(32)}\n func_dtypes = [dtype_to_llvm_dtype[dtype] for dtype in buf_to_dtype.values()]\n func = ir.Function(module, ir.FunctionType(ir.VoidType(), [x.as_pointer() for x in func_dtypes]), name=function_name)\n\n # force llvmlite to allow us to add function attribute then add the attribute\n func.attributes._known = func.attributes._known.union(frozenset(['\"no-nans-fp-math\"=\"true\"']))\n func.attributes.add('\"no-nans-fp-math\"=\"true\"')\n\n bb = [ir.IRBuilder(func.append_basic_block(\"entry\"))]\n loop_blocks = []\n reduce_phis: List = []\n # TODO: newvar probably shouldn't be optional\n lvars: Dict[Optional[Token], Any] = {} # this Any is an llvm type\n render_llvm[Variable] = lambda self,ops,ctx: lvars[self.expr]\n\n for uop,newvar,vin,args in uops:\n if uop == UOps.LOOP:\n for var in args[0]:\n if isinstance(var, NumNode): continue\n bb.append(ir.IRBuilder(func.append_basic_block(f\"loop_body_{var.expr}\")))\n bb[-2].branch(bb[-1]._block)\n\n phis = []\n for rp in reduce_phis:\n incoming = lvars[rp]\n lvars[rp] = bb[-1].phi(ir.FloatType())\n lvars[rp].add_incoming(incoming, bb[-2]._block)\n phis.append((rp, lvars[rp]))\n loop_blocks.append((bb[-1], phis))\n\n lvars[var.expr] = bb[-1].phi(ir.IntType(64), name=var.expr)\n lvars[var.expr].add_incoming(int_const(var.min), bb[-2]._block)\n if uop == UOps.ENDLOOP:\n for var in args[0][::-1]:\n if isinstance(var, NumNode): continue\n block, phis = loop_blocks.pop()\n idx_p1 = bb[-1].add(lvars[var.expr], int_const(1))\n lvars[var.expr].add_incoming(idx_p1, bb[-1]._block)\n for n,phi in phis: phi.add_incoming(lvars[n], bb[-1]._block)\n bb.append(ir.IRBuilder(func.append_basic_block(f\"loop_exit_{var.expr}\")))\n bb[-2].cbranch(bb[-2].icmp_unsigned(\"==\", idx_p1, int_const(var.max+1)), bb[-1]._block, block._block)\n if uop == UOps.LOAD:\n assert newvar is not None and isinstance(args, (MemOp, ConstOp))\n assert newvar.dtype == dtypes.float, \"newvar must be float\"\n valid = args.valid.render(render_llvm, bb[-1])\n if isinstance(args, ConstOp):\n if args.valid.min == 0 and args.valid.max == 1:\n val = bb[-1].select(valid, ir.Constant(ir.FloatType(), args.value), ir.Constant(ir.FloatType(), args.invalid_value))\n else:\n val = ir.Constant(ir.FloatType(), args.value if args.valid.min == 1 else args.invalid_value)\n # TODO: this is a hack. it shouldn't be const that signals this\n reduce_phis.append(newvar)\n else:\n idx = args.idx.render(render_llvm, bb[-1])\n if args.valid.max == 0:\n val = ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value)\n elif args.valid.min == 0:\n aug_idx = bb[-1].select(valid, idx, int_const(0))\n val = bb[-1].select(valid, bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [aug_idx], inbounds=True)), ir.Constant(dtype_to_llvm_dtype[args.memory_dtype], args.invalid_value))\n else:\n val = bb[-1].load(bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))\n\n if args.memory_dtype != newvar.dtype:\n if dtypes.is_int(args.memory_dtype) or args.memory_dtype == dtypes.bool:\n val = bb[-1].uitofp(val, ir.FloatType()) if dtypes.is_unsigned(args.memory_dtype) or args.memory_dtype == dtypes.bool else bb[-1].sitofp(val, ir.FloatType())\n elif args.memory_dtype == dtypes.bfloat16:\n val = bb[-1].sext(val, ir.IntType(32))\n val = bb[-1].shl(val, ir.Constant(ir.IntType(32), 16))\n val = bb[-1].bitcast(val, ir.FloatType())\n else:\n val = bb[-1].fpext(val, ir.FloatType())\n lvars[newvar] = val\n if uop == UOps.STORE:\n assert args.valid.min == 1 and isinstance(args, MemOp), \"store must be valid and to memory\"\n idx = args.idx.render(render_llvm, bb[-1])\n element = lvars[vin[0]]\n if args.memory_dtype != vin[0].dtype:\n if dtypes.is_int(args.memory_dtype):\n element = bb[-1].fptoui(element, dtype_to_llvm_dtype[args.memory_dtype]) if dtypes.is_unsigned(args.memory_dtype) else bb[-1].fptosi(element, dtype_to_llvm_dtype[args.memory_dtype])\n elif args.memory_dtype == dtypes.bfloat16:\n element = bb[-1].bitcast(element, ir.IntType(32))\n element = bb[-1].lshr(element, ir.Constant(ir.IntType(32), 16))\n element = bb[-1].trunc(element, ir.IntType(16))\n else:\n element = bb[-1].fptrunc(element, dtype_to_llvm_dtype[args.memory_dtype])\n bb[-1].store(element, bb[-1].gep(func.args[buf_index[args.name]], [idx], inbounds=True))\n if uop == UOps.ALU:\n lvars[newvar] = code_for_op[args](bb[-1], *[lvars[x] for x in vin])\n\n bb[-1].ret_void()\n return str(module), None, None\n", "path": "tinygrad/renderer/llvmir.py"}]}
| 3,658 | 268 |
gh_patches_debug_34740
|
rasdani/github-patches
|
git_diff
|
apache__airflow-24215
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to specify Python version for AwsGlueJobOperator
### Apache Airflow Provider(s)
amazon
### Versions of Apache Airflow Providers
_No response_
### Apache Airflow version
2.0.2
### Operating System
Amazon Linux
### Deployment
MWAA
### Deployment details
_No response_
### What happened
When a new Glue job is created using the AwsGlueJobOperator, the job is defaulting to Python2. Setting the version in create_job_kwargs fails with key error.
### What you expected to happen
Expected the Glue job to be created with a Python3 runtime. create_job_kwargs are passed to the boto3 glue client create_job method which includes a "Command" parameter that is a dictionary containing the Python version.
### How to reproduce
Create a dag with an AwsGlueJobOperator and pass a "Command" parameter in the create_job_kwargs argument.
```
create_glue_job_args = {
"Command": {
"Name": "abalone-preprocess",
"ScriptLocation": f"s3://{output_bucket}/code/preprocess.py",
"PythonVersion": "3"
}
}
glue_etl = AwsGlueJobOperator(
task_id="glue_etl",
s3_bucket=output_bucket,
script_args={
'--S3_INPUT_BUCKET': data_bucket,
'--S3_INPUT_KEY_PREFIX': 'input/raw',
'--S3_UPLOADS_KEY_PREFIX': 'input/uploads',
'--S3_OUTPUT_BUCKET': output_bucket,
'--S3_OUTPUT_KEY_PREFIX': str(determine_dataset_id.output) +'/input/data'
},
iam_role_name="MLOps",
retry_limit=2,
concurrent_run_limit=3,
create_job_kwargs=create_glue_job_args,
dag=dag)
```
```
[2022-01-04 16:43:42,053] {{logging_mixin.py:104}} INFO - [2022-01-04 16:43:42,053] {{glue.py:190}} ERROR - Failed to create aws glue job, error: 'Command'
[2022-01-04 16:43:42,081] {{logging_mixin.py:104}} INFO - [2022-01-04 16:43:42,081] {{glue.py:112}} ERROR - Failed to run aws glue job, error: 'Command'
[2022-01-04 16:43:42,101] {{taskinstance.py:1482}} ERROR - Task failed with exception
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py", line 166, in get_or_create_glue_job
get_job_response = glue_client.get_job(JobName=self.job_name)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.EntityNotFoundException: An error occurred (EntityNotFoundException) when calling the GetJob operation: Job with name: abalone-preprocess not found.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1138, in _run_raw_task
self._prepare_and_execute_task_with_callbacks(context, task)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1311, in _prepare_and_execute_task_with_callbacks
result = self._execute_task(context, task_copy)
File "/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py", line 1341, in _execute_task
result = task_copy.execute(context=context)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/operators/glue.py", line 121, in execute
glue_job_run = glue_job.initialize_job(self.script_args)
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py", line 108, in initialize_job
job_name = self.get_or_create_glue_job()
File "/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py", line 186, in get_or_create_glue_job
**self.create_job_kwargs,
KeyError: 'Command'
```
### Anything else
When a new job is being created.
### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/amazon/aws/hooks/glue.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import time
20 import warnings
21 from typing import Dict, List, Optional
22
23 from airflow.exceptions import AirflowException
24 from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
25
26
27 class GlueJobHook(AwsBaseHook):
28 """
29 Interact with AWS Glue - create job, trigger, crawler
30
31 :param s3_bucket: S3 bucket where logs and local etl script will be uploaded
32 :param job_name: unique job name per AWS account
33 :param desc: job description
34 :param concurrent_run_limit: The maximum number of concurrent runs allowed for a job
35 :param script_location: path to etl script on s3
36 :param retry_limit: Maximum number of times to retry this job if it fails
37 :param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job
38 :param region_name: aws region name (example: us-east-1)
39 :param iam_role_name: AWS IAM Role for Glue Job Execution
40 :param create_job_kwargs: Extra arguments for Glue Job Creation
41 """
42
43 JOB_POLL_INTERVAL = 6 # polls job status after every JOB_POLL_INTERVAL seconds
44
45 def __init__(
46 self,
47 s3_bucket: Optional[str] = None,
48 job_name: Optional[str] = None,
49 desc: Optional[str] = None,
50 concurrent_run_limit: int = 1,
51 script_location: Optional[str] = None,
52 retry_limit: int = 0,
53 num_of_dpus: Optional[int] = None,
54 iam_role_name: Optional[str] = None,
55 create_job_kwargs: Optional[dict] = None,
56 *args,
57 **kwargs,
58 ):
59 self.job_name = job_name
60 self.desc = desc
61 self.concurrent_run_limit = concurrent_run_limit
62 self.script_location = script_location
63 self.retry_limit = retry_limit
64 self.s3_bucket = s3_bucket
65 self.role_name = iam_role_name
66 self.s3_glue_logs = 'logs/glue-logs/'
67 self.create_job_kwargs = create_job_kwargs or {}
68
69 worker_type_exists = "WorkerType" in self.create_job_kwargs
70 num_workers_exists = "NumberOfWorkers" in self.create_job_kwargs
71
72 if worker_type_exists and num_workers_exists:
73 if num_of_dpus is not None:
74 raise ValueError("Cannot specify num_of_dpus with custom WorkerType")
75 elif not worker_type_exists and num_workers_exists:
76 raise ValueError("Need to specify custom WorkerType when specifying NumberOfWorkers")
77 elif worker_type_exists and not num_workers_exists:
78 raise ValueError("Need to specify NumberOfWorkers when specifying custom WorkerType")
79 elif num_of_dpus is None:
80 self.num_of_dpus = 10
81 else:
82 self.num_of_dpus = num_of_dpus
83
84 kwargs['client_type'] = 'glue'
85 super().__init__(*args, **kwargs)
86
87 def list_jobs(self) -> List:
88 """:return: Lists of Jobs"""
89 conn = self.get_conn()
90 return conn.get_jobs()
91
92 def get_iam_execution_role(self) -> Dict:
93 """:return: iam role for job execution"""
94 session, endpoint_url = self._get_credentials(region_name=self.region_name)
95 iam_client = session.client('iam', endpoint_url=endpoint_url, config=self.config, verify=self.verify)
96
97 try:
98 glue_execution_role = iam_client.get_role(RoleName=self.role_name)
99 self.log.info("Iam Role Name: %s", self.role_name)
100 return glue_execution_role
101 except Exception as general_error:
102 self.log.error("Failed to create aws glue job, error: %s", general_error)
103 raise
104
105 def initialize_job(
106 self,
107 script_arguments: Optional[dict] = None,
108 run_kwargs: Optional[dict] = None,
109 ) -> Dict[str, str]:
110 """
111 Initializes connection with AWS Glue
112 to run job
113 :return:
114 """
115 glue_client = self.get_conn()
116 script_arguments = script_arguments or {}
117 run_kwargs = run_kwargs or {}
118
119 try:
120 job_name = self.get_or_create_glue_job()
121 return glue_client.start_job_run(JobName=job_name, Arguments=script_arguments, **run_kwargs)
122
123 except Exception as general_error:
124 self.log.error("Failed to run aws glue job, error: %s", general_error)
125 raise
126
127 def get_job_state(self, job_name: str, run_id: str) -> str:
128 """
129 Get state of the Glue job. The job state can be
130 running, finished, failed, stopped or timeout.
131 :param job_name: unique job name per AWS account
132 :param run_id: The job-run ID of the predecessor job run
133 :return: State of the Glue job
134 """
135 glue_client = self.get_conn()
136 job_run = glue_client.get_job_run(JobName=job_name, RunId=run_id, PredecessorsIncluded=True)
137 return job_run['JobRun']['JobRunState']
138
139 def job_completion(self, job_name: str, run_id: str) -> Dict[str, str]:
140 """
141 Waits until Glue job with job_name completes or
142 fails and return final state if finished.
143 Raises AirflowException when the job failed
144 :param job_name: unique job name per AWS account
145 :param run_id: The job-run ID of the predecessor job run
146 :return: Dict of JobRunState and JobRunId
147 """
148 failed_states = ['FAILED', 'TIMEOUT']
149 finished_states = ['SUCCEEDED', 'STOPPED']
150
151 while True:
152 job_run_state = self.get_job_state(job_name, run_id)
153 if job_run_state in finished_states:
154 self.log.info("Exiting Job %s Run State: %s", run_id, job_run_state)
155 return {'JobRunState': job_run_state, 'JobRunId': run_id}
156 if job_run_state in failed_states:
157 job_error_message = f"Exiting Job {run_id} Run State: {job_run_state}"
158 self.log.info(job_error_message)
159 raise AirflowException(job_error_message)
160 else:
161 self.log.info(
162 "Polling for AWS Glue Job %s current run state with status %s", job_name, job_run_state
163 )
164 time.sleep(self.JOB_POLL_INTERVAL)
165
166 def get_or_create_glue_job(self) -> str:
167 """
168 Creates(or just returns) and returns the Job name
169 :return:Name of the Job
170 """
171 glue_client = self.get_conn()
172 try:
173 get_job_response = glue_client.get_job(JobName=self.job_name)
174 self.log.info("Job Already exist. Returning Name of the job")
175 return get_job_response['Job']['Name']
176
177 except glue_client.exceptions.EntityNotFoundException:
178 self.log.info("Job doesn't exist. Now creating and running AWS Glue Job")
179 if self.s3_bucket is None:
180 raise AirflowException('Could not initialize glue job, error: Specify Parameter `s3_bucket`')
181 s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'
182 execution_role = self.get_iam_execution_role()
183 try:
184 if "WorkerType" in self.create_job_kwargs and "NumberOfWorkers" in self.create_job_kwargs:
185 create_job_response = glue_client.create_job(
186 Name=self.job_name,
187 Description=self.desc,
188 LogUri=s3_log_path,
189 Role=execution_role['Role']['Arn'],
190 ExecutionProperty={"MaxConcurrentRuns": self.concurrent_run_limit},
191 Command={"Name": "glueetl", "ScriptLocation": self.script_location},
192 MaxRetries=self.retry_limit,
193 **self.create_job_kwargs,
194 )
195 else:
196 create_job_response = glue_client.create_job(
197 Name=self.job_name,
198 Description=self.desc,
199 LogUri=s3_log_path,
200 Role=execution_role['Role']['Arn'],
201 ExecutionProperty={"MaxConcurrentRuns": self.concurrent_run_limit},
202 Command={"Name": "glueetl", "ScriptLocation": self.script_location},
203 MaxRetries=self.retry_limit,
204 MaxCapacity=self.num_of_dpus,
205 **self.create_job_kwargs,
206 )
207 return create_job_response['Name']
208 except Exception as general_error:
209 self.log.error("Failed to create aws glue job, error: %s", general_error)
210 raise
211
212
213 class AwsGlueJobHook(GlueJobHook):
214 """
215 This hook is deprecated.
216 Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.
217 """
218
219 def __init__(self, *args, **kwargs):
220 warnings.warn(
221 "This hook is deprecated. "
222 "Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.",
223 DeprecationWarning,
224 stacklevel=2,
225 )
226 super().__init__(*args, **kwargs)
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/providers/amazon/aws/hooks/glue.py b/airflow/providers/amazon/aws/hooks/glue.py
--- a/airflow/providers/amazon/aws/hooks/glue.py
+++ b/airflow/providers/amazon/aws/hooks/glue.py
@@ -181,6 +181,12 @@
s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'
execution_role = self.get_iam_execution_role()
try:
+ default_command = {
+ "Name": "glueetl",
+ "ScriptLocation": self.script_location,
+ }
+ command = self.create_job_kwargs.get("Command", default_command)
+
if "WorkerType" in self.create_job_kwargs and "NumberOfWorkers" in self.create_job_kwargs:
create_job_response = glue_client.create_job(
Name=self.job_name,
@@ -188,7 +194,7 @@
LogUri=s3_log_path,
Role=execution_role['Role']['Arn'],
ExecutionProperty={"MaxConcurrentRuns": self.concurrent_run_limit},
- Command={"Name": "glueetl", "ScriptLocation": self.script_location},
+ Command=command,
MaxRetries=self.retry_limit,
**self.create_job_kwargs,
)
@@ -199,7 +205,7 @@
LogUri=s3_log_path,
Role=execution_role['Role']['Arn'],
ExecutionProperty={"MaxConcurrentRuns": self.concurrent_run_limit},
- Command={"Name": "glueetl", "ScriptLocation": self.script_location},
+ Command=command,
MaxRetries=self.retry_limit,
MaxCapacity=self.num_of_dpus,
**self.create_job_kwargs,
|
{"golden_diff": "diff --git a/airflow/providers/amazon/aws/hooks/glue.py b/airflow/providers/amazon/aws/hooks/glue.py\n--- a/airflow/providers/amazon/aws/hooks/glue.py\n+++ b/airflow/providers/amazon/aws/hooks/glue.py\n@@ -181,6 +181,12 @@\n s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'\n execution_role = self.get_iam_execution_role()\n try:\n+ default_command = {\n+ \"Name\": \"glueetl\",\n+ \"ScriptLocation\": self.script_location,\n+ }\n+ command = self.create_job_kwargs.get(\"Command\", default_command)\n+\n if \"WorkerType\" in self.create_job_kwargs and \"NumberOfWorkers\" in self.create_job_kwargs:\n create_job_response = glue_client.create_job(\n Name=self.job_name,\n@@ -188,7 +194,7 @@\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n- Command={\"Name\": \"glueetl\", \"ScriptLocation\": self.script_location},\n+ Command=command,\n MaxRetries=self.retry_limit,\n **self.create_job_kwargs,\n )\n@@ -199,7 +205,7 @@\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n- Command={\"Name\": \"glueetl\", \"ScriptLocation\": self.script_location},\n+ Command=command,\n MaxRetries=self.retry_limit,\n MaxCapacity=self.num_of_dpus,\n **self.create_job_kwargs,\n", "issue": "Unable to specify Python version for AwsGlueJobOperator\n### Apache Airflow Provider(s)\n\namazon\n\n### Versions of Apache Airflow Providers\n\n_No response_\n\n### Apache Airflow version\n\n2.0.2\n\n### Operating System\n\nAmazon Linux\n\n### Deployment\n\nMWAA\n\n### Deployment details\n\n_No response_\n\n### What happened\n\nWhen a new Glue job is created using the AwsGlueJobOperator, the job is defaulting to Python2. Setting the version in create_job_kwargs fails with key error.\n\n### What you expected to happen\n\nExpected the Glue job to be created with a Python3 runtime. create_job_kwargs are passed to the boto3 glue client create_job method which includes a \"Command\" parameter that is a dictionary containing the Python version.\r\n\r\n\n\n### How to reproduce\n\nCreate a dag with an AwsGlueJobOperator and pass a \"Command\" parameter in the create_job_kwargs argument.\r\n\r\n```\r\n create_glue_job_args = {\r\n \"Command\": {\r\n \"Name\": \"abalone-preprocess\",\r\n \"ScriptLocation\": f\"s3://{output_bucket}/code/preprocess.py\",\r\n \"PythonVersion\": \"3\"\r\n }\r\n }\r\n glue_etl = AwsGlueJobOperator( \r\n task_id=\"glue_etl\", \r\n s3_bucket=output_bucket,\r\n script_args={\r\n '--S3_INPUT_BUCKET': data_bucket,\r\n '--S3_INPUT_KEY_PREFIX': 'input/raw',\r\n '--S3_UPLOADS_KEY_PREFIX': 'input/uploads',\r\n '--S3_OUTPUT_BUCKET': output_bucket,\r\n '--S3_OUTPUT_KEY_PREFIX': str(determine_dataset_id.output) +'/input/data' \r\n },\r\n iam_role_name=\"MLOps\", \r\n retry_limit=2,\r\n concurrent_run_limit=3,\r\n create_job_kwargs=create_glue_job_args,\r\n dag=dag) \r\n```\r\n\r\n```\r\n[2022-01-04 16:43:42,053] {{logging_mixin.py:104}} INFO - [2022-01-04 16:43:42,053] {{glue.py:190}} ERROR - Failed to create aws glue job, error: 'Command'\r\n[2022-01-04 16:43:42,081] {{logging_mixin.py:104}} INFO - [2022-01-04 16:43:42,081] {{glue.py:112}} ERROR - Failed to run aws glue job, error: 'Command'\r\n[2022-01-04 16:43:42,101] {{taskinstance.py:1482}} ERROR - Task failed with exception\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py\", line 166, in get_or_create_glue_job\r\n get_job_response = glue_client.get_job(JobName=self.job_name)\r\n File \"/usr/local/lib/python3.7/site-packages/botocore/client.py\", line 357, in _api_call\r\n return self._make_api_call(operation_name, kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/botocore/client.py\", line 676, in _make_api_call\r\n raise error_class(parsed_response, operation_name)\r\nbotocore.errorfactory.EntityNotFoundException: An error occurred (EntityNotFoundException) when calling the GetJob operation: Job with name: abalone-preprocess not found.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py\", line 1138, in _run_raw_task\r\n self._prepare_and_execute_task_with_callbacks(context, task)\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py\", line 1311, in _prepare_and_execute_task_with_callbacks\r\n result = self._execute_task(context, task_copy)\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/models/taskinstance.py\", line 1341, in _execute_task\r\n result = task_copy.execute(context=context)\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/operators/glue.py\", line 121, in execute\r\n glue_job_run = glue_job.initialize_job(self.script_args)\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py\", line 108, in initialize_job\r\n job_name = self.get_or_create_glue_job()\r\n File \"/usr/local/lib/python3.7/site-packages/airflow/providers/amazon/aws/hooks/glue.py\", line 186, in get_or_create_glue_job\r\n **self.create_job_kwargs,\r\nKeyError: 'Command'\r\n```\n\n### Anything else\n\nWhen a new job is being created.\n\n### Are you willing to submit PR?\n\n- [ ] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport warnings\nfrom typing import Dict, List, Optional\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook\n\n\nclass GlueJobHook(AwsBaseHook):\n \"\"\"\n Interact with AWS Glue - create job, trigger, crawler\n\n :param s3_bucket: S3 bucket where logs and local etl script will be uploaded\n :param job_name: unique job name per AWS account\n :param desc: job description\n :param concurrent_run_limit: The maximum number of concurrent runs allowed for a job\n :param script_location: path to etl script on s3\n :param retry_limit: Maximum number of times to retry this job if it fails\n :param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job\n :param region_name: aws region name (example: us-east-1)\n :param iam_role_name: AWS IAM Role for Glue Job Execution\n :param create_job_kwargs: Extra arguments for Glue Job Creation\n \"\"\"\n\n JOB_POLL_INTERVAL = 6 # polls job status after every JOB_POLL_INTERVAL seconds\n\n def __init__(\n self,\n s3_bucket: Optional[str] = None,\n job_name: Optional[str] = None,\n desc: Optional[str] = None,\n concurrent_run_limit: int = 1,\n script_location: Optional[str] = None,\n retry_limit: int = 0,\n num_of_dpus: Optional[int] = None,\n iam_role_name: Optional[str] = None,\n create_job_kwargs: Optional[dict] = None,\n *args,\n **kwargs,\n ):\n self.job_name = job_name\n self.desc = desc\n self.concurrent_run_limit = concurrent_run_limit\n self.script_location = script_location\n self.retry_limit = retry_limit\n self.s3_bucket = s3_bucket\n self.role_name = iam_role_name\n self.s3_glue_logs = 'logs/glue-logs/'\n self.create_job_kwargs = create_job_kwargs or {}\n\n worker_type_exists = \"WorkerType\" in self.create_job_kwargs\n num_workers_exists = \"NumberOfWorkers\" in self.create_job_kwargs\n\n if worker_type_exists and num_workers_exists:\n if num_of_dpus is not None:\n raise ValueError(\"Cannot specify num_of_dpus with custom WorkerType\")\n elif not worker_type_exists and num_workers_exists:\n raise ValueError(\"Need to specify custom WorkerType when specifying NumberOfWorkers\")\n elif worker_type_exists and not num_workers_exists:\n raise ValueError(\"Need to specify NumberOfWorkers when specifying custom WorkerType\")\n elif num_of_dpus is None:\n self.num_of_dpus = 10\n else:\n self.num_of_dpus = num_of_dpus\n\n kwargs['client_type'] = 'glue'\n super().__init__(*args, **kwargs)\n\n def list_jobs(self) -> List:\n \"\"\":return: Lists of Jobs\"\"\"\n conn = self.get_conn()\n return conn.get_jobs()\n\n def get_iam_execution_role(self) -> Dict:\n \"\"\":return: iam role for job execution\"\"\"\n session, endpoint_url = self._get_credentials(region_name=self.region_name)\n iam_client = session.client('iam', endpoint_url=endpoint_url, config=self.config, verify=self.verify)\n\n try:\n glue_execution_role = iam_client.get_role(RoleName=self.role_name)\n self.log.info(\"Iam Role Name: %s\", self.role_name)\n return glue_execution_role\n except Exception as general_error:\n self.log.error(\"Failed to create aws glue job, error: %s\", general_error)\n raise\n\n def initialize_job(\n self,\n script_arguments: Optional[dict] = None,\n run_kwargs: Optional[dict] = None,\n ) -> Dict[str, str]:\n \"\"\"\n Initializes connection with AWS Glue\n to run job\n :return:\n \"\"\"\n glue_client = self.get_conn()\n script_arguments = script_arguments or {}\n run_kwargs = run_kwargs or {}\n\n try:\n job_name = self.get_or_create_glue_job()\n return glue_client.start_job_run(JobName=job_name, Arguments=script_arguments, **run_kwargs)\n\n except Exception as general_error:\n self.log.error(\"Failed to run aws glue job, error: %s\", general_error)\n raise\n\n def get_job_state(self, job_name: str, run_id: str) -> str:\n \"\"\"\n Get state of the Glue job. The job state can be\n running, finished, failed, stopped or timeout.\n :param job_name: unique job name per AWS account\n :param run_id: The job-run ID of the predecessor job run\n :return: State of the Glue job\n \"\"\"\n glue_client = self.get_conn()\n job_run = glue_client.get_job_run(JobName=job_name, RunId=run_id, PredecessorsIncluded=True)\n return job_run['JobRun']['JobRunState']\n\n def job_completion(self, job_name: str, run_id: str) -> Dict[str, str]:\n \"\"\"\n Waits until Glue job with job_name completes or\n fails and return final state if finished.\n Raises AirflowException when the job failed\n :param job_name: unique job name per AWS account\n :param run_id: The job-run ID of the predecessor job run\n :return: Dict of JobRunState and JobRunId\n \"\"\"\n failed_states = ['FAILED', 'TIMEOUT']\n finished_states = ['SUCCEEDED', 'STOPPED']\n\n while True:\n job_run_state = self.get_job_state(job_name, run_id)\n if job_run_state in finished_states:\n self.log.info(\"Exiting Job %s Run State: %s\", run_id, job_run_state)\n return {'JobRunState': job_run_state, 'JobRunId': run_id}\n if job_run_state in failed_states:\n job_error_message = f\"Exiting Job {run_id} Run State: {job_run_state}\"\n self.log.info(job_error_message)\n raise AirflowException(job_error_message)\n else:\n self.log.info(\n \"Polling for AWS Glue Job %s current run state with status %s\", job_name, job_run_state\n )\n time.sleep(self.JOB_POLL_INTERVAL)\n\n def get_or_create_glue_job(self) -> str:\n \"\"\"\n Creates(or just returns) and returns the Job name\n :return:Name of the Job\n \"\"\"\n glue_client = self.get_conn()\n try:\n get_job_response = glue_client.get_job(JobName=self.job_name)\n self.log.info(\"Job Already exist. Returning Name of the job\")\n return get_job_response['Job']['Name']\n\n except glue_client.exceptions.EntityNotFoundException:\n self.log.info(\"Job doesn't exist. Now creating and running AWS Glue Job\")\n if self.s3_bucket is None:\n raise AirflowException('Could not initialize glue job, error: Specify Parameter `s3_bucket`')\n s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'\n execution_role = self.get_iam_execution_role()\n try:\n if \"WorkerType\" in self.create_job_kwargs and \"NumberOfWorkers\" in self.create_job_kwargs:\n create_job_response = glue_client.create_job(\n Name=self.job_name,\n Description=self.desc,\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n Command={\"Name\": \"glueetl\", \"ScriptLocation\": self.script_location},\n MaxRetries=self.retry_limit,\n **self.create_job_kwargs,\n )\n else:\n create_job_response = glue_client.create_job(\n Name=self.job_name,\n Description=self.desc,\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n Command={\"Name\": \"glueetl\", \"ScriptLocation\": self.script_location},\n MaxRetries=self.retry_limit,\n MaxCapacity=self.num_of_dpus,\n **self.create_job_kwargs,\n )\n return create_job_response['Name']\n except Exception as general_error:\n self.log.error(\"Failed to create aws glue job, error: %s\", general_error)\n raise\n\n\nclass AwsGlueJobHook(GlueJobHook):\n \"\"\"\n This hook is deprecated.\n Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n \"This hook is deprecated. \"\n \"Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.\",\n DeprecationWarning,\n stacklevel=2,\n )\n super().__init__(*args, **kwargs)\n", "path": "airflow/providers/amazon/aws/hooks/glue.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport warnings\nfrom typing import Dict, List, Optional\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook\n\n\nclass GlueJobHook(AwsBaseHook):\n \"\"\"\n Interact with AWS Glue - create job, trigger, crawler\n\n :param s3_bucket: S3 bucket where logs and local etl script will be uploaded\n :param job_name: unique job name per AWS account\n :param desc: job description\n :param concurrent_run_limit: The maximum number of concurrent runs allowed for a job\n :param script_location: path to etl script on s3\n :param retry_limit: Maximum number of times to retry this job if it fails\n :param num_of_dpus: Number of AWS Glue DPUs to allocate to this Job\n :param region_name: aws region name (example: us-east-1)\n :param iam_role_name: AWS IAM Role for Glue Job Execution\n :param create_job_kwargs: Extra arguments for Glue Job Creation\n \"\"\"\n\n JOB_POLL_INTERVAL = 6 # polls job status after every JOB_POLL_INTERVAL seconds\n\n def __init__(\n self,\n s3_bucket: Optional[str] = None,\n job_name: Optional[str] = None,\n desc: Optional[str] = None,\n concurrent_run_limit: int = 1,\n script_location: Optional[str] = None,\n retry_limit: int = 0,\n num_of_dpus: Optional[int] = None,\n iam_role_name: Optional[str] = None,\n create_job_kwargs: Optional[dict] = None,\n *args,\n **kwargs,\n ):\n self.job_name = job_name\n self.desc = desc\n self.concurrent_run_limit = concurrent_run_limit\n self.script_location = script_location\n self.retry_limit = retry_limit\n self.s3_bucket = s3_bucket\n self.role_name = iam_role_name\n self.s3_glue_logs = 'logs/glue-logs/'\n self.create_job_kwargs = create_job_kwargs or {}\n\n worker_type_exists = \"WorkerType\" in self.create_job_kwargs\n num_workers_exists = \"NumberOfWorkers\" in self.create_job_kwargs\n\n if worker_type_exists and num_workers_exists:\n if num_of_dpus is not None:\n raise ValueError(\"Cannot specify num_of_dpus with custom WorkerType\")\n elif not worker_type_exists and num_workers_exists:\n raise ValueError(\"Need to specify custom WorkerType when specifying NumberOfWorkers\")\n elif worker_type_exists and not num_workers_exists:\n raise ValueError(\"Need to specify NumberOfWorkers when specifying custom WorkerType\")\n elif num_of_dpus is None:\n self.num_of_dpus = 10\n else:\n self.num_of_dpus = num_of_dpus\n\n kwargs['client_type'] = 'glue'\n super().__init__(*args, **kwargs)\n\n def list_jobs(self) -> List:\n \"\"\":return: Lists of Jobs\"\"\"\n conn = self.get_conn()\n return conn.get_jobs()\n\n def get_iam_execution_role(self) -> Dict:\n \"\"\":return: iam role for job execution\"\"\"\n session, endpoint_url = self._get_credentials(region_name=self.region_name)\n iam_client = session.client('iam', endpoint_url=endpoint_url, config=self.config, verify=self.verify)\n\n try:\n glue_execution_role = iam_client.get_role(RoleName=self.role_name)\n self.log.info(\"Iam Role Name: %s\", self.role_name)\n return glue_execution_role\n except Exception as general_error:\n self.log.error(\"Failed to create aws glue job, error: %s\", general_error)\n raise\n\n def initialize_job(\n self,\n script_arguments: Optional[dict] = None,\n run_kwargs: Optional[dict] = None,\n ) -> Dict[str, str]:\n \"\"\"\n Initializes connection with AWS Glue\n to run job\n :return:\n \"\"\"\n glue_client = self.get_conn()\n script_arguments = script_arguments or {}\n run_kwargs = run_kwargs or {}\n\n try:\n job_name = self.get_or_create_glue_job()\n return glue_client.start_job_run(JobName=job_name, Arguments=script_arguments, **run_kwargs)\n\n except Exception as general_error:\n self.log.error(\"Failed to run aws glue job, error: %s\", general_error)\n raise\n\n def get_job_state(self, job_name: str, run_id: str) -> str:\n \"\"\"\n Get state of the Glue job. The job state can be\n running, finished, failed, stopped or timeout.\n :param job_name: unique job name per AWS account\n :param run_id: The job-run ID of the predecessor job run\n :return: State of the Glue job\n \"\"\"\n glue_client = self.get_conn()\n job_run = glue_client.get_job_run(JobName=job_name, RunId=run_id, PredecessorsIncluded=True)\n return job_run['JobRun']['JobRunState']\n\n def job_completion(self, job_name: str, run_id: str) -> Dict[str, str]:\n \"\"\"\n Waits until Glue job with job_name completes or\n fails and return final state if finished.\n Raises AirflowException when the job failed\n :param job_name: unique job name per AWS account\n :param run_id: The job-run ID of the predecessor job run\n :return: Dict of JobRunState and JobRunId\n \"\"\"\n failed_states = ['FAILED', 'TIMEOUT']\n finished_states = ['SUCCEEDED', 'STOPPED']\n\n while True:\n job_run_state = self.get_job_state(job_name, run_id)\n if job_run_state in finished_states:\n self.log.info(\"Exiting Job %s Run State: %s\", run_id, job_run_state)\n return {'JobRunState': job_run_state, 'JobRunId': run_id}\n if job_run_state in failed_states:\n job_error_message = f\"Exiting Job {run_id} Run State: {job_run_state}\"\n self.log.info(job_error_message)\n raise AirflowException(job_error_message)\n else:\n self.log.info(\n \"Polling for AWS Glue Job %s current run state with status %s\", job_name, job_run_state\n )\n time.sleep(self.JOB_POLL_INTERVAL)\n\n def get_or_create_glue_job(self) -> str:\n \"\"\"\n Creates(or just returns) and returns the Job name\n :return:Name of the Job\n \"\"\"\n glue_client = self.get_conn()\n try:\n get_job_response = glue_client.get_job(JobName=self.job_name)\n self.log.info(\"Job Already exist. Returning Name of the job\")\n return get_job_response['Job']['Name']\n\n except glue_client.exceptions.EntityNotFoundException:\n self.log.info(\"Job doesn't exist. Now creating and running AWS Glue Job\")\n if self.s3_bucket is None:\n raise AirflowException('Could not initialize glue job, error: Specify Parameter `s3_bucket`')\n s3_log_path = f's3://{self.s3_bucket}/{self.s3_glue_logs}{self.job_name}'\n execution_role = self.get_iam_execution_role()\n try:\n default_command = {\n \"Name\": \"glueetl\",\n \"ScriptLocation\": self.script_location,\n }\n command = self.create_job_kwargs.get(\"Command\", default_command)\n\n if \"WorkerType\" in self.create_job_kwargs and \"NumberOfWorkers\" in self.create_job_kwargs:\n create_job_response = glue_client.create_job(\n Name=self.job_name,\n Description=self.desc,\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n Command=command,\n MaxRetries=self.retry_limit,\n **self.create_job_kwargs,\n )\n else:\n create_job_response = glue_client.create_job(\n Name=self.job_name,\n Description=self.desc,\n LogUri=s3_log_path,\n Role=execution_role['Role']['Arn'],\n ExecutionProperty={\"MaxConcurrentRuns\": self.concurrent_run_limit},\n Command=command,\n MaxRetries=self.retry_limit,\n MaxCapacity=self.num_of_dpus,\n **self.create_job_kwargs,\n )\n return create_job_response['Name']\n except Exception as general_error:\n self.log.error(\"Failed to create aws glue job, error: %s\", general_error)\n raise\n\n\nclass AwsGlueJobHook(GlueJobHook):\n \"\"\"\n This hook is deprecated.\n Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\n \"This hook is deprecated. \"\n \"Please use :class:`airflow.providers.amazon.aws.hooks.glue.GlueJobHook`.\",\n DeprecationWarning,\n stacklevel=2,\n )\n super().__init__(*args, **kwargs)\n", "path": "airflow/providers/amazon/aws/hooks/glue.py"}]}
| 4,090 | 381 |
gh_patches_debug_19619
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-2946
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Up arrow key history search gets stuck in QTConsole
With the ipython installed with with commit 50a583890fd0640492fcd76d6ecc97b5c3f23ae0, going through history with up arrow key in qtconsole works without any problem. But in current commit 48b63c19a53f9e26dc4f64150641c730cb6a2305, in qtconsole, it gets "stuck" after several items - pressing up arrow key does not do anything. It seems to be happening when the currently showing history is longer than the qtconsole width.
Since there are more than 120 commits in between, I haven't tried to identify what was the commit which caused this.
Also, in terminal this does not happen.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/frontend/qt/console/history_console_widget.py`
Content:
```
1 # System library imports
2 from IPython.external.qt import QtGui
3
4 # Local imports
5 from IPython.utils.traitlets import Bool
6 from console_widget import ConsoleWidget
7
8
9 class HistoryConsoleWidget(ConsoleWidget):
10 """ A ConsoleWidget that keeps a history of the commands that have been
11 executed and provides a readline-esque interface to this history.
12 """
13
14 #------ Configuration ------------------------------------------------------
15
16 # If enabled, the input buffer will become "locked" to history movement when
17 # an edit is made to a multi-line input buffer. To override the lock, use
18 # Shift in conjunction with the standard history cycling keys.
19 history_lock = Bool(False, config=True)
20
21 #---------------------------------------------------------------------------
22 # 'object' interface
23 #---------------------------------------------------------------------------
24
25 def __init__(self, *args, **kw):
26 super(HistoryConsoleWidget, self).__init__(*args, **kw)
27
28 # HistoryConsoleWidget protected variables.
29 self._history = []
30 self._history_edits = {}
31 self._history_index = 0
32 self._history_prefix = ''
33
34 #---------------------------------------------------------------------------
35 # 'ConsoleWidget' public interface
36 #---------------------------------------------------------------------------
37
38 def execute(self, source=None, hidden=False, interactive=False):
39 """ Reimplemented to the store history.
40 """
41 if not hidden:
42 history = self.input_buffer if source is None else source
43
44 executed = super(HistoryConsoleWidget, self).execute(
45 source, hidden, interactive)
46
47 if executed and not hidden:
48 # Save the command unless it was an empty string or was identical
49 # to the previous command.
50 history = history.rstrip()
51 if history and (not self._history or self._history[-1] != history):
52 self._history.append(history)
53
54 # Emulate readline: reset all history edits.
55 self._history_edits = {}
56
57 # Move the history index to the most recent item.
58 self._history_index = len(self._history)
59
60 return executed
61
62 #---------------------------------------------------------------------------
63 # 'ConsoleWidget' abstract interface
64 #---------------------------------------------------------------------------
65
66 def _up_pressed(self, shift_modifier):
67 """ Called when the up key is pressed. Returns whether to continue
68 processing the event.
69 """
70 prompt_cursor = self._get_prompt_cursor()
71 if self._get_cursor().blockNumber() == prompt_cursor.blockNumber():
72 # Bail out if we're locked.
73 if self._history_locked() and not shift_modifier:
74 return False
75
76 # Set a search prefix based on the cursor position.
77 col = self._get_input_buffer_cursor_column()
78 input_buffer = self.input_buffer
79 # use the *shortest* of the cursor column and the history prefix
80 # to determine if the prefix has changed
81 n = min(col, len(self._history_prefix))
82
83 # prefix changed, restart search from the beginning
84 if (self._history_prefix[:n] != input_buffer[:n]):
85 self._history_index = len(self._history)
86
87 # the only time we shouldn't set the history prefix
88 # to the line up to the cursor is if we are already
89 # in a simple scroll (no prefix),
90 # and the cursor is at the end of the first line
91 first_line = input_buffer.split('\n', 1)[0]
92 if self._history_index == len(self._history) or \
93 not (self._history_prefix == '' and col == len(first_line)) or \
94 not (self._get_edited_history(self._history_index)[:col] == input_buffer[:col]):
95 self._history_prefix = input_buffer[:col]
96
97 # Perform the search.
98 self.history_previous(self._history_prefix,
99 as_prefix=not shift_modifier)
100
101 # Go to the first line of the prompt for seemless history scrolling.
102 # Emulate readline: keep the cursor position fixed for a prefix
103 # search.
104 cursor = self._get_prompt_cursor()
105 if self._history_prefix:
106 cursor.movePosition(QtGui.QTextCursor.Right,
107 n=len(self._history_prefix))
108 else:
109 cursor.movePosition(QtGui.QTextCursor.EndOfLine)
110 self._set_cursor(cursor)
111
112 return False
113
114 return True
115
116 def _down_pressed(self, shift_modifier):
117 """ Called when the down key is pressed. Returns whether to continue
118 processing the event.
119 """
120 end_cursor = self._get_end_cursor()
121 if self._get_cursor().blockNumber() == end_cursor.blockNumber():
122 # Bail out if we're locked.
123 if self._history_locked() and not shift_modifier:
124 return False
125
126 # Perform the search.
127 replaced = self.history_next(self._history_prefix,
128 as_prefix=not shift_modifier)
129
130 # Emulate readline: keep the cursor position fixed for a prefix
131 # search. (We don't need to move the cursor to the end of the buffer
132 # in the other case because this happens automatically when the
133 # input buffer is set.)
134 if self._history_prefix and replaced:
135 cursor = self._get_prompt_cursor()
136 cursor.movePosition(QtGui.QTextCursor.Right,
137 n=len(self._history_prefix))
138 self._set_cursor(cursor)
139
140 return False
141
142 return True
143
144 #---------------------------------------------------------------------------
145 # 'HistoryConsoleWidget' public interface
146 #---------------------------------------------------------------------------
147
148 def history_previous(self, substring='', as_prefix=True):
149 """ If possible, set the input buffer to a previous history item.
150
151 Parameters:
152 -----------
153 substring : str, optional
154 If specified, search for an item with this substring.
155 as_prefix : bool, optional
156 If True, the substring must match at the beginning (default).
157
158 Returns:
159 --------
160 Whether the input buffer was changed.
161 """
162 index = self._history_index
163 replace = False
164 while index > 0:
165 index -= 1
166 history = self._get_edited_history(index)
167 if (as_prefix and history.startswith(substring)) \
168 or (not as_prefix and substring in history):
169 replace = True
170 break
171
172 if replace:
173 self._store_edits()
174 self._history_index = index
175 self.input_buffer = history
176
177 return replace
178
179 def history_next(self, substring='', as_prefix=True):
180 """ If possible, set the input buffer to a subsequent history item.
181
182 Parameters:
183 -----------
184 substring : str, optional
185 If specified, search for an item with this substring.
186 as_prefix : bool, optional
187 If True, the substring must match at the beginning (default).
188
189 Returns:
190 --------
191 Whether the input buffer was changed.
192 """
193 index = self._history_index
194 replace = False
195 while index < len(self._history):
196 index += 1
197 history = self._get_edited_history(index)
198 if (as_prefix and history.startswith(substring)) \
199 or (not as_prefix and substring in history):
200 replace = True
201 break
202
203 if replace:
204 self._store_edits()
205 self._history_index = index
206 self.input_buffer = history
207
208 return replace
209
210 def history_tail(self, n=10):
211 """ Get the local history list.
212
213 Parameters:
214 -----------
215 n : int
216 The (maximum) number of history items to get.
217 """
218 return self._history[-n:]
219
220 def _request_update_session_history_length(self):
221 msg_id = self.kernel_manager.shell_channel.execute('',
222 silent=True,
223 user_expressions={
224 'hlen':'len(get_ipython().history_manager.input_hist_raw)',
225 }
226 )
227 self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'save_magic')
228
229 def _handle_execute_reply(self, msg):
230 """ Handles replies for code execution, here only session history length
231 """
232 msg_id = msg['parent_header']['msg_id']
233 info = self._request_info['execute'].pop(msg_id,None)
234 if info and info.kind == 'save_magic' and not self._hidden:
235 content = msg['content']
236 status = content['status']
237 if status == 'ok':
238 self._max_session_history=(int(content['user_expressions']['hlen']))
239
240 def save_magic(self):
241 # update the session history length
242 self._request_update_session_history_length()
243
244 file_name,extFilter = QtGui.QFileDialog.getSaveFileName(self,
245 "Enter A filename",
246 filter='Python File (*.py);; All files (*.*)'
247 )
248
249 # let's the user search/type for a file name, while the history length
250 # is fetched
251
252 if file_name:
253 hist_range, ok = QtGui.QInputDialog.getText(self,
254 'Please enter an interval of command to save',
255 'Saving commands:',
256 text=str('1-'+str(self._max_session_history))
257 )
258 if ok:
259 self.execute("%save"+" "+file_name+" "+str(hist_range))
260
261 #---------------------------------------------------------------------------
262 # 'HistoryConsoleWidget' protected interface
263 #---------------------------------------------------------------------------
264
265 def _history_locked(self):
266 """ Returns whether history movement is locked.
267 """
268 return (self.history_lock and
269 (self._get_edited_history(self._history_index) !=
270 self.input_buffer) and
271 (self._get_prompt_cursor().blockNumber() !=
272 self._get_end_cursor().blockNumber()))
273
274 def _get_edited_history(self, index):
275 """ Retrieves a history item, possibly with temporary edits.
276 """
277 if index in self._history_edits:
278 return self._history_edits[index]
279 elif index == len(self._history):
280 return unicode()
281 return self._history[index]
282
283 def _set_history(self, history):
284 """ Replace the current history with a sequence of history items.
285 """
286 self._history = list(history)
287 self._history_edits = {}
288 self._history_index = len(self._history)
289
290 def _store_edits(self):
291 """ If there are edits to the current input buffer, store them.
292 """
293 current = self.input_buffer
294 if self._history_index == len(self._history) or \
295 self._history[self._history_index] != current:
296 self._history_edits[self._history_index] = current
297
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/frontend/qt/console/history_console_widget.py b/IPython/frontend/qt/console/history_console_widget.py
--- a/IPython/frontend/qt/console/history_console_widget.py
+++ b/IPython/frontend/qt/console/history_console_widget.py
@@ -88,9 +88,15 @@
# to the line up to the cursor is if we are already
# in a simple scroll (no prefix),
# and the cursor is at the end of the first line
- first_line = input_buffer.split('\n', 1)[0]
+
+ # check if we are at the end of the first line
+ c = self._get_cursor()
+ current_pos = c.position()
+ c.movePosition(QtGui.QTextCursor.EndOfLine)
+ at_eol = (c.position() == current_pos)
+
if self._history_index == len(self._history) or \
- not (self._history_prefix == '' and col == len(first_line)) or \
+ not (self._history_prefix == '' and at_eol) or \
not (self._get_edited_history(self._history_index)[:col] == input_buffer[:col]):
self._history_prefix = input_buffer[:col]
|
{"golden_diff": "diff --git a/IPython/frontend/qt/console/history_console_widget.py b/IPython/frontend/qt/console/history_console_widget.py\n--- a/IPython/frontend/qt/console/history_console_widget.py\n+++ b/IPython/frontend/qt/console/history_console_widget.py\n@@ -88,9 +88,15 @@\n # to the line up to the cursor is if we are already\n # in a simple scroll (no prefix),\n # and the cursor is at the end of the first line\n- first_line = input_buffer.split('\\n', 1)[0]\n+ \n+ # check if we are at the end of the first line\n+ c = self._get_cursor()\n+ current_pos = c.position()\n+ c.movePosition(QtGui.QTextCursor.EndOfLine)\n+ at_eol = (c.position() == current_pos)\n+ \n if self._history_index == len(self._history) or \\\n- not (self._history_prefix == '' and col == len(first_line)) or \\\n+ not (self._history_prefix == '' and at_eol) or \\\n not (self._get_edited_history(self._history_index)[:col] == input_buffer[:col]):\n self._history_prefix = input_buffer[:col]\n", "issue": "Up arrow key history search gets stuck in QTConsole\nWith the ipython installed with with commit 50a583890fd0640492fcd76d6ecc97b5c3f23ae0, going through history with up arrow key in qtconsole works without any problem. But in current commit 48b63c19a53f9e26dc4f64150641c730cb6a2305, in qtconsole, it gets \"stuck\" after several items - pressing up arrow key does not do anything. It seems to be happening when the currently showing history is longer than the qtconsole width. \n\nSince there are more than 120 commits in between, I haven't tried to identify what was the commit which caused this.\n\nAlso, in terminal this does not happen.\n\n", "before_files": [{"content": "# System library imports\nfrom IPython.external.qt import QtGui\n\n# Local imports\nfrom IPython.utils.traitlets import Bool\nfrom console_widget import ConsoleWidget\n\n\nclass HistoryConsoleWidget(ConsoleWidget):\n \"\"\" A ConsoleWidget that keeps a history of the commands that have been\n executed and provides a readline-esque interface to this history.\n \"\"\"\n\n #------ Configuration ------------------------------------------------------\n\n # If enabled, the input buffer will become \"locked\" to history movement when\n # an edit is made to a multi-line input buffer. To override the lock, use\n # Shift in conjunction with the standard history cycling keys.\n history_lock = Bool(False, config=True)\n\n #---------------------------------------------------------------------------\n # 'object' interface\n #---------------------------------------------------------------------------\n\n def __init__(self, *args, **kw):\n super(HistoryConsoleWidget, self).__init__(*args, **kw)\n\n # HistoryConsoleWidget protected variables.\n self._history = []\n self._history_edits = {}\n self._history_index = 0\n self._history_prefix = ''\n\n #---------------------------------------------------------------------------\n # 'ConsoleWidget' public interface\n #---------------------------------------------------------------------------\n\n def execute(self, source=None, hidden=False, interactive=False):\n \"\"\" Reimplemented to the store history.\n \"\"\"\n if not hidden:\n history = self.input_buffer if source is None else source\n\n executed = super(HistoryConsoleWidget, self).execute(\n source, hidden, interactive)\n\n if executed and not hidden:\n # Save the command unless it was an empty string or was identical\n # to the previous command.\n history = history.rstrip()\n if history and (not self._history or self._history[-1] != history):\n self._history.append(history)\n\n # Emulate readline: reset all history edits.\n self._history_edits = {}\n\n # Move the history index to the most recent item.\n self._history_index = len(self._history)\n\n return executed\n\n #---------------------------------------------------------------------------\n # 'ConsoleWidget' abstract interface\n #---------------------------------------------------------------------------\n\n def _up_pressed(self, shift_modifier):\n \"\"\" Called when the up key is pressed. Returns whether to continue\n processing the event.\n \"\"\"\n prompt_cursor = self._get_prompt_cursor()\n if self._get_cursor().blockNumber() == prompt_cursor.blockNumber():\n # Bail out if we're locked.\n if self._history_locked() and not shift_modifier:\n return False\n\n # Set a search prefix based on the cursor position.\n col = self._get_input_buffer_cursor_column()\n input_buffer = self.input_buffer\n # use the *shortest* of the cursor column and the history prefix\n # to determine if the prefix has changed\n n = min(col, len(self._history_prefix))\n \n # prefix changed, restart search from the beginning\n if (self._history_prefix[:n] != input_buffer[:n]):\n self._history_index = len(self._history)\n \n # the only time we shouldn't set the history prefix\n # to the line up to the cursor is if we are already\n # in a simple scroll (no prefix),\n # and the cursor is at the end of the first line\n first_line = input_buffer.split('\\n', 1)[0]\n if self._history_index == len(self._history) or \\\n not (self._history_prefix == '' and col == len(first_line)) or \\\n not (self._get_edited_history(self._history_index)[:col] == input_buffer[:col]):\n self._history_prefix = input_buffer[:col]\n\n # Perform the search.\n self.history_previous(self._history_prefix,\n as_prefix=not shift_modifier)\n\n # Go to the first line of the prompt for seemless history scrolling.\n # Emulate readline: keep the cursor position fixed for a prefix\n # search.\n cursor = self._get_prompt_cursor()\n if self._history_prefix:\n cursor.movePosition(QtGui.QTextCursor.Right,\n n=len(self._history_prefix))\n else:\n cursor.movePosition(QtGui.QTextCursor.EndOfLine)\n self._set_cursor(cursor)\n\n return False\n\n return True\n\n def _down_pressed(self, shift_modifier):\n \"\"\" Called when the down key is pressed. Returns whether to continue\n processing the event.\n \"\"\"\n end_cursor = self._get_end_cursor()\n if self._get_cursor().blockNumber() == end_cursor.blockNumber():\n # Bail out if we're locked.\n if self._history_locked() and not shift_modifier:\n return False\n\n # Perform the search.\n replaced = self.history_next(self._history_prefix,\n as_prefix=not shift_modifier)\n\n # Emulate readline: keep the cursor position fixed for a prefix\n # search. (We don't need to move the cursor to the end of the buffer\n # in the other case because this happens automatically when the\n # input buffer is set.)\n if self._history_prefix and replaced:\n cursor = self._get_prompt_cursor()\n cursor.movePosition(QtGui.QTextCursor.Right,\n n=len(self._history_prefix))\n self._set_cursor(cursor)\n\n return False\n\n return True\n\n #---------------------------------------------------------------------------\n # 'HistoryConsoleWidget' public interface\n #---------------------------------------------------------------------------\n\n def history_previous(self, substring='', as_prefix=True):\n \"\"\" If possible, set the input buffer to a previous history item.\n\n Parameters:\n -----------\n substring : str, optional\n If specified, search for an item with this substring.\n as_prefix : bool, optional\n If True, the substring must match at the beginning (default).\n\n Returns:\n --------\n Whether the input buffer was changed.\n \"\"\"\n index = self._history_index\n replace = False\n while index > 0:\n index -= 1\n history = self._get_edited_history(index)\n if (as_prefix and history.startswith(substring)) \\\n or (not as_prefix and substring in history):\n replace = True\n break\n\n if replace:\n self._store_edits()\n self._history_index = index\n self.input_buffer = history\n\n return replace\n\n def history_next(self, substring='', as_prefix=True):\n \"\"\" If possible, set the input buffer to a subsequent history item.\n\n Parameters:\n -----------\n substring : str, optional\n If specified, search for an item with this substring.\n as_prefix : bool, optional\n If True, the substring must match at the beginning (default).\n\n Returns:\n --------\n Whether the input buffer was changed.\n \"\"\"\n index = self._history_index\n replace = False\n while index < len(self._history):\n index += 1\n history = self._get_edited_history(index)\n if (as_prefix and history.startswith(substring)) \\\n or (not as_prefix and substring in history):\n replace = True\n break\n\n if replace:\n self._store_edits()\n self._history_index = index\n self.input_buffer = history\n\n return replace\n\n def history_tail(self, n=10):\n \"\"\" Get the local history list.\n\n Parameters:\n -----------\n n : int\n The (maximum) number of history items to get.\n \"\"\"\n return self._history[-n:]\n\n def _request_update_session_history_length(self):\n msg_id = self.kernel_manager.shell_channel.execute('',\n silent=True,\n user_expressions={\n 'hlen':'len(get_ipython().history_manager.input_hist_raw)',\n }\n )\n self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'save_magic')\n\n def _handle_execute_reply(self, msg):\n \"\"\" Handles replies for code execution, here only session history length\n \"\"\"\n msg_id = msg['parent_header']['msg_id']\n info = self._request_info['execute'].pop(msg_id,None)\n if info and info.kind == 'save_magic' and not self._hidden:\n content = msg['content']\n status = content['status']\n if status == 'ok':\n self._max_session_history=(int(content['user_expressions']['hlen']))\n\n def save_magic(self):\n # update the session history length\n self._request_update_session_history_length()\n\n file_name,extFilter = QtGui.QFileDialog.getSaveFileName(self,\n \"Enter A filename\",\n filter='Python File (*.py);; All files (*.*)'\n )\n\n # let's the user search/type for a file name, while the history length\n # is fetched\n\n if file_name:\n hist_range, ok = QtGui.QInputDialog.getText(self,\n 'Please enter an interval of command to save',\n 'Saving commands:',\n text=str('1-'+str(self._max_session_history))\n )\n if ok:\n self.execute(\"%save\"+\" \"+file_name+\" \"+str(hist_range))\n\n #---------------------------------------------------------------------------\n # 'HistoryConsoleWidget' protected interface\n #---------------------------------------------------------------------------\n\n def _history_locked(self):\n \"\"\" Returns whether history movement is locked.\n \"\"\"\n return (self.history_lock and\n (self._get_edited_history(self._history_index) !=\n self.input_buffer) and\n (self._get_prompt_cursor().blockNumber() !=\n self._get_end_cursor().blockNumber()))\n\n def _get_edited_history(self, index):\n \"\"\" Retrieves a history item, possibly with temporary edits.\n \"\"\"\n if index in self._history_edits:\n return self._history_edits[index]\n elif index == len(self._history):\n return unicode()\n return self._history[index]\n\n def _set_history(self, history):\n \"\"\" Replace the current history with a sequence of history items.\n \"\"\"\n self._history = list(history)\n self._history_edits = {}\n self._history_index = len(self._history)\n\n def _store_edits(self):\n \"\"\" If there are edits to the current input buffer, store them.\n \"\"\"\n current = self.input_buffer\n if self._history_index == len(self._history) or \\\n self._history[self._history_index] != current:\n self._history_edits[self._history_index] = current\n", "path": "IPython/frontend/qt/console/history_console_widget.py"}], "after_files": [{"content": "# System library imports\nfrom IPython.external.qt import QtGui\n\n# Local imports\nfrom IPython.utils.traitlets import Bool\nfrom console_widget import ConsoleWidget\n\n\nclass HistoryConsoleWidget(ConsoleWidget):\n \"\"\" A ConsoleWidget that keeps a history of the commands that have been\n executed and provides a readline-esque interface to this history.\n \"\"\"\n\n #------ Configuration ------------------------------------------------------\n\n # If enabled, the input buffer will become \"locked\" to history movement when\n # an edit is made to a multi-line input buffer. To override the lock, use\n # Shift in conjunction with the standard history cycling keys.\n history_lock = Bool(False, config=True)\n\n #---------------------------------------------------------------------------\n # 'object' interface\n #---------------------------------------------------------------------------\n\n def __init__(self, *args, **kw):\n super(HistoryConsoleWidget, self).__init__(*args, **kw)\n\n # HistoryConsoleWidget protected variables.\n self._history = []\n self._history_edits = {}\n self._history_index = 0\n self._history_prefix = ''\n\n #---------------------------------------------------------------------------\n # 'ConsoleWidget' public interface\n #---------------------------------------------------------------------------\n\n def execute(self, source=None, hidden=False, interactive=False):\n \"\"\" Reimplemented to the store history.\n \"\"\"\n if not hidden:\n history = self.input_buffer if source is None else source\n\n executed = super(HistoryConsoleWidget, self).execute(\n source, hidden, interactive)\n\n if executed and not hidden:\n # Save the command unless it was an empty string or was identical\n # to the previous command.\n history = history.rstrip()\n if history and (not self._history or self._history[-1] != history):\n self._history.append(history)\n\n # Emulate readline: reset all history edits.\n self._history_edits = {}\n\n # Move the history index to the most recent item.\n self._history_index = len(self._history)\n\n return executed\n\n #---------------------------------------------------------------------------\n # 'ConsoleWidget' abstract interface\n #---------------------------------------------------------------------------\n\n def _up_pressed(self, shift_modifier):\n \"\"\" Called when the up key is pressed. Returns whether to continue\n processing the event.\n \"\"\"\n prompt_cursor = self._get_prompt_cursor()\n if self._get_cursor().blockNumber() == prompt_cursor.blockNumber():\n # Bail out if we're locked.\n if self._history_locked() and not shift_modifier:\n return False\n\n # Set a search prefix based on the cursor position.\n col = self._get_input_buffer_cursor_column()\n input_buffer = self.input_buffer\n # use the *shortest* of the cursor column and the history prefix\n # to determine if the prefix has changed\n n = min(col, len(self._history_prefix))\n \n # prefix changed, restart search from the beginning\n if (self._history_prefix[:n] != input_buffer[:n]):\n self._history_index = len(self._history)\n \n # the only time we shouldn't set the history prefix\n # to the line up to the cursor is if we are already\n # in a simple scroll (no prefix),\n # and the cursor is at the end of the first line\n \n # check if we are at the end of the first line\n c = self._get_cursor()\n current_pos = c.position()\n c.movePosition(QtGui.QTextCursor.EndOfLine)\n at_eol = (c.position() == current_pos)\n \n if self._history_index == len(self._history) or \\\n not (self._history_prefix == '' and at_eol) or \\\n not (self._get_edited_history(self._history_index)[:col] == input_buffer[:col]):\n self._history_prefix = input_buffer[:col]\n\n # Perform the search.\n self.history_previous(self._history_prefix,\n as_prefix=not shift_modifier)\n\n # Go to the first line of the prompt for seemless history scrolling.\n # Emulate readline: keep the cursor position fixed for a prefix\n # search.\n cursor = self._get_prompt_cursor()\n if self._history_prefix:\n cursor.movePosition(QtGui.QTextCursor.Right,\n n=len(self._history_prefix))\n else:\n cursor.movePosition(QtGui.QTextCursor.EndOfLine)\n self._set_cursor(cursor)\n\n return False\n\n return True\n\n def _down_pressed(self, shift_modifier):\n \"\"\" Called when the down key is pressed. Returns whether to continue\n processing the event.\n \"\"\"\n end_cursor = self._get_end_cursor()\n if self._get_cursor().blockNumber() == end_cursor.blockNumber():\n # Bail out if we're locked.\n if self._history_locked() and not shift_modifier:\n return False\n\n # Perform the search.\n replaced = self.history_next(self._history_prefix,\n as_prefix=not shift_modifier)\n\n # Emulate readline: keep the cursor position fixed for a prefix\n # search. (We don't need to move the cursor to the end of the buffer\n # in the other case because this happens automatically when the\n # input buffer is set.)\n if self._history_prefix and replaced:\n cursor = self._get_prompt_cursor()\n cursor.movePosition(QtGui.QTextCursor.Right,\n n=len(self._history_prefix))\n self._set_cursor(cursor)\n\n return False\n\n return True\n\n #---------------------------------------------------------------------------\n # 'HistoryConsoleWidget' public interface\n #---------------------------------------------------------------------------\n\n def history_previous(self, substring='', as_prefix=True):\n \"\"\" If possible, set the input buffer to a previous history item.\n\n Parameters:\n -----------\n substring : str, optional\n If specified, search for an item with this substring.\n as_prefix : bool, optional\n If True, the substring must match at the beginning (default).\n\n Returns:\n --------\n Whether the input buffer was changed.\n \"\"\"\n index = self._history_index\n replace = False\n while index > 0:\n index -= 1\n history = self._get_edited_history(index)\n if (as_prefix and history.startswith(substring)) \\\n or (not as_prefix and substring in history):\n replace = True\n break\n\n if replace:\n self._store_edits()\n self._history_index = index\n self.input_buffer = history\n\n return replace\n\n def history_next(self, substring='', as_prefix=True):\n \"\"\" If possible, set the input buffer to a subsequent history item.\n\n Parameters:\n -----------\n substring : str, optional\n If specified, search for an item with this substring.\n as_prefix : bool, optional\n If True, the substring must match at the beginning (default).\n\n Returns:\n --------\n Whether the input buffer was changed.\n \"\"\"\n index = self._history_index\n replace = False\n while index < len(self._history):\n index += 1\n history = self._get_edited_history(index)\n if (as_prefix and history.startswith(substring)) \\\n or (not as_prefix and substring in history):\n replace = True\n break\n\n if replace:\n self._store_edits()\n self._history_index = index\n self.input_buffer = history\n\n return replace\n\n def history_tail(self, n=10):\n \"\"\" Get the local history list.\n\n Parameters:\n -----------\n n : int\n The (maximum) number of history items to get.\n \"\"\"\n return self._history[-n:]\n\n def _request_update_session_history_length(self):\n msg_id = self.kernel_manager.shell_channel.execute('',\n silent=True,\n user_expressions={\n 'hlen':'len(get_ipython().history_manager.input_hist_raw)',\n }\n )\n self._request_info['execute'][msg_id] = self._ExecutionRequest(msg_id, 'save_magic')\n\n def _handle_execute_reply(self, msg):\n \"\"\" Handles replies for code execution, here only session history length\n \"\"\"\n msg_id = msg['parent_header']['msg_id']\n info = self._request_info['execute'].pop(msg_id,None)\n if info and info.kind == 'save_magic' and not self._hidden:\n content = msg['content']\n status = content['status']\n if status == 'ok':\n self._max_session_history=(int(content['user_expressions']['hlen']))\n\n def save_magic(self):\n # update the session history length\n self._request_update_session_history_length()\n\n file_name,extFilter = QtGui.QFileDialog.getSaveFileName(self,\n \"Enter A filename\",\n filter='Python File (*.py);; All files (*.*)'\n )\n\n # let's the user search/type for a file name, while the history length\n # is fetched\n\n if file_name:\n hist_range, ok = QtGui.QInputDialog.getText(self,\n 'Please enter an interval of command to save',\n 'Saving commands:',\n text=str('1-'+str(self._max_session_history))\n )\n if ok:\n self.execute(\"%save\"+\" \"+file_name+\" \"+str(hist_range))\n\n #---------------------------------------------------------------------------\n # 'HistoryConsoleWidget' protected interface\n #---------------------------------------------------------------------------\n\n def _history_locked(self):\n \"\"\" Returns whether history movement is locked.\n \"\"\"\n return (self.history_lock and\n (self._get_edited_history(self._history_index) !=\n self.input_buffer) and\n (self._get_prompt_cursor().blockNumber() !=\n self._get_end_cursor().blockNumber()))\n\n def _get_edited_history(self, index):\n \"\"\" Retrieves a history item, possibly with temporary edits.\n \"\"\"\n if index in self._history_edits:\n return self._history_edits[index]\n elif index == len(self._history):\n return unicode()\n return self._history[index]\n\n def _set_history(self, history):\n \"\"\" Replace the current history with a sequence of history items.\n \"\"\"\n self._history = list(history)\n self._history_edits = {}\n self._history_index = len(self._history)\n\n def _store_edits(self):\n \"\"\" If there are edits to the current input buffer, store them.\n \"\"\"\n current = self.input_buffer\n if self._history_index == len(self._history) or \\\n self._history[self._history_index] != current:\n self._history_edits[self._history_index] = current\n", "path": "IPython/frontend/qt/console/history_console_widget.py"}]}
| 3,425 | 262 |
gh_patches_debug_12827
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-8099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
marginal_counts is broken for pulse results
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**: 0.18.0
- **Python version**: 3.9
- **Operating system**: OSx
### What is the current behavior?
`marginal_counts` is broken for pulse jobs. I
### Steps to reproduce the problem
```python
from qiskit import pulse, QuantumCircuit, schedule, transpile, execute, IBMQ
from qiskit.result import marginal_counts
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
backend = provider.get_backend('ibmq_armonk')
qc = QuantumCircuit(1,1)
qc.measure(0, 0)
job = backend.run(schedule(transpile(qc, backend), backend))
marginal_counts(job.result(), indices=[0])
```
Gives output
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-8e897d2c2a54> in <module>
11
12 job = backend.run(schedule(transpile(qc, backend), backend))
---> 13 marginal_counts(job.result(), indices=[0])
~/opt/anaconda3/envs/python39/lib/python3.9/site-packages/qiskit/result/utils.py in marginal_counts(result, indices, inplace, format_marginal)
55 experiment_result.data.counts = new_counts_hex
56 experiment_result.header.memory_slots = len(indices)
---> 57 csize = experiment_result.header.creg_sizes
58 experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
59 return result
AttributeError: 'QobjExperimentHeader' object has no attribute 'creg_sizes'
```
### What is the expected behavior?
### Suggested solutions
It seems like it is assuming `creg` will be present in the header. This is not true for pulse jobs which do not have classical registers and therefore this field won't be present. The fix should make marginal_counts work when the header is empty.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/result/utils.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2018.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 # pylint: disable=c-extension-no-member
14
15 """Utility functions for working with Results."""
16
17 from typing import List, Union, Optional, Dict
18 from collections import Counter
19 from copy import deepcopy
20
21 from qiskit.exceptions import QiskitError
22 from qiskit.result.result import Result
23 from qiskit.result.counts import Counts
24 from qiskit.result.distributions.probability import ProbDistribution
25 from qiskit.result.distributions.quasi import QuasiDistribution
26
27 from qiskit.result.postprocess import _bin_to_hex, _hex_to_bin
28
29 # pylint: disable=import-error, no-name-in-module
30 from qiskit._accelerate import results as results_rs
31
32
33 def marginal_counts(
34 result: Union[dict, Result],
35 indices: Optional[List[int]] = None,
36 inplace: bool = False,
37 format_marginal: bool = False,
38 marginalize_memory: Optional[bool] = True,
39 ) -> Union[Dict[str, int], Result]:
40 """Marginalize counts from an experiment over some indices of interest.
41
42 Args:
43 result: result to be marginalized
44 (a Result object or a dict(str, int) of counts).
45 indices: The bit positions of interest
46 to marginalize over. If ``None`` (default), do not marginalize at all.
47 inplace: Default: False. Operates on the original Result
48 argument if True, leading to loss of original Job Result.
49 It has no effect if ``result`` is a dict.
50 format_marginal: Default: False. If True, takes the output of
51 marginalize and formats it with placeholders between cregs and
52 for non-indices.
53 marginalize_memory: If True, then also marginalize the memory field (if present).
54 If False, remove the memory field from the result.
55 If None, leave the memory field as is.
56
57 Returns:
58 Result or dict(str, int): A Result object or a dictionary with
59 the observed counts, marginalized to only account for frequency
60 of observations of bits of interest.
61
62 Raises:
63 QiskitError: in case of invalid indices to marginalize over.
64 """
65 if isinstance(result, Result):
66 if not inplace:
67 result = deepcopy(result)
68 for i, experiment_result in enumerate(result.results):
69 counts = result.get_counts(i)
70 new_counts = _marginalize(counts, indices)
71 new_counts_hex = {}
72 for k, v in new_counts.items():
73 new_counts_hex[_bin_to_hex(k)] = v
74 experiment_result.data.counts = new_counts_hex
75
76 if indices is not None:
77 experiment_result.header.memory_slots = len(indices)
78 csize = experiment_result.header.creg_sizes
79 experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
80
81 if getattr(experiment_result.data, "memory", None) is not None and indices is not None:
82 if marginalize_memory is False:
83 delattr(experiment_result.data, "memory")
84 elif marginalize_memory is None:
85 pass # leave as is
86 else:
87 sorted_indices = sorted(
88 indices, reverse=True
89 ) # same convention as for the counts
90 bit_strings = [_hex_to_bin(s) for s in experiment_result.data.memory]
91 marginal_bit_strings = [
92 "".join([s[-idx - 1] for idx in sorted_indices if idx < len(s)]) or "0"
93 for s in bit_strings
94 ]
95 experiment_result.data.memory = [_bin_to_hex(s) for s in marginal_bit_strings]
96 return result
97 else:
98 marg_counts = _marginalize(result, indices)
99 if format_marginal and indices is not None:
100 marg_counts = _format_marginal(result, marg_counts, indices)
101 return marg_counts
102
103
104 def _adjust_creg_sizes(creg_sizes, indices):
105 """Helper to reduce creg_sizes to match indices"""
106
107 # Zero out creg_sizes list
108 new_creg_sizes = [[creg[0], 0] for creg in creg_sizes]
109 indices_sort = sorted(indices)
110
111 # Get creg num values and then convert to the cumulative last index per creg.
112 # e.g. [2, 1, 3] => [1, 2, 5]
113 creg_nums = [x for _, x in creg_sizes]
114 creg_limits = [sum(creg_nums[0:x:1]) - 1 for x in range(0, len(creg_nums) + 1)][1:]
115
116 # Now iterate over indices and find which creg that index is in.
117 # When found increment the creg size
118 creg_idx = 0
119 for ind in indices_sort:
120 for idx in range(creg_idx, len(creg_limits)):
121 if ind <= creg_limits[idx]:
122 creg_idx = idx
123 new_creg_sizes[idx][1] += 1
124 break
125 # Throw away any cregs with 0 size
126 new_creg_sizes = [creg for creg in new_creg_sizes if creg[1] != 0]
127 return new_creg_sizes
128
129
130 def marginal_distribution(
131 counts: dict, indices: Optional[List[int]] = None, format_marginal: bool = False
132 ) -> Dict[str, int]:
133 """Marginalize counts from an experiment over some indices of interest.
134
135 Unlike :func:`~.marginal_counts` this function respects the order of
136 the input ``indices``. If the input ``indices`` list is specified, the order
137 the bit indices will be the output order of the bitstrings
138 in the marginalized output.
139
140 Args:
141 counts: result to be marginalized
142 indices: The bit positions of interest
143 to marginalize over. If ``None`` (default), do not marginalize at all.
144 format_marginal: Default: False. If True, takes the output of
145 marginalize and formats it with placeholders between cregs and
146 for non-indices.
147 Returns:
148 dict(str, int): A marginalized dictionary
149 Raises:
150 QiskitError: If any value in ``indices`` is invalid or the ``counts`` dict
151 is invalid.
152 """
153 num_clbits = len(max(counts.keys()).replace(" ", ""))
154 if indices is not None and (not indices or not set(indices).issubset(range(num_clbits))):
155 raise QiskitError(f"indices must be in range [0, {num_clbits - 1}].")
156
157 if isinstance(counts, Counts):
158 res = results_rs.marginal_counts(counts, indices)
159 elif isinstance(counts, (ProbDistribution, QuasiDistribution)):
160 res = results_rs.marginal_distribution(counts, indices)
161 else:
162 first_value = next(iter(counts.values()))
163 if isinstance(first_value, int):
164 res = results_rs.marginal_counts(counts, indices)
165 elif isinstance(first_value, float):
166 res = results_rs.marginal_distribution(counts, indices)
167 else:
168 raise QiskitError("Values of counts must be an int or float")
169
170 if format_marginal and indices is not None:
171 return _format_marginal(counts, res, indices)
172 return res
173
174
175 def _marginalize(counts, indices=None):
176 """Get the marginal counts for the given set of indices"""
177 num_clbits = len(next(iter(counts)).replace(" ", ""))
178 # Check if we do not need to marginalize and if so, trim
179 # whitespace and '_' and return
180 if (indices is None) or set(range(num_clbits)) == set(indices):
181 ret = {}
182 for key, val in counts.items():
183 key = _remove_space_underscore(key)
184 ret[key] = val
185 return ret
186
187 if not indices or not set(indices).issubset(set(range(num_clbits))):
188 raise QiskitError(f"indices must be in range [0, {num_clbits - 1}].")
189
190 # Sort the indices to keep in descending order
191 # Since bitstrings have qubit-0 as least significant bit
192 indices = sorted(indices, reverse=True)
193
194 # Build the return list
195 new_counts = Counter()
196 for key, val in counts.items():
197 new_key = "".join([_remove_space_underscore(key)[-idx - 1] for idx in indices])
198 new_counts[new_key] += val
199 return dict(new_counts)
200
201
202 def _format_marginal(counts, marg_counts, indices):
203 """Take the output of marginalize and add placeholders for
204 multiple cregs and non-indices."""
205 format_counts = {}
206 counts_template = next(iter(counts))
207 counts_len = len(counts_template.replace(" ", ""))
208 indices_rev = sorted(indices, reverse=True)
209
210 for count in marg_counts:
211 index_dict = dict(zip(indices_rev, count))
212 count_bits = "".join(
213 [index_dict[index] if index in index_dict else "_" for index in range(counts_len)]
214 )[::-1]
215 for index, bit in enumerate(counts_template):
216 if bit == " ":
217 count_bits = count_bits[:index] + " " + count_bits[index:]
218 format_counts[count_bits] = marg_counts[count]
219 return format_counts
220
221
222 def _remove_space_underscore(bitstring):
223 """Removes all spaces and underscores from bitstring"""
224 return bitstring.replace(" ", "").replace("_", "")
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/result/utils.py b/qiskit/result/utils.py
--- a/qiskit/result/utils.py
+++ b/qiskit/result/utils.py
@@ -75,8 +75,9 @@
if indices is not None:
experiment_result.header.memory_slots = len(indices)
- csize = experiment_result.header.creg_sizes
- experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
+ csize = getattr(experiment_result.header, "creg_sizes", None)
+ if csize is not None:
+ experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
if getattr(experiment_result.data, "memory", None) is not None and indices is not None:
if marginalize_memory is False:
|
{"golden_diff": "diff --git a/qiskit/result/utils.py b/qiskit/result/utils.py\n--- a/qiskit/result/utils.py\n+++ b/qiskit/result/utils.py\n@@ -75,8 +75,9 @@\n \n if indices is not None:\n experiment_result.header.memory_slots = len(indices)\n- csize = experiment_result.header.creg_sizes\n- experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)\n+ csize = getattr(experiment_result.header, \"creg_sizes\", None)\n+ if csize is not None:\n+ experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)\n \n if getattr(experiment_result.data, \"memory\", None) is not None and indices is not None:\n if marginalize_memory is False:\n", "issue": "marginal_counts is broken for pulse results\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: 0.18.0\r\n- **Python version**: 3.9\r\n- **Operating system**: OSx\r\n\r\n### What is the current behavior?\r\n`marginal_counts` is broken for pulse jobs. I\r\n\r\n\r\n### Steps to reproduce the problem\r\n```python\r\nfrom qiskit import pulse, QuantumCircuit, schedule, transpile, execute, IBMQ\r\nfrom qiskit.result import marginal_counts\r\n\r\n\r\nIBMQ.load_account()\r\nprovider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')\r\nbackend = provider.get_backend('ibmq_armonk')\r\n\r\nqc = QuantumCircuit(1,1)\r\nqc.measure(0, 0)\r\n\r\njob = backend.run(schedule(transpile(qc, backend), backend))\r\nmarginal_counts(job.result(), indices=[0])\r\n```\r\nGives output\r\n```\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-6-8e897d2c2a54> in <module>\r\n 11 \r\n 12 job = backend.run(schedule(transpile(qc, backend), backend))\r\n---> 13 marginal_counts(job.result(), indices=[0])\r\n\r\n~/opt/anaconda3/envs/python39/lib/python3.9/site-packages/qiskit/result/utils.py in marginal_counts(result, indices, inplace, format_marginal)\r\n 55 experiment_result.data.counts = new_counts_hex\r\n 56 experiment_result.header.memory_slots = len(indices)\r\n---> 57 csize = experiment_result.header.creg_sizes\r\n 58 experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)\r\n 59 return result\r\n\r\nAttributeError: 'QobjExperimentHeader' object has no attribute 'creg_sizes'\r\n```\r\n\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\nIt seems like it is assuming `creg` will be present in the header. This is not true for pulse jobs which do not have classical registers and therefore this field won't be present. The fix should make marginal_counts work when the header is empty.\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=c-extension-no-member\n\n\"\"\"Utility functions for working with Results.\"\"\"\n\nfrom typing import List, Union, Optional, Dict\nfrom collections import Counter\nfrom copy import deepcopy\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.result.result import Result\nfrom qiskit.result.counts import Counts\nfrom qiskit.result.distributions.probability import ProbDistribution\nfrom qiskit.result.distributions.quasi import QuasiDistribution\n\nfrom qiskit.result.postprocess import _bin_to_hex, _hex_to_bin\n\n# pylint: disable=import-error, no-name-in-module\nfrom qiskit._accelerate import results as results_rs\n\n\ndef marginal_counts(\n result: Union[dict, Result],\n indices: Optional[List[int]] = None,\n inplace: bool = False,\n format_marginal: bool = False,\n marginalize_memory: Optional[bool] = True,\n) -> Union[Dict[str, int], Result]:\n \"\"\"Marginalize counts from an experiment over some indices of interest.\n\n Args:\n result: result to be marginalized\n (a Result object or a dict(str, int) of counts).\n indices: The bit positions of interest\n to marginalize over. If ``None`` (default), do not marginalize at all.\n inplace: Default: False. Operates on the original Result\n argument if True, leading to loss of original Job Result.\n It has no effect if ``result`` is a dict.\n format_marginal: Default: False. If True, takes the output of\n marginalize and formats it with placeholders between cregs and\n for non-indices.\n marginalize_memory: If True, then also marginalize the memory field (if present).\n If False, remove the memory field from the result.\n If None, leave the memory field as is.\n\n Returns:\n Result or dict(str, int): A Result object or a dictionary with\n the observed counts, marginalized to only account for frequency\n of observations of bits of interest.\n\n Raises:\n QiskitError: in case of invalid indices to marginalize over.\n \"\"\"\n if isinstance(result, Result):\n if not inplace:\n result = deepcopy(result)\n for i, experiment_result in enumerate(result.results):\n counts = result.get_counts(i)\n new_counts = _marginalize(counts, indices)\n new_counts_hex = {}\n for k, v in new_counts.items():\n new_counts_hex[_bin_to_hex(k)] = v\n experiment_result.data.counts = new_counts_hex\n\n if indices is not None:\n experiment_result.header.memory_slots = len(indices)\n csize = experiment_result.header.creg_sizes\n experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)\n\n if getattr(experiment_result.data, \"memory\", None) is not None and indices is not None:\n if marginalize_memory is False:\n delattr(experiment_result.data, \"memory\")\n elif marginalize_memory is None:\n pass # leave as is\n else:\n sorted_indices = sorted(\n indices, reverse=True\n ) # same convention as for the counts\n bit_strings = [_hex_to_bin(s) for s in experiment_result.data.memory]\n marginal_bit_strings = [\n \"\".join([s[-idx - 1] for idx in sorted_indices if idx < len(s)]) or \"0\"\n for s in bit_strings\n ]\n experiment_result.data.memory = [_bin_to_hex(s) for s in marginal_bit_strings]\n return result\n else:\n marg_counts = _marginalize(result, indices)\n if format_marginal and indices is not None:\n marg_counts = _format_marginal(result, marg_counts, indices)\n return marg_counts\n\n\ndef _adjust_creg_sizes(creg_sizes, indices):\n \"\"\"Helper to reduce creg_sizes to match indices\"\"\"\n\n # Zero out creg_sizes list\n new_creg_sizes = [[creg[0], 0] for creg in creg_sizes]\n indices_sort = sorted(indices)\n\n # Get creg num values and then convert to the cumulative last index per creg.\n # e.g. [2, 1, 3] => [1, 2, 5]\n creg_nums = [x for _, x in creg_sizes]\n creg_limits = [sum(creg_nums[0:x:1]) - 1 for x in range(0, len(creg_nums) + 1)][1:]\n\n # Now iterate over indices and find which creg that index is in.\n # When found increment the creg size\n creg_idx = 0\n for ind in indices_sort:\n for idx in range(creg_idx, len(creg_limits)):\n if ind <= creg_limits[idx]:\n creg_idx = idx\n new_creg_sizes[idx][1] += 1\n break\n # Throw away any cregs with 0 size\n new_creg_sizes = [creg for creg in new_creg_sizes if creg[1] != 0]\n return new_creg_sizes\n\n\ndef marginal_distribution(\n counts: dict, indices: Optional[List[int]] = None, format_marginal: bool = False\n) -> Dict[str, int]:\n \"\"\"Marginalize counts from an experiment over some indices of interest.\n\n Unlike :func:`~.marginal_counts` this function respects the order of\n the input ``indices``. If the input ``indices`` list is specified, the order\n the bit indices will be the output order of the bitstrings\n in the marginalized output.\n\n Args:\n counts: result to be marginalized\n indices: The bit positions of interest\n to marginalize over. If ``None`` (default), do not marginalize at all.\n format_marginal: Default: False. If True, takes the output of\n marginalize and formats it with placeholders between cregs and\n for non-indices.\n Returns:\n dict(str, int): A marginalized dictionary\n Raises:\n QiskitError: If any value in ``indices`` is invalid or the ``counts`` dict\n is invalid.\n \"\"\"\n num_clbits = len(max(counts.keys()).replace(\" \", \"\"))\n if indices is not None and (not indices or not set(indices).issubset(range(num_clbits))):\n raise QiskitError(f\"indices must be in range [0, {num_clbits - 1}].\")\n\n if isinstance(counts, Counts):\n res = results_rs.marginal_counts(counts, indices)\n elif isinstance(counts, (ProbDistribution, QuasiDistribution)):\n res = results_rs.marginal_distribution(counts, indices)\n else:\n first_value = next(iter(counts.values()))\n if isinstance(first_value, int):\n res = results_rs.marginal_counts(counts, indices)\n elif isinstance(first_value, float):\n res = results_rs.marginal_distribution(counts, indices)\n else:\n raise QiskitError(\"Values of counts must be an int or float\")\n\n if format_marginal and indices is not None:\n return _format_marginal(counts, res, indices)\n return res\n\n\ndef _marginalize(counts, indices=None):\n \"\"\"Get the marginal counts for the given set of indices\"\"\"\n num_clbits = len(next(iter(counts)).replace(\" \", \"\"))\n # Check if we do not need to marginalize and if so, trim\n # whitespace and '_' and return\n if (indices is None) or set(range(num_clbits)) == set(indices):\n ret = {}\n for key, val in counts.items():\n key = _remove_space_underscore(key)\n ret[key] = val\n return ret\n\n if not indices or not set(indices).issubset(set(range(num_clbits))):\n raise QiskitError(f\"indices must be in range [0, {num_clbits - 1}].\")\n\n # Sort the indices to keep in descending order\n # Since bitstrings have qubit-0 as least significant bit\n indices = sorted(indices, reverse=True)\n\n # Build the return list\n new_counts = Counter()\n for key, val in counts.items():\n new_key = \"\".join([_remove_space_underscore(key)[-idx - 1] for idx in indices])\n new_counts[new_key] += val\n return dict(new_counts)\n\n\ndef _format_marginal(counts, marg_counts, indices):\n \"\"\"Take the output of marginalize and add placeholders for\n multiple cregs and non-indices.\"\"\"\n format_counts = {}\n counts_template = next(iter(counts))\n counts_len = len(counts_template.replace(\" \", \"\"))\n indices_rev = sorted(indices, reverse=True)\n\n for count in marg_counts:\n index_dict = dict(zip(indices_rev, count))\n count_bits = \"\".join(\n [index_dict[index] if index in index_dict else \"_\" for index in range(counts_len)]\n )[::-1]\n for index, bit in enumerate(counts_template):\n if bit == \" \":\n count_bits = count_bits[:index] + \" \" + count_bits[index:]\n format_counts[count_bits] = marg_counts[count]\n return format_counts\n\n\ndef _remove_space_underscore(bitstring):\n \"\"\"Removes all spaces and underscores from bitstring\"\"\"\n return bitstring.replace(\" \", \"\").replace(\"_\", \"\")\n", "path": "qiskit/result/utils.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n# pylint: disable=c-extension-no-member\n\n\"\"\"Utility functions for working with Results.\"\"\"\n\nfrom typing import List, Union, Optional, Dict\nfrom collections import Counter\nfrom copy import deepcopy\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.result.result import Result\nfrom qiskit.result.counts import Counts\nfrom qiskit.result.distributions.probability import ProbDistribution\nfrom qiskit.result.distributions.quasi import QuasiDistribution\n\nfrom qiskit.result.postprocess import _bin_to_hex, _hex_to_bin\n\n# pylint: disable=import-error, no-name-in-module\nfrom qiskit._accelerate import results as results_rs\n\n\ndef marginal_counts(\n result: Union[dict, Result],\n indices: Optional[List[int]] = None,\n inplace: bool = False,\n format_marginal: bool = False,\n marginalize_memory: Optional[bool] = True,\n) -> Union[Dict[str, int], Result]:\n \"\"\"Marginalize counts from an experiment over some indices of interest.\n\n Args:\n result: result to be marginalized\n (a Result object or a dict(str, int) of counts).\n indices: The bit positions of interest\n to marginalize over. If ``None`` (default), do not marginalize at all.\n inplace: Default: False. Operates on the original Result\n argument if True, leading to loss of original Job Result.\n It has no effect if ``result`` is a dict.\n format_marginal: Default: False. If True, takes the output of\n marginalize and formats it with placeholders between cregs and\n for non-indices.\n marginalize_memory: If True, then also marginalize the memory field (if present).\n If False, remove the memory field from the result.\n If None, leave the memory field as is.\n\n Returns:\n Result or dict(str, int): A Result object or a dictionary with\n the observed counts, marginalized to only account for frequency\n of observations of bits of interest.\n\n Raises:\n QiskitError: in case of invalid indices to marginalize over.\n \"\"\"\n if isinstance(result, Result):\n if not inplace:\n result = deepcopy(result)\n for i, experiment_result in enumerate(result.results):\n counts = result.get_counts(i)\n new_counts = _marginalize(counts, indices)\n new_counts_hex = {}\n for k, v in new_counts.items():\n new_counts_hex[_bin_to_hex(k)] = v\n experiment_result.data.counts = new_counts_hex\n\n if indices is not None:\n experiment_result.header.memory_slots = len(indices)\n csize = getattr(experiment_result.header, \"creg_sizes\", None)\n if csize is not None:\n experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)\n\n if getattr(experiment_result.data, \"memory\", None) is not None and indices is not None:\n if marginalize_memory is False:\n delattr(experiment_result.data, \"memory\")\n elif marginalize_memory is None:\n pass # leave as is\n else:\n sorted_indices = sorted(\n indices, reverse=True\n ) # same convention as for the counts\n bit_strings = [_hex_to_bin(s) for s in experiment_result.data.memory]\n marginal_bit_strings = [\n \"\".join([s[-idx - 1] for idx in sorted_indices if idx < len(s)]) or \"0\"\n for s in bit_strings\n ]\n experiment_result.data.memory = [_bin_to_hex(s) for s in marginal_bit_strings]\n return result\n else:\n marg_counts = _marginalize(result, indices)\n if format_marginal and indices is not None:\n marg_counts = _format_marginal(result, marg_counts, indices)\n return marg_counts\n\n\ndef _adjust_creg_sizes(creg_sizes, indices):\n \"\"\"Helper to reduce creg_sizes to match indices\"\"\"\n\n # Zero out creg_sizes list\n new_creg_sizes = [[creg[0], 0] for creg in creg_sizes]\n indices_sort = sorted(indices)\n\n # Get creg num values and then convert to the cumulative last index per creg.\n # e.g. [2, 1, 3] => [1, 2, 5]\n creg_nums = [x for _, x in creg_sizes]\n creg_limits = [sum(creg_nums[0:x:1]) - 1 for x in range(0, len(creg_nums) + 1)][1:]\n\n # Now iterate over indices and find which creg that index is in.\n # When found increment the creg size\n creg_idx = 0\n for ind in indices_sort:\n for idx in range(creg_idx, len(creg_limits)):\n if ind <= creg_limits[idx]:\n creg_idx = idx\n new_creg_sizes[idx][1] += 1\n break\n # Throw away any cregs with 0 size\n new_creg_sizes = [creg for creg in new_creg_sizes if creg[1] != 0]\n return new_creg_sizes\n\n\ndef marginal_distribution(\n counts: dict, indices: Optional[List[int]] = None, format_marginal: bool = False\n) -> Dict[str, int]:\n \"\"\"Marginalize counts from an experiment over some indices of interest.\n\n Unlike :func:`~.marginal_counts` this function respects the order of\n the input ``indices``. If the input ``indices`` list is specified, the order\n the bit indices will be the output order of the bitstrings\n in the marginalized output.\n\n Args:\n counts: result to be marginalized\n indices: The bit positions of interest\n to marginalize over. If ``None`` (default), do not marginalize at all.\n format_marginal: Default: False. If True, takes the output of\n marginalize and formats it with placeholders between cregs and\n for non-indices.\n Returns:\n dict(str, int): A marginalized dictionary\n Raises:\n QiskitError: If any value in ``indices`` is invalid or the ``counts`` dict\n is invalid.\n \"\"\"\n num_clbits = len(max(counts.keys()).replace(\" \", \"\"))\n if indices is not None and (not indices or not set(indices).issubset(range(num_clbits))):\n raise QiskitError(f\"indices must be in range [0, {num_clbits - 1}].\")\n\n if isinstance(counts, Counts):\n res = results_rs.marginal_counts(counts, indices)\n elif isinstance(counts, (ProbDistribution, QuasiDistribution)):\n res = results_rs.marginal_distribution(counts, indices)\n else:\n first_value = next(iter(counts.values()))\n if isinstance(first_value, int):\n res = results_rs.marginal_counts(counts, indices)\n elif isinstance(first_value, float):\n res = results_rs.marginal_distribution(counts, indices)\n else:\n raise QiskitError(\"Values of counts must be an int or float\")\n\n if format_marginal and indices is not None:\n return _format_marginal(counts, res, indices)\n return res\n\n\ndef _marginalize(counts, indices=None):\n \"\"\"Get the marginal counts for the given set of indices\"\"\"\n num_clbits = len(next(iter(counts)).replace(\" \", \"\"))\n # Check if we do not need to marginalize and if so, trim\n # whitespace and '_' and return\n if (indices is None) or set(range(num_clbits)) == set(indices):\n ret = {}\n for key, val in counts.items():\n key = _remove_space_underscore(key)\n ret[key] = val\n return ret\n\n if not indices or not set(indices).issubset(set(range(num_clbits))):\n raise QiskitError(f\"indices must be in range [0, {num_clbits - 1}].\")\n\n # Sort the indices to keep in descending order\n # Since bitstrings have qubit-0 as least significant bit\n indices = sorted(indices, reverse=True)\n\n # Build the return list\n new_counts = Counter()\n for key, val in counts.items():\n new_key = \"\".join([_remove_space_underscore(key)[-idx - 1] for idx in indices])\n new_counts[new_key] += val\n return dict(new_counts)\n\n\ndef _format_marginal(counts, marg_counts, indices):\n \"\"\"Take the output of marginalize and add placeholders for\n multiple cregs and non-indices.\"\"\"\n format_counts = {}\n counts_template = next(iter(counts))\n counts_len = len(counts_template.replace(\" \", \"\"))\n indices_rev = sorted(indices, reverse=True)\n\n for count in marg_counts:\n index_dict = dict(zip(indices_rev, count))\n count_bits = \"\".join(\n [index_dict[index] if index in index_dict else \"_\" for index in range(counts_len)]\n )[::-1]\n for index, bit in enumerate(counts_template):\n if bit == \" \":\n count_bits = count_bits[:index] + \" \" + count_bits[index:]\n format_counts[count_bits] = marg_counts[count]\n return format_counts\n\n\ndef _remove_space_underscore(bitstring):\n \"\"\"Removes all spaces and underscores from bitstring\"\"\"\n return bitstring.replace(\" \", \"\").replace(\"_\", \"\")\n", "path": "qiskit/result/utils.py"}]}
| 3,498 | 174 |
gh_patches_debug_49287
|
rasdani/github-patches
|
git_diff
|
ansible__awx-7270
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tower settings fails with stacktrace - expected to exit gracefully
##### ISSUE TYPE
- Bug Report
##### SUMMARY
Running awx.awx.settings module throws `AttributeError: 'NoneType' object has no attribute 'startswith'`
##### ENVIRONMENT
* AWX version: 11.2.0
* AWX install method: setup.sh
* Ansible version: 2.9.7
* Operating System: Tower on RHEL, Ansible on Fedora
##### STEPS TO REPRODUCE
```
---
# tasks file for ansible_tower_genie_settings- name: Update Ansible Tower Settings
awx.awx.tower_settings:
name: "{{ tower_setting_item.name | default(omit) }}"
value: "{{ tower_setting_item.value | default(omit) }}"
tower_config_file: "{{ tower_config_file | default(omit) }}"
tower_host: "{{ tower_hostname | default(omit) }}"
tower_password: "{{ tower_password | default(omit) }}"
tower_username: "{{ tower_username | default(omit) }}"
validate_certs: "{{ validate_certs | default('false') }}"
loop: "{{ tower_settings }}"
loop_control:
loop_var: tower_setting_item
...
```
Data:
```
---
tower_settings:
- name: AWX_TASK_ENV
setting: {'GIT_SSL_NO_VERIFY': 'True'}
```
##### EXPECTED RESULTS
Error should be more helpful, gracefully handled vs flat out traceback if possible.
##### ACTUAL RESULTS
```
The full traceback is:
Traceback (most recent call last):
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 102, in <module>
_ansiballz_main()
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 94, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py", line 40, in invoke_module
runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)
File "/usr/lib64/python3.8/runpy.py", line 206, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib64/python3.8/runpy.py", line 96, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "/usr/lib64/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 184, in <module>
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 136, in main
File "/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py", line 95, in coerce_type
AttributeError: 'NoneType' object has no attribute 'startswith'
failed: [localhost] (item={'name': 'AWX_TASK_ENV', 'setting': {'GIT_SSL_NO_VERIFY': 'True'}}) => {
"ansible_loop_var": "tower_setting_item",
"changed": false,
"module_stderr": "Traceback (most recent call last):\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 102, in <module>\n _ansiballz_main()\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 94, in _ansiballz_main\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 40, in invoke_module\n runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)\n File \"/usr/lib64/python3.8/runpy.py\", line 206, in run_module\n return _run_module_code(code, init_globals, run_name, mod_spec)\n File \"/usr/lib64/python3.8/runpy.py\", line 96, in _run_module_code\n _run_code(code, mod_globals, init_globals,\n File \"/usr/lib64/python3.8/runpy.py\", line 86, in _run_code\n exec(code, run_globals)\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 184, in <module>\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 136, in main\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 95, in coerce_type\nAttributeError: 'NoneType' object has no attribute 'startswith'\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1,
"tower_setting_item": {
"name": "AWX_TASK_ENV",
"setting": {
"GIT_SSL_NO_VERIFY": "True"
}
}
}
```
##### ADDITIONAL INFORMATION
<!-- Include any links to sosreport, database dumps, screenshots or other
information. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx_collection/plugins/modules/tower_settings.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2018, Nikhil Jain <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 ANSIBLE_METADATA = {'metadata_version': '1.1',
12 'status': ['preview'],
13 'supported_by': 'community'}
14
15
16 DOCUMENTATION = '''
17 ---
18 module: tower_settings
19 author: "Nikhil Jain (@jainnikhil30)"
20 short_description: Modify Ansible Tower settings.
21 description:
22 - Modify Ansible Tower settings. See
23 U(https://www.ansible.com/tower) for an overview.
24 options:
25 name:
26 description:
27 - Name of setting to modify
28 type: str
29 value:
30 description:
31 - Value to be modified for given setting.
32 - If given a non-string type, will make best effort to cast it to type API expects.
33 - For better control over types, use the C(settings) param instead.
34 type: str
35 settings:
36 description:
37 - A data structure to be sent into the settings endpoint
38 type: dict
39 requirements:
40 - pyyaml
41 extends_documentation_fragment: awx.awx.auth
42 '''
43
44 EXAMPLES = '''
45 - name: Set the value of AWX_PROOT_BASE_PATH
46 tower_settings:
47 name: AWX_PROOT_BASE_PATH
48 value: "/tmp"
49 register: testing_settings
50
51 - name: Set the value of AWX_PROOT_SHOW_PATHS
52 tower_settings:
53 name: "AWX_PROOT_SHOW_PATHS"
54 value: "'/var/lib/awx/projects/', '/tmp'"
55 register: testing_settings
56
57 - name: Set the LDAP Auth Bind Password
58 tower_settings:
59 name: "AUTH_LDAP_BIND_PASSWORD"
60 value: "Password"
61 no_log: true
62
63 - name: Set all the LDAP Auth Bind Params
64 tower_settings:
65 settings:
66 AUTH_LDAP_BIND_PASSWORD: "password"
67 AUTH_LDAP_USER_ATTR_MAP:
68 email: "mail"
69 first_name: "givenName"
70 last_name: "surname"
71 '''
72
73 from ..module_utils.tower_api import TowerModule
74
75 try:
76 import yaml
77 HAS_YAML = True
78 except ImportError:
79 HAS_YAML = False
80
81
82 def coerce_type(module, value):
83 yaml_ish = bool((
84 value.startswith('{') and value.endswith('}')
85 ) or (
86 value.startswith('[') and value.endswith(']'))
87 )
88 if yaml_ish:
89 if not HAS_YAML:
90 module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
91 return yaml.safe_load(value)
92 elif value.lower in ('true', 'false', 't', 'f'):
93 return {'t': True, 'f': False}[value[0].lower()]
94 try:
95 return int(value)
96 except ValueError:
97 pass
98 return value
99
100
101 def main():
102 # Any additional arguments that are not fields of the item can be added here
103 argument_spec = dict(
104 name=dict(),
105 value=dict(),
106 settings=dict(type='dict'),
107 )
108
109 # Create a module for ourselves
110 module = TowerModule(
111 argument_spec=argument_spec,
112 required_one_of=[['name', 'settings']],
113 mutually_exclusive=[['name', 'settings']],
114 required_if=[['name', 'present', ['value']]]
115 )
116
117 # Extract our parameters
118 name = module.params.get('name')
119 value = module.params.get('value')
120 new_settings = module.params.get('settings')
121
122 # If we were given a name/value pair we will just make settings out of that and proceed normally
123 if new_settings is None:
124 new_value = coerce_type(module, value)
125
126 new_settings = {name: new_value}
127
128 # Load the existing settings
129 existing_settings = module.get_endpoint('settings/all')['json']
130
131 # Begin a json response
132 json_response = {'changed': False, 'old_values': {}}
133
134 # Check any of the settings to see if anything needs to be updated
135 needs_update = False
136 for a_setting in new_settings:
137 if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:
138 # At least one thing is different so we need to patch
139 needs_update = True
140 json_response['old_values'][a_setting] = existing_settings[a_setting]
141
142 # If nothing needs an update we can simply exit with the response (as not changed)
143 if not needs_update:
144 module.exit_json(**json_response)
145
146 # Make the call to update the settings
147 response = module.patch_endpoint('settings/all', **{'data': new_settings})
148
149 if response['status_code'] == 200:
150 # Set the changed response to True
151 json_response['changed'] = True
152
153 # To deal with the old style values we need to return 'value' in the response
154 new_values = {}
155 for a_setting in new_settings:
156 new_values[a_setting] = response['json'][a_setting]
157
158 # If we were using a name we will just add a value of a string, otherwise we will return an array in values
159 if name is not None:
160 json_response['value'] = new_values[name]
161 else:
162 json_response['values'] = new_values
163
164 module.exit_json(**json_response)
165 elif 'json' in response and '__all__' in response['json']:
166 module.fail_json(msg=response['json']['__all__'])
167 else:
168 module.fail_json(**{'msg': "Unable to update settings, see response", 'response': response})
169
170
171 if __name__ == '__main__':
172 main()
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py
--- a/awx_collection/plugins/modules/tower_settings.py
+++ b/awx_collection/plugins/modules/tower_settings.py
@@ -82,6 +82,10 @@
def coerce_type(module, value):
+ # If our value is already None we can just return directly
+ if value is None:
+ return value
+
yaml_ish = bool((
value.startswith('{') and value.endswith('}')
) or (
|
{"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py\n--- a/awx_collection/plugins/modules/tower_settings.py\n+++ b/awx_collection/plugins/modules/tower_settings.py\n@@ -82,6 +82,10 @@\n \n \n def coerce_type(module, value):\n+ # If our value is already None we can just return directly\n+ if value is None:\n+ return value\n+\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n", "issue": "Tower settings fails with stacktrace - expected to exit gracefully\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### SUMMARY\r\nRunning awx.awx.settings module throws `AttributeError: 'NoneType' object has no attribute 'startswith'`\r\n\r\n##### ENVIRONMENT\r\n* AWX version: 11.2.0\r\n* AWX install method: setup.sh\r\n* Ansible version: 2.9.7\r\n* Operating System: Tower on RHEL, Ansible on Fedora\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\n```\r\n---\r\n# tasks file for ansible_tower_genie_settings- name: Update Ansible Tower Settings\r\n awx.awx.tower_settings:\r\n name: \"{{ tower_setting_item.name | default(omit) }}\"\r\n value: \"{{ tower_setting_item.value | default(omit) }}\"\r\n tower_config_file: \"{{ tower_config_file | default(omit) }}\"\r\n tower_host: \"{{ tower_hostname | default(omit) }}\"\r\n tower_password: \"{{ tower_password | default(omit) }}\"\r\n tower_username: \"{{ tower_username | default(omit) }}\"\r\n validate_certs: \"{{ validate_certs | default('false') }}\"\r\n loop: \"{{ tower_settings }}\"\r\n loop_control:\r\n loop_var: tower_setting_item\r\n...\r\n\r\n```\r\nData:\r\n```\r\n---\r\ntower_settings:\r\n - name: AWX_TASK_ENV\r\n setting: {'GIT_SSL_NO_VERIFY': 'True'}\r\n```\r\n\r\n##### EXPECTED RESULTS\r\n\r\nError should be more helpful, gracefully handled vs flat out traceback if possible.\r\n\r\n##### ACTUAL RESULTS\r\n\r\n```\r\nThe full traceback is:\r\nTraceback (most recent call last):\r\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 102, in <module>\r\n _ansiballz_main()\r\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 94, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\", line 40, in invoke_module\r\n runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)\r\n File \"/usr/lib64/python3.8/runpy.py\", line 206, in run_module\r\n return _run_module_code(code, init_globals, run_name, mod_spec)\r\n File \"/usr/lib64/python3.8/runpy.py\", line 96, in _run_module_code\r\n _run_code(code, mod_globals, init_globals,\r\n File \"/usr/lib64/python3.8/runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 184, in <module>\r\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 136, in main\r\n File \"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\", line 95, in coerce_type\r\nAttributeError: 'NoneType' object has no attribute 'startswith'\r\nfailed: [localhost] (item={'name': 'AWX_TASK_ENV', 'setting': {'GIT_SSL_NO_VERIFY': 'True'}}) => {\r\n \"ansible_loop_var\": \"tower_setting_item\",\r\n \"changed\": false,\r\n \"module_stderr\": \"Traceback (most recent call last):\\n File \\\"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\\\", line 102, in <module>\\n _ansiballz_main()\\n File \\\"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\\\", line 94, in _ansiballz_main\\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\\n File \\\"/home/kkulkarni/.ansible/tmp/ansible-tmp-1591366383.968238-878504-224766098821440/AnsiballZ_tower_settings.py\\\", line 40, in invoke_module\\n runpy.run_module(mod_name='ansible_collections.awx.awx.plugins.modules.tower_settings', init_globals=None, run_name='__main__', alter_sys=True)\\n File \\\"/usr/lib64/python3.8/runpy.py\\\", line 206, in run_module\\n return _run_module_code(code, init_globals, run_name, mod_spec)\\n File \\\"/usr/lib64/python3.8/runpy.py\\\", line 96, in _run_module_code\\n _run_code(code, mod_globals, init_globals,\\n File \\\"/usr/lib64/python3.8/runpy.py\\\", line 86, in _run_code\\n exec(code, run_globals)\\n File \\\"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\\\", line 184, in <module>\\n File \\\"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\\\", line 136, in main\\n File \\\"/tmp/ansible_awx.awx.tower_settings_payload_x13vlw6v/ansible_awx.awx.tower_settings_payload.zip/ansible_collections/awx/awx/plugins/modules/tower_settings.py\\\", line 95, in coerce_type\\nAttributeError: 'NoneType' object has no attribute 'startswith'\\n\",\r\n \"module_stdout\": \"\",\r\n \"msg\": \"MODULE FAILURE\\nSee stdout/stderr for the exact error\",\r\n \"rc\": 1,\r\n \"tower_setting_item\": {\r\n \"name\": \"AWX_TASK_ENV\",\r\n \"setting\": {\r\n \"GIT_SSL_NO_VERIFY\": \"True\"\r\n }\r\n }\r\n}\r\n```\r\n\r\n##### ADDITIONAL INFORMATION\r\n\r\n<!-- Include any links to sosreport, database dumps, screenshots or other\r\ninformation. -->\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Ansible Tower settings.\ndescription:\n - Modify Ansible Tower settings. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_PROOT_BASE_PATH\n tower_settings:\n name: AWX_PROOT_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_PROOT_SHOW_PATHS\n tower_settings:\n name: \"AWX_PROOT_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n tower_settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n tower_settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.tower_api import TowerModule\n\ntry:\n import yaml\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n value.startswith('[') and value.endswith(']'))\n )\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = TowerModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]]\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_response = {'changed': False, 'old_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_response['old_values'][a_setting] = existing_settings[a_setting]\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n module.exit_json(**json_response)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if response['status_code'] == 200:\n # Set the changed response to True\n json_response['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_response['value'] = new_values[name]\n else:\n json_response['values'] = new_values\n\n module.exit_json(**json_response)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_settings.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nversion_added: \"2.7\"\nshort_description: Modify Ansible Tower settings.\ndescription:\n - Modify Ansible Tower settings. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\n version_added: \"3.7\"\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_PROOT_BASE_PATH\n tower_settings:\n name: AWX_PROOT_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_PROOT_SHOW_PATHS\n tower_settings:\n name: \"AWX_PROOT_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n tower_settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n tower_settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.tower_api import TowerModule\n\ntry:\n import yaml\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n value.startswith('[') and value.endswith(']'))\n )\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = TowerModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]]\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_response = {'changed': False, 'old_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_response['old_values'][a_setting] = existing_settings[a_setting]\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n module.exit_json(**json_response)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if response['status_code'] == 200:\n # Set the changed response to True\n json_response['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_response['value'] = new_values[name]\n else:\n json_response['values'] = new_values\n\n module.exit_json(**json_response)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_settings.py"}]}
| 3,644 | 122 |
gh_patches_debug_22224
|
rasdani/github-patches
|
git_diff
|
quantopian__zipline-1723
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs build script requires `path.py` without specifying it as a dependency.
We should probably just remove it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/deploy.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import print_function
3 from contextlib import contextmanager
4 from glob import glob
5 from path import path
6 import os
7 from os.path import abspath, basename, dirname, exists, isfile
8 from shutil import move, rmtree
9 from subprocess import check_call
10
11 HERE = dirname(abspath(__file__))
12 ZIPLINE_ROOT = dirname(HERE)
13 TEMP_LOCATION = '/tmp/zipline-doc'
14 TEMP_LOCATION_GLOB = TEMP_LOCATION + '/*'
15
16
17 @contextmanager
18 def removing(path):
19 try:
20 yield
21 finally:
22 rmtree(path)
23
24
25 def ensure_not_exists(path):
26 if not exists(path):
27 return
28 if isfile(path):
29 os.unlink(path)
30 else:
31 rmtree(path)
32
33
34 def main():
35 print("Moving to %s." % HERE)
36 with path(HERE):
37 print("Building docs with 'make html'")
38 check_call(['make', 'html'])
39
40 print("Clearing temp location '%s'" % TEMP_LOCATION)
41 rmtree(TEMP_LOCATION, ignore_errors=True)
42
43 with removing(TEMP_LOCATION):
44 print("Copying built files to temp location.")
45 move('build/html', TEMP_LOCATION)
46
47 print("Moving to '%s'" % ZIPLINE_ROOT)
48 os.chdir(ZIPLINE_ROOT)
49
50 print("Checking out gh-pages branch.")
51 check_call(
52 [
53 'git', 'branch', '-f',
54 '--track', 'gh-pages', 'origin/gh-pages'
55 ]
56 )
57 check_call(['git', 'checkout', 'gh-pages'])
58 check_call(['git', 'reset', '--hard', 'origin/gh-pages'])
59
60 print("Copying built files:")
61 for file_ in glob(TEMP_LOCATION_GLOB):
62 base = basename(file_)
63
64 print("%s -> %s" % (file_, base))
65 ensure_not_exists(base)
66 move(file_, '.')
67
68 print()
69 print("Updated documentation branch in directory %s" % ZIPLINE_ROOT)
70 print("If you are happy with these changes, commit and push to gh-pages.")
71
72 if __name__ == '__main__':
73 main()
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/deploy.py b/docs/deploy.py
--- a/docs/deploy.py
+++ b/docs/deploy.py
@@ -2,7 +2,6 @@
from __future__ import print_function
from contextlib import contextmanager
from glob import glob
-from path import path
import os
from os.path import abspath, basename, dirname, exists, isfile
from shutil import move, rmtree
@@ -32,8 +31,11 @@
def main():
+ old_dir = os.getcwd()
print("Moving to %s." % HERE)
- with path(HERE):
+ os.chdir(HERE)
+
+ try:
print("Building docs with 'make html'")
check_call(['make', 'html'])
@@ -64,6 +66,8 @@
print("%s -> %s" % (file_, base))
ensure_not_exists(base)
move(file_, '.')
+ finally:
+ os.chdir(old_dir)
print()
print("Updated documentation branch in directory %s" % ZIPLINE_ROOT)
|
{"golden_diff": "diff --git a/docs/deploy.py b/docs/deploy.py\n--- a/docs/deploy.py\n+++ b/docs/deploy.py\n@@ -2,7 +2,6 @@\n from __future__ import print_function\n from contextlib import contextmanager\n from glob import glob\n-from path import path\n import os\n from os.path import abspath, basename, dirname, exists, isfile\n from shutil import move, rmtree\n@@ -32,8 +31,11 @@\n \n \n def main():\n+ old_dir = os.getcwd()\n print(\"Moving to %s.\" % HERE)\n- with path(HERE):\n+ os.chdir(HERE)\n+\n+ try:\n print(\"Building docs with 'make html'\")\n check_call(['make', 'html'])\n \n@@ -64,6 +66,8 @@\n print(\"%s -> %s\" % (file_, base))\n ensure_not_exists(base)\n move(file_, '.')\n+ finally:\n+ os.chdir(old_dir)\n \n print()\n print(\"Updated documentation branch in directory %s\" % ZIPLINE_ROOT)\n", "issue": "Docs build script requires `path.py` without specifying it as a dependency.\nWe should probably just remove it.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom contextlib import contextmanager\nfrom glob import glob\nfrom path import path\nimport os\nfrom os.path import abspath, basename, dirname, exists, isfile\nfrom shutil import move, rmtree\nfrom subprocess import check_call\n\nHERE = dirname(abspath(__file__))\nZIPLINE_ROOT = dirname(HERE)\nTEMP_LOCATION = '/tmp/zipline-doc'\nTEMP_LOCATION_GLOB = TEMP_LOCATION + '/*'\n\n\n@contextmanager\ndef removing(path):\n try:\n yield\n finally:\n rmtree(path)\n\n\ndef ensure_not_exists(path):\n if not exists(path):\n return\n if isfile(path):\n os.unlink(path)\n else:\n rmtree(path)\n\n\ndef main():\n print(\"Moving to %s.\" % HERE)\n with path(HERE):\n print(\"Building docs with 'make html'\")\n check_call(['make', 'html'])\n\n print(\"Clearing temp location '%s'\" % TEMP_LOCATION)\n rmtree(TEMP_LOCATION, ignore_errors=True)\n\n with removing(TEMP_LOCATION):\n print(\"Copying built files to temp location.\")\n move('build/html', TEMP_LOCATION)\n\n print(\"Moving to '%s'\" % ZIPLINE_ROOT)\n os.chdir(ZIPLINE_ROOT)\n\n print(\"Checking out gh-pages branch.\")\n check_call(\n [\n 'git', 'branch', '-f',\n '--track', 'gh-pages', 'origin/gh-pages'\n ]\n )\n check_call(['git', 'checkout', 'gh-pages'])\n check_call(['git', 'reset', '--hard', 'origin/gh-pages'])\n\n print(\"Copying built files:\")\n for file_ in glob(TEMP_LOCATION_GLOB):\n base = basename(file_)\n\n print(\"%s -> %s\" % (file_, base))\n ensure_not_exists(base)\n move(file_, '.')\n\n print()\n print(\"Updated documentation branch in directory %s\" % ZIPLINE_ROOT)\n print(\"If you are happy with these changes, commit and push to gh-pages.\")\n\nif __name__ == '__main__':\n main()\n", "path": "docs/deploy.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function\nfrom contextlib import contextmanager\nfrom glob import glob\nimport os\nfrom os.path import abspath, basename, dirname, exists, isfile\nfrom shutil import move, rmtree\nfrom subprocess import check_call\n\nHERE = dirname(abspath(__file__))\nZIPLINE_ROOT = dirname(HERE)\nTEMP_LOCATION = '/tmp/zipline-doc'\nTEMP_LOCATION_GLOB = TEMP_LOCATION + '/*'\n\n\n@contextmanager\ndef removing(path):\n try:\n yield\n finally:\n rmtree(path)\n\n\ndef ensure_not_exists(path):\n if not exists(path):\n return\n if isfile(path):\n os.unlink(path)\n else:\n rmtree(path)\n\n\ndef main():\n old_dir = os.getcwd()\n print(\"Moving to %s.\" % HERE)\n os.chdir(HERE)\n\n try:\n print(\"Building docs with 'make html'\")\n check_call(['make', 'html'])\n\n print(\"Clearing temp location '%s'\" % TEMP_LOCATION)\n rmtree(TEMP_LOCATION, ignore_errors=True)\n\n with removing(TEMP_LOCATION):\n print(\"Copying built files to temp location.\")\n move('build/html', TEMP_LOCATION)\n\n print(\"Moving to '%s'\" % ZIPLINE_ROOT)\n os.chdir(ZIPLINE_ROOT)\n\n print(\"Checking out gh-pages branch.\")\n check_call(\n [\n 'git', 'branch', '-f',\n '--track', 'gh-pages', 'origin/gh-pages'\n ]\n )\n check_call(['git', 'checkout', 'gh-pages'])\n check_call(['git', 'reset', '--hard', 'origin/gh-pages'])\n\n print(\"Copying built files:\")\n for file_ in glob(TEMP_LOCATION_GLOB):\n base = basename(file_)\n\n print(\"%s -> %s\" % (file_, base))\n ensure_not_exists(base)\n move(file_, '.')\n finally:\n os.chdir(old_dir)\n\n print()\n print(\"Updated documentation branch in directory %s\" % ZIPLINE_ROOT)\n print(\"If you are happy with these changes, commit and push to gh-pages.\")\n\nif __name__ == '__main__':\n main()\n", "path": "docs/deploy.py"}]}
| 883 | 236 |
gh_patches_debug_28957
|
rasdani/github-patches
|
git_diff
|
dask__distributed-2862
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scheduler timeout fails to exit `dask-scheduler` process
The scheduler timeout introduced in #2652 seems to successfully close the scheduler comms, however the process doesn't exit.
I would expect the process to also exit when the scheduler has closed.
```console
$ pip install git+https://github.com/dask/distributed.git
$ export DASK_DISTRIBUTED__SCHEDULER__IDLE_TIMEOUT='200 ms'
$ python -c 'import distributed; print(distributed.__version__)'
2.1.0+4.ge5ec8daa
$ dask-scheduler
distributed.scheduler - INFO - -----------------------------------------------
distributed.scheduler - INFO - Clear task state
distributed.dashboard.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: pip install jupyter-server-proxy
distributed.scheduler - INFO - Scheduler at: tcp://172.17.0.2:8786
distributed.scheduler - INFO - dashboard at: :8787
distributed.scheduler - INFO - Local Directory: /tmp/scheduler-gx0ramly
distributed.scheduler - INFO - -----------------------------------------------
distributed.scheduler - INFO - Scheduler closing...
distributed.scheduler - INFO - Scheduler closing all comms
... (hit enter a bunch of times and waited a while here before hitting CTRL+c)...
^Cdistributed.scheduler - INFO - End scheduler at 'tcp://172.17.0.2:8786'
```
The scheduler should exit cleanly to satisfy the requirements in #2346.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/cli/dask_scheduler.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2
3 import atexit
4 import dask
5 import logging
6 import gc
7 import os
8 import re
9 import shutil
10 import sys
11 import tempfile
12 import warnings
13
14 import click
15
16 from tornado.ioloop import IOLoop
17
18 from distributed import Scheduler
19 from distributed.security import Security
20 from distributed.cli.utils import check_python_3, install_signal_handlers
21 from distributed.preloading import preload_modules, validate_preload_argv
22 from distributed.proctitle import (
23 enable_proctitle_on_children,
24 enable_proctitle_on_current,
25 )
26
27 logger = logging.getLogger("distributed.scheduler")
28
29
30 pem_file_option_type = click.Path(exists=True, resolve_path=True)
31
32
33 @click.command(context_settings=dict(ignore_unknown_options=True))
34 @click.option("--host", type=str, default="", help="URI, IP or hostname of this server")
35 @click.option("--port", type=int, default=None, help="Serving port")
36 @click.option(
37 "--interface",
38 type=str,
39 default=None,
40 help="Preferred network interface like 'eth0' or 'ib0'",
41 )
42 @click.option(
43 "--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
44 )
45 @click.option(
46 "--tls-ca-file",
47 type=pem_file_option_type,
48 default=None,
49 help="CA cert(s) file for TLS (in PEM format)",
50 )
51 @click.option(
52 "--tls-cert",
53 type=pem_file_option_type,
54 default=None,
55 help="certificate file for TLS (in PEM format)",
56 )
57 @click.option(
58 "--tls-key",
59 type=pem_file_option_type,
60 default=None,
61 help="private key file for TLS (in PEM format)",
62 )
63 # XXX default port (or URI) values should be centralized somewhere
64 @click.option(
65 "--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
66 )
67 @click.option(
68 "--dashboard-address",
69 type=str,
70 default=":8787",
71 help="Address on which to listen for diagnostics dashboard",
72 )
73 @click.option(
74 "--dashboard/--no-dashboard",
75 "dashboard",
76 default=True,
77 show_default=True,
78 required=False,
79 help="Launch the Dashboard",
80 )
81 @click.option(
82 "--bokeh/--no-bokeh",
83 "bokeh",
84 default=None,
85 required=False,
86 help="Deprecated. See --dashboard/--no-dashboard.",
87 )
88 @click.option("--show/--no-show", default=False, help="Show web UI")
89 @click.option(
90 "--dashboard-prefix", type=str, default=None, help="Prefix for the dashboard app"
91 )
92 @click.option(
93 "--use-xheaders",
94 type=bool,
95 default=False,
96 show_default=True,
97 help="User xheaders in dashboard app for ssl termination in header",
98 )
99 @click.option("--pid-file", type=str, default="", help="File to write the process PID")
100 @click.option(
101 "--scheduler-file",
102 type=str,
103 default="",
104 help="File to write connection information. "
105 "This may be a good way to share connection information if your "
106 "cluster is on a shared network file system.",
107 )
108 @click.option(
109 "--local-directory", default="", type=str, help="Directory to place scheduler files"
110 )
111 @click.option(
112 "--preload",
113 type=str,
114 multiple=True,
115 is_eager=True,
116 default="",
117 help="Module that should be loaded by the scheduler process "
118 'like "foo.bar" or "/path/to/foo.py".',
119 )
120 @click.argument(
121 "preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
122 )
123 @click.version_option()
124 def main(
125 host,
126 port,
127 bokeh_port,
128 show,
129 dashboard,
130 bokeh,
131 dashboard_prefix,
132 use_xheaders,
133 pid_file,
134 scheduler_file,
135 interface,
136 protocol,
137 local_directory,
138 preload,
139 preload_argv,
140 tls_ca_file,
141 tls_cert,
142 tls_key,
143 dashboard_address,
144 ):
145 g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653
146 gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
147
148 enable_proctitle_on_current()
149 enable_proctitle_on_children()
150
151 if bokeh_port is not None:
152 warnings.warn(
153 "The --bokeh-port flag has been renamed to --dashboard-address. "
154 "Consider adding ``--dashboard-address :%d`` " % bokeh_port
155 )
156 dashboard_address = bokeh_port
157 if bokeh is not None:
158 warnings.warn(
159 "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
160 )
161 dashboard = bokeh
162
163 if port is None and (not host or not re.search(r":\d", host)):
164 port = 8786
165
166 sec = Security(
167 tls_ca_file=tls_ca_file, tls_scheduler_cert=tls_cert, tls_scheduler_key=tls_key
168 )
169
170 if not host and (tls_ca_file or tls_cert or tls_key):
171 host = "tls://"
172
173 if pid_file:
174 with open(pid_file, "w") as f:
175 f.write(str(os.getpid()))
176
177 def del_pid_file():
178 if os.path.exists(pid_file):
179 os.remove(pid_file)
180
181 atexit.register(del_pid_file)
182
183 local_directory_created = False
184 if local_directory:
185 if not os.path.exists(local_directory):
186 os.mkdir(local_directory)
187 local_directory_created = True
188 else:
189 local_directory = tempfile.mkdtemp(prefix="scheduler-")
190 local_directory_created = True
191 if local_directory not in sys.path:
192 sys.path.insert(0, local_directory)
193
194 if sys.platform.startswith("linux"):
195 import resource # module fails importing on Windows
196
197 soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
198 limit = max(soft, hard // 2)
199 resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
200
201 loop = IOLoop.current()
202 logger.info("-" * 47)
203
204 scheduler = Scheduler(
205 loop=loop,
206 scheduler_file=scheduler_file,
207 security=sec,
208 host=host,
209 port=port,
210 interface=interface,
211 protocol=protocol,
212 dashboard_address=dashboard_address if dashboard else None,
213 service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
214 )
215 scheduler.start()
216 if not preload:
217 preload = dask.config.get("distributed.scheduler.preload")
218 if not preload_argv:
219 preload_argv = dask.config.get("distributed.scheduler.preload-argv")
220 preload_modules(
221 preload, parameter=scheduler, file_dir=local_directory, argv=preload_argv
222 )
223
224 logger.info("Local Directory: %26s", local_directory)
225 logger.info("-" * 47)
226
227 install_signal_handlers(loop)
228
229 try:
230 loop.start()
231 loop.close()
232 finally:
233 scheduler.stop()
234 if local_directory_created:
235 shutil.rmtree(local_directory)
236
237 logger.info("End scheduler at %r", scheduler.address)
238
239
240 def go():
241 check_python_3()
242 main()
243
244
245 if __name__ == "__main__":
246 go()
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/distributed/cli/dask_scheduler.py b/distributed/cli/dask_scheduler.py
--- a/distributed/cli/dask_scheduler.py
+++ b/distributed/cli/dask_scheduler.py
@@ -120,6 +120,12 @@
@click.argument(
"preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
[email protected](
+ "--idle-timeout",
+ default=None,
+ type=str,
+ help="Time of inactivity after which to kill the scheduler",
+)
@click.version_option()
def main(
host,
@@ -141,6 +147,7 @@
tls_cert,
tls_key,
dashboard_address,
+ idle_timeout,
):
g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653
gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
@@ -211,6 +218,7 @@
protocol=protocol,
dashboard_address=dashboard_address if dashboard else None,
service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
+ idle_timeout=idle_timeout,
)
scheduler.start()
if not preload:
@@ -226,9 +234,12 @@
install_signal_handlers(loop)
+ async def run():
+ await scheduler
+ await scheduler.finished()
+
try:
- loop.start()
- loop.close()
+ loop.run_sync(run)
finally:
scheduler.stop()
if local_directory_created:
|
{"golden_diff": "diff --git a/distributed/cli/dask_scheduler.py b/distributed/cli/dask_scheduler.py\n--- a/distributed/cli/dask_scheduler.py\n+++ b/distributed/cli/dask_scheduler.py\n@@ -120,6 +120,12 @@\n @click.argument(\n \"preload_argv\", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv\n )\[email protected](\n+ \"--idle-timeout\",\n+ default=None,\n+ type=str,\n+ help=\"Time of inactivity after which to kill the scheduler\",\n+)\n @click.version_option()\n def main(\n host,\n@@ -141,6 +147,7 @@\n tls_cert,\n tls_key,\n dashboard_address,\n+ idle_timeout,\n ):\n g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653\n gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)\n@@ -211,6 +218,7 @@\n protocol=protocol,\n dashboard_address=dashboard_address if dashboard else None,\n service_kwargs={\"dashboard\": {\"prefix\": dashboard_prefix}},\n+ idle_timeout=idle_timeout,\n )\n scheduler.start()\n if not preload:\n@@ -226,9 +234,12 @@\n \n install_signal_handlers(loop)\n \n+ async def run():\n+ await scheduler\n+ await scheduler.finished()\n+\n try:\n- loop.start()\n- loop.close()\n+ loop.run_sync(run)\n finally:\n scheduler.stop()\n if local_directory_created:\n", "issue": "Scheduler timeout fails to exit `dask-scheduler` process\nThe scheduler timeout introduced in #2652 seems to successfully close the scheduler comms, however the process doesn't exit.\r\n\r\nI would expect the process to also exit when the scheduler has closed.\r\n\r\n```console\r\n$ pip install git+https://github.com/dask/distributed.git\r\n\r\n$ export DASK_DISTRIBUTED__SCHEDULER__IDLE_TIMEOUT='200 ms'\r\n\r\n$ python -c 'import distributed; print(distributed.__version__)' \r\n2.1.0+4.ge5ec8daa \r\n\r\n$ dask-scheduler \r\ndistributed.scheduler - INFO - ----------------------------------------------- \r\ndistributed.scheduler - INFO - Clear task state \r\ndistributed.dashboard.proxy - INFO - To route to workers diagnostics web server please install jupyter-server-proxy: pip install jupyter-server-proxy\r\ndistributed.scheduler - INFO - Scheduler at: tcp://172.17.0.2:8786 \r\ndistributed.scheduler - INFO - dashboard at: :8787\r\ndistributed.scheduler - INFO - Local Directory: /tmp/scheduler-gx0ramly\r\ndistributed.scheduler - INFO - -----------------------------------------------\r\ndistributed.scheduler - INFO - Scheduler closing... \r\ndistributed.scheduler - INFO - Scheduler closing all comms\r\n\r\n\r\n... (hit enter a bunch of times and waited a while here before hitting CTRL+c)...\r\n\r\n\r\n^Cdistributed.scheduler - INFO - End scheduler at 'tcp://172.17.0.2:8786' \r\n```\r\n\r\nThe scheduler should exit cleanly to satisfy the requirements in #2346.\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport dask\nimport logging\nimport gc\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\n\nimport click\n\nfrom tornado.ioloop import IOLoop\n\nfrom distributed import Scheduler\nfrom distributed.security import Security\nfrom distributed.cli.utils import check_python_3, install_signal_handlers\nfrom distributed.preloading import preload_modules, validate_preload_argv\nfrom distributed.proctitle import (\n enable_proctitle_on_children,\n enable_proctitle_on_current,\n)\n\nlogger = logging.getLogger(\"distributed.scheduler\")\n\n\npem_file_option_type = click.Path(exists=True, resolve_path=True)\n\n\[email protected](context_settings=dict(ignore_unknown_options=True))\[email protected](\"--host\", type=str, default=\"\", help=\"URI, IP or hostname of this server\")\[email protected](\"--port\", type=int, default=None, help=\"Serving port\")\[email protected](\n \"--interface\",\n type=str,\n default=None,\n help=\"Preferred network interface like 'eth0' or 'ib0'\",\n)\[email protected](\n \"--protocol\", type=str, default=None, help=\"Protocol like tcp, tls, or ucx\"\n)\[email protected](\n \"--tls-ca-file\",\n type=pem_file_option_type,\n default=None,\n help=\"CA cert(s) file for TLS (in PEM format)\",\n)\[email protected](\n \"--tls-cert\",\n type=pem_file_option_type,\n default=None,\n help=\"certificate file for TLS (in PEM format)\",\n)\[email protected](\n \"--tls-key\",\n type=pem_file_option_type,\n default=None,\n help=\"private key file for TLS (in PEM format)\",\n)\n# XXX default port (or URI) values should be centralized somewhere\[email protected](\n \"--bokeh-port\", type=int, default=None, help=\"Deprecated. See --dashboard-address\"\n)\[email protected](\n \"--dashboard-address\",\n type=str,\n default=\":8787\",\n help=\"Address on which to listen for diagnostics dashboard\",\n)\[email protected](\n \"--dashboard/--no-dashboard\",\n \"dashboard\",\n default=True,\n show_default=True,\n required=False,\n help=\"Launch the Dashboard\",\n)\[email protected](\n \"--bokeh/--no-bokeh\",\n \"bokeh\",\n default=None,\n required=False,\n help=\"Deprecated. See --dashboard/--no-dashboard.\",\n)\[email protected](\"--show/--no-show\", default=False, help=\"Show web UI\")\[email protected](\n \"--dashboard-prefix\", type=str, default=None, help=\"Prefix for the dashboard app\"\n)\[email protected](\n \"--use-xheaders\",\n type=bool,\n default=False,\n show_default=True,\n help=\"User xheaders in dashboard app for ssl termination in header\",\n)\[email protected](\"--pid-file\", type=str, default=\"\", help=\"File to write the process PID\")\[email protected](\n \"--scheduler-file\",\n type=str,\n default=\"\",\n help=\"File to write connection information. \"\n \"This may be a good way to share connection information if your \"\n \"cluster is on a shared network file system.\",\n)\[email protected](\n \"--local-directory\", default=\"\", type=str, help=\"Directory to place scheduler files\"\n)\[email protected](\n \"--preload\",\n type=str,\n multiple=True,\n is_eager=True,\n default=\"\",\n help=\"Module that should be loaded by the scheduler process \"\n 'like \"foo.bar\" or \"/path/to/foo.py\".',\n)\[email protected](\n \"preload_argv\", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv\n)\[email protected]_option()\ndef main(\n host,\n port,\n bokeh_port,\n show,\n dashboard,\n bokeh,\n dashboard_prefix,\n use_xheaders,\n pid_file,\n scheduler_file,\n interface,\n protocol,\n local_directory,\n preload,\n preload_argv,\n tls_ca_file,\n tls_cert,\n tls_key,\n dashboard_address,\n):\n g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653\n gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)\n\n enable_proctitle_on_current()\n enable_proctitle_on_children()\n\n if bokeh_port is not None:\n warnings.warn(\n \"The --bokeh-port flag has been renamed to --dashboard-address. \"\n \"Consider adding ``--dashboard-address :%d`` \" % bokeh_port\n )\n dashboard_address = bokeh_port\n if bokeh is not None:\n warnings.warn(\n \"The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. \"\n )\n dashboard = bokeh\n\n if port is None and (not host or not re.search(r\":\\d\", host)):\n port = 8786\n\n sec = Security(\n tls_ca_file=tls_ca_file, tls_scheduler_cert=tls_cert, tls_scheduler_key=tls_key\n )\n\n if not host and (tls_ca_file or tls_cert or tls_key):\n host = \"tls://\"\n\n if pid_file:\n with open(pid_file, \"w\") as f:\n f.write(str(os.getpid()))\n\n def del_pid_file():\n if os.path.exists(pid_file):\n os.remove(pid_file)\n\n atexit.register(del_pid_file)\n\n local_directory_created = False\n if local_directory:\n if not os.path.exists(local_directory):\n os.mkdir(local_directory)\n local_directory_created = True\n else:\n local_directory = tempfile.mkdtemp(prefix=\"scheduler-\")\n local_directory_created = True\n if local_directory not in sys.path:\n sys.path.insert(0, local_directory)\n\n if sys.platform.startswith(\"linux\"):\n import resource # module fails importing on Windows\n\n soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n limit = max(soft, hard // 2)\n resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))\n\n loop = IOLoop.current()\n logger.info(\"-\" * 47)\n\n scheduler = Scheduler(\n loop=loop,\n scheduler_file=scheduler_file,\n security=sec,\n host=host,\n port=port,\n interface=interface,\n protocol=protocol,\n dashboard_address=dashboard_address if dashboard else None,\n service_kwargs={\"dashboard\": {\"prefix\": dashboard_prefix}},\n )\n scheduler.start()\n if not preload:\n preload = dask.config.get(\"distributed.scheduler.preload\")\n if not preload_argv:\n preload_argv = dask.config.get(\"distributed.scheduler.preload-argv\")\n preload_modules(\n preload, parameter=scheduler, file_dir=local_directory, argv=preload_argv\n )\n\n logger.info(\"Local Directory: %26s\", local_directory)\n logger.info(\"-\" * 47)\n\n install_signal_handlers(loop)\n\n try:\n loop.start()\n loop.close()\n finally:\n scheduler.stop()\n if local_directory_created:\n shutil.rmtree(local_directory)\n\n logger.info(\"End scheduler at %r\", scheduler.address)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == \"__main__\":\n go()\n", "path": "distributed/cli/dask_scheduler.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport atexit\nimport dask\nimport logging\nimport gc\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport warnings\n\nimport click\n\nfrom tornado.ioloop import IOLoop\n\nfrom distributed import Scheduler\nfrom distributed.security import Security\nfrom distributed.cli.utils import check_python_3, install_signal_handlers\nfrom distributed.preloading import preload_modules, validate_preload_argv\nfrom distributed.proctitle import (\n enable_proctitle_on_children,\n enable_proctitle_on_current,\n)\n\nlogger = logging.getLogger(\"distributed.scheduler\")\n\n\npem_file_option_type = click.Path(exists=True, resolve_path=True)\n\n\[email protected](context_settings=dict(ignore_unknown_options=True))\[email protected](\"--host\", type=str, default=\"\", help=\"URI, IP or hostname of this server\")\[email protected](\"--port\", type=int, default=None, help=\"Serving port\")\[email protected](\n \"--interface\",\n type=str,\n default=None,\n help=\"Preferred network interface like 'eth0' or 'ib0'\",\n)\[email protected](\n \"--protocol\", type=str, default=None, help=\"Protocol like tcp, tls, or ucx\"\n)\[email protected](\n \"--tls-ca-file\",\n type=pem_file_option_type,\n default=None,\n help=\"CA cert(s) file for TLS (in PEM format)\",\n)\[email protected](\n \"--tls-cert\",\n type=pem_file_option_type,\n default=None,\n help=\"certificate file for TLS (in PEM format)\",\n)\[email protected](\n \"--tls-key\",\n type=pem_file_option_type,\n default=None,\n help=\"private key file for TLS (in PEM format)\",\n)\n# XXX default port (or URI) values should be centralized somewhere\[email protected](\n \"--bokeh-port\", type=int, default=None, help=\"Deprecated. See --dashboard-address\"\n)\[email protected](\n \"--dashboard-address\",\n type=str,\n default=\":8787\",\n help=\"Address on which to listen for diagnostics dashboard\",\n)\[email protected](\n \"--dashboard/--no-dashboard\",\n \"dashboard\",\n default=True,\n show_default=True,\n required=False,\n help=\"Launch the Dashboard\",\n)\[email protected](\n \"--bokeh/--no-bokeh\",\n \"bokeh\",\n default=None,\n required=False,\n help=\"Deprecated. See --dashboard/--no-dashboard.\",\n)\[email protected](\"--show/--no-show\", default=False, help=\"Show web UI\")\[email protected](\n \"--dashboard-prefix\", type=str, default=None, help=\"Prefix for the dashboard app\"\n)\[email protected](\n \"--use-xheaders\",\n type=bool,\n default=False,\n show_default=True,\n help=\"User xheaders in dashboard app for ssl termination in header\",\n)\[email protected](\"--pid-file\", type=str, default=\"\", help=\"File to write the process PID\")\[email protected](\n \"--scheduler-file\",\n type=str,\n default=\"\",\n help=\"File to write connection information. \"\n \"This may be a good way to share connection information if your \"\n \"cluster is on a shared network file system.\",\n)\[email protected](\n \"--local-directory\", default=\"\", type=str, help=\"Directory to place scheduler files\"\n)\[email protected](\n \"--preload\",\n type=str,\n multiple=True,\n is_eager=True,\n default=\"\",\n help=\"Module that should be loaded by the scheduler process \"\n 'like \"foo.bar\" or \"/path/to/foo.py\".',\n)\[email protected](\n \"preload_argv\", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv\n)\[email protected](\n \"--idle-timeout\",\n default=None,\n type=str,\n help=\"Time of inactivity after which to kill the scheduler\",\n)\[email protected]_option()\ndef main(\n host,\n port,\n bokeh_port,\n show,\n dashboard,\n bokeh,\n dashboard_prefix,\n use_xheaders,\n pid_file,\n scheduler_file,\n interface,\n protocol,\n local_directory,\n preload,\n preload_argv,\n tls_ca_file,\n tls_cert,\n tls_key,\n dashboard_address,\n idle_timeout,\n):\n g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653\n gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)\n\n enable_proctitle_on_current()\n enable_proctitle_on_children()\n\n if bokeh_port is not None:\n warnings.warn(\n \"The --bokeh-port flag has been renamed to --dashboard-address. \"\n \"Consider adding ``--dashboard-address :%d`` \" % bokeh_port\n )\n dashboard_address = bokeh_port\n if bokeh is not None:\n warnings.warn(\n \"The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. \"\n )\n dashboard = bokeh\n\n if port is None and (not host or not re.search(r\":\\d\", host)):\n port = 8786\n\n sec = Security(\n tls_ca_file=tls_ca_file, tls_scheduler_cert=tls_cert, tls_scheduler_key=tls_key\n )\n\n if not host and (tls_ca_file or tls_cert or tls_key):\n host = \"tls://\"\n\n if pid_file:\n with open(pid_file, \"w\") as f:\n f.write(str(os.getpid()))\n\n def del_pid_file():\n if os.path.exists(pid_file):\n os.remove(pid_file)\n\n atexit.register(del_pid_file)\n\n local_directory_created = False\n if local_directory:\n if not os.path.exists(local_directory):\n os.mkdir(local_directory)\n local_directory_created = True\n else:\n local_directory = tempfile.mkdtemp(prefix=\"scheduler-\")\n local_directory_created = True\n if local_directory not in sys.path:\n sys.path.insert(0, local_directory)\n\n if sys.platform.startswith(\"linux\"):\n import resource # module fails importing on Windows\n\n soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n limit = max(soft, hard // 2)\n resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))\n\n loop = IOLoop.current()\n logger.info(\"-\" * 47)\n\n scheduler = Scheduler(\n loop=loop,\n scheduler_file=scheduler_file,\n security=sec,\n host=host,\n port=port,\n interface=interface,\n protocol=protocol,\n dashboard_address=dashboard_address if dashboard else None,\n service_kwargs={\"dashboard\": {\"prefix\": dashboard_prefix}},\n idle_timeout=idle_timeout,\n )\n scheduler.start()\n if not preload:\n preload = dask.config.get(\"distributed.scheduler.preload\")\n if not preload_argv:\n preload_argv = dask.config.get(\"distributed.scheduler.preload-argv\")\n preload_modules(\n preload, parameter=scheduler, file_dir=local_directory, argv=preload_argv\n )\n\n logger.info(\"Local Directory: %26s\", local_directory)\n logger.info(\"-\" * 47)\n\n install_signal_handlers(loop)\n\n async def run():\n await scheduler\n await scheduler.finished()\n\n try:\n loop.run_sync(run)\n finally:\n scheduler.stop()\n if local_directory_created:\n shutil.rmtree(local_directory)\n\n logger.info(\"End scheduler at %r\", scheduler.address)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == \"__main__\":\n go()\n", "path": "distributed/cli/dask_scheduler.py"}]}
| 2,803 | 354 |
gh_patches_debug_15605
|
rasdani/github-patches
|
git_diff
|
pypa__virtualenv-1758
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
virtualenv raises StopIteration exception when processing extra search directory wheels.
**Issue**
When creating virtualenv when using `--extra-search-directory`, ``StopIteration`` exception is raised when processing wheels.
**Environment**
Provide at least:
- OS: Centos7
- ``virtualenv`` is installed:
virtualenv version: 20.0.15
```console
Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib64/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib64/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/site-packages/virtualenv/seed/via_app_data/via_app_data.py", line 75, in _get
self.app_data,
File "/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py", line 30, in get_wheels
acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)
File "/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py", line 74, in acquire_from_dir
if wheel_support_py(filename, for_py_version):
File "/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py", line 91, in wheel_support_py
requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))
StopIteration
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/seed/embed/wheels/acquire.py`
Content:
```
1 """Bootstrap"""
2 from __future__ import absolute_import, unicode_literals
3
4 import logging
5 import os
6 import sys
7 from collections import defaultdict
8 from contextlib import contextmanager
9 from copy import copy
10 from shutil import copy2
11 from zipfile import ZipFile
12
13 from virtualenv.info import IS_ZIPAPP
14 from virtualenv.util.path import Path
15 from virtualenv.util.six import ensure_str, ensure_text
16 from virtualenv.util.subprocess import Popen, subprocess
17 from virtualenv.util.zipapp import ensure_file_on_disk
18
19 from . import BUNDLE_SUPPORT, MAX
20
21 BUNDLE_FOLDER = Path(os.path.abspath(__file__)).parent
22
23
24 def get_wheels(for_py_version, wheel_cache_dir, extra_search_dir, download, packages, app_data):
25 # not all wheels are compatible with all python versions, so we need to py version qualify it
26 processed = copy(packages)
27 # 1. acquire from bundle
28 acquire_from_bundle(processed, for_py_version, wheel_cache_dir)
29 # 2. acquire from extra search dir
30 acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)
31 # 3. download from the internet
32 if download and processed:
33 download_wheel(processed, for_py_version, wheel_cache_dir, app_data)
34
35 # in the end just get the wheels
36 wheels = _get_wheels(wheel_cache_dir, packages)
37 return {p: next(iter(ver_to_files))[1] for p, ver_to_files in wheels.items()}
38
39
40 def acquire_from_bundle(packages, for_py_version, to_folder):
41 for pkg, version in list(packages.items()):
42 bundle = get_bundled_wheel(pkg, for_py_version)
43 if bundle is not None:
44 pkg_version = bundle.stem.split("-")[1]
45 exact_version_match = version == pkg_version
46 if exact_version_match:
47 del packages[pkg]
48 if version is None or exact_version_match:
49 bundled_wheel_file = to_folder / bundle.name
50 if not bundled_wheel_file.exists():
51 logging.debug("get bundled wheel %s", bundle)
52 if IS_ZIPAPP:
53 from virtualenv.util.zipapp import extract
54
55 extract(bundle, bundled_wheel_file)
56 else:
57 copy2(str(bundle), str(bundled_wheel_file))
58
59
60 def get_bundled_wheel(package, version_release):
61 return BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(version_release, {}) or BUNDLE_SUPPORT[MAX]).get(package)
62
63
64 def acquire_from_dir(packages, for_py_version, to_folder, extra_search_dir):
65 if not packages:
66 return
67 for search_dir in extra_search_dir:
68 wheels = _get_wheels(search_dir, packages)
69 for pkg, ver_wheels in wheels.items():
70 stop = False
71 for _, filename in ver_wheels:
72 dest = to_folder / filename.name
73 if not dest.exists():
74 if wheel_support_py(filename, for_py_version):
75 logging.debug("get extra search dir wheel %s", filename)
76 copy2(str(filename), str(dest))
77 stop = True
78 else:
79 stop = True
80 if stop and packages[pkg] is not None:
81 del packages[pkg]
82 break
83
84
85 def wheel_support_py(filename, py_version):
86 name = "{}.dist-info/METADATA".format("-".join(filename.stem.split("-")[0:2]))
87 with ZipFile(ensure_text(str(filename)), "r") as zip_file:
88 metadata = zip_file.read(name).decode("utf-8")
89 marker = "Requires-Python:"
90 requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))
91 py_version_int = tuple(int(i) for i in py_version.split("."))
92 for require in (i.strip() for i in requires.split(",")):
93 # https://www.python.org/dev/peps/pep-0345/#version-specifiers
94 for operator, check in [
95 ("!=", lambda v: py_version_int != v),
96 ("==", lambda v: py_version_int == v),
97 ("<=", lambda v: py_version_int <= v),
98 (">=", lambda v: py_version_int >= v),
99 ("<", lambda v: py_version_int < v),
100 (">", lambda v: py_version_int > v),
101 ]:
102 if require.startswith(operator):
103 ver_str = require[len(operator) :].strip()
104 version = tuple((int(i) if i != "*" else None) for i in ver_str.split("."))[0:2]
105 if not check(version):
106 return False
107 break
108 return True
109
110
111 def _get_wheels(from_folder, packages):
112 wheels = defaultdict(list)
113 for filename in from_folder.iterdir():
114 if filename.suffix == ".whl":
115 data = filename.stem.split("-")
116 if len(data) >= 2:
117 pkg, version = data[0:2]
118 if pkg in packages:
119 pkg_version = packages[pkg]
120 if pkg_version is None or pkg_version == version:
121 wheels[pkg].append((version, filename))
122 for versions in wheels.values():
123 versions.sort(
124 key=lambda a: tuple(int(i) if i.isdigit() else i for i in a[0].split(".")), reverse=True,
125 )
126 return wheels
127
128
129 def download_wheel(packages, for_py_version, to_folder, app_data):
130 to_download = list(p if v is None else "{}={}".format(p, v) for p, v in packages.items())
131 logging.debug("download wheels %s", to_download)
132 cmd = [
133 sys.executable,
134 "-m",
135 "pip",
136 "download",
137 "--disable-pip-version-check",
138 "--only-binary=:all:",
139 "--no-deps",
140 "--python-version",
141 for_py_version,
142 "-d",
143 str(to_folder),
144 ]
145 cmd.extend(to_download)
146 # pip has no interface in python - must be a new sub-process
147
148 with pip_wheel_env_run("{}{}".format(*sys.version_info[0:2]), app_data) as env:
149 process = Popen(cmd, env=env, stdout=subprocess.PIPE)
150 process.communicate()
151 if process.returncode != 0:
152 raise RuntimeError("failed to download wheels")
153
154
155 @contextmanager
156 def pip_wheel_env_run(version, app_data):
157 env = os.environ.copy()
158 env.update(
159 {
160 ensure_str(k): str(v) # python 2 requires these to be string only (non-unicode)
161 for k, v in {"PIP_USE_WHEEL": "1", "PIP_USER": "0", "PIP_NO_INPUT": "1"}.items()
162 }
163 )
164 with ensure_file_on_disk(get_bundled_wheel("pip", version), app_data) as pip_wheel_path:
165 # put the bundled wheel onto the path, and use it to do the bootstrap operation
166 env[str("PYTHONPATH")] = str(pip_wheel_path)
167 yield env
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/virtualenv/seed/embed/wheels/acquire.py b/src/virtualenv/seed/embed/wheels/acquire.py
--- a/src/virtualenv/seed/embed/wheels/acquire.py
+++ b/src/virtualenv/seed/embed/wheels/acquire.py
@@ -87,7 +87,9 @@
with ZipFile(ensure_text(str(filename)), "r") as zip_file:
metadata = zip_file.read(name).decode("utf-8")
marker = "Requires-Python:"
- requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))
+ requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)
+ if requires is None: # if it does not specify a python requires the assumption is compatible
+ return True
py_version_int = tuple(int(i) for i in py_version.split("."))
for require in (i.strip() for i in requires.split(",")):
# https://www.python.org/dev/peps/pep-0345/#version-specifiers
|
{"golden_diff": "diff --git a/src/virtualenv/seed/embed/wheels/acquire.py b/src/virtualenv/seed/embed/wheels/acquire.py\n--- a/src/virtualenv/seed/embed/wheels/acquire.py\n+++ b/src/virtualenv/seed/embed/wheels/acquire.py\n@@ -87,7 +87,9 @@\n with ZipFile(ensure_text(str(filename)), \"r\") as zip_file:\n metadata = zip_file.read(name).decode(\"utf-8\")\n marker = \"Requires-Python:\"\n- requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))\n+ requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)\n+ if requires is None: # if it does not specify a python requires the assumption is compatible\n+ return True\n py_version_int = tuple(int(i) for i in py_version.split(\".\"))\n for require in (i.strip() for i in requires.split(\",\")):\n # https://www.python.org/dev/peps/pep-0345/#version-specifiers\n", "issue": "virtualenv raises StopIteration exception when processing extra search directory wheels.\n**Issue**\r\n\r\nWhen creating virtualenv when using `--extra-search-directory`, ``StopIteration`` exception is raised when processing wheels.\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Centos7\r\n- ``virtualenv`` is installed:\r\nvirtualenv version: 20.0.15\r\n```console\r\nException in thread Thread-3:\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib64/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/virtualenv/seed/via_app_data/via_app_data.py\", line 75, in _get\r\n self.app_data,\r\n File \"/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py\", line 30, in get_wheels\r\n acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)\r\n File \"/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py\", line 74, in acquire_from_dir\r\n if wheel_support_py(filename, for_py_version):\r\n File \"/usr/local/lib/python3.6/site-packages/virtualenv/seed/embed/wheels/acquire.py\", line 91, in wheel_support_py\r\n requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))\r\nStopIteration\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Bootstrap\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom copy import copy\nfrom shutil import copy2\nfrom zipfile import ZipFile\n\nfrom virtualenv.info import IS_ZIPAPP\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_str, ensure_text\nfrom virtualenv.util.subprocess import Popen, subprocess\nfrom virtualenv.util.zipapp import ensure_file_on_disk\n\nfrom . import BUNDLE_SUPPORT, MAX\n\nBUNDLE_FOLDER = Path(os.path.abspath(__file__)).parent\n\n\ndef get_wheels(for_py_version, wheel_cache_dir, extra_search_dir, download, packages, app_data):\n # not all wheels are compatible with all python versions, so we need to py version qualify it\n processed = copy(packages)\n # 1. acquire from bundle\n acquire_from_bundle(processed, for_py_version, wheel_cache_dir)\n # 2. acquire from extra search dir\n acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)\n # 3. download from the internet\n if download and processed:\n download_wheel(processed, for_py_version, wheel_cache_dir, app_data)\n\n # in the end just get the wheels\n wheels = _get_wheels(wheel_cache_dir, packages)\n return {p: next(iter(ver_to_files))[1] for p, ver_to_files in wheels.items()}\n\n\ndef acquire_from_bundle(packages, for_py_version, to_folder):\n for pkg, version in list(packages.items()):\n bundle = get_bundled_wheel(pkg, for_py_version)\n if bundle is not None:\n pkg_version = bundle.stem.split(\"-\")[1]\n exact_version_match = version == pkg_version\n if exact_version_match:\n del packages[pkg]\n if version is None or exact_version_match:\n bundled_wheel_file = to_folder / bundle.name\n if not bundled_wheel_file.exists():\n logging.debug(\"get bundled wheel %s\", bundle)\n if IS_ZIPAPP:\n from virtualenv.util.zipapp import extract\n\n extract(bundle, bundled_wheel_file)\n else:\n copy2(str(bundle), str(bundled_wheel_file))\n\n\ndef get_bundled_wheel(package, version_release):\n return BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(version_release, {}) or BUNDLE_SUPPORT[MAX]).get(package)\n\n\ndef acquire_from_dir(packages, for_py_version, to_folder, extra_search_dir):\n if not packages:\n return\n for search_dir in extra_search_dir:\n wheels = _get_wheels(search_dir, packages)\n for pkg, ver_wheels in wheels.items():\n stop = False\n for _, filename in ver_wheels:\n dest = to_folder / filename.name\n if not dest.exists():\n if wheel_support_py(filename, for_py_version):\n logging.debug(\"get extra search dir wheel %s\", filename)\n copy2(str(filename), str(dest))\n stop = True\n else:\n stop = True\n if stop and packages[pkg] is not None:\n del packages[pkg]\n break\n\n\ndef wheel_support_py(filename, py_version):\n name = \"{}.dist-info/METADATA\".format(\"-\".join(filename.stem.split(\"-\")[0:2]))\n with ZipFile(ensure_text(str(filename)), \"r\") as zip_file:\n metadata = zip_file.read(name).decode(\"utf-8\")\n marker = \"Requires-Python:\"\n requires = next(i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker))\n py_version_int = tuple(int(i) for i in py_version.split(\".\"))\n for require in (i.strip() for i in requires.split(\",\")):\n # https://www.python.org/dev/peps/pep-0345/#version-specifiers\n for operator, check in [\n (\"!=\", lambda v: py_version_int != v),\n (\"==\", lambda v: py_version_int == v),\n (\"<=\", lambda v: py_version_int <= v),\n (\">=\", lambda v: py_version_int >= v),\n (\"<\", lambda v: py_version_int < v),\n (\">\", lambda v: py_version_int > v),\n ]:\n if require.startswith(operator):\n ver_str = require[len(operator) :].strip()\n version = tuple((int(i) if i != \"*\" else None) for i in ver_str.split(\".\"))[0:2]\n if not check(version):\n return False\n break\n return True\n\n\ndef _get_wheels(from_folder, packages):\n wheels = defaultdict(list)\n for filename in from_folder.iterdir():\n if filename.suffix == \".whl\":\n data = filename.stem.split(\"-\")\n if len(data) >= 2:\n pkg, version = data[0:2]\n if pkg in packages:\n pkg_version = packages[pkg]\n if pkg_version is None or pkg_version == version:\n wheels[pkg].append((version, filename))\n for versions in wheels.values():\n versions.sort(\n key=lambda a: tuple(int(i) if i.isdigit() else i for i in a[0].split(\".\")), reverse=True,\n )\n return wheels\n\n\ndef download_wheel(packages, for_py_version, to_folder, app_data):\n to_download = list(p if v is None else \"{}={}\".format(p, v) for p, v in packages.items())\n logging.debug(\"download wheels %s\", to_download)\n cmd = [\n sys.executable,\n \"-m\",\n \"pip\",\n \"download\",\n \"--disable-pip-version-check\",\n \"--only-binary=:all:\",\n \"--no-deps\",\n \"--python-version\",\n for_py_version,\n \"-d\",\n str(to_folder),\n ]\n cmd.extend(to_download)\n # pip has no interface in python - must be a new sub-process\n\n with pip_wheel_env_run(\"{}{}\".format(*sys.version_info[0:2]), app_data) as env:\n process = Popen(cmd, env=env, stdout=subprocess.PIPE)\n process.communicate()\n if process.returncode != 0:\n raise RuntimeError(\"failed to download wheels\")\n\n\n@contextmanager\ndef pip_wheel_env_run(version, app_data):\n env = os.environ.copy()\n env.update(\n {\n ensure_str(k): str(v) # python 2 requires these to be string only (non-unicode)\n for k, v in {\"PIP_USE_WHEEL\": \"1\", \"PIP_USER\": \"0\", \"PIP_NO_INPUT\": \"1\"}.items()\n }\n )\n with ensure_file_on_disk(get_bundled_wheel(\"pip\", version), app_data) as pip_wheel_path:\n # put the bundled wheel onto the path, and use it to do the bootstrap operation\n env[str(\"PYTHONPATH\")] = str(pip_wheel_path)\n yield env\n", "path": "src/virtualenv/seed/embed/wheels/acquire.py"}], "after_files": [{"content": "\"\"\"Bootstrap\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport os\nimport sys\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom copy import copy\nfrom shutil import copy2\nfrom zipfile import ZipFile\n\nfrom virtualenv.info import IS_ZIPAPP\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_str, ensure_text\nfrom virtualenv.util.subprocess import Popen, subprocess\nfrom virtualenv.util.zipapp import ensure_file_on_disk\n\nfrom . import BUNDLE_SUPPORT, MAX\n\nBUNDLE_FOLDER = Path(os.path.abspath(__file__)).parent\n\n\ndef get_wheels(for_py_version, wheel_cache_dir, extra_search_dir, download, packages, app_data):\n # not all wheels are compatible with all python versions, so we need to py version qualify it\n processed = copy(packages)\n # 1. acquire from bundle\n acquire_from_bundle(processed, for_py_version, wheel_cache_dir)\n # 2. acquire from extra search dir\n acquire_from_dir(processed, for_py_version, wheel_cache_dir, extra_search_dir)\n # 3. download from the internet\n if download and processed:\n download_wheel(processed, for_py_version, wheel_cache_dir, app_data)\n\n # in the end just get the wheels\n wheels = _get_wheels(wheel_cache_dir, packages)\n return {p: next(iter(ver_to_files))[1] for p, ver_to_files in wheels.items()}\n\n\ndef acquire_from_bundle(packages, for_py_version, to_folder):\n for pkg, version in list(packages.items()):\n bundle = get_bundled_wheel(pkg, for_py_version)\n if bundle is not None:\n pkg_version = bundle.stem.split(\"-\")[1]\n exact_version_match = version == pkg_version\n if exact_version_match:\n del packages[pkg]\n if version is None or exact_version_match:\n bundled_wheel_file = to_folder / bundle.name\n if not bundled_wheel_file.exists():\n logging.debug(\"get bundled wheel %s\", bundle)\n if IS_ZIPAPP:\n from virtualenv.util.zipapp import extract\n\n extract(bundle, bundled_wheel_file)\n else:\n copy2(str(bundle), str(bundled_wheel_file))\n\n\ndef get_bundled_wheel(package, version_release):\n return BUNDLE_FOLDER / (BUNDLE_SUPPORT.get(version_release, {}) or BUNDLE_SUPPORT[MAX]).get(package)\n\n\ndef acquire_from_dir(packages, for_py_version, to_folder, extra_search_dir):\n if not packages:\n return\n for search_dir in extra_search_dir:\n wheels = _get_wheels(search_dir, packages)\n for pkg, ver_wheels in wheels.items():\n stop = False\n for _, filename in ver_wheels:\n dest = to_folder / filename.name\n if not dest.exists():\n if wheel_support_py(filename, for_py_version):\n logging.debug(\"get extra search dir wheel %s\", filename)\n copy2(str(filename), str(dest))\n stop = True\n else:\n stop = True\n if stop and packages[pkg] is not None:\n del packages[pkg]\n break\n\n\ndef wheel_support_py(filename, py_version):\n name = \"{}.dist-info/METADATA\".format(\"-\".join(filename.stem.split(\"-\")[0:2]))\n with ZipFile(ensure_text(str(filename)), \"r\") as zip_file:\n metadata = zip_file.read(name).decode(\"utf-8\")\n marker = \"Requires-Python:\"\n requires = next((i[len(marker) :] for i in metadata.splitlines() if i.startswith(marker)), None)\n if requires is None: # if it does not specify a python requires the assumption is compatible\n return True\n py_version_int = tuple(int(i) for i in py_version.split(\".\"))\n for require in (i.strip() for i in requires.split(\",\")):\n # https://www.python.org/dev/peps/pep-0345/#version-specifiers\n for operator, check in [\n (\"!=\", lambda v: py_version_int != v),\n (\"==\", lambda v: py_version_int == v),\n (\"<=\", lambda v: py_version_int <= v),\n (\">=\", lambda v: py_version_int >= v),\n (\"<\", lambda v: py_version_int < v),\n (\">\", lambda v: py_version_int > v),\n ]:\n if require.startswith(operator):\n ver_str = require[len(operator) :].strip()\n version = tuple((int(i) if i != \"*\" else None) for i in ver_str.split(\".\"))[0:2]\n if not check(version):\n return False\n break\n return True\n\n\ndef _get_wheels(from_folder, packages):\n wheels = defaultdict(list)\n for filename in from_folder.iterdir():\n if filename.suffix == \".whl\":\n data = filename.stem.split(\"-\")\n if len(data) >= 2:\n pkg, version = data[0:2]\n if pkg in packages:\n pkg_version = packages[pkg]\n if pkg_version is None or pkg_version == version:\n wheels[pkg].append((version, filename))\n for versions in wheels.values():\n versions.sort(\n key=lambda a: tuple(int(i) if i.isdigit() else i for i in a[0].split(\".\")), reverse=True,\n )\n return wheels\n\n\ndef download_wheel(packages, for_py_version, to_folder, app_data):\n to_download = list(p if v is None else \"{}={}\".format(p, v) for p, v in packages.items())\n logging.debug(\"download wheels %s\", to_download)\n cmd = [\n sys.executable,\n \"-m\",\n \"pip\",\n \"download\",\n \"--disable-pip-version-check\",\n \"--only-binary=:all:\",\n \"--no-deps\",\n \"--python-version\",\n for_py_version,\n \"-d\",\n str(to_folder),\n ]\n cmd.extend(to_download)\n # pip has no interface in python - must be a new sub-process\n\n with pip_wheel_env_run(\"{}{}\".format(*sys.version_info[0:2]), app_data) as env:\n process = Popen(cmd, env=env, stdout=subprocess.PIPE)\n process.communicate()\n if process.returncode != 0:\n raise RuntimeError(\"failed to download wheels\")\n\n\n@contextmanager\ndef pip_wheel_env_run(version, app_data):\n env = os.environ.copy()\n env.update(\n {\n ensure_str(k): str(v) # python 2 requires these to be string only (non-unicode)\n for k, v in {\"PIP_USE_WHEEL\": \"1\", \"PIP_USER\": \"0\", \"PIP_NO_INPUT\": \"1\"}.items()\n }\n )\n with ensure_file_on_disk(get_bundled_wheel(\"pip\", version), app_data) as pip_wheel_path:\n # put the bundled wheel onto the path, and use it to do the bootstrap operation\n env[str(\"PYTHONPATH\")] = str(pip_wheel_path)\n yield env\n", "path": "src/virtualenv/seed/embed/wheels/acquire.py"}]}
| 2,506 | 241 |
gh_patches_debug_19273
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-1767
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BaseShell.settitle() uses os.system()
And [arguments are inserted without escaping](https://github.com/xonsh/xonsh/blob/2e5234da02b5b6590c21dd932bc50b8f614ade9f/xonsh/base_shell.py#L292). EDIT: It does escape, making them safe. Probably. Hopefully.
Additionally, this only works under CMD, not PowerShell.
My recommendation is to call [SetConsoleTitleW()](https://msdn.microsoft.com/en-us/library/windows/desktop/ms686050%28v=vs.85%29.aspx) via [ctypes](https://docs.python.org/3/library/ctypes.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/base_shell.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """The base class for xonsh shell"""
3 import io
4 import os
5 import sys
6 import time
7 import builtins
8
9 from xonsh.tools import (XonshError, escape_windows_cmd_string, print_exception,
10 DefaultNotGiven, check_for_partial_string)
11 from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS
12 from xonsh.codecache import (should_use_cache, code_cache_name,
13 code_cache_check, get_cache_filename,
14 update_cache, run_compiled_code)
15 from xonsh.completer import Completer
16 from xonsh.prompt.base import multiline_prompt, partial_format_prompt
17 from xonsh.events import events
18
19
20 class _TeeOut(object):
21 """Tees stdout into the original sys.stdout and another buffer."""
22
23 def __init__(self, buf):
24 self.buffer = buf
25 self.stdout = sys.stdout
26 self.encoding = self.stdout.encoding
27 self.errors = self.stdout.errors
28 sys.stdout = self
29
30 def __del__(self):
31 sys.stdout = self.stdout
32
33 def close(self):
34 """Restores the original stdout."""
35 sys.stdout = self.stdout
36
37 def write(self, data):
38 """Writes data to the original stdout and the buffer."""
39 # data = data.replace('\001', '').replace('\002', '')
40 self.stdout.write(data)
41 self.buffer.write(data)
42
43 def flush(self):
44 """Flushes both the original stdout and the buffer."""
45 self.stdout.flush()
46 self.buffer.flush()
47
48 def fileno(self):
49 """Tunnel fileno() calls."""
50 return self.stdout.fileno()
51
52
53 class _TeeErr(object):
54 """Tees stderr into the original sys.stdout and another buffer."""
55
56 def __init__(self, buf):
57 self.buffer = buf
58 self.stderr = sys.stderr
59 self.encoding = self.stderr.encoding
60 self.errors = self.stderr.errors
61 sys.stderr = self
62
63 def __del__(self):
64 sys.stderr = self.stderr
65
66 def close(self):
67 """Restores the original stderr."""
68 sys.stderr = self.stderr
69
70 def write(self, data):
71 """Writes data to the original stderr and the buffer."""
72 # data = data.replace('\001', '').replace('\002', '')
73 self.stderr.write(data)
74 self.buffer.write(data)
75
76 def flush(self):
77 """Flushes both the original stderr and the buffer."""
78 self.stderr.flush()
79 self.buffer.flush()
80
81 def fileno(self):
82 """Tunnel fileno() calls."""
83 return self.stderr.fileno()
84
85
86 class Tee(io.StringIO):
87 """Class that merges tee'd stdout and stderr into a single buffer.
88
89 This represents what a user would actually see on the command line.
90 """
91 # pylint is a stupid about counting public methods when using inheritance.
92 # pylint: disable=too-few-public-methods
93
94 def __init__(self, *args, **kwargs):
95 super().__init__(*args, **kwargs)
96 self.stdout = _TeeOut(self)
97 self.stderr = _TeeErr(self)
98
99 def __del__(self):
100 del self.stdout, self.stderr
101 super().__del__()
102
103 def close(self):
104 """Closes the buffer as well as the stdout and stderr tees."""
105 self.stdout.close()
106 self.stderr.close()
107 super().close()
108
109
110 class BaseShell(object):
111 """The xonsh shell."""
112
113 def __init__(self, execer, ctx, **kwargs):
114 super().__init__()
115 self.execer = execer
116 self.ctx = ctx
117 self.completer = Completer() if kwargs.get('completer', True) else None
118 self.buffer = []
119 self.need_more_lines = False
120 self.mlprompt = None
121 self._styler = DefaultNotGiven
122
123 @property
124 def styler(self):
125 if self._styler is DefaultNotGiven:
126 if HAS_PYGMENTS:
127 from xonsh.pyghooks import XonshStyle
128 env = builtins.__xonsh_env__
129 self._styler = XonshStyle(env.get('XONSH_COLOR_STYLE'))
130 else:
131 self._styler = None
132 return self._styler
133
134 @styler.setter
135 def styler(self, value):
136 self._styler = value
137
138 @styler.deleter
139 def styler(self):
140 self._styler = DefaultNotGiven
141
142 def emptyline(self):
143 """Called when an empty line has been entered."""
144 self.need_more_lines = False
145 self.default('')
146
147 def singleline(self, **kwargs):
148 """Reads a single line of input from the shell."""
149 msg = '{0} has not implemented singleline().'
150 raise RuntimeError(msg.format(self.__class__.__name__))
151
152 def precmd(self, line):
153 """Called just before execution of line."""
154 return line if self.need_more_lines else line.lstrip()
155
156 def default(self, line):
157 """Implements code execution."""
158 line = line if line.endswith('\n') else line + '\n'
159 src, code = self.push(line)
160 if code is None:
161 return
162
163 events.on_precommand.fire(src)
164
165 hist = builtins.__xonsh_history__ # pylint: disable=no-member
166 ts1 = None
167 store_stdout = builtins.__xonsh_env__.get('XONSH_STORE_STDOUT') # pylint: disable=no-member
168 tee = Tee() if store_stdout else io.StringIO()
169 try:
170 ts0 = time.time()
171 run_compiled_code(code, self.ctx, None, 'single')
172 ts1 = time.time()
173 if hist.last_cmd_rtn is None:
174 hist.last_cmd_rtn = 0 # returncode for success
175 except XonshError as e:
176 print(e.args[0], file=sys.stderr)
177 if hist.last_cmd_rtn is None:
178 hist.last_cmd_rtn = 1 # return code for failure
179 except Exception: # pylint: disable=broad-except
180 print_exception()
181 if hist.last_cmd_rtn is None:
182 hist.last_cmd_rtn = 1 # return code for failure
183 finally:
184 ts1 = ts1 or time.time()
185 self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee.getvalue())
186 tee.close()
187
188 self._fix_cwd()
189 if builtins.__xonsh_exit__: # pylint: disable=no-member
190 return True
191
192 def _append_history(self, tee_out=None, **info):
193 """
194 Append information about the command to the history.
195
196 (Also handles on_postcommand because this is the place where all the information is available)
197 """
198 hist = builtins.__xonsh_history__ # pylint: disable=no-member
199 info['rtn'] = hist.last_cmd_rtn
200 tee_out = tee_out or None
201 last_out = hist.last_cmd_out or None
202 if last_out is None and tee_out is None:
203 pass
204 elif last_out is None and tee_out is not None:
205 info['out'] = tee_out
206 elif last_out is not None and tee_out is None:
207 info['out'] = last_out
208 else:
209 info['out'] = tee_out + '\n' + last_out
210
211 events.on_postcommand.fire(
212 info['inp'],
213 info['rtn'],
214 info.get('out', None),
215 info['ts']
216 )
217
218 hist.append(info)
219 hist.last_cmd_rtn = hist.last_cmd_out = None
220
221 def _fix_cwd(self):
222 """Check if the cwd changed out from under us"""
223 cwd = os.getcwd()
224 if cwd != builtins.__xonsh_env__.get('PWD'):
225 old = builtins.__xonsh_env__.get('PWD') # working directory changed without updating $PWD
226 builtins.__xonsh_env__['PWD'] = cwd # track it now
227 if old is not None:
228 builtins.__xonsh_env__['OLDPWD'] = old # and update $OLDPWD like dirstack.
229 events.on_chdir.fire(old, cwd) # fire event after cwd actually changed.
230
231 def push(self, line):
232 """Pushes a line onto the buffer and compiles the code in a way that
233 enables multiline input.
234 """
235 code = None
236 self.buffer.append(line)
237 if self.need_more_lines:
238 return None, code
239 src = ''.join(self.buffer)
240 _cache = should_use_cache(self.execer, 'single')
241 if _cache:
242 codefname = code_cache_name(src)
243 cachefname = get_cache_filename(codefname, code=True)
244 usecache, code = code_cache_check(cachefname)
245 if usecache:
246 self.reset_buffer()
247 return src, code
248 try:
249 code = self.execer.compile(src,
250 mode='single',
251 glbs=self.ctx,
252 locs=None)
253 if _cache:
254 update_cache(code, cachefname)
255 self.reset_buffer()
256 except SyntaxError:
257 partial_string_info = check_for_partial_string(src)
258 in_partial_string = (partial_string_info[0] is not None and
259 partial_string_info[1] is None)
260 if ((line == '\n' and not in_partial_string)):
261 self.reset_buffer()
262 print_exception()
263 return src, None
264 self.need_more_lines = True
265 except Exception: # pylint: disable=broad-except
266 self.reset_buffer()
267 print_exception()
268 return src, None
269 return src, code
270
271 def reset_buffer(self):
272 """Resets the line buffer."""
273 self.buffer.clear()
274 self.need_more_lines = False
275 self.mlprompt = None
276
277 def settitle(self):
278 """Sets terminal title."""
279 env = builtins.__xonsh_env__ # pylint: disable=no-member
280 term = env.get('TERM', None)
281 # Shells running in emacs sets TERM to "dumb" or "eterm-color".
282 # Do not set title for these to avoid garbled prompt.
283 if (term is None and not ON_WINDOWS) or term in ['dumb', 'eterm-color',
284 'linux']:
285 return
286 t = env.get('TITLE')
287 if t is None:
288 return
289 t = partial_format_prompt(t)
290 if ON_WINDOWS and 'ANSICON' not in env:
291 t = escape_windows_cmd_string(t)
292 os.system('title {}'.format(t))
293 else:
294 with open(1, 'wb', closefd=False) as f:
295 # prevent xonsh from answering interative questions
296 # on the next command by writing the title
297 f.write("\x1b]0;{0}\x07".format(t).encode())
298 f.flush()
299
300 @property
301 def prompt(self):
302 """Obtains the current prompt string."""
303 if self.need_more_lines:
304 if self.mlprompt is None:
305 try:
306 self.mlprompt = multiline_prompt()
307 except Exception: # pylint: disable=broad-except
308 print_exception()
309 self.mlprompt = '<multiline prompt error> '
310 return self.mlprompt
311 env = builtins.__xonsh_env__ # pylint: disable=no-member
312 p = env.get('PROMPT')
313 try:
314 p = partial_format_prompt(p)
315 except Exception: # pylint: disable=broad-except
316 print_exception()
317 self.settitle()
318 return p
319
320 def format_color(self, string, **kwargs):
321 """Formats the colors in a string. This base implmentation does not
322 actually do any coloring, but just returns the string directly.
323 """
324 return string
325
326 def print_color(self, string, **kwargs):
327 """Prints a string in color. This base implmentation does not actually
328 do any coloring, but just prints the string directly.
329 """
330 if not isinstance(string, str):
331 string = ''.join([x for _, x in string])
332 print(string, **kwargs)
333
334 def color_style_names(self):
335 """Returns an iterable of all available style names."""
336 return ()
337
338 def color_style(self):
339 """Returns the current color map."""
340 return {}
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py
--- a/xonsh/base_shell.py
+++ b/xonsh/base_shell.py
@@ -16,6 +16,11 @@
from xonsh.prompt.base import multiline_prompt, partial_format_prompt
from xonsh.events import events
+if ON_WINDOWS:
+ import ctypes
+ kernel32 = ctypes.windll.kernel32
+ kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]
+
class _TeeOut(object):
"""Tees stdout into the original sys.stdout and another buffer."""
@@ -288,8 +293,7 @@
return
t = partial_format_prompt(t)
if ON_WINDOWS and 'ANSICON' not in env:
- t = escape_windows_cmd_string(t)
- os.system('title {}'.format(t))
+ kernel32.SetConsoleTitleW(t)
else:
with open(1, 'wb', closefd=False) as f:
# prevent xonsh from answering interative questions
|
{"golden_diff": "diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py\n--- a/xonsh/base_shell.py\n+++ b/xonsh/base_shell.py\n@@ -16,6 +16,11 @@\n from xonsh.prompt.base import multiline_prompt, partial_format_prompt\n from xonsh.events import events\n \n+if ON_WINDOWS:\n+ import ctypes\n+ kernel32 = ctypes.windll.kernel32\n+ kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]\n+\n \n class _TeeOut(object):\n \"\"\"Tees stdout into the original sys.stdout and another buffer.\"\"\"\n@@ -288,8 +293,7 @@\n return\n t = partial_format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n- t = escape_windows_cmd_string(t)\n- os.system('title {}'.format(t))\n+ kernel32.SetConsoleTitleW(t)\n else:\n with open(1, 'wb', closefd=False) as f:\n # prevent xonsh from answering interative questions\n", "issue": "BaseShell.settitle() uses os.system()\nAnd [arguments are inserted without escaping](https://github.com/xonsh/xonsh/blob/2e5234da02b5b6590c21dd932bc50b8f614ade9f/xonsh/base_shell.py#L292). EDIT: It does escape, making them safe. Probably. Hopefully.\n\nAdditionally, this only works under CMD, not PowerShell.\n\nMy recommendation is to call [SetConsoleTitleW()](https://msdn.microsoft.com/en-us/library/windows/desktop/ms686050%28v=vs.85%29.aspx) via [ctypes](https://docs.python.org/3/library/ctypes.html).\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The base class for xonsh shell\"\"\"\nimport io\nimport os\nimport sys\nimport time\nimport builtins\n\nfrom xonsh.tools import (XonshError, escape_windows_cmd_string, print_exception,\n DefaultNotGiven, check_for_partial_string)\nfrom xonsh.platform import HAS_PYGMENTS, ON_WINDOWS\nfrom xonsh.codecache import (should_use_cache, code_cache_name,\n code_cache_check, get_cache_filename,\n update_cache, run_compiled_code)\nfrom xonsh.completer import Completer\nfrom xonsh.prompt.base import multiline_prompt, partial_format_prompt\nfrom xonsh.events import events\n\n\nclass _TeeOut(object):\n \"\"\"Tees stdout into the original sys.stdout and another buffer.\"\"\"\n\n def __init__(self, buf):\n self.buffer = buf\n self.stdout = sys.stdout\n self.encoding = self.stdout.encoding\n self.errors = self.stdout.errors\n sys.stdout = self\n\n def __del__(self):\n sys.stdout = self.stdout\n\n def close(self):\n \"\"\"Restores the original stdout.\"\"\"\n sys.stdout = self.stdout\n\n def write(self, data):\n \"\"\"Writes data to the original stdout and the buffer.\"\"\"\n # data = data.replace('\\001', '').replace('\\002', '')\n self.stdout.write(data)\n self.buffer.write(data)\n\n def flush(self):\n \"\"\"Flushes both the original stdout and the buffer.\"\"\"\n self.stdout.flush()\n self.buffer.flush()\n\n def fileno(self):\n \"\"\"Tunnel fileno() calls.\"\"\"\n return self.stdout.fileno()\n\n\nclass _TeeErr(object):\n \"\"\"Tees stderr into the original sys.stdout and another buffer.\"\"\"\n\n def __init__(self, buf):\n self.buffer = buf\n self.stderr = sys.stderr\n self.encoding = self.stderr.encoding\n self.errors = self.stderr.errors\n sys.stderr = self\n\n def __del__(self):\n sys.stderr = self.stderr\n\n def close(self):\n \"\"\"Restores the original stderr.\"\"\"\n sys.stderr = self.stderr\n\n def write(self, data):\n \"\"\"Writes data to the original stderr and the buffer.\"\"\"\n # data = data.replace('\\001', '').replace('\\002', '')\n self.stderr.write(data)\n self.buffer.write(data)\n\n def flush(self):\n \"\"\"Flushes both the original stderr and the buffer.\"\"\"\n self.stderr.flush()\n self.buffer.flush()\n\n def fileno(self):\n \"\"\"Tunnel fileno() calls.\"\"\"\n return self.stderr.fileno()\n\n\nclass Tee(io.StringIO):\n \"\"\"Class that merges tee'd stdout and stderr into a single buffer.\n\n This represents what a user would actually see on the command line.\n \"\"\"\n # pylint is a stupid about counting public methods when using inheritance.\n # pylint: disable=too-few-public-methods\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.stdout = _TeeOut(self)\n self.stderr = _TeeErr(self)\n\n def __del__(self):\n del self.stdout, self.stderr\n super().__del__()\n\n def close(self):\n \"\"\"Closes the buffer as well as the stdout and stderr tees.\"\"\"\n self.stdout.close()\n self.stderr.close()\n super().close()\n\n\nclass BaseShell(object):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, execer, ctx, **kwargs):\n super().__init__()\n self.execer = execer\n self.ctx = ctx\n self.completer = Completer() if kwargs.get('completer', True) else None\n self.buffer = []\n self.need_more_lines = False\n self.mlprompt = None\n self._styler = DefaultNotGiven\n\n @property\n def styler(self):\n if self._styler is DefaultNotGiven:\n if HAS_PYGMENTS:\n from xonsh.pyghooks import XonshStyle\n env = builtins.__xonsh_env__\n self._styler = XonshStyle(env.get('XONSH_COLOR_STYLE'))\n else:\n self._styler = None\n return self._styler\n\n @styler.setter\n def styler(self, value):\n self._styler = value\n\n @styler.deleter\n def styler(self):\n self._styler = DefaultNotGiven\n\n def emptyline(self):\n \"\"\"Called when an empty line has been entered.\"\"\"\n self.need_more_lines = False\n self.default('')\n\n def singleline(self, **kwargs):\n \"\"\"Reads a single line of input from the shell.\"\"\"\n msg = '{0} has not implemented singleline().'\n raise RuntimeError(msg.format(self.__class__.__name__))\n\n def precmd(self, line):\n \"\"\"Called just before execution of line.\"\"\"\n return line if self.need_more_lines else line.lstrip()\n\n def default(self, line):\n \"\"\"Implements code execution.\"\"\"\n line = line if line.endswith('\\n') else line + '\\n'\n src, code = self.push(line)\n if code is None:\n return\n\n events.on_precommand.fire(src)\n\n hist = builtins.__xonsh_history__ # pylint: disable=no-member\n ts1 = None\n store_stdout = builtins.__xonsh_env__.get('XONSH_STORE_STDOUT') # pylint: disable=no-member\n tee = Tee() if store_stdout else io.StringIO()\n try:\n ts0 = time.time()\n run_compiled_code(code, self.ctx, None, 'single')\n ts1 = time.time()\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 0 # returncode for success\n except XonshError as e:\n print(e.args[0], file=sys.stderr)\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 1 # return code for failure\n except Exception: # pylint: disable=broad-except\n print_exception()\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 1 # return code for failure\n finally:\n ts1 = ts1 or time.time()\n self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee.getvalue())\n tee.close()\n\n self._fix_cwd()\n if builtins.__xonsh_exit__: # pylint: disable=no-member\n return True\n\n def _append_history(self, tee_out=None, **info):\n \"\"\"\n Append information about the command to the history.\n\n (Also handles on_postcommand because this is the place where all the information is available)\n \"\"\"\n hist = builtins.__xonsh_history__ # pylint: disable=no-member\n info['rtn'] = hist.last_cmd_rtn\n tee_out = tee_out or None\n last_out = hist.last_cmd_out or None\n if last_out is None and tee_out is None:\n pass\n elif last_out is None and tee_out is not None:\n info['out'] = tee_out\n elif last_out is not None and tee_out is None:\n info['out'] = last_out\n else:\n info['out'] = tee_out + '\\n' + last_out\n\n events.on_postcommand.fire(\n info['inp'],\n info['rtn'],\n info.get('out', None),\n info['ts']\n )\n\n hist.append(info)\n hist.last_cmd_rtn = hist.last_cmd_out = None\n\n def _fix_cwd(self):\n \"\"\"Check if the cwd changed out from under us\"\"\"\n cwd = os.getcwd()\n if cwd != builtins.__xonsh_env__.get('PWD'):\n old = builtins.__xonsh_env__.get('PWD') # working directory changed without updating $PWD\n builtins.__xonsh_env__['PWD'] = cwd # track it now\n if old is not None:\n builtins.__xonsh_env__['OLDPWD'] = old # and update $OLDPWD like dirstack.\n events.on_chdir.fire(old, cwd) # fire event after cwd actually changed.\n\n def push(self, line):\n \"\"\"Pushes a line onto the buffer and compiles the code in a way that\n enables multiline input.\n \"\"\"\n code = None\n self.buffer.append(line)\n if self.need_more_lines:\n return None, code\n src = ''.join(self.buffer)\n _cache = should_use_cache(self.execer, 'single')\n if _cache:\n codefname = code_cache_name(src)\n cachefname = get_cache_filename(codefname, code=True)\n usecache, code = code_cache_check(cachefname)\n if usecache:\n self.reset_buffer()\n return src, code\n try:\n code = self.execer.compile(src,\n mode='single',\n glbs=self.ctx,\n locs=None)\n if _cache:\n update_cache(code, cachefname)\n self.reset_buffer()\n except SyntaxError:\n partial_string_info = check_for_partial_string(src)\n in_partial_string = (partial_string_info[0] is not None and\n partial_string_info[1] is None)\n if ((line == '\\n' and not in_partial_string)):\n self.reset_buffer()\n print_exception()\n return src, None\n self.need_more_lines = True\n except Exception: # pylint: disable=broad-except\n self.reset_buffer()\n print_exception()\n return src, None\n return src, code\n\n def reset_buffer(self):\n \"\"\"Resets the line buffer.\"\"\"\n self.buffer.clear()\n self.need_more_lines = False\n self.mlprompt = None\n\n def settitle(self):\n \"\"\"Sets terminal title.\"\"\"\n env = builtins.__xonsh_env__ # pylint: disable=no-member\n term = env.get('TERM', None)\n # Shells running in emacs sets TERM to \"dumb\" or \"eterm-color\".\n # Do not set title for these to avoid garbled prompt.\n if (term is None and not ON_WINDOWS) or term in ['dumb', 'eterm-color',\n 'linux']:\n return\n t = env.get('TITLE')\n if t is None:\n return\n t = partial_format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n t = escape_windows_cmd_string(t)\n os.system('title {}'.format(t))\n else:\n with open(1, 'wb', closefd=False) as f:\n # prevent xonsh from answering interative questions\n # on the next command by writing the title\n f.write(\"\\x1b]0;{0}\\x07\".format(t).encode())\n f.flush()\n\n @property\n def prompt(self):\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n try:\n self.mlprompt = multiline_prompt()\n except Exception: # pylint: disable=broad-except\n print_exception()\n self.mlprompt = '<multiline prompt error> '\n return self.mlprompt\n env = builtins.__xonsh_env__ # pylint: disable=no-member\n p = env.get('PROMPT')\n try:\n p = partial_format_prompt(p)\n except Exception: # pylint: disable=broad-except\n print_exception()\n self.settitle()\n return p\n\n def format_color(self, string, **kwargs):\n \"\"\"Formats the colors in a string. This base implmentation does not\n actually do any coloring, but just returns the string directly.\n \"\"\"\n return string\n\n def print_color(self, string, **kwargs):\n \"\"\"Prints a string in color. This base implmentation does not actually\n do any coloring, but just prints the string directly.\n \"\"\"\n if not isinstance(string, str):\n string = ''.join([x for _, x in string])\n print(string, **kwargs)\n\n def color_style_names(self):\n \"\"\"Returns an iterable of all available style names.\"\"\"\n return ()\n\n def color_style(self):\n \"\"\"Returns the current color map.\"\"\"\n return {}\n", "path": "xonsh/base_shell.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"The base class for xonsh shell\"\"\"\nimport io\nimport os\nimport sys\nimport time\nimport builtins\n\nfrom xonsh.tools import (XonshError, escape_windows_cmd_string, print_exception,\n DefaultNotGiven, check_for_partial_string)\nfrom xonsh.platform import HAS_PYGMENTS, ON_WINDOWS\nfrom xonsh.codecache import (should_use_cache, code_cache_name,\n code_cache_check, get_cache_filename,\n update_cache, run_compiled_code)\nfrom xonsh.completer import Completer\nfrom xonsh.prompt.base import multiline_prompt, partial_format_prompt\nfrom xonsh.events import events\n\nif ON_WINDOWS:\n import ctypes\n kernel32 = ctypes.windll.kernel32\n kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p]\n\n\nclass _TeeOut(object):\n \"\"\"Tees stdout into the original sys.stdout and another buffer.\"\"\"\n\n def __init__(self, buf):\n self.buffer = buf\n self.stdout = sys.stdout\n self.encoding = self.stdout.encoding\n self.errors = self.stdout.errors\n sys.stdout = self\n\n def __del__(self):\n sys.stdout = self.stdout\n\n def close(self):\n \"\"\"Restores the original stdout.\"\"\"\n sys.stdout = self.stdout\n\n def write(self, data):\n \"\"\"Writes data to the original stdout and the buffer.\"\"\"\n # data = data.replace('\\001', '').replace('\\002', '')\n self.stdout.write(data)\n self.buffer.write(data)\n\n def flush(self):\n \"\"\"Flushes both the original stdout and the buffer.\"\"\"\n self.stdout.flush()\n self.buffer.flush()\n\n def fileno(self):\n \"\"\"Tunnel fileno() calls.\"\"\"\n return self.stdout.fileno()\n\n\nclass _TeeErr(object):\n \"\"\"Tees stderr into the original sys.stdout and another buffer.\"\"\"\n\n def __init__(self, buf):\n self.buffer = buf\n self.stderr = sys.stderr\n self.encoding = self.stderr.encoding\n self.errors = self.stderr.errors\n sys.stderr = self\n\n def __del__(self):\n sys.stderr = self.stderr\n\n def close(self):\n \"\"\"Restores the original stderr.\"\"\"\n sys.stderr = self.stderr\n\n def write(self, data):\n \"\"\"Writes data to the original stderr and the buffer.\"\"\"\n # data = data.replace('\\001', '').replace('\\002', '')\n self.stderr.write(data)\n self.buffer.write(data)\n\n def flush(self):\n \"\"\"Flushes both the original stderr and the buffer.\"\"\"\n self.stderr.flush()\n self.buffer.flush()\n\n def fileno(self):\n \"\"\"Tunnel fileno() calls.\"\"\"\n return self.stderr.fileno()\n\n\nclass Tee(io.StringIO):\n \"\"\"Class that merges tee'd stdout and stderr into a single buffer.\n\n This represents what a user would actually see on the command line.\n \"\"\"\n # pylint is a stupid about counting public methods when using inheritance.\n # pylint: disable=too-few-public-methods\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.stdout = _TeeOut(self)\n self.stderr = _TeeErr(self)\n\n def __del__(self):\n del self.stdout, self.stderr\n super().__del__()\n\n def close(self):\n \"\"\"Closes the buffer as well as the stdout and stderr tees.\"\"\"\n self.stdout.close()\n self.stderr.close()\n super().close()\n\n\nclass BaseShell(object):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, execer, ctx, **kwargs):\n super().__init__()\n self.execer = execer\n self.ctx = ctx\n self.completer = Completer() if kwargs.get('completer', True) else None\n self.buffer = []\n self.need_more_lines = False\n self.mlprompt = None\n self._styler = DefaultNotGiven\n\n @property\n def styler(self):\n if self._styler is DefaultNotGiven:\n if HAS_PYGMENTS:\n from xonsh.pyghooks import XonshStyle\n env = builtins.__xonsh_env__\n self._styler = XonshStyle(env.get('XONSH_COLOR_STYLE'))\n else:\n self._styler = None\n return self._styler\n\n @styler.setter\n def styler(self, value):\n self._styler = value\n\n @styler.deleter\n def styler(self):\n self._styler = DefaultNotGiven\n\n def emptyline(self):\n \"\"\"Called when an empty line has been entered.\"\"\"\n self.need_more_lines = False\n self.default('')\n\n def singleline(self, **kwargs):\n \"\"\"Reads a single line of input from the shell.\"\"\"\n msg = '{0} has not implemented singleline().'\n raise RuntimeError(msg.format(self.__class__.__name__))\n\n def precmd(self, line):\n \"\"\"Called just before execution of line.\"\"\"\n return line if self.need_more_lines else line.lstrip()\n\n def default(self, line):\n \"\"\"Implements code execution.\"\"\"\n line = line if line.endswith('\\n') else line + '\\n'\n src, code = self.push(line)\n if code is None:\n return\n\n events.on_precommand.fire(src)\n\n hist = builtins.__xonsh_history__ # pylint: disable=no-member\n ts1 = None\n store_stdout = builtins.__xonsh_env__.get('XONSH_STORE_STDOUT') # pylint: disable=no-member\n tee = Tee() if store_stdout else io.StringIO()\n try:\n ts0 = time.time()\n run_compiled_code(code, self.ctx, None, 'single')\n ts1 = time.time()\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 0 # returncode for success\n except XonshError as e:\n print(e.args[0], file=sys.stderr)\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 1 # return code for failure\n except Exception: # pylint: disable=broad-except\n print_exception()\n if hist.last_cmd_rtn is None:\n hist.last_cmd_rtn = 1 # return code for failure\n finally:\n ts1 = ts1 or time.time()\n self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee.getvalue())\n tee.close()\n\n self._fix_cwd()\n if builtins.__xonsh_exit__: # pylint: disable=no-member\n return True\n\n def _append_history(self, tee_out=None, **info):\n \"\"\"\n Append information about the command to the history.\n\n (Also handles on_postcommand because this is the place where all the information is available)\n \"\"\"\n hist = builtins.__xonsh_history__ # pylint: disable=no-member\n info['rtn'] = hist.last_cmd_rtn\n tee_out = tee_out or None\n last_out = hist.last_cmd_out or None\n if last_out is None and tee_out is None:\n pass\n elif last_out is None and tee_out is not None:\n info['out'] = tee_out\n elif last_out is not None and tee_out is None:\n info['out'] = last_out\n else:\n info['out'] = tee_out + '\\n' + last_out\n\n events.on_postcommand.fire(\n info['inp'],\n info['rtn'],\n info.get('out', None),\n info['ts']\n )\n\n hist.append(info)\n hist.last_cmd_rtn = hist.last_cmd_out = None\n\n def _fix_cwd(self):\n \"\"\"Check if the cwd changed out from under us\"\"\"\n cwd = os.getcwd()\n if cwd != builtins.__xonsh_env__.get('PWD'):\n old = builtins.__xonsh_env__.get('PWD') # working directory changed without updating $PWD\n builtins.__xonsh_env__['PWD'] = cwd # track it now\n if old is not None:\n builtins.__xonsh_env__['OLDPWD'] = old # and update $OLDPWD like dirstack.\n events.on_chdir.fire(old, cwd) # fire event after cwd actually changed.\n\n def push(self, line):\n \"\"\"Pushes a line onto the buffer and compiles the code in a way that\n enables multiline input.\n \"\"\"\n code = None\n self.buffer.append(line)\n if self.need_more_lines:\n return None, code\n src = ''.join(self.buffer)\n _cache = should_use_cache(self.execer, 'single')\n if _cache:\n codefname = code_cache_name(src)\n cachefname = get_cache_filename(codefname, code=True)\n usecache, code = code_cache_check(cachefname)\n if usecache:\n self.reset_buffer()\n return src, code\n try:\n code = self.execer.compile(src,\n mode='single',\n glbs=self.ctx,\n locs=None)\n if _cache:\n update_cache(code, cachefname)\n self.reset_buffer()\n except SyntaxError:\n partial_string_info = check_for_partial_string(src)\n in_partial_string = (partial_string_info[0] is not None and\n partial_string_info[1] is None)\n if ((line == '\\n' and not in_partial_string)):\n self.reset_buffer()\n print_exception()\n return src, None\n self.need_more_lines = True\n except Exception: # pylint: disable=broad-except\n self.reset_buffer()\n print_exception()\n return src, None\n return src, code\n\n def reset_buffer(self):\n \"\"\"Resets the line buffer.\"\"\"\n self.buffer.clear()\n self.need_more_lines = False\n self.mlprompt = None\n\n def settitle(self):\n \"\"\"Sets terminal title.\"\"\"\n env = builtins.__xonsh_env__ # pylint: disable=no-member\n term = env.get('TERM', None)\n # Shells running in emacs sets TERM to \"dumb\" or \"eterm-color\".\n # Do not set title for these to avoid garbled prompt.\n if (term is None and not ON_WINDOWS) or term in ['dumb', 'eterm-color',\n 'linux']:\n return\n t = env.get('TITLE')\n if t is None:\n return\n t = partial_format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n kernel32.SetConsoleTitleW(t)\n else:\n with open(1, 'wb', closefd=False) as f:\n # prevent xonsh from answering interative questions\n # on the next command by writing the title\n f.write(\"\\x1b]0;{0}\\x07\".format(t).encode())\n f.flush()\n\n @property\n def prompt(self):\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n try:\n self.mlprompt = multiline_prompt()\n except Exception: # pylint: disable=broad-except\n print_exception()\n self.mlprompt = '<multiline prompt error> '\n return self.mlprompt\n env = builtins.__xonsh_env__ # pylint: disable=no-member\n p = env.get('PROMPT')\n try:\n p = partial_format_prompt(p)\n except Exception: # pylint: disable=broad-except\n print_exception()\n self.settitle()\n return p\n\n def format_color(self, string, **kwargs):\n \"\"\"Formats the colors in a string. This base implmentation does not\n actually do any coloring, but just returns the string directly.\n \"\"\"\n return string\n\n def print_color(self, string, **kwargs):\n \"\"\"Prints a string in color. This base implmentation does not actually\n do any coloring, but just prints the string directly.\n \"\"\"\n if not isinstance(string, str):\n string = ''.join([x for _, x in string])\n print(string, **kwargs)\n\n def color_style_names(self):\n \"\"\"Returns an iterable of all available style names.\"\"\"\n return ()\n\n def color_style(self):\n \"\"\"Returns the current color map.\"\"\"\n return {}\n", "path": "xonsh/base_shell.py"}]}
| 4,004 | 237 |
gh_patches_debug_3417
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-3471
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`test-pairlist` fails if no markets loaded
`test-pairlist` fails in unexpected way if markets were not able to be loaded (due to 4xx error, for example);
This should be handled gracefully with a DependencyException (for example, or OperationalException) exception raised, an error message and exit code 1, it should not crash with traceback.
```
$ freqtrade test-pairlist -c config_kraken.json
2020-06-09 12:46:47,312 - freqtrade.loggers - INFO - Verbosity set to 0
2020-06-09 12:46:47,312 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...
2020-06-09 12:46:47,313 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/kraken ...
2020-06-09 12:46:47,313 - freqtrade.configuration.check_exchange - INFO - Checking exchange...
2020-06-09 12:46:47,313 - freqtrade.configuration.check_exchange - INFO - Exchange "kraken" is officially supported by the Freqtrade development team.
2020-06-09 12:46:47,313 - freqtrade.configuration.configuration - INFO - Using pairlist from configuration.
2020-06-09 12:46:47,313 - freqtrade.configuration.config_validation - INFO - Validating configuration ...
2020-06-09 12:46:47,315 - freqtrade.exchange.exchange - INFO - Instance is running with dry_run enabled
2020-06-09 12:46:47,315 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True}
2020-06-09 12:46:47,317 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True, 'rateLimit': 3100}
2020-06-09 12:46:47,320 - freqtrade.exchange.exchange - INFO - Using Exchange "Kraken"
2020-06-09 12:46:47,320 - freqtrade.resolvers.exchange_resolver - INFO - Using resolved exchange 'Kraken'...
2020-06-09 12:46:47,324 - freqtrade.resolvers.iresolver - INFO - Using resolved pairlist StaticPairList from '/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/StaticPairList.py'...
2020-06-09 12:46:47,324 - freqtrade.exchange.exchange - WARNING - Markets were not loaded. Loading them now..
2020-06-09 12:46:53,817 - freqtrade.exchange.exchange - WARNING - Unable to initialize markets. Reason: GET https://kraken.zendesk.com/api/v2/help_center/en-us/articles/205893708 429 Too Many Requests Number of allowed API requests per minute exceeded
2020-06-09 12:46:53,818 - freqtrade - ERROR - Fatal exception!
Traceback (most recent call last):
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py", line 36, in main
return_code = args['func'](args)
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/commands/pairlist_commands.py", line 30, in start_test_pairlist
pairlists.refresh_pairlist()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/pairlistmanager.py", line 91, in refresh_pairlist
pairlist = self._pairlist_handlers[0].gen_pairlist(self._whitelist, tickers)
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/StaticPairList.py", line 50, in gen_pairlist
return self._whitelist_for_active_markets(self._config['exchange']['pair_whitelist'])
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/IPairList.py", line 157, in _whitelist_for_active_markets
if pair not in markets:
TypeError: argument of type 'NoneType' is not iterable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/pairlist/IPairList.py`
Content:
```
1 """
2 PairList Handler base class
3 """
4 import logging
5 from abc import ABC, abstractmethod, abstractproperty
6 from copy import deepcopy
7 from typing import Any, Dict, List
8
9 from cachetools import TTLCache, cached
10
11 from freqtrade.exceptions import OperationalException
12 from freqtrade.exchange import market_is_active
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class IPairList(ABC):
19
20 def __init__(self, exchange, pairlistmanager,
21 config: Dict[str, Any], pairlistconfig: Dict[str, Any],
22 pairlist_pos: int) -> None:
23 """
24 :param exchange: Exchange instance
25 :param pairlistmanager: Instantiated Pairlist manager
26 :param config: Global bot configuration
27 :param pairlistconfig: Configuration for this Pairlist Handler - can be empty.
28 :param pairlist_pos: Position of the Pairlist Handler in the chain
29 """
30 self._enabled = True
31
32 self._exchange = exchange
33 self._pairlistmanager = pairlistmanager
34 self._config = config
35 self._pairlistconfig = pairlistconfig
36 self._pairlist_pos = pairlist_pos
37 self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)
38 self._last_refresh = 0
39 self._log_cache = TTLCache(maxsize=1024, ttl=self.refresh_period)
40
41 @property
42 def name(self) -> str:
43 """
44 Gets name of the class
45 -> no need to overwrite in subclasses
46 """
47 return self.__class__.__name__
48
49 def log_on_refresh(self, logmethod, message: str) -> None:
50 """
51 Logs message - not more often than "refresh_period" to avoid log spamming
52 Logs the log-message as debug as well to simplify debugging.
53 :param logmethod: Function that'll be called. Most likely `logger.info`.
54 :param message: String containing the message to be sent to the function.
55 :return: None.
56 """
57
58 @cached(cache=self._log_cache)
59 def _log_on_refresh(message: str):
60 logmethod(message)
61
62 # Log as debug first
63 logger.debug(message)
64 # Call hidden function.
65 _log_on_refresh(message)
66
67 @abstractproperty
68 def needstickers(self) -> bool:
69 """
70 Boolean property defining if tickers are necessary.
71 If no Pairlist requries tickers, an empty List is passed
72 as tickers argument to filter_pairlist
73 """
74
75 @abstractmethod
76 def short_desc(self) -> str:
77 """
78 Short whitelist method description - used for startup-messages
79 -> Please overwrite in subclasses
80 """
81
82 def _validate_pair(self, ticker) -> bool:
83 """
84 Check one pair against Pairlist Handler's specific conditions.
85
86 Either implement it in the Pairlist Handler or override the generic
87 filter_pairlist() method.
88
89 :param ticker: ticker dict as returned from ccxt.load_markets()
90 :return: True if the pair can stay, false if it should be removed
91 """
92 raise NotImplementedError()
93
94 def gen_pairlist(self, cached_pairlist: List[str], tickers: Dict) -> List[str]:
95 """
96 Generate the pairlist.
97
98 This method is called once by the pairlistmanager in the refresh_pairlist()
99 method to supply the starting pairlist for the chain of the Pairlist Handlers.
100 Pairlist Filters (those Pairlist Handlers that cannot be used at the first
101 position in the chain) shall not override this base implementation --
102 it will raise the exception if a Pairlist Handler is used at the first
103 position in the chain.
104
105 :param cached_pairlist: Previously generated pairlist (cached)
106 :param tickers: Tickers (from exchange.get_tickers()).
107 :return: List of pairs
108 """
109 raise OperationalException("This Pairlist Handler should not be used "
110 "at the first position in the list of Pairlist Handlers.")
111
112 def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
113 """
114 Filters and sorts pairlist and returns the whitelist again.
115
116 Called on each bot iteration - please use internal caching if necessary
117 This generic implementation calls self._validate_pair() for each pair
118 in the pairlist.
119
120 Some Pairlist Handlers override this generic implementation and employ
121 own filtration.
122
123 :param pairlist: pairlist to filter or sort
124 :param tickers: Tickers (from exchange.get_tickers()). May be cached.
125 :return: new whitelist
126 """
127 if self._enabled:
128 # Copy list since we're modifying this list
129 for p in deepcopy(pairlist):
130 # Filter out assets
131 if not self._validate_pair(tickers[p]):
132 pairlist.remove(p)
133
134 return pairlist
135
136 def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:
137 """
138 Proxy method to verify_blacklist for easy access for child classes.
139 :param pairlist: Pairlist to validate
140 :param logmethod: Function that'll be called, `logger.info` or `logger.warning`.
141 :return: pairlist - blacklisted pairs
142 """
143 return self._pairlistmanager.verify_blacklist(pairlist, logmethod)
144
145 def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:
146 """
147 Check available markets and remove pair from whitelist if necessary
148 :param whitelist: the sorted list of pairs the user might want to trade
149 :return: the list of pairs the user wants to trade without those unavailable or
150 black_listed
151 """
152 markets = self._exchange.markets
153
154 sanitized_whitelist: List[str] = []
155 for pair in pairlist:
156 # pair is not in the generated dynamic market or has the wrong stake currency
157 if pair not in markets:
158 logger.warning(f"Pair {pair} is not compatible with exchange "
159 f"{self._exchange.name}. Removing it from whitelist..")
160 continue
161
162 if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:
163 logger.warning(f"Pair {pair} is not compatible with your stake currency "
164 f"{self._config['stake_currency']}. Removing it from whitelist..")
165 continue
166
167 # Check if market is active
168 market = markets[pair]
169 if not market_is_active(market):
170 logger.info(f"Ignoring {pair} from whitelist. Market is not active.")
171 continue
172 if pair not in sanitized_whitelist:
173 sanitized_whitelist.append(pair)
174
175 # We need to remove pairs that are unknown
176 return sanitized_whitelist
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/freqtrade/pairlist/IPairList.py b/freqtrade/pairlist/IPairList.py
--- a/freqtrade/pairlist/IPairList.py
+++ b/freqtrade/pairlist/IPairList.py
@@ -150,6 +150,9 @@
black_listed
"""
markets = self._exchange.markets
+ if not markets:
+ raise OperationalException(
+ 'Markets not loaded. Make sure that exchange is initialized correctly.')
sanitized_whitelist: List[str] = []
for pair in pairlist:
|
{"golden_diff": "diff --git a/freqtrade/pairlist/IPairList.py b/freqtrade/pairlist/IPairList.py\n--- a/freqtrade/pairlist/IPairList.py\n+++ b/freqtrade/pairlist/IPairList.py\n@@ -150,6 +150,9 @@\n black_listed\n \"\"\"\n markets = self._exchange.markets\n+ if not markets:\n+ raise OperationalException(\n+ 'Markets not loaded. Make sure that exchange is initialized correctly.')\n \n sanitized_whitelist: List[str] = []\n for pair in pairlist:\n", "issue": "`test-pairlist` fails if no markets loaded\n`test-pairlist` fails in unexpected way if markets were not able to be loaded (due to 4xx error, for example);\r\n\r\nThis should be handled gracefully with a DependencyException (for example, or OperationalException) exception raised, an error message and exit code 1, it should not crash with traceback.\r\n\r\n```\r\n$ freqtrade test-pairlist -c config_kraken.json\r\n2020-06-09 12:46:47,312 - freqtrade.loggers - INFO - Verbosity set to 0\r\n2020-06-09 12:46:47,312 - freqtrade.configuration.configuration - INFO - Using user-data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data ...\r\n2020-06-09 12:46:47,313 - freqtrade.configuration.configuration - INFO - Using data directory: /home/user/freqtrade-wrk/github-hroff-1902/freqtrade/user_data/data/kraken ...\r\n2020-06-09 12:46:47,313 - freqtrade.configuration.check_exchange - INFO - Checking exchange...\r\n2020-06-09 12:46:47,313 - freqtrade.configuration.check_exchange - INFO - Exchange \"kraken\" is officially supported by the Freqtrade development team.\r\n2020-06-09 12:46:47,313 - freqtrade.configuration.configuration - INFO - Using pairlist from configuration.\r\n2020-06-09 12:46:47,313 - freqtrade.configuration.config_validation - INFO - Validating configuration ...\r\n2020-06-09 12:46:47,315 - freqtrade.exchange.exchange - INFO - Instance is running with dry_run enabled\r\n2020-06-09 12:46:47,315 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True}\r\n2020-06-09 12:46:47,317 - freqtrade.exchange.exchange - INFO - Applying additional ccxt config: {'enableRateLimit': True, 'rateLimit': 3100}\r\n2020-06-09 12:46:47,320 - freqtrade.exchange.exchange - INFO - Using Exchange \"Kraken\"\r\n2020-06-09 12:46:47,320 - freqtrade.resolvers.exchange_resolver - INFO - Using resolved exchange 'Kraken'...\r\n2020-06-09 12:46:47,324 - freqtrade.resolvers.iresolver - INFO - Using resolved pairlist StaticPairList from '/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/StaticPairList.py'...\r\n2020-06-09 12:46:47,324 - freqtrade.exchange.exchange - WARNING - Markets were not loaded. Loading them now..\r\n2020-06-09 12:46:53,817 - freqtrade.exchange.exchange - WARNING - Unable to initialize markets. Reason: GET https://kraken.zendesk.com/api/v2/help_center/en-us/articles/205893708 429 Too Many Requests Number of allowed API requests per minute exceeded\r\n2020-06-09 12:46:53,818 - freqtrade - ERROR - Fatal exception!\r\nTraceback (most recent call last):\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py\", line 36, in main\r\n return_code = args['func'](args)\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/commands/pairlist_commands.py\", line 30, in start_test_pairlist\r\n pairlists.refresh_pairlist()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/pairlistmanager.py\", line 91, in refresh_pairlist\r\n pairlist = self._pairlist_handlers[0].gen_pairlist(self._whitelist, tickers)\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/StaticPairList.py\", line 50, in gen_pairlist\r\n return self._whitelist_for_active_markets(self._config['exchange']['pair_whitelist'])\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/pairlist/IPairList.py\", line 157, in _whitelist_for_active_markets\r\n if pair not in markets:\r\nTypeError: argument of type 'NoneType' is not iterable\r\n```\n", "before_files": [{"content": "\"\"\"\nPairList Handler base class\n\"\"\"\nimport logging\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\n\nfrom cachetools import TTLCache, cached\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.exchange import market_is_active\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPairList(ABC):\n\n def __init__(self, exchange, pairlistmanager,\n config: Dict[str, Any], pairlistconfig: Dict[str, Any],\n pairlist_pos: int) -> None:\n \"\"\"\n :param exchange: Exchange instance\n :param pairlistmanager: Instantiated Pairlist manager\n :param config: Global bot configuration\n :param pairlistconfig: Configuration for this Pairlist Handler - can be empty.\n :param pairlist_pos: Position of the Pairlist Handler in the chain\n \"\"\"\n self._enabled = True\n\n self._exchange = exchange\n self._pairlistmanager = pairlistmanager\n self._config = config\n self._pairlistconfig = pairlistconfig\n self._pairlist_pos = pairlist_pos\n self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)\n self._last_refresh = 0\n self._log_cache = TTLCache(maxsize=1024, ttl=self.refresh_period)\n\n @property\n def name(self) -> str:\n \"\"\"\n Gets name of the class\n -> no need to overwrite in subclasses\n \"\"\"\n return self.__class__.__name__\n\n def log_on_refresh(self, logmethod, message: str) -> None:\n \"\"\"\n Logs message - not more often than \"refresh_period\" to avoid log spamming\n Logs the log-message as debug as well to simplify debugging.\n :param logmethod: Function that'll be called. Most likely `logger.info`.\n :param message: String containing the message to be sent to the function.\n :return: None.\n \"\"\"\n\n @cached(cache=self._log_cache)\n def _log_on_refresh(message: str):\n logmethod(message)\n\n # Log as debug first\n logger.debug(message)\n # Call hidden function.\n _log_on_refresh(message)\n\n @abstractproperty\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n\n @abstractmethod\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n -> Please overwrite in subclasses\n \"\"\"\n\n def _validate_pair(self, ticker) -> bool:\n \"\"\"\n Check one pair against Pairlist Handler's specific conditions.\n\n Either implement it in the Pairlist Handler or override the generic\n filter_pairlist() method.\n\n :param ticker: ticker dict as returned from ccxt.load_markets()\n :return: True if the pair can stay, false if it should be removed\n \"\"\"\n raise NotImplementedError()\n\n def gen_pairlist(self, cached_pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Generate the pairlist.\n\n This method is called once by the pairlistmanager in the refresh_pairlist()\n method to supply the starting pairlist for the chain of the Pairlist Handlers.\n Pairlist Filters (those Pairlist Handlers that cannot be used at the first\n position in the chain) shall not override this base implementation --\n it will raise the exception if a Pairlist Handler is used at the first\n position in the chain.\n\n :param cached_pairlist: Previously generated pairlist (cached)\n :param tickers: Tickers (from exchange.get_tickers()).\n :return: List of pairs\n \"\"\"\n raise OperationalException(\"This Pairlist Handler should not be used \"\n \"at the first position in the list of Pairlist Handlers.\")\n\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n\n Called on each bot iteration - please use internal caching if necessary\n This generic implementation calls self._validate_pair() for each pair\n in the pairlist.\n\n Some Pairlist Handlers override this generic implementation and employ\n own filtration.\n\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n if self._enabled:\n # Copy list since we're modifying this list\n for p in deepcopy(pairlist):\n # Filter out assets\n if not self._validate_pair(tickers[p]):\n pairlist.remove(p)\n\n return pairlist\n\n def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:\n \"\"\"\n Proxy method to verify_blacklist for easy access for child classes.\n :param pairlist: Pairlist to validate\n :param logmethod: Function that'll be called, `logger.info` or `logger.warning`.\n :return: pairlist - blacklisted pairs\n \"\"\"\n return self._pairlistmanager.verify_blacklist(pairlist, logmethod)\n\n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n Check available markets and remove pair from whitelist if necessary\n :param whitelist: the sorted list of pairs the user might want to trade\n :return: the list of pairs the user wants to trade without those unavailable or\n black_listed\n \"\"\"\n markets = self._exchange.markets\n\n sanitized_whitelist: List[str] = []\n for pair in pairlist:\n # pair is not in the generated dynamic market or has the wrong stake currency\n if pair not in markets:\n logger.warning(f\"Pair {pair} is not compatible with exchange \"\n f\"{self._exchange.name}. Removing it from whitelist..\")\n continue\n\n if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:\n logger.warning(f\"Pair {pair} is not compatible with your stake currency \"\n f\"{self._config['stake_currency']}. Removing it from whitelist..\")\n continue\n\n # Check if market is active\n market = markets[pair]\n if not market_is_active(market):\n logger.info(f\"Ignoring {pair} from whitelist. Market is not active.\")\n continue\n if pair not in sanitized_whitelist:\n sanitized_whitelist.append(pair)\n\n # We need to remove pairs that are unknown\n return sanitized_whitelist\n", "path": "freqtrade/pairlist/IPairList.py"}], "after_files": [{"content": "\"\"\"\nPairList Handler base class\n\"\"\"\nimport logging\nfrom abc import ABC, abstractmethod, abstractproperty\nfrom copy import deepcopy\nfrom typing import Any, Dict, List\n\nfrom cachetools import TTLCache, cached\n\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.exchange import market_is_active\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass IPairList(ABC):\n\n def __init__(self, exchange, pairlistmanager,\n config: Dict[str, Any], pairlistconfig: Dict[str, Any],\n pairlist_pos: int) -> None:\n \"\"\"\n :param exchange: Exchange instance\n :param pairlistmanager: Instantiated Pairlist manager\n :param config: Global bot configuration\n :param pairlistconfig: Configuration for this Pairlist Handler - can be empty.\n :param pairlist_pos: Position of the Pairlist Handler in the chain\n \"\"\"\n self._enabled = True\n\n self._exchange = exchange\n self._pairlistmanager = pairlistmanager\n self._config = config\n self._pairlistconfig = pairlistconfig\n self._pairlist_pos = pairlist_pos\n self.refresh_period = self._pairlistconfig.get('refresh_period', 1800)\n self._last_refresh = 0\n self._log_cache = TTLCache(maxsize=1024, ttl=self.refresh_period)\n\n @property\n def name(self) -> str:\n \"\"\"\n Gets name of the class\n -> no need to overwrite in subclasses\n \"\"\"\n return self.__class__.__name__\n\n def log_on_refresh(self, logmethod, message: str) -> None:\n \"\"\"\n Logs message - not more often than \"refresh_period\" to avoid log spamming\n Logs the log-message as debug as well to simplify debugging.\n :param logmethod: Function that'll be called. Most likely `logger.info`.\n :param message: String containing the message to be sent to the function.\n :return: None.\n \"\"\"\n\n @cached(cache=self._log_cache)\n def _log_on_refresh(message: str):\n logmethod(message)\n\n # Log as debug first\n logger.debug(message)\n # Call hidden function.\n _log_on_refresh(message)\n\n @abstractproperty\n def needstickers(self) -> bool:\n \"\"\"\n Boolean property defining if tickers are necessary.\n If no Pairlist requries tickers, an empty List is passed\n as tickers argument to filter_pairlist\n \"\"\"\n\n @abstractmethod\n def short_desc(self) -> str:\n \"\"\"\n Short whitelist method description - used for startup-messages\n -> Please overwrite in subclasses\n \"\"\"\n\n def _validate_pair(self, ticker) -> bool:\n \"\"\"\n Check one pair against Pairlist Handler's specific conditions.\n\n Either implement it in the Pairlist Handler or override the generic\n filter_pairlist() method.\n\n :param ticker: ticker dict as returned from ccxt.load_markets()\n :return: True if the pair can stay, false if it should be removed\n \"\"\"\n raise NotImplementedError()\n\n def gen_pairlist(self, cached_pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Generate the pairlist.\n\n This method is called once by the pairlistmanager in the refresh_pairlist()\n method to supply the starting pairlist for the chain of the Pairlist Handlers.\n Pairlist Filters (those Pairlist Handlers that cannot be used at the first\n position in the chain) shall not override this base implementation --\n it will raise the exception if a Pairlist Handler is used at the first\n position in the chain.\n\n :param cached_pairlist: Previously generated pairlist (cached)\n :param tickers: Tickers (from exchange.get_tickers()).\n :return: List of pairs\n \"\"\"\n raise OperationalException(\"This Pairlist Handler should not be used \"\n \"at the first position in the list of Pairlist Handlers.\")\n\n def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:\n \"\"\"\n Filters and sorts pairlist and returns the whitelist again.\n\n Called on each bot iteration - please use internal caching if necessary\n This generic implementation calls self._validate_pair() for each pair\n in the pairlist.\n\n Some Pairlist Handlers override this generic implementation and employ\n own filtration.\n\n :param pairlist: pairlist to filter or sort\n :param tickers: Tickers (from exchange.get_tickers()). May be cached.\n :return: new whitelist\n \"\"\"\n if self._enabled:\n # Copy list since we're modifying this list\n for p in deepcopy(pairlist):\n # Filter out assets\n if not self._validate_pair(tickers[p]):\n pairlist.remove(p)\n\n return pairlist\n\n def verify_blacklist(self, pairlist: List[str], logmethod) -> List[str]:\n \"\"\"\n Proxy method to verify_blacklist for easy access for child classes.\n :param pairlist: Pairlist to validate\n :param logmethod: Function that'll be called, `logger.info` or `logger.warning`.\n :return: pairlist - blacklisted pairs\n \"\"\"\n return self._pairlistmanager.verify_blacklist(pairlist, logmethod)\n\n def _whitelist_for_active_markets(self, pairlist: List[str]) -> List[str]:\n \"\"\"\n Check available markets and remove pair from whitelist if necessary\n :param whitelist: the sorted list of pairs the user might want to trade\n :return: the list of pairs the user wants to trade without those unavailable or\n black_listed\n \"\"\"\n markets = self._exchange.markets\n if not markets:\n raise OperationalException(\n 'Markets not loaded. Make sure that exchange is initialized correctly.')\n\n sanitized_whitelist: List[str] = []\n for pair in pairlist:\n # pair is not in the generated dynamic market or has the wrong stake currency\n if pair not in markets:\n logger.warning(f\"Pair {pair} is not compatible with exchange \"\n f\"{self._exchange.name}. Removing it from whitelist..\")\n continue\n\n if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']:\n logger.warning(f\"Pair {pair} is not compatible with your stake currency \"\n f\"{self._config['stake_currency']}. Removing it from whitelist..\")\n continue\n\n # Check if market is active\n market = markets[pair]\n if not market_is_active(market):\n logger.info(f\"Ignoring {pair} from whitelist. Market is not active.\")\n continue\n if pair not in sanitized_whitelist:\n sanitized_whitelist.append(pair)\n\n # We need to remove pairs that are unknown\n return sanitized_whitelist\n", "path": "freqtrade/pairlist/IPairList.py"}]}
| 3,283 | 128 |
gh_patches_debug_29136
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2404
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add links section to text format
Update the [text based rule report](https://github.com/RedHatInsights/insights-core/blob/master/insights/formats/text.py) to automatically include links associated with rules. simple_html and html already include them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/formats/text.py`
Content:
```
1 from __future__ import print_function
2 import six
3 import sys
4 import inspect
5 from collections import namedtuple
6
7 from pprint import pprint
8 from insights import dr, datasource, rule, condition, incident, parser
9 from insights.core.context import ExecutionContext
10 from insights.formats import Formatter, FormatterAdapter, render
11
12
13 try:
14 from colorama import Fore, Style, init
15 init()
16 except ImportError:
17 print("Install colorama if console colors are preferred.")
18
19 class Default(type):
20 def __getattr__(*args):
21 return ""
22
23 class Fore(six.with_metaclass(Default)):
24 pass
25
26 class Style(six.with_metaclass(Default)):
27 pass
28
29
30 def _find_context(broker):
31 for k, v in broker.instances.items():
32 if inspect.isclass(k) and issubclass(k, ExecutionContext):
33 return v
34
35
36 class HumanReadableFormat(Formatter):
37 """
38 This class prints a human readable summary of rule hits. It should be used
39 as a context manager and given an instance of an
40 ``insights.core.dr.Broker``. ``dr.run`` should be called within the context
41 using the same broker.
42
43 Args:
44 broker (Broker): the broker to watch and provide a summary about.
45 missing (bool): shows rules with missing dependencies if True.
46 tracebacks (bool): shows tracebacks if any exceptions were raised.
47 Useful for debugging.
48 dropped (bool): Shows any files that weren't collected if running
49 against an archive. Included for a corner case and typically not
50 used in general.
51 stream (file-like): Output is written to stream. Defaults to sys.stdout.
52 """
53 def __init__(self, broker,
54 missing=False,
55 tracebacks=False,
56 dropped=False,
57 fail_only=False,
58 stream=sys.stdout):
59 self.broker = broker
60 self.missing = missing
61 self.tracebacks = tracebacks
62 self.dropped = dropped
63 self.fail_only = fail_only
64 self.stream = stream
65
66 def print_header(self, header, color):
67 ln = len(header)
68 print(color + '-' * ln, file=self.stream)
69 print(header, file=self.stream)
70 print('-' * ln + Style.RESET_ALL, file=self.stream)
71
72 def preprocess(self):
73 response = namedtuple('response', 'color label intl title')
74 self.responses = {
75 'pass': response(color=Fore.GREEN, label="PASS", intl='P', title="Passed : "),
76 'rule': response(color=Fore.RED, label="FAIL", intl='F', title="Failed : "),
77 'info': response(color=Fore.WHITE, label="INFO", intl='I', title="Info : "),
78 'skip': response(color=Fore.BLUE, label="SKIP", intl='S', title="Missing Deps: "),
79 'fingerprint': response(color=Fore.YELLOW, label="FINGERPRINT", intl='P',
80 title="Fingerprint : "),
81 'metadata': response(color=Fore.YELLOW, label="META", intl='M', title="Metadata : "),
82 'metadata_key': response(color=Fore.MAGENTA, label="META", intl='K', title="Metadata Key: "),
83 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions : ")
84 }
85
86 self.counts = {}
87 for key in self.responses:
88 self.counts[key] = 0
89
90 self.print_header("Progress:", Fore.CYAN)
91 self.broker.add_observer(self.progress_bar, rule)
92 self.broker.add_observer(self.progress_bar, condition)
93 self.broker.add_observer(self.progress_bar, incident)
94 self.broker.add_observer(self.progress_bar, parser)
95
96 def progress_bar(self, c, broker):
97 """
98 Print the formated progress information for the processed return types
99 """
100 v = broker.get(c)
101
102 if v and isinstance(v, dict) and len(v) > 0 and 'type' in v:
103 if v["type"] in self.responses:
104 print(self.responses[v["type"]].color + self.responses[v["type"]].intl + Style.RESET_ALL, end="", file=self.stream)
105 else:
106 print(".", end="", file=self.stream)
107 elif c in broker.exceptions:
108 self.counts['exception'] += len(broker.exceptions[c])
109 print(Fore.RED + "E" + Style.RESET_ALL, end="", file=self.stream)
110 return self
111
112 def show_tracebacks(self):
113 """ Show tracebacks """
114 if self.broker.tracebacks:
115 print(file=self.stream)
116 print("Tracebacks:", file=self.stream)
117 for t in self.broker.tracebacks.values():
118 print(t, file=self.stream)
119
120 def show_dropped(self):
121 """ Show dropped files """
122 ctx = _find_context(self.broker)
123 if ctx and ctx.all_files:
124 ds = self.broker.get_by_type(datasource)
125 vals = []
126 for v in ds.values():
127 if isinstance(v, list):
128 vals.extend(d.path for d in v)
129 else:
130 vals.append(v.path)
131 dropped = set(ctx.all_files) - set(vals)
132 pprint("Dropped Files:", stream=self.stream)
133 pprint(dropped, indent=4, stream=self.stream)
134
135 def show_description(self):
136 """ Prints the formatted response for the matching return type """
137
138 def printit(c, v):
139 resp = self.responses[v["type"]]
140 name = "[%s] %s" % (resp.label, dr.get_name(c))
141 underline = "-" * len(name)
142 name = "%s%s%s" % (resp.color, name, Style.RESET_ALL)
143 print(name, file=self.stream)
144 print(underline, file=self.stream)
145 print(render(c, v), file=self.stream)
146 print(file=self.stream)
147
148 for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
149 v = self.broker[c]
150 _type = v.get('type')
151 if _type in self.responses:
152 self.counts[_type] += 1
153 if (_type and ((self.fail_only and _type == 'rule') or
154 ((self.missing and _type == 'skip') or
155 (not self.fail_only and _type != 'skip')))):
156 printit(c, v)
157 print(file=self.stream)
158
159 self.print_header("Rule Execution Summary", Fore.CYAN)
160 for c in self.counts:
161 print(self.responses[c].color + self.responses[c].title + str(self.counts[c]) + Style.RESET_ALL, file=self.stream)
162
163 def postprocess(self):
164 if self.tracebacks:
165 self.show_tracebacks()
166 if self.dropped:
167 self.show_dropped()
168
169 print(file=self.stream)
170 print(file=self.stream)
171 self.print_header("Rules Executed", Fore.CYAN)
172 self.show_description()
173
174
175 class HumanReadableFormatAdapter(FormatterAdapter):
176 """ Displays results in a human readable format. """
177
178 @staticmethod
179 def configure(p):
180 p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true")
181 p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true")
182 p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true")
183 p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true")
184
185 def __init__(self, args):
186 self.missing = args.missing
187 self.tracebacks = args.tracebacks
188 self.dropped = args.dropped
189 self.fail_only = args.fail_only
190 self.formatter = None
191 if self.missing and self.fail_only:
192 print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr)
193 self.fail_only = False
194
195 def preprocess(self, broker):
196 self.formatter = HumanReadableFormat(broker,
197 self.missing, self.tracebacks, self.dropped, self.fail_only)
198 self.formatter.preprocess()
199
200 def postprocess(self, broker):
201 self.formatter.postprocess()
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/insights/formats/text.py b/insights/formats/text.py
--- a/insights/formats/text.py
+++ b/insights/formats/text.py
@@ -5,6 +5,7 @@
from collections import namedtuple
from pprint import pprint
+from six import StringIO
from insights import dr, datasource, rule, condition, incident, parser
from insights.core.context import ExecutionContext
from insights.formats import Formatter, FormatterAdapter, render
@@ -33,6 +34,24 @@
return v
+def render_links(component):
+ links = dr.get_delegate(component).links or {}
+ if any(links.values()):
+ space = " " * 4
+ dbl_space = space * 2
+ output = StringIO()
+ output.write("Links:\n")
+ for key in sorted(links):
+ values = sorted(links[key])
+ if values:
+ output.write(space + key + ":\n")
+ for v in values:
+ output.write(dbl_space + v + "\n")
+ output.seek(0)
+ return output.read()
+ return ""
+
+
class HumanReadableFormat(Formatter):
"""
This class prints a human readable summary of rule hits. It should be used
@@ -142,6 +161,7 @@
name = "%s%s%s" % (resp.color, name, Style.RESET_ALL)
print(name, file=self.stream)
print(underline, file=self.stream)
+ print(render_links(c), file=self.stream)
print(render(c, v), file=self.stream)
print(file=self.stream)
|
{"golden_diff": "diff --git a/insights/formats/text.py b/insights/formats/text.py\n--- a/insights/formats/text.py\n+++ b/insights/formats/text.py\n@@ -5,6 +5,7 @@\n from collections import namedtuple\n \n from pprint import pprint\n+from six import StringIO\n from insights import dr, datasource, rule, condition, incident, parser\n from insights.core.context import ExecutionContext\n from insights.formats import Formatter, FormatterAdapter, render\n@@ -33,6 +34,24 @@\n return v\n \n \n+def render_links(component):\n+ links = dr.get_delegate(component).links or {}\n+ if any(links.values()):\n+ space = \" \" * 4\n+ dbl_space = space * 2\n+ output = StringIO()\n+ output.write(\"Links:\\n\")\n+ for key in sorted(links):\n+ values = sorted(links[key])\n+ if values:\n+ output.write(space + key + \":\\n\")\n+ for v in values:\n+ output.write(dbl_space + v + \"\\n\")\n+ output.seek(0)\n+ return output.read()\n+ return \"\"\n+\n+\n class HumanReadableFormat(Formatter):\n \"\"\"\n This class prints a human readable summary of rule hits. It should be used\n@@ -142,6 +161,7 @@\n name = \"%s%s%s\" % (resp.color, name, Style.RESET_ALL)\n print(name, file=self.stream)\n print(underline, file=self.stream)\n+ print(render_links(c), file=self.stream)\n print(render(c, v), file=self.stream)\n print(file=self.stream)\n", "issue": "Add links section to text format\nUpdate the [text based rule report](https://github.com/RedHatInsights/insights-core/blob/master/insights/formats/text.py) to automatically include links associated with rules. simple_html and html already include them.\n", "before_files": [{"content": "from __future__ import print_function\nimport six\nimport sys\nimport inspect\nfrom collections import namedtuple\n\nfrom pprint import pprint\nfrom insights import dr, datasource, rule, condition, incident, parser\nfrom insights.core.context import ExecutionContext\nfrom insights.formats import Formatter, FormatterAdapter, render\n\n\ntry:\n from colorama import Fore, Style, init\n init()\nexcept ImportError:\n print(\"Install colorama if console colors are preferred.\")\n\n class Default(type):\n def __getattr__(*args):\n return \"\"\n\n class Fore(six.with_metaclass(Default)):\n pass\n\n class Style(six.with_metaclass(Default)):\n pass\n\n\ndef _find_context(broker):\n for k, v in broker.instances.items():\n if inspect.isclass(k) and issubclass(k, ExecutionContext):\n return v\n\n\nclass HumanReadableFormat(Formatter):\n \"\"\"\n This class prints a human readable summary of rule hits. It should be used\n as a context manager and given an instance of an\n ``insights.core.dr.Broker``. ``dr.run`` should be called within the context\n using the same broker.\n\n Args:\n broker (Broker): the broker to watch and provide a summary about.\n missing (bool): shows rules with missing dependencies if True.\n tracebacks (bool): shows tracebacks if any exceptions were raised.\n Useful for debugging.\n dropped (bool): Shows any files that weren't collected if running\n against an archive. Included for a corner case and typically not\n used in general.\n stream (file-like): Output is written to stream. Defaults to sys.stdout.\n \"\"\"\n def __init__(self, broker,\n missing=False,\n tracebacks=False,\n dropped=False,\n fail_only=False,\n stream=sys.stdout):\n self.broker = broker\n self.missing = missing\n self.tracebacks = tracebacks\n self.dropped = dropped\n self.fail_only = fail_only\n self.stream = stream\n\n def print_header(self, header, color):\n ln = len(header)\n print(color + '-' * ln, file=self.stream)\n print(header, file=self.stream)\n print('-' * ln + Style.RESET_ALL, file=self.stream)\n\n def preprocess(self):\n response = namedtuple('response', 'color label intl title')\n self.responses = {\n 'pass': response(color=Fore.GREEN, label=\"PASS\", intl='P', title=\"Passed : \"),\n 'rule': response(color=Fore.RED, label=\"FAIL\", intl='F', title=\"Failed : \"),\n 'info': response(color=Fore.WHITE, label=\"INFO\", intl='I', title=\"Info : \"),\n 'skip': response(color=Fore.BLUE, label=\"SKIP\", intl='S', title=\"Missing Deps: \"),\n 'fingerprint': response(color=Fore.YELLOW, label=\"FINGERPRINT\", intl='P',\n title=\"Fingerprint : \"),\n 'metadata': response(color=Fore.YELLOW, label=\"META\", intl='M', title=\"Metadata : \"),\n 'metadata_key': response(color=Fore.MAGENTA, label=\"META\", intl='K', title=\"Metadata Key: \"),\n 'exception': response(color=Fore.RED, label=\"EXCEPT\", intl='E', title=\"Exceptions : \")\n }\n\n self.counts = {}\n for key in self.responses:\n self.counts[key] = 0\n\n self.print_header(\"Progress:\", Fore.CYAN)\n self.broker.add_observer(self.progress_bar, rule)\n self.broker.add_observer(self.progress_bar, condition)\n self.broker.add_observer(self.progress_bar, incident)\n self.broker.add_observer(self.progress_bar, parser)\n\n def progress_bar(self, c, broker):\n \"\"\"\n Print the formated progress information for the processed return types\n \"\"\"\n v = broker.get(c)\n\n if v and isinstance(v, dict) and len(v) > 0 and 'type' in v:\n if v[\"type\"] in self.responses:\n print(self.responses[v[\"type\"]].color + self.responses[v[\"type\"]].intl + Style.RESET_ALL, end=\"\", file=self.stream)\n else:\n print(\".\", end=\"\", file=self.stream)\n elif c in broker.exceptions:\n self.counts['exception'] += len(broker.exceptions[c])\n print(Fore.RED + \"E\" + Style.RESET_ALL, end=\"\", file=self.stream)\n return self\n\n def show_tracebacks(self):\n \"\"\" Show tracebacks \"\"\"\n if self.broker.tracebacks:\n print(file=self.stream)\n print(\"Tracebacks:\", file=self.stream)\n for t in self.broker.tracebacks.values():\n print(t, file=self.stream)\n\n def show_dropped(self):\n \"\"\" Show dropped files \"\"\"\n ctx = _find_context(self.broker)\n if ctx and ctx.all_files:\n ds = self.broker.get_by_type(datasource)\n vals = []\n for v in ds.values():\n if isinstance(v, list):\n vals.extend(d.path for d in v)\n else:\n vals.append(v.path)\n dropped = set(ctx.all_files) - set(vals)\n pprint(\"Dropped Files:\", stream=self.stream)\n pprint(dropped, indent=4, stream=self.stream)\n\n def show_description(self):\n \"\"\" Prints the formatted response for the matching return type \"\"\"\n\n def printit(c, v):\n resp = self.responses[v[\"type\"]]\n name = \"[%s] %s\" % (resp.label, dr.get_name(c))\n underline = \"-\" * len(name)\n name = \"%s%s%s\" % (resp.color, name, Style.RESET_ALL)\n print(name, file=self.stream)\n print(underline, file=self.stream)\n print(render(c, v), file=self.stream)\n print(file=self.stream)\n\n for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):\n v = self.broker[c]\n _type = v.get('type')\n if _type in self.responses:\n self.counts[_type] += 1\n if (_type and ((self.fail_only and _type == 'rule') or\n ((self.missing and _type == 'skip') or\n (not self.fail_only and _type != 'skip')))):\n printit(c, v)\n print(file=self.stream)\n\n self.print_header(\"Rule Execution Summary\", Fore.CYAN)\n for c in self.counts:\n print(self.responses[c].color + self.responses[c].title + str(self.counts[c]) + Style.RESET_ALL, file=self.stream)\n\n def postprocess(self):\n if self.tracebacks:\n self.show_tracebacks()\n if self.dropped:\n self.show_dropped()\n\n print(file=self.stream)\n print(file=self.stream)\n self.print_header(\"Rules Executed\", Fore.CYAN)\n self.show_description()\n\n\nclass HumanReadableFormatAdapter(FormatterAdapter):\n \"\"\" Displays results in a human readable format. \"\"\"\n\n @staticmethod\n def configure(p):\n p.add_argument(\"-m\", \"--missing\", help=\"Show missing requirements.\", action=\"store_true\")\n p.add_argument(\"-t\", \"--tracebacks\", help=\"Show stack traces.\", action=\"store_true\")\n p.add_argument(\"-d\", \"--dropped\", help=\"Show collected files that weren't processed.\", action=\"store_true\")\n p.add_argument(\"-F\", \"--fail-only\", help=\"Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together\", action=\"store_true\")\n\n def __init__(self, args):\n self.missing = args.missing\n self.tracebacks = args.tracebacks\n self.dropped = args.dropped\n self.fail_only = args.fail_only\n self.formatter = None\n if self.missing and self.fail_only:\n print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr)\n self.fail_only = False\n\n def preprocess(self, broker):\n self.formatter = HumanReadableFormat(broker,\n self.missing, self.tracebacks, self.dropped, self.fail_only)\n self.formatter.preprocess()\n\n def postprocess(self, broker):\n self.formatter.postprocess()\n", "path": "insights/formats/text.py"}], "after_files": [{"content": "from __future__ import print_function\nimport six\nimport sys\nimport inspect\nfrom collections import namedtuple\n\nfrom pprint import pprint\nfrom six import StringIO\nfrom insights import dr, datasource, rule, condition, incident, parser\nfrom insights.core.context import ExecutionContext\nfrom insights.formats import Formatter, FormatterAdapter, render\n\n\ntry:\n from colorama import Fore, Style, init\n init()\nexcept ImportError:\n print(\"Install colorama if console colors are preferred.\")\n\n class Default(type):\n def __getattr__(*args):\n return \"\"\n\n class Fore(six.with_metaclass(Default)):\n pass\n\n class Style(six.with_metaclass(Default)):\n pass\n\n\ndef _find_context(broker):\n for k, v in broker.instances.items():\n if inspect.isclass(k) and issubclass(k, ExecutionContext):\n return v\n\n\ndef render_links(component):\n links = dr.get_delegate(component).links or {}\n if any(links.values()):\n space = \" \" * 4\n dbl_space = space * 2\n output = StringIO()\n output.write(\"Links:\\n\")\n for key in sorted(links):\n values = sorted(links[key])\n if values:\n output.write(space + key + \":\\n\")\n for v in values:\n output.write(dbl_space + v + \"\\n\")\n output.seek(0)\n return output.read()\n return \"\"\n\n\nclass HumanReadableFormat(Formatter):\n \"\"\"\n This class prints a human readable summary of rule hits. It should be used\n as a context manager and given an instance of an\n ``insights.core.dr.Broker``. ``dr.run`` should be called within the context\n using the same broker.\n\n Args:\n broker (Broker): the broker to watch and provide a summary about.\n missing (bool): shows rules with missing dependencies if True.\n tracebacks (bool): shows tracebacks if any exceptions were raised.\n Useful for debugging.\n dropped (bool): Shows any files that weren't collected if running\n against an archive. Included for a corner case and typically not\n used in general.\n stream (file-like): Output is written to stream. Defaults to sys.stdout.\n \"\"\"\n def __init__(self, broker,\n missing=False,\n tracebacks=False,\n dropped=False,\n fail_only=False,\n stream=sys.stdout):\n self.broker = broker\n self.missing = missing\n self.tracebacks = tracebacks\n self.dropped = dropped\n self.fail_only = fail_only\n self.stream = stream\n\n def print_header(self, header, color):\n ln = len(header)\n print(color + '-' * ln, file=self.stream)\n print(header, file=self.stream)\n print('-' * ln + Style.RESET_ALL, file=self.stream)\n\n def preprocess(self):\n response = namedtuple('response', 'color label intl title')\n self.responses = {\n 'pass': response(color=Fore.GREEN, label=\"PASS\", intl='P', title=\"Passed : \"),\n 'rule': response(color=Fore.RED, label=\"FAIL\", intl='F', title=\"Failed : \"),\n 'info': response(color=Fore.WHITE, label=\"INFO\", intl='I', title=\"Info : \"),\n 'skip': response(color=Fore.BLUE, label=\"SKIP\", intl='S', title=\"Missing Deps: \"),\n 'fingerprint': response(color=Fore.YELLOW, label=\"FINGERPRINT\", intl='P',\n title=\"Fingerprint : \"),\n 'metadata': response(color=Fore.YELLOW, label=\"META\", intl='M', title=\"Metadata : \"),\n 'metadata_key': response(color=Fore.MAGENTA, label=\"META\", intl='K', title=\"Metadata Key: \"),\n 'exception': response(color=Fore.RED, label=\"EXCEPT\", intl='E', title=\"Exceptions : \")\n }\n\n self.counts = {}\n for key in self.responses:\n self.counts[key] = 0\n\n self.print_header(\"Progress:\", Fore.CYAN)\n self.broker.add_observer(self.progress_bar, rule)\n self.broker.add_observer(self.progress_bar, condition)\n self.broker.add_observer(self.progress_bar, incident)\n self.broker.add_observer(self.progress_bar, parser)\n\n def progress_bar(self, c, broker):\n \"\"\"\n Print the formated progress information for the processed return types\n \"\"\"\n v = broker.get(c)\n\n if v and isinstance(v, dict) and len(v) > 0 and 'type' in v:\n if v[\"type\"] in self.responses:\n print(self.responses[v[\"type\"]].color + self.responses[v[\"type\"]].intl + Style.RESET_ALL, end=\"\", file=self.stream)\n else:\n print(\".\", end=\"\", file=self.stream)\n elif c in broker.exceptions:\n self.counts['exception'] += len(broker.exceptions[c])\n print(Fore.RED + \"E\" + Style.RESET_ALL, end=\"\", file=self.stream)\n return self\n\n def show_tracebacks(self):\n \"\"\" Show tracebacks \"\"\"\n if self.broker.tracebacks:\n print(file=self.stream)\n print(\"Tracebacks:\", file=self.stream)\n for t in self.broker.tracebacks.values():\n print(t, file=self.stream)\n\n def show_dropped(self):\n \"\"\" Show dropped files \"\"\"\n ctx = _find_context(self.broker)\n if ctx and ctx.all_files:\n ds = self.broker.get_by_type(datasource)\n vals = []\n for v in ds.values():\n if isinstance(v, list):\n vals.extend(d.path for d in v)\n else:\n vals.append(v.path)\n dropped = set(ctx.all_files) - set(vals)\n pprint(\"Dropped Files:\", stream=self.stream)\n pprint(dropped, indent=4, stream=self.stream)\n\n def show_description(self):\n \"\"\" Prints the formatted response for the matching return type \"\"\"\n\n def printit(c, v):\n resp = self.responses[v[\"type\"]]\n name = \"[%s] %s\" % (resp.label, dr.get_name(c))\n underline = \"-\" * len(name)\n name = \"%s%s%s\" % (resp.color, name, Style.RESET_ALL)\n print(name, file=self.stream)\n print(underline, file=self.stream)\n print(render_links(c), file=self.stream)\n print(render(c, v), file=self.stream)\n print(file=self.stream)\n\n for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):\n v = self.broker[c]\n _type = v.get('type')\n if _type in self.responses:\n self.counts[_type] += 1\n if (_type and ((self.fail_only and _type == 'rule') or\n ((self.missing and _type == 'skip') or\n (not self.fail_only and _type != 'skip')))):\n printit(c, v)\n print(file=self.stream)\n\n self.print_header(\"Rule Execution Summary\", Fore.CYAN)\n for c in self.counts:\n print(self.responses[c].color + self.responses[c].title + str(self.counts[c]) + Style.RESET_ALL, file=self.stream)\n\n def postprocess(self):\n if self.tracebacks:\n self.show_tracebacks()\n if self.dropped:\n self.show_dropped()\n\n print(file=self.stream)\n print(file=self.stream)\n self.print_header(\"Rules Executed\", Fore.CYAN)\n self.show_description()\n\n\nclass HumanReadableFormatAdapter(FormatterAdapter):\n \"\"\" Displays results in a human readable format. \"\"\"\n\n @staticmethod\n def configure(p):\n p.add_argument(\"-m\", \"--missing\", help=\"Show missing requirements.\", action=\"store_true\")\n p.add_argument(\"-t\", \"--tracebacks\", help=\"Show stack traces.\", action=\"store_true\")\n p.add_argument(\"-d\", \"--dropped\", help=\"Show collected files that weren't processed.\", action=\"store_true\")\n p.add_argument(\"-F\", \"--fail-only\", help=\"Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together\", action=\"store_true\")\n\n def __init__(self, args):\n self.missing = args.missing\n self.tracebacks = args.tracebacks\n self.dropped = args.dropped\n self.fail_only = args.fail_only\n self.formatter = None\n if self.missing and self.fail_only:\n print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr)\n self.fail_only = False\n\n def preprocess(self, broker):\n self.formatter = HumanReadableFormat(broker,\n self.missing, self.tracebacks, self.dropped, self.fail_only)\n self.formatter.preprocess()\n\n def postprocess(self, broker):\n self.formatter.postprocess()\n", "path": "insights/formats/text.py"}]}
| 2,570 | 354 |
gh_patches_debug_11027
|
rasdani/github-patches
|
git_diff
|
mars-project__mars-1690
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Setitem for DataFrame leads to a wrong dtypes
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
Add columns for a DataFrame will lead to a wrong dtypes of input DataFrame.
**To Reproduce**
```python
In [1]: import mars.dataframe as md
In [2]: a = md.DataFrame({'a':[1,2,3]})
In [3]: a['new'] = 1
In [4]: a.op.inputs
Out[4]: [DataFrame <op=DataFrameDataSource, key=c212164d24d96ed634711c3b97f334cb>]
In [5]: a.op.inputs[0].dtypes
Out[5]:
a int64
new int64
dtype: object
```
**Expected behavior**
Input DataFrame's dtypes should have only one column.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/dataframe/indexing/setitem.py`
Content:
```
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import numpy as np
16 import pandas as pd
17 from pandas.api.types import is_list_like
18
19 from ... import opcodes
20 from ...core import OutputType
21 from ...serialize import KeyField, AnyField
22 from ...tensor.core import TENSOR_TYPE
23 from ...tiles import TilesError
24 from ..core import SERIES_TYPE, DataFrame
25 from ..initializer import Series as asseries
26 from ..operands import DataFrameOperand, DataFrameOperandMixin
27 from ..utils import parse_index
28
29
30 class DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):
31 _op_type_ = opcodes.INDEXSETVALUE
32
33 _target = KeyField('target')
34 _indexes = AnyField('indexes')
35 _value = AnyField('value')
36
37 def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):
38 super().__init__(_target=target, _indexes=indexes,
39 _value=value, _output_types=output_types, **kw)
40 if self.output_types is None:
41 self.output_types = [OutputType.dataframe]
42
43 @property
44 def target(self):
45 return self._target
46
47 @property
48 def indexes(self):
49 return self._indexes
50
51 @property
52 def value(self):
53 return self._value
54
55 def _set_inputs(self, inputs):
56 super()._set_inputs(inputs)
57 self._target = self._inputs[0]
58 if len(inputs) > 1:
59 self._value = self._inputs[-1]
60
61 def __call__(self, target: DataFrame, value):
62 inputs = [target]
63 if np.isscalar(value):
64 value_dtype = np.array(value).dtype
65 else:
66 if isinstance(value, (pd.Series, SERIES_TYPE)):
67 value = asseries(value)
68 inputs.append(value)
69 value_dtype = value.dtype
70 elif is_list_like(value) or isinstance(value, TENSOR_TYPE):
71 value = asseries(value, index=target.index)
72 inputs.append(value)
73 value_dtype = value.dtype
74 else: # pragma: no cover
75 raise TypeError('Wrong value type, could be one of scalar, Series or tensor')
76
77 if value.index_value.key != target.index_value.key: # pragma: no cover
78 raise NotImplementedError('Does not support setting value '
79 'with different index for now')
80
81 index_value = target.index_value
82 dtypes = target.dtypes.copy(deep=True)
83 dtypes.loc[self._indexes] = value_dtype
84 columns_value = parse_index(dtypes.index, store_data=True)
85 ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),
86 dtypes=dtypes, index_value=index_value,
87 columns_value=columns_value)
88 target.data = ret.data
89
90 @classmethod
91 def tile(cls, op):
92 out = op.outputs[0]
93 target = op.target
94 value = op.value
95 col = op.indexes
96 columns = target.columns_value.to_pandas()
97
98 if not np.isscalar(value):
99 # check if all chunk's index_value are identical
100 target_chunk_index_values = [c.index_value for c in target.chunks
101 if c.index[1] == 0]
102 value_chunk_index_values = [v.index_value for v in value.chunks]
103 is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \
104 all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))
105 if not is_identical:
106 # do rechunk
107 if any(np.isnan(s) for s in target.nsplits[0]) or \
108 any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover
109 raise TilesError('target or value has unknown chunk shape')
110
111 value = value.rechunk({0: target.nsplits[0]})._inplace_tile()
112
113 out_chunks = []
114 nsplits = [list(ns) for ns in target.nsplits]
115 if col not in columns:
116 nsplits[1][-1] += 1
117 column_chunk_shape = target.chunk_shape[1]
118 # append to the last chunk on columns axis direction
119 for c in target.chunks:
120 if c.index[-1] != column_chunk_shape - 1:
121 # not effected, just output
122 out_chunks.append(c)
123 else:
124 chunk_op = op.copy().reset_key()
125 if np.isscalar(value):
126 chunk_inputs = [c]
127 else:
128 value_chunk = value.cix[c.index[0], ]
129 chunk_inputs = [c, value_chunk]
130
131 dtypes = c.dtypes
132 dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]
133 chunk = chunk_op.new_chunk(chunk_inputs,
134 shape=(c.shape[0], c.shape[1] + 1),
135 dtypes=dtypes,
136 index_value=c.index_value,
137 columns_value=parse_index(dtypes.index, store_data=True),
138 index=c.index)
139 out_chunks.append(chunk)
140 else:
141 # replace exist column
142 for c in target.chunks:
143 if col in c.dtypes:
144 chunk_inputs = [c]
145 if not np.isscalar(value):
146 chunk_inputs.append(value.cix[c.index[0], ])
147 chunk_op = op.copy().reset_key()
148 chunk = chunk_op.new_chunk(chunk_inputs,
149 shape=c.shape,
150 dtypes=c.dtypes,
151 index_value=c.index_value,
152 columns_value=c.columns_value,
153 index=c.index)
154 out_chunks.append(chunk)
155 else:
156 out_chunks.append(c)
157
158 params = out.params
159 params['nsplits'] = tuple(tuple(ns) for ns in nsplits)
160 params['chunks'] = out_chunks
161 new_op = op.copy()
162 return new_op.new_tileables(op.inputs, kws=[params])
163
164 @classmethod
165 def execute(cls, ctx, op):
166 target = ctx[op.target.key].copy()
167 value = ctx[op.value.key] if not np.isscalar(op.value) else op.value
168 target[op.indexes] = value
169 ctx[op.outputs[0].key] = target
170
171
172 def dataframe_setitem(df, col, value):
173 op = DataFrameSetitem(target=df, indexes=col, value=value)
174 return op(df, value)
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mars/dataframe/indexing/setitem.py b/mars/dataframe/indexing/setitem.py
--- a/mars/dataframe/indexing/setitem.py
+++ b/mars/dataframe/indexing/setitem.py
@@ -128,7 +128,7 @@
value_chunk = value.cix[c.index[0], ]
chunk_inputs = [c, value_chunk]
- dtypes = c.dtypes
+ dtypes = c.dtypes.copy(deep=True)
dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]
chunk = chunk_op.new_chunk(chunk_inputs,
shape=(c.shape[0], c.shape[1] + 1),
|
{"golden_diff": "diff --git a/mars/dataframe/indexing/setitem.py b/mars/dataframe/indexing/setitem.py\n--- a/mars/dataframe/indexing/setitem.py\n+++ b/mars/dataframe/indexing/setitem.py\n@@ -128,7 +128,7 @@\n value_chunk = value.cix[c.index[0], ]\n chunk_inputs = [c, value_chunk]\n \n- dtypes = c.dtypes\n+ dtypes = c.dtypes.copy(deep=True)\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n", "issue": "[BUG] Setitem for DataFrame leads to a wrong dtypes\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nAdd columns for a DataFrame will lead to a wrong dtypes of input DataFrame.\r\n\r\n**To Reproduce**\r\n```python\r\nIn [1]: import mars.dataframe as md \r\n\r\nIn [2]: a = md.DataFrame({'a':[1,2,3]}) \r\n\r\nIn [3]: a['new'] = 1 \r\n\r\nIn [4]: a.op.inputs \r\nOut[4]: [DataFrame <op=DataFrameDataSource, key=c212164d24d96ed634711c3b97f334cb>]\r\n\r\nIn [5]: a.op.inputs[0].dtypes \r\nOut[5]: \r\na int64\r\nnew int64\r\ndtype: object\r\n```\r\n**Expected behavior**\r\nInput DataFrame's dtypes should have only one column.\r\n\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom ... import opcodes\nfrom ...core import OutputType\nfrom ...serialize import KeyField, AnyField\nfrom ...tensor.core import TENSOR_TYPE\nfrom ...tiles import TilesError\nfrom ..core import SERIES_TYPE, DataFrame\nfrom ..initializer import Series as asseries\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin\nfrom ..utils import parse_index\n\n\nclass DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.INDEXSETVALUE\n\n _target = KeyField('target')\n _indexes = AnyField('indexes')\n _value = AnyField('value')\n\n def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):\n super().__init__(_target=target, _indexes=indexes,\n _value=value, _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.dataframe]\n\n @property\n def target(self):\n return self._target\n\n @property\n def indexes(self):\n return self._indexes\n\n @property\n def value(self):\n return self._value\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._target = self._inputs[0]\n if len(inputs) > 1:\n self._value = self._inputs[-1]\n\n def __call__(self, target: DataFrame, value):\n inputs = [target]\n if np.isscalar(value):\n value_dtype = np.array(value).dtype\n else:\n if isinstance(value, (pd.Series, SERIES_TYPE)):\n value = asseries(value)\n inputs.append(value)\n value_dtype = value.dtype\n elif is_list_like(value) or isinstance(value, TENSOR_TYPE):\n value = asseries(value, index=target.index)\n inputs.append(value)\n value_dtype = value.dtype\n else: # pragma: no cover\n raise TypeError('Wrong value type, could be one of scalar, Series or tensor')\n\n if value.index_value.key != target.index_value.key: # pragma: no cover\n raise NotImplementedError('Does not support setting value '\n 'with different index for now')\n\n index_value = target.index_value\n dtypes = target.dtypes.copy(deep=True)\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n dtypes=dtypes, index_value=index_value,\n columns_value=columns_value)\n target.data = ret.data\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n target = op.target\n value = op.value\n col = op.indexes\n columns = target.columns_value.to_pandas()\n\n if not np.isscalar(value):\n # check if all chunk's index_value are identical\n target_chunk_index_values = [c.index_value for c in target.chunks\n if c.index[1] == 0]\n value_chunk_index_values = [v.index_value for v in value.chunks]\n is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \\\n all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))\n if not is_identical:\n # do rechunk\n if any(np.isnan(s) for s in target.nsplits[0]) or \\\n any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover\n raise TilesError('target or value has unknown chunk shape')\n\n value = value.rechunk({0: target.nsplits[0]})._inplace_tile()\n\n out_chunks = []\n nsplits = [list(ns) for ns in target.nsplits]\n if col not in columns:\n nsplits[1][-1] += 1\n column_chunk_shape = target.chunk_shape[1]\n # append to the last chunk on columns axis direction\n for c in target.chunks:\n if c.index[-1] != column_chunk_shape - 1:\n # not effected, just output\n out_chunks.append(c)\n else:\n chunk_op = op.copy().reset_key()\n if np.isscalar(value):\n chunk_inputs = [c]\n else:\n value_chunk = value.cix[c.index[0], ]\n chunk_inputs = [c, value_chunk]\n\n dtypes = c.dtypes\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n dtypes=dtypes,\n index_value=c.index_value,\n columns_value=parse_index(dtypes.index, store_data=True),\n index=c.index)\n out_chunks.append(chunk)\n else:\n # replace exist column\n for c in target.chunks:\n if col in c.dtypes:\n chunk_inputs = [c]\n if not np.isscalar(value):\n chunk_inputs.append(value.cix[c.index[0], ])\n chunk_op = op.copy().reset_key()\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=c.shape,\n dtypes=c.dtypes,\n index_value=c.index_value,\n columns_value=c.columns_value,\n index=c.index)\n out_chunks.append(chunk)\n else:\n out_chunks.append(c)\n\n params = out.params\n params['nsplits'] = tuple(tuple(ns) for ns in nsplits)\n params['chunks'] = out_chunks\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n target = ctx[op.target.key].copy()\n value = ctx[op.value.key] if not np.isscalar(op.value) else op.value\n target[op.indexes] = value\n ctx[op.outputs[0].key] = target\n\n\ndef dataframe_setitem(df, col, value):\n op = DataFrameSetitem(target=df, indexes=col, value=value)\n return op(df, value)\n", "path": "mars/dataframe/indexing/setitem.py"}], "after_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import is_list_like\n\nfrom ... import opcodes\nfrom ...core import OutputType\nfrom ...serialize import KeyField, AnyField\nfrom ...tensor.core import TENSOR_TYPE\nfrom ...tiles import TilesError\nfrom ..core import SERIES_TYPE, DataFrame\nfrom ..initializer import Series as asseries\nfrom ..operands import DataFrameOperand, DataFrameOperandMixin\nfrom ..utils import parse_index\n\n\nclass DataFrameSetitem(DataFrameOperand, DataFrameOperandMixin):\n _op_type_ = opcodes.INDEXSETVALUE\n\n _target = KeyField('target')\n _indexes = AnyField('indexes')\n _value = AnyField('value')\n\n def __init__(self, target=None, indexes=None, value=None, output_types=None, **kw):\n super().__init__(_target=target, _indexes=indexes,\n _value=value, _output_types=output_types, **kw)\n if self.output_types is None:\n self.output_types = [OutputType.dataframe]\n\n @property\n def target(self):\n return self._target\n\n @property\n def indexes(self):\n return self._indexes\n\n @property\n def value(self):\n return self._value\n\n def _set_inputs(self, inputs):\n super()._set_inputs(inputs)\n self._target = self._inputs[0]\n if len(inputs) > 1:\n self._value = self._inputs[-1]\n\n def __call__(self, target: DataFrame, value):\n inputs = [target]\n if np.isscalar(value):\n value_dtype = np.array(value).dtype\n else:\n if isinstance(value, (pd.Series, SERIES_TYPE)):\n value = asseries(value)\n inputs.append(value)\n value_dtype = value.dtype\n elif is_list_like(value) or isinstance(value, TENSOR_TYPE):\n value = asseries(value, index=target.index)\n inputs.append(value)\n value_dtype = value.dtype\n else: # pragma: no cover\n raise TypeError('Wrong value type, could be one of scalar, Series or tensor')\n\n if value.index_value.key != target.index_value.key: # pragma: no cover\n raise NotImplementedError('Does not support setting value '\n 'with different index for now')\n\n index_value = target.index_value\n dtypes = target.dtypes.copy(deep=True)\n dtypes.loc[self._indexes] = value_dtype\n columns_value = parse_index(dtypes.index, store_data=True)\n ret = self.new_dataframe(inputs, shape=(target.shape[0], len(dtypes)),\n dtypes=dtypes, index_value=index_value,\n columns_value=columns_value)\n target.data = ret.data\n\n @classmethod\n def tile(cls, op):\n out = op.outputs[0]\n target = op.target\n value = op.value\n col = op.indexes\n columns = target.columns_value.to_pandas()\n\n if not np.isscalar(value):\n # check if all chunk's index_value are identical\n target_chunk_index_values = [c.index_value for c in target.chunks\n if c.index[1] == 0]\n value_chunk_index_values = [v.index_value for v in value.chunks]\n is_identical = len(target_chunk_index_values) == len(target_chunk_index_values) and \\\n all(c.key == v.key for c, v in zip(target_chunk_index_values, value_chunk_index_values))\n if not is_identical:\n # do rechunk\n if any(np.isnan(s) for s in target.nsplits[0]) or \\\n any(np.isnan(s) for s in value.nsplits[0]): # pragma: no cover\n raise TilesError('target or value has unknown chunk shape')\n\n value = value.rechunk({0: target.nsplits[0]})._inplace_tile()\n\n out_chunks = []\n nsplits = [list(ns) for ns in target.nsplits]\n if col not in columns:\n nsplits[1][-1] += 1\n column_chunk_shape = target.chunk_shape[1]\n # append to the last chunk on columns axis direction\n for c in target.chunks:\n if c.index[-1] != column_chunk_shape - 1:\n # not effected, just output\n out_chunks.append(c)\n else:\n chunk_op = op.copy().reset_key()\n if np.isscalar(value):\n chunk_inputs = [c]\n else:\n value_chunk = value.cix[c.index[0], ]\n chunk_inputs = [c, value_chunk]\n\n dtypes = c.dtypes.copy(deep=True)\n dtypes.loc[out.dtypes.index[-1]] = out.dtypes.iloc[-1]\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=(c.shape[0], c.shape[1] + 1),\n dtypes=dtypes,\n index_value=c.index_value,\n columns_value=parse_index(dtypes.index, store_data=True),\n index=c.index)\n out_chunks.append(chunk)\n else:\n # replace exist column\n for c in target.chunks:\n if col in c.dtypes:\n chunk_inputs = [c]\n if not np.isscalar(value):\n chunk_inputs.append(value.cix[c.index[0], ])\n chunk_op = op.copy().reset_key()\n chunk = chunk_op.new_chunk(chunk_inputs,\n shape=c.shape,\n dtypes=c.dtypes,\n index_value=c.index_value,\n columns_value=c.columns_value,\n index=c.index)\n out_chunks.append(chunk)\n else:\n out_chunks.append(c)\n\n params = out.params\n params['nsplits'] = tuple(tuple(ns) for ns in nsplits)\n params['chunks'] = out_chunks\n new_op = op.copy()\n return new_op.new_tileables(op.inputs, kws=[params])\n\n @classmethod\n def execute(cls, ctx, op):\n target = ctx[op.target.key].copy()\n value = ctx[op.value.key] if not np.isscalar(op.value) else op.value\n target[op.indexes] = value\n ctx[op.outputs[0].key] = target\n\n\ndef dataframe_setitem(df, col, value):\n op = DataFrameSetitem(target=df, indexes=col, value=value)\n return op(df, value)\n", "path": "mars/dataframe/indexing/setitem.py"}]}
| 2,404 | 154 |
gh_patches_debug_29308
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-544
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missed paid orders from Ya.Kassa
Text below is from PO
----
Мне se голову делает, что заказы оплаченные в яндексе кассе не приходят
Вот этих я в почте не увидел
http://prntscr.com/km0zp2
Хотя, я только что купил батарейку за 10 рублей. Я еще потестил. разные данные вбивал и с разными полями. У меня ок все.
Смотри как странно они идут в админке. видимо какой-то баг всн же есть http://prntscr.com/km124t
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/admin.py`
Content:
```
1 from django.contrib import admin
2 from django.contrib.admin.widgets import FilteredSelectMultiple
3 from django.contrib.redirects.models import Redirect
4 from django.db import models as django_models
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.translation import ugettext_lazy as _
8
9 from django_select2.forms import ModelSelect2Widget
10
11 from ecommerce.models import Position
12 from pages.models import CustomPage, FlatPage, PageTemplate
13 from generic_admin import inlines, mixins, models, sites
14
15 from shopelectro import models as se_models
16 from shopelectro.views.admin import TableEditor
17
18
19 class SEAdminSite(sites.SiteWithTableEditor):
20
21 site_header = 'Shopelectro administration'
22 table_editor_view = TableEditor
23
24
25 def prepare_has_filter_queryset(value, db_table, queryset):
26 if not value:
27 return
28
29 query = '{}__tags__isnull'.format(db_table)
30
31 # Use brackets, because `Explicit is better than implicit`.
32 return queryset.filter(**{query: value != 'yes'})
33
34
35 class HasTagsFilter(admin.SimpleListFilter):
36
37 product_model = se_models.Product
38 title = _('has tags')
39 parameter_name = 'has_tags'
40
41 def lookups(self, request, model_admin):
42 return (
43 ('yes', _('Has tags')),
44 ('no', _('Has no tags')),
45 )
46
47 def queryset(self, request, queryset):
48 return prepare_has_filter_queryset(
49 self.value(),
50 self.product_model._meta.db_table,
51 queryset
52 )
53
54
55 class HasCategoryFilter(admin.SimpleListFilter):
56
57 product_model = se_models.Product
58 title = _('has category')
59 parameter_name = 'has_category'
60
61 def lookups(self, request, model_admin):
62 return (
63 ('yes', _('Has category')),
64 ('no', _('Has no category')),
65 )
66
67 def queryset(self, request, queryset):
68 return prepare_has_filter_queryset(
69 self.value(),
70 self.product_model._meta.db_table,
71 queryset
72 )
73
74
75 class TagInline(admin.StackedInline):
76
77 model = se_models.Tag
78 extra = 0
79
80
81 class CategoryInline(inlines.CategoryInline):
82
83 model = se_models.Category
84
85 fieldsets = ((None, {
86 'classes': ('primary-chars', ),
87 'fields': (
88 ('name', 'id'),
89 ('parent', 'correct_parent_id'),
90 )
91 }),)
92
93
94 class ProductInline(inlines.ProductInline):
95
96 model = se_models.Product
97
98 formfield_overrides = {
99 django_models.ManyToManyField: {
100 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)
101 },
102 }
103
104 fieldsets = ((None, {
105 'classes': ('primary-chars', ),
106 'fields': (
107 ('name', 'id', 'vendor_code'),
108 ('category', 'correct_category_id'),
109 ('price', 'in_stock'),
110 'is_popular',
111 ('purchase_price', 'wholesale_small'),
112 ('wholesale_medium', 'wholesale_large'),
113 'tags',
114 )
115 }),)
116
117 def formfield_for_dbfield(self, db_field, **kwargs):
118 if db_field.name == 'category':
119 kwargs['widget'] = ModelSelect2Widget(
120 model=se_models.Category,
121 search_fields=[
122 'name__icontains',
123 'pk__startswith',
124 ],
125 )
126 return super(ProductInline, self).formfield_for_dbfield(
127 db_field,
128 **kwargs,
129 )
130
131
132 class PositionInline(admin.StackedInline):
133
134 model = Position
135
136
137 class CategoryPageAdmin(models.CategoryPageAdmin):
138
139 add = True
140 delete = False
141 inlines = [CategoryInline, inlines.ImageInline]
142
143 def get_queryset(self, request):
144 return (
145 super(CategoryPageAdmin, self)
146 .get_queryset(request)
147 .select_related('shopelectro_category')
148 )
149
150
151 class ProductPageAdmin(models.ProductPageAdmin):
152
153 add = False
154 delete = False
155 category_page_model = se_models.CategoryPage
156 list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]
157 inlines = [ProductInline, inlines.ImageInline]
158 search_fields = [
159 'shopelectro_product__vendor_code', 'name', 'slug',
160 ]
161
162 def model_id(self, obj):
163 return obj.model.vendor_code
164
165 model_id.short_description = _('Vendor code')
166 model_id.admin_order_field = 'shopelectro_product__vendor_code'
167
168 def get_queryset(self, request):
169 return (
170 super(ProductPageAdmin, self)
171 .get_queryset(request)
172 .select_related('shopelectro_product')
173 )
174
175
176 class ProductFeedbackPageAdmin(admin.ModelAdmin):
177
178 add = False
179 delete = False
180 list_filter = ['rating']
181 list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']
182
183 def links(self, obj):
184 return format_html(
185 '''
186 <a href="{url}" class="field-link" title="Посмотреть на сайте" target="_blank">
187 <i class="fa fa-link" aria-hidden="true"></i>
188 </a>
189 '''.format(url=obj.product.url))
190
191 links.short_description = _('Link')
192
193 def get_queryset(self, request):
194 return (
195 super(ProductFeedbackPageAdmin, self)
196 .get_queryset(request)
197 .select_related('product')
198 )
199
200
201 class TagGroupAdmin(admin.ModelAdmin):
202
203 list_display = ['id', 'name', 'position', 'count_tags']
204 list_display_links = ['name']
205
206 inlines = [TagInline]
207
208 def get_queryset(self, request):
209 return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')
210
211 def count_tags(self, obj):
212 return obj.tags.count()
213
214
215 class TagAdmin(admin.ModelAdmin):
216
217 search_fields = ['id', 'name']
218 list_display = ['id', 'name', 'position', 'custom_group']
219 list_display_links = ['name']
220
221 def get_queryset(self, request):
222 return super(TagAdmin, self).get_queryset(request).prefetch_related('group')
223
224 def custom_group(self, obj):
225 group = obj.group
226
227 return format_html(
228 '<a href="{url}">{group}</a>',
229 group=group,
230 url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))
231 )
232
233 custom_group.admin_order_field = 'group'
234 custom_group.short_description = _('Group')
235
236
237 class OrderAdmin(mixins.PermissionsControl):
238
239 add = False
240 inlines = [PositionInline]
241 list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']
242 search_fields = ['name', 'email', 'phone']
243 list_display_links = ['name']
244
245 se_admin = SEAdminSite(name='se_admin')
246 se_admin.register(CustomPage, models.CustomPageAdmin)
247 se_admin.register(FlatPage, models.FlatPageAdmin)
248 se_admin.register(PageTemplate, models.CustomPageTemplateAdmin)
249
250 se_admin.register(se_models.CategoryPage, CategoryPageAdmin)
251 se_admin.register(se_models.Order, OrderAdmin)
252 se_admin.register(se_models.ProductPage, ProductPageAdmin)
253 se_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)
254 se_admin.register(se_models.TagGroup, TagGroupAdmin)
255 se_admin.register(se_models.Tag, TagAdmin)
256
257 se_admin.register(Redirect)
258
```
Path: `shopelectro/views/service.py`
Content:
```
1 from hashlib import md5
2
3 from django.conf import settings
4 from django.shortcuts import render
5 from django.views.decorators.csrf import csrf_exempt
6 from django.views.decorators.http import require_POST
7
8 from ecommerce import mailer
9 from ecommerce.views import get_keys_from_post
10
11 from shopelectro.models import Order
12
13 YANDEX_REQUEST_PARAM = (
14 'action', 'orderSumAmount', 'orderSumCurrencyPaycash', 'orderSumBankPaycash',
15 'shopId', 'invoiceId', 'customerNumber'
16 )
17
18
19 def generate_md5_for_ya_kassa(post_body):
20 """Generate md5 based on this param."""
21 params = [post_body[param] for param in YANDEX_REQUEST_PARAM]
22 params.append(settings.YANDEX_SHOP_PASS)
23 param_sequence = str(';'.join(params)).encode('utf-8')
24 return md5(param_sequence).hexdigest().upper()
25
26
27 def has_correct_md5(post_body):
28 """Compare our md5 with md5 from yandex request."""
29 md5 = generate_md5_for_ya_kassa(post_body)
30 return md5 == post_body['md5']
31
32
33 @csrf_exempt
34 def yandex_check(request):
35 """
36 Handle Yandex check.
37
38 We simply accept every check.
39 It's marked with @csrf_exempt, because we don't need
40 to check CSRF in yandex-requests.
41 """
42 return render(request, 'ecommerce/yandex_check.xml', {'invoice': request.POST['invoiceId']},
43 content_type='application/xhtml+xml')
44
45
46 @csrf_exempt
47 def yandex_aviso(request):
48 """
49 Handle Yandex Aviso check.
50
51 It's marked with @csrf_exempt, because we don't need to
52 check CSRF in yandex-requests.
53
54 1. Retrieve order number from request, find in in DB.
55 2. If it's a first aviso check (there might be more than one,
56 depends on Yandex)
57 send different emails to client and shop.
58 3. Get invoice id from request and return XML to Yandex.
59 """
60 def is_first_aviso(order_):
61 return order_ and not order_.paid
62
63 def send_mail_to_shop(order):
64 paid, profit = get_keys_from_post(request, 'orderSumAmount', 'shopSumAmount')
65 proportion = 100 * float(profit) / float(paid)
66 commission = round(100 - proportion, 2) # in percents
67 mailer.send_order(
68 template='ecommerce/yandex_order_email.html',
69 subject=settings.EMAIL_SUBJECTS['yandex_order'],
70 order=order,
71 to_customer=False,
72 extra_context={
73 'paid': paid,
74 'profit': profit,
75 'commission': commission,
76 })
77
78 def send_mail_to_customer(order):
79 mailer.send_order(
80 subject=settings.EMAIL_SUBJECTS['yandex_order'],
81 order=order,
82 to_shop=False,
83 )
84
85 if not has_correct_md5(request.POST):
86 return render(
87 request, 'ecommerce/yandex_aviso.xml',
88 content_type='application/xhtml+xml'
89 )
90
91 # maybe we can include django-annoying for such cases
92 # https://github.com/skorokithakis/django-annoying#get_object_or_none-function
93 try:
94 order = Order.objects.get(pk=request.POST['customerNumber'])
95 except Order.DoesNotExist:
96 order = None
97
98 if is_first_aviso(order):
99 order.paid = True
100 send_mail_to_customer(order)
101 send_mail_to_shop(order)
102 order.save()
103
104 invoice_id = request.POST['invoiceId']
105 return render(request, 'ecommerce/yandex_aviso.xml', {'invoice': invoice_id},
106 content_type='application/xhtml+xml')
107
108
109 @require_POST
110 @csrf_exempt
111 def ya_feedback_request(request):
112 """Send email to user with Y.Market feedback request."""
113 user_email = request.POST['email']
114 mailer.ya_feedback(user_email)
115
116 return render(request, 'ecommerce/yandex_feedback_success.html',
117 {'email': user_email})
118
119
120 def ya_feedback_with_redirect(request):
121 """Redirect user to Y.Market for feedback."""
122 return render(request, 'ecommerce/yandex_feedback_redirect.html')
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/admin.py b/shopelectro/admin.py
--- a/shopelectro/admin.py
+++ b/shopelectro/admin.py
@@ -238,10 +238,11 @@
add = False
inlines = [PositionInline]
- list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']
+ list_display = ['id', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']
search_fields = ['name', 'email', 'phone']
list_display_links = ['name']
+
se_admin = SEAdminSite(name='se_admin')
se_admin.register(CustomPage, models.CustomPageAdmin)
se_admin.register(FlatPage, models.FlatPageAdmin)
diff --git a/shopelectro/views/service.py b/shopelectro/views/service.py
--- a/shopelectro/views/service.py
+++ b/shopelectro/views/service.py
@@ -1,3 +1,4 @@
+import logging
from hashlib import md5
from django.conf import settings
@@ -10,6 +11,9 @@
from shopelectro.models import Order
+logger = logging.getLogger(__name__)
+
+
YANDEX_REQUEST_PARAM = (
'action', 'orderSumAmount', 'orderSumCurrencyPaycash', 'orderSumBankPaycash',
'shopId', 'invoiceId', 'customerNumber'
@@ -88,6 +92,8 @@
content_type='application/xhtml+xml'
)
+ logger.info(f'received yandex aviso. Aviso data: {request.POST}')
+
# maybe we can include django-annoying for such cases
# https://github.com/skorokithakis/django-annoying#get_object_or_none-function
try:
|
{"golden_diff": "diff --git a/shopelectro/admin.py b/shopelectro/admin.py\n--- a/shopelectro/admin.py\n+++ b/shopelectro/admin.py\n@@ -238,10 +238,11 @@\n \n add = False\n inlines = [PositionInline]\n- list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']\n+ list_display = ['id', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n \n+\n se_admin = SEAdminSite(name='se_admin')\n se_admin.register(CustomPage, models.CustomPageAdmin)\n se_admin.register(FlatPage, models.FlatPageAdmin)\ndiff --git a/shopelectro/views/service.py b/shopelectro/views/service.py\n--- a/shopelectro/views/service.py\n+++ b/shopelectro/views/service.py\n@@ -1,3 +1,4 @@\n+import logging\n from hashlib import md5\n \n from django.conf import settings\n@@ -10,6 +11,9 @@\n \n from shopelectro.models import Order\n \n+logger = logging.getLogger(__name__)\n+\n+\n YANDEX_REQUEST_PARAM = (\n 'action', 'orderSumAmount', 'orderSumCurrencyPaycash', 'orderSumBankPaycash',\n 'shopId', 'invoiceId', 'customerNumber'\n@@ -88,6 +92,8 @@\n content_type='application/xhtml+xml'\n )\n \n+ logger.info(f'received yandex aviso. Aviso data: {request.POST}')\n+\n # maybe we can include django-annoying for such cases\n # https://github.com/skorokithakis/django-annoying#get_object_or_none-function\n try:\n", "issue": "Missed paid orders from Ya.Kassa\nText below is from PO\r\n\r\n----\r\n\u041c\u043d\u0435 se \u0433\u043e\u043b\u043e\u0432\u0443 \u0434\u0435\u043b\u0430\u0435\u0442, \u0447\u0442\u043e \u0437\u0430\u043a\u0430\u0437\u044b \u043e\u043f\u043b\u0430\u0447\u0435\u043d\u043d\u044b\u0435 \u0432 \u044f\u043d\u0434\u0435\u043a\u0441\u0435 \u043a\u0430\u0441\u0441\u0435 \u043d\u0435 \u043f\u0440\u0438\u0445\u043e\u0434\u044f\u0442 \r\n\r\n\u0412\u043e\u0442 \u044d\u0442\u0438\u0445 \u044f \u0432 \u043f\u043e\u0447\u0442\u0435 \u043d\u0435 \u0443\u0432\u0438\u0434\u0435\u043b \r\nhttp://prntscr.com/km0zp2\r\n\r\n\u0425\u043e\u0442\u044f, \u044f \u0442\u043e\u043b\u044c\u043a\u043e \u0447\u0442\u043e \u043a\u0443\u043f\u0438\u043b \u0431\u0430\u0442\u0430\u0440\u0435\u0439\u043a\u0443 \u0437\u0430 10 \u0440\u0443\u0431\u043b\u0435\u0439. \u042f \u0435\u0449\u0435 \u043f\u043e\u0442\u0435\u0441\u0442\u0438\u043b. \u0440\u0430\u0437\u043d\u044b\u0435 \u0434\u0430\u043d\u043d\u044b\u0435 \u0432\u0431\u0438\u0432\u0430\u043b \u0438 \u0441 \u0440\u0430\u0437\u043d\u044b\u043c\u0438 \u043f\u043e\u043b\u044f\u043c\u0438. \u0423 \u043c\u0435\u043d\u044f \u043e\u043a \u0432\u0441\u0435.\r\n\r\n\u0421\u043c\u043e\u0442\u0440\u0438 \u043a\u0430\u043a \u0441\u0442\u0440\u0430\u043d\u043d\u043e \u043e\u043d\u0438 \u0438\u0434\u0443\u0442 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435. \u0432\u0438\u0434\u0438\u043c\u043e \u043a\u0430\u043a\u043e\u0439-\u0442\u043e \u0431\u0430\u0433 \u0432\u0441\u043d \u0436\u0435 \u0435\u0441\u0442\u044c http://prntscr.com/km124t\r\n\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom ecommerce.models import Position\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, mixins, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id', 'vendor_code'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass PositionInline(admin.StackedInline):\n\n model = Position\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = True\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = [ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nclass OrderAdmin(mixins.PermissionsControl):\n\n add = False\n inlines = [PositionInline]\n list_display = ['name', 'email', 'phone', 'address', 'city', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.Order, OrderAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}, {"content": "from hashlib import md5\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer\nfrom ecommerce.views import get_keys_from_post\n\nfrom shopelectro.models import Order\n\nYANDEX_REQUEST_PARAM = (\n 'action', 'orderSumAmount', 'orderSumCurrencyPaycash', 'orderSumBankPaycash',\n 'shopId', 'invoiceId', 'customerNumber'\n)\n\n\ndef generate_md5_for_ya_kassa(post_body):\n \"\"\"Generate md5 based on this param.\"\"\"\n params = [post_body[param] for param in YANDEX_REQUEST_PARAM]\n params.append(settings.YANDEX_SHOP_PASS)\n param_sequence = str(';'.join(params)).encode('utf-8')\n return md5(param_sequence).hexdigest().upper()\n\n\ndef has_correct_md5(post_body):\n \"\"\"Compare our md5 with md5 from yandex request.\"\"\"\n md5 = generate_md5_for_ya_kassa(post_body)\n return md5 == post_body['md5']\n\n\n@csrf_exempt\ndef yandex_check(request):\n \"\"\"\n Handle Yandex check.\n\n We simply accept every check.\n It's marked with @csrf_exempt, because we don't need\n to check CSRF in yandex-requests.\n \"\"\"\n return render(request, 'ecommerce/yandex_check.xml', {'invoice': request.POST['invoiceId']},\n content_type='application/xhtml+xml')\n\n\n@csrf_exempt\ndef yandex_aviso(request):\n \"\"\"\n Handle Yandex Aviso check.\n\n It's marked with @csrf_exempt, because we don't need to\n check CSRF in yandex-requests.\n\n 1. Retrieve order number from request, find in in DB.\n 2. If it's a first aviso check (there might be more than one,\n depends on Yandex)\n send different emails to client and shop.\n 3. Get invoice id from request and return XML to Yandex.\n \"\"\"\n def is_first_aviso(order_):\n return order_ and not order_.paid\n\n def send_mail_to_shop(order):\n paid, profit = get_keys_from_post(request, 'orderSumAmount', 'shopSumAmount')\n proportion = 100 * float(profit) / float(paid)\n commission = round(100 - proportion, 2) # in percents\n mailer.send_order(\n template='ecommerce/yandex_order_email.html',\n subject=settings.EMAIL_SUBJECTS['yandex_order'],\n order=order,\n to_customer=False,\n extra_context={\n 'paid': paid,\n 'profit': profit,\n 'commission': commission,\n })\n\n def send_mail_to_customer(order):\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['yandex_order'],\n order=order,\n to_shop=False,\n )\n\n if not has_correct_md5(request.POST):\n return render(\n request, 'ecommerce/yandex_aviso.xml',\n content_type='application/xhtml+xml'\n )\n\n # maybe we can include django-annoying for such cases\n # https://github.com/skorokithakis/django-annoying#get_object_or_none-function\n try:\n order = Order.objects.get(pk=request.POST['customerNumber'])\n except Order.DoesNotExist:\n order = None\n\n if is_first_aviso(order):\n order.paid = True\n send_mail_to_customer(order)\n send_mail_to_shop(order)\n order.save()\n\n invoice_id = request.POST['invoiceId']\n return render(request, 'ecommerce/yandex_aviso.xml', {'invoice': invoice_id},\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@csrf_exempt\ndef ya_feedback_request(request):\n \"\"\"Send email to user with Y.Market feedback request.\"\"\"\n user_email = request.POST['email']\n mailer.ya_feedback(user_email)\n\n return render(request, 'ecommerce/yandex_feedback_success.html',\n {'email': user_email})\n\n\ndef ya_feedback_with_redirect(request):\n \"\"\"Redirect user to Y.Market for feedback.\"\"\"\n return render(request, 'ecommerce/yandex_feedback_redirect.html')\n", "path": "shopelectro/views/service.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.redirects.models import Redirect\nfrom django.db import models as django_models\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django_select2.forms import ModelSelect2Widget\n\nfrom ecommerce.models import Position\nfrom pages.models import CustomPage, FlatPage, PageTemplate\nfrom generic_admin import inlines, mixins, models, sites\n\nfrom shopelectro import models as se_models\nfrom shopelectro.views.admin import TableEditor\n\n\nclass SEAdminSite(sites.SiteWithTableEditor):\n\n site_header = 'Shopelectro administration'\n table_editor_view = TableEditor\n\n\ndef prepare_has_filter_queryset(value, db_table, queryset):\n if not value:\n return\n\n query = '{}__tags__isnull'.format(db_table)\n\n # Use brackets, because `Explicit is better than implicit`.\n return queryset.filter(**{query: value != 'yes'})\n\n\nclass HasTagsFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has tags')\n parameter_name = 'has_tags'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has tags')),\n ('no', _('Has no tags')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass HasCategoryFilter(admin.SimpleListFilter):\n\n product_model = se_models.Product\n title = _('has category')\n parameter_name = 'has_category'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Has category')),\n ('no', _('Has no category')),\n )\n\n def queryset(self, request, queryset):\n return prepare_has_filter_queryset(\n self.value(),\n self.product_model._meta.db_table,\n queryset\n )\n\n\nclass TagInline(admin.StackedInline):\n\n model = se_models.Tag\n extra = 0\n\n\nclass CategoryInline(inlines.CategoryInline):\n\n model = se_models.Category\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id'),\n ('parent', 'correct_parent_id'),\n )\n }),)\n\n\nclass ProductInline(inlines.ProductInline):\n\n model = se_models.Product\n\n formfield_overrides = {\n django_models.ManyToManyField: {\n 'widget': FilteredSelectMultiple(verbose_name='Tags', is_stacked=False)\n },\n }\n\n fieldsets = ((None, {\n 'classes': ('primary-chars', ),\n 'fields': (\n ('name', 'id', 'vendor_code'),\n ('category', 'correct_category_id'),\n ('price', 'in_stock'),\n 'is_popular',\n ('purchase_price', 'wholesale_small'),\n ('wholesale_medium', 'wholesale_large'),\n 'tags',\n )\n }),)\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name == 'category':\n kwargs['widget'] = ModelSelect2Widget(\n model=se_models.Category,\n search_fields=[\n 'name__icontains',\n 'pk__startswith',\n ],\n )\n return super(ProductInline, self).formfield_for_dbfield(\n db_field,\n **kwargs,\n )\n\n\nclass PositionInline(admin.StackedInline):\n\n model = Position\n\n\nclass CategoryPageAdmin(models.CategoryPageAdmin):\n\n add = True\n delete = False\n inlines = [CategoryInline, inlines.ImageInline]\n\n def get_queryset(self, request):\n return (\n super(CategoryPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_category')\n )\n\n\nclass ProductPageAdmin(models.ProductPageAdmin):\n\n add = False\n delete = False\n category_page_model = se_models.CategoryPage\n list_filter = [*models.ProductPageAdmin.list_filter, HasTagsFilter, HasCategoryFilter]\n inlines = [ProductInline, inlines.ImageInline]\n search_fields = [\n 'shopelectro_product__vendor_code', 'name', 'slug',\n ]\n\n def model_id(self, obj):\n return obj.model.vendor_code\n\n model_id.short_description = _('Vendor code')\n model_id.admin_order_field = 'shopelectro_product__vendor_code'\n\n def get_queryset(self, request):\n return (\n super(ProductPageAdmin, self)\n .get_queryset(request)\n .select_related('shopelectro_product')\n )\n\n\nclass ProductFeedbackPageAdmin(admin.ModelAdmin):\n\n add = False\n delete = False\n list_filter = ['rating']\n list_display = ['rating', 'name', 'dignities', 'limitations', 'general', 'links']\n\n def links(self, obj):\n return format_html(\n '''\n <a href=\"{url}\" class=\"field-link\" title=\"\u041f\u043e\u0441\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u043d\u0430 \u0441\u0430\u0439\u0442\u0435\" target=\"_blank\">\n <i class=\"fa fa-link\" aria-hidden=\"true\"></i>\n </a>\n '''.format(url=obj.product.url))\n\n links.short_description = _('Link')\n\n def get_queryset(self, request):\n return (\n super(ProductFeedbackPageAdmin, self)\n .get_queryset(request)\n .select_related('product')\n )\n\n\nclass TagGroupAdmin(admin.ModelAdmin):\n\n list_display = ['id', 'name', 'position', 'count_tags']\n list_display_links = ['name']\n\n inlines = [TagInline]\n\n def get_queryset(self, request):\n return super(TagGroupAdmin, self).get_queryset(request).prefetch_related('tags')\n\n def count_tags(self, obj):\n return obj.tags.count()\n\n\nclass TagAdmin(admin.ModelAdmin):\n\n search_fields = ['id', 'name']\n list_display = ['id', 'name', 'position', 'custom_group']\n list_display_links = ['name']\n\n def get_queryset(self, request):\n return super(TagAdmin, self).get_queryset(request).prefetch_related('group')\n\n def custom_group(self, obj):\n group = obj.group\n\n return format_html(\n '<a href=\"{url}\">{group}</a>',\n group=group,\n url=reverse('admin:shopelectro_taggroup_change', args=(group.id, ))\n )\n\n custom_group.admin_order_field = 'group'\n custom_group.short_description = _('Group')\n\n\nclass OrderAdmin(mixins.PermissionsControl):\n\n add = False\n inlines = [PositionInline]\n list_display = ['id', 'name', 'email', 'phone', 'total_price', 'payment_type', 'paid']\n search_fields = ['name', 'email', 'phone']\n list_display_links = ['name']\n\n\nse_admin = SEAdminSite(name='se_admin')\nse_admin.register(CustomPage, models.CustomPageAdmin)\nse_admin.register(FlatPage, models.FlatPageAdmin)\nse_admin.register(PageTemplate, models.CustomPageTemplateAdmin)\n\nse_admin.register(se_models.CategoryPage, CategoryPageAdmin)\nse_admin.register(se_models.Order, OrderAdmin)\nse_admin.register(se_models.ProductPage, ProductPageAdmin)\nse_admin.register(se_models.ProductFeedback, ProductFeedbackPageAdmin)\nse_admin.register(se_models.TagGroup, TagGroupAdmin)\nse_admin.register(se_models.Tag, TagAdmin)\n\nse_admin.register(Redirect)\n", "path": "shopelectro/admin.py"}, {"content": "import logging\nfrom hashlib import md5\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer\nfrom ecommerce.views import get_keys_from_post\n\nfrom shopelectro.models import Order\n\nlogger = logging.getLogger(__name__)\n\n\nYANDEX_REQUEST_PARAM = (\n 'action', 'orderSumAmount', 'orderSumCurrencyPaycash', 'orderSumBankPaycash',\n 'shopId', 'invoiceId', 'customerNumber'\n)\n\n\ndef generate_md5_for_ya_kassa(post_body):\n \"\"\"Generate md5 based on this param.\"\"\"\n params = [post_body[param] for param in YANDEX_REQUEST_PARAM]\n params.append(settings.YANDEX_SHOP_PASS)\n param_sequence = str(';'.join(params)).encode('utf-8')\n return md5(param_sequence).hexdigest().upper()\n\n\ndef has_correct_md5(post_body):\n \"\"\"Compare our md5 with md5 from yandex request.\"\"\"\n md5 = generate_md5_for_ya_kassa(post_body)\n return md5 == post_body['md5']\n\n\n@csrf_exempt\ndef yandex_check(request):\n \"\"\"\n Handle Yandex check.\n\n We simply accept every check.\n It's marked with @csrf_exempt, because we don't need\n to check CSRF in yandex-requests.\n \"\"\"\n return render(request, 'ecommerce/yandex_check.xml', {'invoice': request.POST['invoiceId']},\n content_type='application/xhtml+xml')\n\n\n@csrf_exempt\ndef yandex_aviso(request):\n \"\"\"\n Handle Yandex Aviso check.\n\n It's marked with @csrf_exempt, because we don't need to\n check CSRF in yandex-requests.\n\n 1. Retrieve order number from request, find in in DB.\n 2. If it's a first aviso check (there might be more than one,\n depends on Yandex)\n send different emails to client and shop.\n 3. Get invoice id from request and return XML to Yandex.\n \"\"\"\n def is_first_aviso(order_):\n return order_ and not order_.paid\n\n def send_mail_to_shop(order):\n paid, profit = get_keys_from_post(request, 'orderSumAmount', 'shopSumAmount')\n proportion = 100 * float(profit) / float(paid)\n commission = round(100 - proportion, 2) # in percents\n mailer.send_order(\n template='ecommerce/yandex_order_email.html',\n subject=settings.EMAIL_SUBJECTS['yandex_order'],\n order=order,\n to_customer=False,\n extra_context={\n 'paid': paid,\n 'profit': profit,\n 'commission': commission,\n })\n\n def send_mail_to_customer(order):\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['yandex_order'],\n order=order,\n to_shop=False,\n )\n\n if not has_correct_md5(request.POST):\n return render(\n request, 'ecommerce/yandex_aviso.xml',\n content_type='application/xhtml+xml'\n )\n\n logger.info(f'received yandex aviso. Aviso data: {request.POST}')\n\n # maybe we can include django-annoying for such cases\n # https://github.com/skorokithakis/django-annoying#get_object_or_none-function\n try:\n order = Order.objects.get(pk=request.POST['customerNumber'])\n except Order.DoesNotExist:\n order = None\n\n if is_first_aviso(order):\n order.paid = True\n send_mail_to_customer(order)\n send_mail_to_shop(order)\n order.save()\n\n invoice_id = request.POST['invoiceId']\n return render(request, 'ecommerce/yandex_aviso.xml', {'invoice': invoice_id},\n content_type='application/xhtml+xml')\n\n\n@require_POST\n@csrf_exempt\ndef ya_feedback_request(request):\n \"\"\"Send email to user with Y.Market feedback request.\"\"\"\n user_email = request.POST['email']\n mailer.ya_feedback(user_email)\n\n return render(request, 'ecommerce/yandex_feedback_success.html',\n {'email': user_email})\n\n\ndef ya_feedback_with_redirect(request):\n \"\"\"Redirect user to Y.Market for feedback.\"\"\"\n return render(request, 'ecommerce/yandex_feedback_redirect.html')\n", "path": "shopelectro/views/service.py"}]}
| 3,877 | 405 |
gh_patches_debug_15000
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation fails in encryption step with DiskError and NameError
I tried to test the installer but hit this error while encrypting a partition. I followed the instructions in the [README](https://github.com/Torxed/archinstall#testing).
```
{
"!encryption-password": "******",
"!root-password": "******",
"filesystem": "ext4",
"harddrive": {
"model": null,
"path": "/dev/loop2",
"size": "5G"
},
"hostname": "laptop",
"keyboard-language": "l1",
"mirror-region": {
...
},
"nic": {
"nic": "wlp4s0"
},
"packages": [
"firefox",
"thunderbird"
],
"profile": {
"path": "/usr/lib/python3.9/site-packages/archinstall/profiles/gnome.py"
},
"superusers": {
"admin": {
"!password": "******"
}
},
"timezone": "Europe/Berlin",
"users": {}
}
```
```
Encrypting Partition(path=/dev/loop2p2, real_device=/dev/loop2, fs=ext4, rel_mountpoint=/) (This might take a while)
Traceback (most recent call last):
File "/usr/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/lib/python3.9/site-packages/archinstall/__main__.py", line 34, in <module>
run_as_a_module()
File "/usr/lib/python3.9/site-packages/archinstall/__main__.py", line 31, in run_as_a_module
script.execute()
File "/usr/lib/python3.9/site-packages/archinstall/lib/profiles.py", line 143, in execute
self.spec.loader.exec_module(sys.modules[self.namespace])
File "<frozen importlib._bootstrap_external>", line 790, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/usr/lib/python3.9/site-packages/archinstall/examples/guided.py", line 342, in <module>
perform_installation_steps()
File "/usr/lib/python3.9/site-packages/archinstall/examples/guided.py", line 283, in perform_installation_steps
perform_installation(device=fs.find_partition('/'),
File "/usr/lib/python3.9/site-packages/archinstall/lib/disk.py", line 422, in __exit__
raise args[1]
File "/usr/lib/python3.9/site-packages/archinstall/examples/guided.py", line 265, in perform_installation_steps
partition.encrypt(password=passwd)
File "/usr/lib/python3.9/site-packages/archinstall/lib/disk.py", line 263, in encrypt
return handle.encrypt(self, *args, **kwargs)
File "/usr/lib/python3.9/site-packages/archinstall/lib/luks.py", line 98, in encrypt
raise DiskError(f'Could not encrypt volume "{partition.path}": {o}')
NameError: name 'o' is not defined
```
Congratulations on the inclusion in an official arch installer image! :partying_face: (Even if this turns out to be an April fools joke ;-)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/luks.py`
Content:
```
1 import os
2 from .exceptions import *
3 from .general import *
4 from .disk import Partition
5 from .output import log, LOG_LEVELS
6 from .storage import storage
7
8 class luks2():
9 def __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):
10 self.password = password
11 self.partition = partition
12 self.mountpoint = mountpoint
13 self.args = args
14 self.kwargs = kwargs
15 self.key_file = key_file
16 self.auto_unmount = auto_unmount
17 self.filesystem = 'crypto_LUKS'
18 self.mapdev = None
19
20 def __enter__(self):
21 #if self.partition.allow_formatting:
22 # self.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)
23 #else:
24 if not self.key_file:
25 self.key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
26
27 if type(self.password) != bytes:
28 self.password = bytes(self.password, 'UTF-8')
29
30 with open(self.key_file, 'wb') as fh:
31 fh.write(self.password)
32
33 return self.unlock(self.partition, self.mountpoint, self.key_file)
34
35 def __exit__(self, *args, **kwargs):
36 # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
37 if self.auto_unmount:
38 self.close()
39
40 if len(args) >= 2 and args[1]:
41 raise args[1]
42 return True
43
44 def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):
45 # TODO: We should be able to integrate this into the main log some how.
46 # Perhaps post-mortem?
47 if not self.partition.allow_formatting:
48 raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')
49
50 log(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)
51
52 if not key_file:
53 if self.key_file:
54 key_file = self.key_file
55 else:
56 key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
57
58 if not password:
59 password = self.password
60
61 if type(password) != bytes:
62 password = bytes(password, 'UTF-8')
63
64 with open(key_file, 'wb') as fh:
65 fh.write(password)
66
67 try:
68 # Try to setup the crypt-device
69 cmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')
70 except SysCallError as err:
71 if err.exit_code == 256:
72 log(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)
73 # Partition was in use, unmount it and try again
74 partition.unmount()
75
76 # Get crypt-information about the device by doing a reverse lookup starting with the partition path
77 # For instance: /dev/sda
78 devinfo = json.loads(b''.join(sys_command(f"lsblk --fs -J {partition.path}")).decode('UTF-8'))['blockdevices'][0]
79
80 # For each child (sub-partition/sub-device)
81 if len(children := devinfo.get('children', [])):
82 for child in children:
83 # Unmount the child location
84 if child_mountpoint := child.get('mountpoint', None):
85 log(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)
86 sys_command(f"umount -R {child_mountpoint}")
87
88 # And close it if possible.
89 log(f"Closing crypt device {child['name']}", level=LOG_LEVELS.Debug)
90 sys_command(f"cryptsetup close {child['name']}")
91
92 # Then try again to set up the crypt-device
93 cmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')
94 else:
95 raise err
96
97 if b'Command successful.' not in b''.join(cmd_handle):
98 raise DiskError(f'Could not encrypt volume "{partition.path}": {o}')
99
100 return key_file
101
102 def unlock(self, partition, mountpoint, key_file):
103 """
104 Mounts a lukts2 compatible partition to a certain mountpoint.
105 Keyfile must be specified as there's no way to interact with the pw-prompt atm.
106
107 :param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev
108 :type mountpoint: str
109 """
110 from .disk import get_filesystem_type
111 if '/' in mountpoint:
112 os.path.basename(mountpoint) # TODO: Raise exception instead?
113 sys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
114 if os.path.islink(f'/dev/mapper/{mountpoint}'):
115 self.mapdev = f'/dev/mapper/{mountpoint}'
116 unlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)
117 unlocked_partition.allow_formatting = self.partition.allow_formatting
118 return unlocked_partition
119
120 def close(self, mountpoint=None):
121 if not mountpoint:
122 mountpoint = self.mapdev
123
124 sys_command(f'/usr/bin/cryptsetup close {self.mapdev}')
125 return os.path.islink(self.mapdev) is False
126
127 def format(self, path):
128 if (handle := sys_command(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0:
129 raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}')
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py
--- a/archinstall/lib/luks.py
+++ b/archinstall/lib/luks.py
@@ -94,8 +94,8 @@
else:
raise err
- if b'Command successful.' not in b''.join(cmd_handle):
- raise DiskError(f'Could not encrypt volume "{partition.path}": {o}')
+ if b'Command successful.' not in (cmd_output := b''.join(cmd_handle)):
+ raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
return key_file
@@ -126,4 +126,4 @@
def format(self, path):
if (handle := sys_command(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0:
- raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}')
\ No newline at end of file
+ raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}')
|
{"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -94,8 +94,8 @@\n \t\t\telse:\n \t\t\t\traise err\n \n-\t\tif b'Command successful.' not in b''.join(cmd_handle):\n-\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {o}')\n+\t\tif b'Command successful.' not in (cmd_output := b''.join(cmd_handle)):\n+\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n \t\n \t\treturn key_file\n \n@@ -126,4 +126,4 @@\n \n \tdef format(self, path):\n \t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n-\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n\\ No newline at end of file\n+\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "issue": "Installation fails in encryption step with DiskError and NameError\nI tried to test the installer but hit this error while encrypting a partition. I followed the instructions in the [README](https://github.com/Torxed/archinstall#testing).\r\n\r\n```\r\n{\r\n \"!encryption-password\": \"******\",\r\n \"!root-password\": \"******\",\r\n \"filesystem\": \"ext4\",\r\n \"harddrive\": {\r\n \"model\": null,\r\n \"path\": \"/dev/loop2\",\r\n \"size\": \"5G\"\r\n },\r\n \"hostname\": \"laptop\",\r\n \"keyboard-language\": \"l1\",\r\n \"mirror-region\": {\r\n ...\r\n },\r\n \"nic\": {\r\n \"nic\": \"wlp4s0\"\r\n },\r\n \"packages\": [\r\n \"firefox\",\r\n \"thunderbird\"\r\n ],\r\n \"profile\": {\r\n \"path\": \"/usr/lib/python3.9/site-packages/archinstall/profiles/gnome.py\"\r\n },\r\n \"superusers\": {\r\n \"admin\": {\r\n \"!password\": \"******\"\r\n }\r\n },\r\n \"timezone\": \"Europe/Berlin\",\r\n \"users\": {}\r\n}\r\n```\r\n\r\n```\r\nEncrypting Partition(path=/dev/loop2p2, real_device=/dev/loop2, fs=ext4, rel_mountpoint=/) (This might take a while)\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.9/runpy.py\", line 197, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.9/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/lib/python3.9/site-packages/archinstall/__main__.py\", line 34, in <module>\r\n run_as_a_module()\r\n File \"/usr/lib/python3.9/site-packages/archinstall/__main__.py\", line 31, in run_as_a_module\r\n script.execute()\r\n File \"/usr/lib/python3.9/site-packages/archinstall/lib/profiles.py\", line 143, in execute\r\n self.spec.loader.exec_module(sys.modules[self.namespace])\r\n File \"<frozen importlib._bootstrap_external>\", line 790, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\n File \"/usr/lib/python3.9/site-packages/archinstall/examples/guided.py\", line 342, in <module>\r\n perform_installation_steps()\r\n File \"/usr/lib/python3.9/site-packages/archinstall/examples/guided.py\", line 283, in perform_installation_steps\r\n perform_installation(device=fs.find_partition('/'),\r\n File \"/usr/lib/python3.9/site-packages/archinstall/lib/disk.py\", line 422, in __exit__\r\n raise args[1]\r\n File \"/usr/lib/python3.9/site-packages/archinstall/examples/guided.py\", line 265, in perform_installation_steps\r\n partition.encrypt(password=passwd)\r\n File \"/usr/lib/python3.9/site-packages/archinstall/lib/disk.py\", line 263, in encrypt\r\n return handle.encrypt(self, *args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/archinstall/lib/luks.py\", line 98, in encrypt\r\n raise DiskError(f'Could not encrypt volume \"{partition.path}\": {o}')\r\nNameError: name 'o' is not defined\r\n```\r\n\r\nCongratulations on the inclusion in an official arch installer image! :partying_face: (Even if this turns out to be an April fools joke ;-)\n", "before_files": [{"content": "import os\nfrom .exceptions import *\nfrom .general import *\nfrom .disk import Partition\nfrom .output import log, LOG_LEVELS\nfrom .storage import storage\n\nclass luks2():\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t#if self.partition.allow_formatting:\n\t\t#\tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t#else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\t\t\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\t# TODO: We should be able to integrate this into the main log some how.\n\t\t# Perhaps post-mortem?\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(sys_command(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)\n\t\t\t\t\t\t\tsys_command(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=LOG_LEVELS.Debug)\n\t\t\t\t\t\tsys_command(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif b'Command successful.' not in b''.join(cmd_handle):\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {o}')\n\t\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a lukts2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tsys_command(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')", "path": "archinstall/lib/luks.py"}], "after_files": [{"content": "import os\nfrom .exceptions import *\nfrom .general import *\nfrom .disk import Partition\nfrom .output import log, LOG_LEVELS\nfrom .storage import storage\n\nclass luks2():\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t#if self.partition.allow_formatting:\n\t\t#\tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t#else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\t\t\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\t# TODO: We should be able to integrate this into the main log some how.\n\t\t# Perhaps post-mortem?\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=LOG_LEVELS.Info)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=LOG_LEVELS.Debug)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(sys_command(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=LOG_LEVELS.Debug)\n\t\t\t\t\t\t\tsys_command(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=LOG_LEVELS.Debug)\n\t\t\t\t\t\tsys_command(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = sys_command(f'/usr/bin/cryptsetup -q -v --type luks2 --pbkdf argon2i --hash {hash_type} --key-size {key_size} --iter-time {iter_time} --key-file {os.path.abspath(key_file)} --use-urandom luksFormat {partition.path}')\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif b'Command successful.' not in (cmd_output := b''.join(cmd_handle)):\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\t\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a lukts2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\t\tsys_command(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tsys_command(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := sys_command(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}]}
| 2,768 | 258 |
gh_patches_debug_9508
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install toil-cwl-runner should work
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-879)
┆Issue Number: TOIL-879
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import imp
15 import os
16
17 from tempfile import NamedTemporaryFile
18 from setuptools import find_packages, setup
19
20
21 def run_setup():
22 """
23 Calls setup(). This function exists so the setup() invocation preceded more internal
24 functionality. The `version` module is imported dynamically by import_version() below.
25 """
26 boto = 'boto>=2.48.0, <3'
27 boto3 = 'boto3>=1.7.50, <2'
28 futures = 'futures>=3.1.1, <4'
29 pycryptodome = 'pycryptodome==3.5.1'
30 pymesos = 'pymesos==0.3.15'
31 psutil = 'psutil >= 3.0.1, <6'
32 pynacl = 'pynacl==1.3.0'
33 gcs = 'google-cloud-storage==1.6.0'
34 gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
35 apacheLibcloud = 'apache-libcloud==2.2.1'
36 cwltool = 'cwltool==3.0.20201203173111'
37 galaxyToolUtil = 'galaxy-tool-util'
38 htcondor = 'htcondor>=8.6.0'
39 kubernetes = 'kubernetes>=12.0.1, <13'
40 idna = 'idna>=2'
41 pytz = 'pytz>=2012'
42 pyyaml = 'pyyaml>=5, <6'
43 dill = 'dill>=0.3.2, <0.4'
44 requests = 'requests>=2, <3'
45 docker = 'docker==4.3.1'
46 dateutil = 'python-dateutil'
47 addict = 'addict>=2.2.1, <2.3'
48 enlighten = 'enlighten>=1.5.2, <2'
49 wdlparse = 'wdlparse==0.1.0'
50
51 core_reqs = [
52 dill,
53 requests,
54 docker,
55 dateutil,
56 psutil,
57 addict,
58 pytz,
59 pyyaml,
60 enlighten]
61 aws_reqs = [
62 boto,
63 boto3,
64 futures,
65 pycryptodome]
66 cwl_reqs = [
67 cwltool,
68 galaxyToolUtil]
69 encryption_reqs = [
70 pynacl]
71 google_reqs = [
72 gcs_oauth2_boto_plugin, # is this being used??
73 apacheLibcloud,
74 gcs]
75 htcondor_reqs = [
76 htcondor]
77 kubernetes_reqs = [
78 kubernetes,
79 idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.
80 mesos_reqs = [
81 pymesos,
82 psutil]
83 wdl_reqs = [
84 wdlparse
85 ]
86
87 # htcondor is not supported by apple
88 # this is tricky to conditionally support in 'all' due
89 # to how wheels work, so it is not included in all and
90 # must be explicitly installed as an extra
91 all_reqs = \
92 aws_reqs + \
93 cwl_reqs + \
94 encryption_reqs + \
95 google_reqs + \
96 kubernetes_reqs + \
97 mesos_reqs + \
98 wdl_reqs
99
100 setup(
101 name='toil',
102 version=version.distVersion,
103 description='Pipeline management software for clusters.',
104 author='Benedict Paten',
105 author_email='[email protected]',
106 url="https://github.com/DataBiosphere/toil",
107 classifiers=[
108 'Development Status :: 5 - Production/Stable',
109 'Environment :: Console',
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Intended Audience :: Healthcare Industry',
113 'License :: OSI Approved :: Apache Software License',
114 'Natural Language :: English',
115 'Operating System :: MacOS :: MacOS X',
116 'Operating System :: POSIX',
117 'Operating System :: POSIX :: Linux',
118 'Programming Language :: Python :: 3.6',
119 'Topic :: Scientific/Engineering',
120 'Topic :: Scientific/Engineering :: Bio-Informatics',
121 'Topic :: Scientific/Engineering :: Astronomy',
122 'Topic :: Scientific/Engineering :: Atmospheric Science',
123 'Topic :: Scientific/Engineering :: Information Analysis',
124 'Topic :: Scientific/Engineering :: Medical Science Apps.',
125 'Topic :: System :: Distributed Computing',
126 'Topic :: Utilities'],
127 license="Apache License v2.0",
128 python_requires=">=3.6",
129 install_requires=core_reqs,
130 extras_require={
131 'aws': aws_reqs,
132 'cwl': cwl_reqs,
133 'encryption': encryption_reqs,
134 'google': google_reqs,
135 'htcondor:sys_platform!="darwin"': htcondor_reqs,
136 'kubernetes': kubernetes_reqs,
137 'mesos': mesos_reqs,
138 'wdl': wdl_reqs,
139 'all': all_reqs},
140 package_dir={'': 'src'},
141 packages=find_packages(where='src',
142 # Note that we intentionally include the top-level `test` package for
143 # functionality like the @experimental and @integrative decorators:
144 exclude=['*.test.*']),
145 package_data={
146 '': ['*.yml', 'cloud-config'],
147 },
148 # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
149 # you can't just change them here. Luckily, most of them are pretty unique strings, and thus
150 # easy to search for.
151 entry_points={
152 'console_scripts': [
153 'toil = toil.utils.toilMain:main',
154 '_toil_worker = toil.worker:main',
155 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',
156 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
157 'toil-wdl-runner = toil.wdl.toilwdl:main',
158 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',
159 '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})
160
161
162 def import_version():
163 """Return the module object for src/toil/version.py, generate from the template if required."""
164 if not os.path.exists('src/toil/version.py'):
165 # Use the template to generate src/toil/version.py
166 import version_template
167 with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:
168 f.write(version_template.expand_())
169 os.rename(f.name, 'src/toil/version.py')
170
171 # Unfortunately, we can't use a straight import here because that would also load the stuff
172 # defined in "src/toil/__init__.py" which imports modules from external dependencies that may
173 # yet to be installed when setup.py is invoked.
174 #
175 # This is also the reason we cannot switch from the "deprecated" imp library
176 # and use:
177 # from importlib.machinery import SourceFileLoader
178 # return SourceFileLoader('toil.version', path='src/toil/version.py').load_module()
179 #
180 # Because SourceFileLoader will error and load "src/toil/__init__.py" .
181 return imp.load_source('toil.version', 'src/toil/version.py')
182
183
184 version = import_version()
185 run_setup()
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -101,8 +101,8 @@
name='toil',
version=version.distVersion,
description='Pipeline management software for clusters.',
- author='Benedict Paten',
- author_email='[email protected]',
+ author='Benedict Paten and the Toil community',
+ author_email='[email protected]',
url="https://github.com/DataBiosphere/toil",
classifiers=[
'Development Status :: 5 - Production/Stable',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -101,8 +101,8 @@\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n- author='Benedict Paten',\n- author_email='[email protected]',\n+ author='Benedict Paten and the Toil community',\n+ author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n", "issue": "pip install toil-cwl-runner should work\n\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-879)\n\u2506Issue Number: TOIL-879\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport imp\nimport os\n\nfrom tempfile import NamedTemporaryFile\nfrom setuptools import find_packages, setup\n\n\ndef run_setup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. The `version` module is imported dynamically by import_version() below.\n \"\"\"\n boto = 'boto>=2.48.0, <3'\n boto3 = 'boto3>=1.7.50, <2'\n futures = 'futures>=3.1.1, <4'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20201203173111'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=12.0.1, <13'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n pyyaml = 'pyyaml>=5, <6'\n dill = 'dill>=0.3.2, <0.4'\n requests = 'requests>=2, <3'\n docker = 'docker==4.3.1'\n dateutil = 'python-dateutil'\n addict = 'addict>=2.2.1, <2.3'\n enlighten = 'enlighten>=1.5.2, <2'\n wdlparse = 'wdlparse==0.1.0'\n\n core_reqs = [\n dill,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pytz,\n pyyaml,\n enlighten]\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = [\n wdlparse\n ]\n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs + \\\n wdl_reqs\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decorators:\n exclude=['*.test.*']),\n package_data={\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef import_version():\n \"\"\"Return the module object for src/toil/version.py, generate from the template if required.\"\"\"\n if not os.path.exists('src/toil/version.py'):\n # Use the template to generate src/toil/version.py\n import version_template\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(version_template.expand_())\n os.rename(f.name, 'src/toil/version.py')\n\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in \"src/toil/__init__.py\" which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n #\n # This is also the reason we cannot switch from the \"deprecated\" imp library\n # and use:\n # from importlib.machinery import SourceFileLoader\n # return SourceFileLoader('toil.version', path='src/toil/version.py').load_module()\n #\n # Because SourceFileLoader will error and load \"src/toil/__init__.py\" .\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = import_version()\nrun_setup()\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport imp\nimport os\n\nfrom tempfile import NamedTemporaryFile\nfrom setuptools import find_packages, setup\n\n\ndef run_setup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. The `version` module is imported dynamically by import_version() below.\n \"\"\"\n boto = 'boto>=2.48.0, <3'\n boto3 = 'boto3>=1.7.50, <2'\n futures = 'futures>=3.1.1, <4'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.15'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.3.0'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool==3.0.20201203173111'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=12.0.1, <13'\n idna = 'idna>=2'\n pytz = 'pytz>=2012'\n pyyaml = 'pyyaml>=5, <6'\n dill = 'dill>=0.3.2, <0.4'\n requests = 'requests>=2, <3'\n docker = 'docker==4.3.1'\n dateutil = 'python-dateutil'\n addict = 'addict>=2.2.1, <2.3'\n enlighten = 'enlighten>=1.5.2, <2'\n wdlparse = 'wdlparse==0.1.0'\n\n core_reqs = [\n dill,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n pytz,\n pyyaml,\n enlighten]\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes,\n idna] # Kubernetes's urllib3 can mange to use idna without really depending on it.\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = [\n wdlparse\n ]\n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs + \\\n wdl_reqs\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten and the Toil community',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n python_requires=\">=3.6\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decorators:\n exclude=['*.test.*']),\n package_data={\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef import_version():\n \"\"\"Return the module object for src/toil/version.py, generate from the template if required.\"\"\"\n if not os.path.exists('src/toil/version.py'):\n # Use the template to generate src/toil/version.py\n import version_template\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(version_template.expand_())\n os.rename(f.name, 'src/toil/version.py')\n\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in \"src/toil/__init__.py\" which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n #\n # This is also the reason we cannot switch from the \"deprecated\" imp library\n # and use:\n # from importlib.machinery import SourceFileLoader\n # return SourceFileLoader('toil.version', path='src/toil/version.py').load_module()\n #\n # Because SourceFileLoader will error and load \"src/toil/__init__.py\" .\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = import_version()\nrun_setup()\n", "path": "setup.py"}]}
| 2,561 | 139 |
gh_patches_debug_12484
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1205
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PORT] Slack adapter updates for dialog interactions
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3744
Fixes #3733 #3726 #3725 #3724
* Adds missing values to SlackPayload model
* Expose SlackClientWrapper via public property
# Changed projects
* Adapters
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import Optional, List
5 from slack.web.classes.actions import Action
6 from botbuilder.adapters.slack.slack_message import SlackMessage
7
8
9 class SlackPayload:
10 def __init__(self, **kwargs):
11 self.type: [str] = kwargs.get("type")
12 self.token: str = kwargs.get("token")
13 self.channel: str = kwargs.get("channel")
14 self.thread_ts: str = kwargs.get("thread_ts")
15 self.team: str = kwargs.get("team")
16 self.user: str = kwargs.get("user")
17 self.actions: Optional[List[Action]] = None
18
19 if "message" in kwargs:
20 message = kwargs.get("message")
21 self.message = (
22 message
23 if isinstance(message) is SlackMessage
24 else SlackMessage(**message)
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py
--- a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py
+++ b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py
@@ -15,6 +15,12 @@
self.team: str = kwargs.get("team")
self.user: str = kwargs.get("user")
self.actions: Optional[List[Action]] = None
+ self.trigger_id: str = kwargs.get("trigger_id")
+ self.action_ts: str = kwargs.get("action_ts")
+ self.submission: str = kwargs.get("submission")
+ self.callback_id: str = kwargs.get("callback_id")
+ self.state: str = kwargs.get("state")
+ self.response_url: str = kwargs.get("response_url")
if "message" in kwargs:
message = kwargs.get("message")
|
{"golden_diff": "diff --git a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py\n--- a/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py\n+++ b/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py\n@@ -15,6 +15,12 @@\n self.team: str = kwargs.get(\"team\")\r\n self.user: str = kwargs.get(\"user\")\r\n self.actions: Optional[List[Action]] = None\r\n+ self.trigger_id: str = kwargs.get(\"trigger_id\")\r\n+ self.action_ts: str = kwargs.get(\"action_ts\")\r\n+ self.submission: str = kwargs.get(\"submission\")\r\n+ self.callback_id: str = kwargs.get(\"callback_id\")\r\n+ self.state: str = kwargs.get(\"state\")\r\n+ self.response_url: str = kwargs.get(\"response_url\")\r\n \r\n if \"message\" in kwargs:\r\n message = kwargs.get(\"message\")\n", "issue": "[PORT] Slack adapter updates for dialog interactions\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3744\n\nFixes #3733 #3726 #3725 #3724 \r\n\r\n* Adds missing values to SlackPayload model \r\n* Expose SlackClientWrapper via public property\n\n\r\n# Changed projects\r\n* Adapters\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom typing import Optional, List\r\nfrom slack.web.classes.actions import Action\r\nfrom botbuilder.adapters.slack.slack_message import SlackMessage\r\n\r\n\r\nclass SlackPayload:\r\n def __init__(self, **kwargs):\r\n self.type: [str] = kwargs.get(\"type\")\r\n self.token: str = kwargs.get(\"token\")\r\n self.channel: str = kwargs.get(\"channel\")\r\n self.thread_ts: str = kwargs.get(\"thread_ts\")\r\n self.team: str = kwargs.get(\"team\")\r\n self.user: str = kwargs.get(\"user\")\r\n self.actions: Optional[List[Action]] = None\r\n\r\n if \"message\" in kwargs:\r\n message = kwargs.get(\"message\")\r\n self.message = (\r\n message\r\n if isinstance(message) is SlackMessage\r\n else SlackMessage(**message)\r\n )\r\n", "path": "libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom typing import Optional, List\r\nfrom slack.web.classes.actions import Action\r\nfrom botbuilder.adapters.slack.slack_message import SlackMessage\r\n\r\n\r\nclass SlackPayload:\r\n def __init__(self, **kwargs):\r\n self.type: [str] = kwargs.get(\"type\")\r\n self.token: str = kwargs.get(\"token\")\r\n self.channel: str = kwargs.get(\"channel\")\r\n self.thread_ts: str = kwargs.get(\"thread_ts\")\r\n self.team: str = kwargs.get(\"team\")\r\n self.user: str = kwargs.get(\"user\")\r\n self.actions: Optional[List[Action]] = None\r\n self.trigger_id: str = kwargs.get(\"trigger_id\")\r\n self.action_ts: str = kwargs.get(\"action_ts\")\r\n self.submission: str = kwargs.get(\"submission\")\r\n self.callback_id: str = kwargs.get(\"callback_id\")\r\n self.state: str = kwargs.get(\"state\")\r\n self.response_url: str = kwargs.get(\"response_url\")\r\n\r\n if \"message\" in kwargs:\r\n message = kwargs.get(\"message\")\r\n self.message = (\r\n message\r\n if isinstance(message) is SlackMessage\r\n else SlackMessage(**message)\r\n )\r\n", "path": "libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_payload.py"}]}
| 599 | 237 |
gh_patches_debug_23016
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-7996
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Killing tasks on Startup fails on Windows (PC)
## Problem Definition
After an update Rotki would not restart on its own.
Closing the interface and restarting got no response at all (twice)
Logs showed an attempt to kill the tasks but recorded the failure.
## Logs
> 2024-04-19T13:56:55.600Z: (info): Checking for update
> 2024-04-19T13:56:57.360Z: (info): Update for version 1.32.2 is not available (latest version: 1.32.2, downgrade is disallowed).
> 2024-04-19T13:57:03.347Z: Restarting rotki-core> >
> 2024-04-19T13:57:03.350Z: Starting windows process termination
> 2024-04-19T13:57:07.274Z: Currently running: 246 tasks
> 2024-04-19T13:57:07.280Z: Detected the following running rotki-core processes: 2880, 7092
> 2024-04-19T13:57:07.282Z: Preparing to call "taskill /f /t /PID 2880 /PID 7092" on the rotki-core processes
> 2024-04-19T13:57:07.752Z: Call to taskkill exited
> 2024-04-19T13:57:07.754Z: The 2 processes are still running. Waiting for 2 seconds
> 2024-04-19T15:18:54.047Z:
Investigation and Remediation
Look for any running instances
> C:\Windows\system32>tasklist | find "rotki"
> rotki.exe 16380 Console 1 17,608 K
> rotki.exe 13204 Console 1 10,436 K
> rotki.exe 14164 Console 1 8,596 K
Kill the running instances:
> C:\Windows\system32>taskkill /f /t /PID 16380 /PID 13204 /PID 14164
> SUCCESS: The process with PID 13204 (child process of PID 16380) has been terminated.
> SUCCESS: The process with PID 14164 (child process of PID 16380) has been terminated.
> SUCCESS: The process with PID 16380 (child process of PID 7124) has been terminated.
Could it be the reason It can't kill the running instances is because there is a typo in the code?
As per the log;
2024-04-19T13:57:07.282Z: Preparing to call "taskill /f /t /PID 2880 /PID 7092" on the rotki-core processes
----------------------------------------------------^^^^------------
The command is taskkill (two k's) NOT taskill (one k).
-->
### System Description
Operating system:
`systeminfo | findstr /C:"OS"`
OS Name: Microsoft Windows 10 Home
OS Version: 10.0.19045 N/A Build 19045
OS Manufacturer: Microsoft Corporation
OS Configuration: Standalone Workstation
OS Build Type: Multiprocessor Free
BIOS Version: LENOVO DUCN33WW, 2021-08-10
Rotki version: 1.32.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/server.py`
Content:
```
1 import logging
2 import os
3 import signal
4
5 import gevent
6
7 from rotkehlchen.api.server import APIServer, RestAPI
8 from rotkehlchen.args import app_args
9 from rotkehlchen.logging import TRACE, RotkehlchenLogsAdapter, add_logging_level, configure_logging
10 from rotkehlchen.rotkehlchen import Rotkehlchen
11
12 logger = logging.getLogger(__name__)
13 log = RotkehlchenLogsAdapter(logger)
14
15
16 class RotkehlchenServer:
17 def __init__(self) -> None:
18 """Initializes the backend server
19 May raise:
20 - SystemPermissionError due to the given args containing a datadir
21 that does not have the correct permissions
22 """
23 arg_parser = app_args(
24 prog='rotki',
25 description=(
26 'rotki, the portfolio tracker and accounting tool that respects your privacy'
27 ),
28 )
29 self.args = arg_parser.parse_args()
30 add_logging_level('TRACE', TRACE)
31 configure_logging(self.args)
32 self.rotkehlchen = Rotkehlchen(self.args)
33 self.stop_event = gevent.event.Event()
34 if ',' in self.args.api_cors:
35 domain_list = [str(domain) for domain in self.args.api_cors.split(',')]
36 else:
37 domain_list = [str(self.args.api_cors)]
38 self.api_server = APIServer(
39 rest_api=RestAPI(rotkehlchen=self.rotkehlchen),
40 ws_notifier=self.rotkehlchen.rotki_notifier,
41 cors_domain_list=domain_list,
42 )
43
44 def shutdown(self) -> None:
45 log.debug('Shutdown initiated')
46 self.api_server.stop()
47 self.stop_event.set()
48
49 def main(self) -> None:
50 # disable printing hub exceptions in stderr. With using the hub to do various
51 # tasks that should raise exceptions and have them handled outside the hub
52 # printing them in stdout is now too much spam (and would worry users too)
53 hub = gevent.hub.get_hub()
54 hub.exception_stream = None
55 # we don't use threadpool much so go to 2 instead of default 10
56 hub.threadpool_size = 2
57 hub.threadpool.maxsize = 2
58 if os.name != 'nt':
59 gevent.hub.signal(signal.SIGQUIT, self.shutdown) # type: ignore[attr-defined,unused-ignore] # pylint: disable=no-member # linters don't understand the os.name check
60 gevent.hub.signal(signal.SIGINT, self.shutdown)
61 gevent.hub.signal(signal.SIGTERM, self.shutdown)
62 # The api server's RestAPI starts rotki main loop
63 self.api_server.start(
64 host=self.args.api_host,
65 rest_port=self.args.rest_api_port,
66 )
67 self.stop_event.wait()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rotkehlchen/server.py b/rotkehlchen/server.py
--- a/rotkehlchen/server.py
+++ b/rotkehlchen/server.py
@@ -56,9 +56,17 @@
hub.threadpool_size = 2
hub.threadpool.maxsize = 2
if os.name != 'nt':
- gevent.hub.signal(signal.SIGQUIT, self.shutdown) # type: ignore[attr-defined,unused-ignore] # pylint: disable=no-member # linters don't understand the os.name check
+ gevent.hub.signal(signal.SIGQUIT, self.shutdown)
+ gevent.hub.signal(signal.SIGTERM, self.shutdown)
+ else:
+ # Handle the window control signal as stated here https://pyinstaller.org/en/stable/feature-notes.html#signal-handling-in-console-windows-applications-and-onefile-application-cleanup # noqa: E501
+ # This logic handles the signal sent from the bootloader equivalent to sigterm in
+ # addition to the signals sent by windows's taskkill.
+ # Research documented in https://github.com/yabirgb/rotki-python-research
+ import win32api # pylint: disable=import-outside-toplevel # isort:skip
+ win32api.SetConsoleCtrlHandler(self.shutdown, True)
+
gevent.hub.signal(signal.SIGINT, self.shutdown)
- gevent.hub.signal(signal.SIGTERM, self.shutdown)
# The api server's RestAPI starts rotki main loop
self.api_server.start(
host=self.args.api_host,
|
{"golden_diff": "diff --git a/rotkehlchen/server.py b/rotkehlchen/server.py\n--- a/rotkehlchen/server.py\n+++ b/rotkehlchen/server.py\n@@ -56,9 +56,17 @@\n hub.threadpool_size = 2\n hub.threadpool.maxsize = 2\n if os.name != 'nt':\n- gevent.hub.signal(signal.SIGQUIT, self.shutdown) # type: ignore[attr-defined,unused-ignore] # pylint: disable=no-member # linters don't understand the os.name check\n+ gevent.hub.signal(signal.SIGQUIT, self.shutdown)\n+ gevent.hub.signal(signal.SIGTERM, self.shutdown)\n+ else:\n+ # Handle the window control signal as stated here https://pyinstaller.org/en/stable/feature-notes.html#signal-handling-in-console-windows-applications-and-onefile-application-cleanup # noqa: E501\n+ # This logic handles the signal sent from the bootloader equivalent to sigterm in\n+ # addition to the signals sent by windows's taskkill.\n+ # Research documented in https://github.com/yabirgb/rotki-python-research\n+ import win32api # pylint: disable=import-outside-toplevel # isort:skip\n+ win32api.SetConsoleCtrlHandler(self.shutdown, True)\n+\n gevent.hub.signal(signal.SIGINT, self.shutdown)\n- gevent.hub.signal(signal.SIGTERM, self.shutdown)\n # The api server's RestAPI starts rotki main loop\n self.api_server.start(\n host=self.args.api_host,\n", "issue": "Killing tasks on Startup fails on Windows (PC)\n## Problem Definition\r\n\r\nAfter an update Rotki would not restart on its own.\r\n\r\nClosing the interface and restarting got no response at all (twice)\r\n\r\nLogs showed an attempt to kill the tasks but recorded the failure.\r\n\r\n## Logs\r\n\r\n> 2024-04-19T13:56:55.600Z: (info): Checking for update\r\n> 2024-04-19T13:56:57.360Z: (info): Update for version 1.32.2 is not available (latest version: 1.32.2, downgrade is disallowed).\r\n> 2024-04-19T13:57:03.347Z: Restarting rotki-core> > \r\n> 2024-04-19T13:57:03.350Z: Starting windows process termination\r\n> 2024-04-19T13:57:07.274Z: Currently running: 246 tasks\r\n> 2024-04-19T13:57:07.280Z: Detected the following running rotki-core processes: 2880, 7092\r\n> 2024-04-19T13:57:07.282Z: Preparing to call \"taskill /f /t /PID 2880 /PID 7092\" on the rotki-core processes\r\n> 2024-04-19T13:57:07.752Z: Call to taskkill exited\r\n> 2024-04-19T13:57:07.754Z: The 2 processes are still running. Waiting for 2 seconds\r\n> 2024-04-19T15:18:54.047Z: \r\n\r\nInvestigation and Remediation\r\n\r\nLook for any running instances \r\n\r\n> C:\\Windows\\system32>tasklist | find \"rotki\"\r\n> rotki.exe 16380 Console 1 17,608 K\r\n> rotki.exe 13204 Console 1 10,436 K\r\n> rotki.exe 14164 Console 1 8,596 K\r\n\r\nKill the running instances:\r\n\r\n> C:\\Windows\\system32>taskkill /f /t /PID 16380 /PID 13204 /PID 14164\r\n> SUCCESS: The process with PID 13204 (child process of PID 16380) has been terminated.\r\n> SUCCESS: The process with PID 14164 (child process of PID 16380) has been terminated.\r\n> SUCCESS: The process with PID 16380 (child process of PID 7124) has been terminated.\r\n\r\nCould it be the reason It can't kill the running instances is because there is a typo in the code?\r\n\r\nAs per the log;\r\n\r\n2024-04-19T13:57:07.282Z: Preparing to call \"taskill /f /t /PID 2880 /PID 7092\" on the rotki-core processes\r\n----------------------------------------------------^^^^------------\r\n\r\nThe command is taskkill (two k's) NOT taskill (one k). \r\n\r\n\r\n-->\r\n\r\n\r\n### System Description\r\n\r\n\r\nOperating system: \r\n\r\n`systeminfo | findstr /C:\"OS\"`\r\n\r\nOS Name: Microsoft Windows 10 Home\r\nOS Version: 10.0.19045 N/A Build 19045\r\nOS Manufacturer: Microsoft Corporation\r\nOS Configuration: Standalone Workstation\r\nOS Build Type: Multiprocessor Free\r\nBIOS Version: LENOVO DUCN33WW, 2021-08-10\r\n\r\n\r\nRotki version: 1.32.2\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport signal\n\nimport gevent\n\nfrom rotkehlchen.api.server import APIServer, RestAPI\nfrom rotkehlchen.args import app_args\nfrom rotkehlchen.logging import TRACE, RotkehlchenLogsAdapter, add_logging_level, configure_logging\nfrom rotkehlchen.rotkehlchen import Rotkehlchen\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\nclass RotkehlchenServer:\n def __init__(self) -> None:\n \"\"\"Initializes the backend server\n May raise:\n - SystemPermissionError due to the given args containing a datadir\n that does not have the correct permissions\n \"\"\"\n arg_parser = app_args(\n prog='rotki',\n description=(\n 'rotki, the portfolio tracker and accounting tool that respects your privacy'\n ),\n )\n self.args = arg_parser.parse_args()\n add_logging_level('TRACE', TRACE)\n configure_logging(self.args)\n self.rotkehlchen = Rotkehlchen(self.args)\n self.stop_event = gevent.event.Event()\n if ',' in self.args.api_cors:\n domain_list = [str(domain) for domain in self.args.api_cors.split(',')]\n else:\n domain_list = [str(self.args.api_cors)]\n self.api_server = APIServer(\n rest_api=RestAPI(rotkehlchen=self.rotkehlchen),\n ws_notifier=self.rotkehlchen.rotki_notifier,\n cors_domain_list=domain_list,\n )\n\n def shutdown(self) -> None:\n log.debug('Shutdown initiated')\n self.api_server.stop()\n self.stop_event.set()\n\n def main(self) -> None:\n # disable printing hub exceptions in stderr. With using the hub to do various\n # tasks that should raise exceptions and have them handled outside the hub\n # printing them in stdout is now too much spam (and would worry users too)\n hub = gevent.hub.get_hub()\n hub.exception_stream = None\n # we don't use threadpool much so go to 2 instead of default 10\n hub.threadpool_size = 2\n hub.threadpool.maxsize = 2\n if os.name != 'nt':\n gevent.hub.signal(signal.SIGQUIT, self.shutdown) # type: ignore[attr-defined,unused-ignore] # pylint: disable=no-member # linters don't understand the os.name check\n gevent.hub.signal(signal.SIGINT, self.shutdown)\n gevent.hub.signal(signal.SIGTERM, self.shutdown)\n # The api server's RestAPI starts rotki main loop\n self.api_server.start(\n host=self.args.api_host,\n rest_port=self.args.rest_api_port,\n )\n self.stop_event.wait()\n", "path": "rotkehlchen/server.py"}], "after_files": [{"content": "import logging\nimport os\nimport signal\n\nimport gevent\n\nfrom rotkehlchen.api.server import APIServer, RestAPI\nfrom rotkehlchen.args import app_args\nfrom rotkehlchen.logging import TRACE, RotkehlchenLogsAdapter, add_logging_level, configure_logging\nfrom rotkehlchen.rotkehlchen import Rotkehlchen\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\nclass RotkehlchenServer:\n def __init__(self) -> None:\n \"\"\"Initializes the backend server\n May raise:\n - SystemPermissionError due to the given args containing a datadir\n that does not have the correct permissions\n \"\"\"\n arg_parser = app_args(\n prog='rotki',\n description=(\n 'rotki, the portfolio tracker and accounting tool that respects your privacy'\n ),\n )\n self.args = arg_parser.parse_args()\n add_logging_level('TRACE', TRACE)\n configure_logging(self.args)\n self.rotkehlchen = Rotkehlchen(self.args)\n self.stop_event = gevent.event.Event()\n if ',' in self.args.api_cors:\n domain_list = [str(domain) for domain in self.args.api_cors.split(',')]\n else:\n domain_list = [str(self.args.api_cors)]\n self.api_server = APIServer(\n rest_api=RestAPI(rotkehlchen=self.rotkehlchen),\n ws_notifier=self.rotkehlchen.rotki_notifier,\n cors_domain_list=domain_list,\n )\n\n def shutdown(self) -> None:\n log.debug('Shutdown initiated')\n self.api_server.stop()\n self.stop_event.set()\n\n def main(self) -> None:\n # disable printing hub exceptions in stderr. With using the hub to do various\n # tasks that should raise exceptions and have them handled outside the hub\n # printing them in stdout is now too much spam (and would worry users too)\n hub = gevent.hub.get_hub()\n hub.exception_stream = None\n # we don't use threadpool much so go to 2 instead of default 10\n hub.threadpool_size = 2\n hub.threadpool.maxsize = 2\n if os.name != 'nt':\n gevent.hub.signal(signal.SIGQUIT, self.shutdown)\n gevent.hub.signal(signal.SIGTERM, self.shutdown)\n else:\n # Handle the window control signal as stated here https://pyinstaller.org/en/stable/feature-notes.html#signal-handling-in-console-windows-applications-and-onefile-application-cleanup # noqa: E501\n # This logic handles the signal sent from the bootloader equivalent to sigterm in\n # addition to the signals sent by windows's taskkill.\n # Research documented in https://github.com/yabirgb/rotki-python-research\n import win32api # pylint: disable=import-outside-toplevel # isort:skip\n win32api.SetConsoleCtrlHandler(self.shutdown, True)\n\n gevent.hub.signal(signal.SIGINT, self.shutdown)\n # The api server's RestAPI starts rotki main loop\n self.api_server.start(\n host=self.args.api_host,\n rest_port=self.args.rest_api_port,\n )\n self.stop_event.wait()\n", "path": "rotkehlchen/server.py"}]}
| 1,910 | 351 |
gh_patches_debug_25644
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-3810
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tokens are able to enumerate project names regardless of organization membership.
We respond with 404s when there are missing project names, but 403s when there is an existing project. Probably should just check for organization membership before even going into the project fetching.
# Original HackerOne Report
Title: Project Enumeration of Other Teams
Types: Information Disclosure
Link: https://hackerone.com/reports/154048
Date: 2016-07-26 05:25:05 -0700
By: vagg-a-bond
Details:
Hello,
It is possible for any valid user of @sentry to enumerate project names in other team by comparing errors.
For example, if user user accesses `GET /api/0/projects/sumne/project_sumne_001/` which is not of his/her, then error will be `403 FORBIDDEN` and for `GET /api/0/projects/sumne/project_sumne_001_not_there/` application responds with `404 NOT FOUND`
So an attacker can enumerate all the projects available in a team by automating the process to request `GET /api/0/projects/TARGET_TEAM_NAME/PROJECT_NAME/`.
**Vulnerable Endpoint:**
> GET /api/0/projects/sumne/project_sumne_001/*
**Recommended Fix:**
Respond with `404 NOT FOUND` for both.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/api/bases/project.py`
Content:
```
1 from __future__ import absolute_import
2
3 from sentry.auth import access
4 from sentry.api.base import Endpoint
5 from sentry.api.exceptions import ResourceDoesNotExist
6 from sentry.api.permissions import ScopedPermission
7 from sentry.models import Project, ProjectStatus
8 from sentry.models.apikey import ROOT_KEY
9
10
11 class ProjectPermission(ScopedPermission):
12 scope_map = {
13 'GET': ['project:read', 'project:write', 'project:delete'],
14 'POST': ['project:write', 'project:delete'],
15 'PUT': ['project:write', 'project:delete'],
16 'DELETE': ['project:delete'],
17 }
18
19 def has_object_permission(self, request, view, project):
20 if request.user and request.user.is_authenticated() and request.auth:
21 request.access = access.from_request(
22 request, project.organization, scopes=request.auth.get_scopes(),
23 )
24
25 elif request.auth:
26 if request.auth is ROOT_KEY:
27 return True
28 return request.auth.organization_id == project.organization_id
29
30 else:
31 request.access = access.from_request(request, project.organization)
32
33 allowed_scopes = set(self.scope_map.get(request.method, []))
34 return any(
35 request.access.has_team_scope(project.team, s)
36 for s in allowed_scopes
37 )
38
39
40 class ProjectReleasePermission(ProjectPermission):
41 scope_map = {
42 'GET': ['project:read', 'project:write', 'project:delete', 'project:releases'],
43 'POST': ['project:write', 'project:delete', 'project:releases'],
44 'PUT': ['project:write', 'project:delete', 'project:releases'],
45 'DELETE': ['project:delete', 'project:releases'],
46 }
47
48
49 class ProjectEventPermission(ProjectPermission):
50 scope_map = {
51 'GET': ['event:read', 'event:write', 'event:delete'],
52 'POST': ['event:write', 'event:delete'],
53 'PUT': ['event:write', 'event:delete'],
54 'DELETE': ['event:delete'],
55 }
56
57
58 class ProjectEndpoint(Endpoint):
59 permission_classes = (ProjectPermission,)
60
61 def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
62 try:
63 project = Project.objects.get_from_cache(
64 organization__slug=organization_slug,
65 slug=project_slug,
66 )
67 except Project.DoesNotExist:
68 raise ResourceDoesNotExist
69
70 if project.status != ProjectStatus.VISIBLE:
71 raise ResourceDoesNotExist
72
73 self.check_object_permissions(request, project)
74
75 kwargs['project'] = project
76 return (args, kwargs)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/api/bases/project.py b/src/sentry/api/bases/project.py
--- a/src/sentry/api/bases/project.py
+++ b/src/sentry/api/bases/project.py
@@ -4,7 +4,7 @@
from sentry.api.base import Endpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.permissions import ScopedPermission
-from sentry.models import Project, ProjectStatus
+from sentry.models import Organization, Project, ProjectStatus
from sentry.models.apikey import ROOT_KEY
@@ -59,9 +59,21 @@
permission_classes = (ProjectPermission,)
def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):
+ try:
+ org = Organization.objects.get_from_cache(slug=organization_slug)
+ if request.user:
+ can_access_org = any(access.from_request(request, org).memberships)
+ if request.auth:
+ can_access_org = request.auth.organization_id == org.id
+ if not can_access_org:
+ raise ResourceDoesNotExist
+
+ except Organization.DoesNotExist:
+ raise ResourceDoesNotExist
+
try:
project = Project.objects.get_from_cache(
- organization__slug=organization_slug,
+ organization=org,
slug=project_slug,
)
except Project.DoesNotExist:
|
{"golden_diff": "diff --git a/src/sentry/api/bases/project.py b/src/sentry/api/bases/project.py\n--- a/src/sentry/api/bases/project.py\n+++ b/src/sentry/api/bases/project.py\n@@ -4,7 +4,7 @@\n from sentry.api.base import Endpoint\n from sentry.api.exceptions import ResourceDoesNotExist\n from sentry.api.permissions import ScopedPermission\n-from sentry.models import Project, ProjectStatus\n+from sentry.models import Organization, Project, ProjectStatus\n from sentry.models.apikey import ROOT_KEY\n \n \n@@ -59,9 +59,21 @@\n permission_classes = (ProjectPermission,)\n \n def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):\n+ try:\n+ org = Organization.objects.get_from_cache(slug=organization_slug)\n+ if request.user:\n+ can_access_org = any(access.from_request(request, org).memberships)\n+ if request.auth:\n+ can_access_org = request.auth.organization_id == org.id\n+ if not can_access_org:\n+ raise ResourceDoesNotExist\n+\n+ except Organization.DoesNotExist:\n+ raise ResourceDoesNotExist\n+\n try:\n project = Project.objects.get_from_cache(\n- organization__slug=organization_slug,\n+ organization=org,\n slug=project_slug,\n )\n except Project.DoesNotExist:\n", "issue": "Tokens are able to enumerate project names regardless of organization membership.\nWe respond with 404s when there are missing project names, but 403s when there is an existing project. Probably should just check for organization membership before even going into the project fetching.\n# Original HackerOne Report\n\nTitle: Project Enumeration of Other Teams\nTypes: Information Disclosure\nLink: https://hackerone.com/reports/154048\nDate: 2016-07-26 05:25:05 -0700\nBy: vagg-a-bond\n\nDetails:\nHello,\n\nIt is possible for any valid user of @sentry to enumerate project names in other team by comparing errors. \n\nFor example, if user user accesses `GET /api/0/projects/sumne/project_sumne_001/` which is not of his/her, then error will be `403 FORBIDDEN` and for `GET /api/0/projects/sumne/project_sumne_001_not_there/` application responds with `404 NOT FOUND`\n\nSo an attacker can enumerate all the projects available in a team by automating the process to request `GET /api/0/projects/TARGET_TEAM_NAME/PROJECT_NAME/`. \n\n**Vulnerable Endpoint:**\n\n> GET /api/0/projects/sumne/project_sumne_001/*\n\n**Recommended Fix:**\nRespond with `404 NOT FOUND` for both. \n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry.auth import access\nfrom sentry.api.base import Endpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.permissions import ScopedPermission\nfrom sentry.models import Project, ProjectStatus\nfrom sentry.models.apikey import ROOT_KEY\n\n\nclass ProjectPermission(ScopedPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:delete'],\n 'POST': ['project:write', 'project:delete'],\n 'PUT': ['project:write', 'project:delete'],\n 'DELETE': ['project:delete'],\n }\n\n def has_object_permission(self, request, view, project):\n if request.user and request.user.is_authenticated() and request.auth:\n request.access = access.from_request(\n request, project.organization, scopes=request.auth.get_scopes(),\n )\n\n elif request.auth:\n if request.auth is ROOT_KEY:\n return True\n return request.auth.organization_id == project.organization_id\n\n else:\n request.access = access.from_request(request, project.organization)\n\n allowed_scopes = set(self.scope_map.get(request.method, []))\n return any(\n request.access.has_team_scope(project.team, s)\n for s in allowed_scopes\n )\n\n\nclass ProjectReleasePermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:delete', 'project:releases'],\n 'POST': ['project:write', 'project:delete', 'project:releases'],\n 'PUT': ['project:write', 'project:delete', 'project:releases'],\n 'DELETE': ['project:delete', 'project:releases'],\n }\n\n\nclass ProjectEventPermission(ProjectPermission):\n scope_map = {\n 'GET': ['event:read', 'event:write', 'event:delete'],\n 'POST': ['event:write', 'event:delete'],\n 'PUT': ['event:write', 'event:delete'],\n 'DELETE': ['event:delete'],\n }\n\n\nclass ProjectEndpoint(Endpoint):\n permission_classes = (ProjectPermission,)\n\n def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):\n try:\n project = Project.objects.get_from_cache(\n organization__slug=organization_slug,\n slug=project_slug,\n )\n except Project.DoesNotExist:\n raise ResourceDoesNotExist\n\n if project.status != ProjectStatus.VISIBLE:\n raise ResourceDoesNotExist\n\n self.check_object_permissions(request, project)\n\n kwargs['project'] = project\n return (args, kwargs)\n", "path": "src/sentry/api/bases/project.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry.auth import access\nfrom sentry.api.base import Endpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.api.permissions import ScopedPermission\nfrom sentry.models import Organization, Project, ProjectStatus\nfrom sentry.models.apikey import ROOT_KEY\n\n\nclass ProjectPermission(ScopedPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:delete'],\n 'POST': ['project:write', 'project:delete'],\n 'PUT': ['project:write', 'project:delete'],\n 'DELETE': ['project:delete'],\n }\n\n def has_object_permission(self, request, view, project):\n if request.user and request.user.is_authenticated() and request.auth:\n request.access = access.from_request(\n request, project.organization, scopes=request.auth.get_scopes(),\n )\n\n elif request.auth:\n if request.auth is ROOT_KEY:\n return True\n return request.auth.organization_id == project.organization_id\n\n else:\n request.access = access.from_request(request, project.organization)\n\n allowed_scopes = set(self.scope_map.get(request.method, []))\n return any(\n request.access.has_team_scope(project.team, s)\n for s in allowed_scopes\n )\n\n\nclass ProjectReleasePermission(ProjectPermission):\n scope_map = {\n 'GET': ['project:read', 'project:write', 'project:delete', 'project:releases'],\n 'POST': ['project:write', 'project:delete', 'project:releases'],\n 'PUT': ['project:write', 'project:delete', 'project:releases'],\n 'DELETE': ['project:delete', 'project:releases'],\n }\n\n\nclass ProjectEventPermission(ProjectPermission):\n scope_map = {\n 'GET': ['event:read', 'event:write', 'event:delete'],\n 'POST': ['event:write', 'event:delete'],\n 'PUT': ['event:write', 'event:delete'],\n 'DELETE': ['event:delete'],\n }\n\n\nclass ProjectEndpoint(Endpoint):\n permission_classes = (ProjectPermission,)\n\n def convert_args(self, request, organization_slug, project_slug, *args, **kwargs):\n try:\n org = Organization.objects.get_from_cache(slug=organization_slug)\n if request.user:\n can_access_org = any(access.from_request(request, org).memberships)\n if request.auth:\n can_access_org = request.auth.organization_id == org.id\n if not can_access_org:\n raise ResourceDoesNotExist\n\n except Organization.DoesNotExist:\n raise ResourceDoesNotExist\n\n try:\n project = Project.objects.get_from_cache(\n organization=org,\n slug=project_slug,\n )\n except Project.DoesNotExist:\n raise ResourceDoesNotExist\n\n if project.status != ProjectStatus.VISIBLE:\n raise ResourceDoesNotExist\n\n self.check_object_permissions(request, project)\n\n kwargs['project'] = project\n return (args, kwargs)\n", "path": "src/sentry/api/bases/project.py"}]}
| 1,283 | 288 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.