problem_id (string, length 18-22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, length 13-58) | prompt (string, length 1.1k-10.2k) | golden_diff (string, length 151-4.94k) | verification_info (string, length 582-21k) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
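The header above lists the dataset's columns and value ranges as exported from a dataset viewer; each record below is one pipe-separated row. As an illustration only, here is a minimal sketch for loading and inspecting such a row. It assumes the rows come from the `rasdani/github-patches` dataset on the Hugging Face Hub (the ID is inferred from the `source` column and may differ) and that the `datasets` library is installed:

```python
import json
from datasets import load_dataset

# Dataset ID and split name are assumptions inferred from the rows below;
# adjust them if the actual dataset is published under a different name.
ds = load_dataset("rasdani/github-patches", split="train")
print(ds.column_names)  # expected: problem_id, source, task_type, in_source_id, prompt, ...

row = ds[0]
print(row["problem_id"], row["num_tokens"], row["num_tokens_diff"])

# `verification_info` is itself a JSON string holding the golden diff plus
# the before/after file contents used to check a candidate patch.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```

Parsing the `verification_info` field recovers the same material that is shown inline in each row below: the issue text, the original file contents, the patched file contents, and the golden diff.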
gh_patches_debug_26284 | rasdani/github-patches | git_diff | python-poetry__poetry-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
poetry script: unable to find entry point module, if package resides in src
`poetry script` is unable to find the entry point module in `src`
```
[NameError]
name 'Module' is not defined
script <script-name> [<args>]...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/console/commands/script.py`
Content:
```
1 import sys
2
3 from .venv_command import VenvCommand
4
5
6 class ScriptCommand(VenvCommand):
7 """
8 Executes a script defined in <comment>pyproject.toml</comment>
9
10 script
11 { script-name : The name of the script to execute }
12 { args?* : The command and arguments/options to pass to the script. }
13 """
14
15 def handle(self):
16 script = self.argument('script-name')
17 argv = [script] + self.argument('args')
18
19 scripts = self.poetry.local_config.get('scripts')
20 if not scripts:
21 raise RuntimeError('No scripts defined in pyproject.toml')
22
23 if script not in scripts:
24 raise ValueError('Script {} is not defined'.format(script))
25
26 module, callable_ = scripts[script].split(':')
27
28 cmd = ['python', '-c']
29
30 cmd += [
31 '"import sys; '
32 'from importlib import import_module; '
33 'sys.argv = {!r}; '
34 'import_module(\'{}\').{}()"'.format(
35 argv, module, callable_
36 )
37 ]
38
39 self.venv.run(*cmd, shell=True, call=True)
40
41 def merge_application_definition(self, merge_args=True):
42 if self._application is None \
43 or (self._application_definition_merged
44 and (self._application_definition_merged_with_args or not merge_args)):
45 return
46
47 if merge_args:
48 current_arguments = self._definition.get_arguments()
49 self._definition.set_arguments(self._application.get_definition().get_arguments())
50 self._definition.add_arguments(current_arguments)
51
52 self._application_definition_merged = True
53 if merge_args:
54 self._application_definition_merged_with_args = True
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/console/commands/script.py b/poetry/console/commands/script.py
--- a/poetry/console/commands/script.py
+++ b/poetry/console/commands/script.py
@@ -1,5 +1,6 @@
import sys
+from ...masonry.utils.module import Module
from .venv_command import VenvCommand
@@ -25,19 +26,32 @@
module, callable_ = scripts[script].split(':')
+ src_in_sys_path = 'sys.path.append(\'src\'); '\
+ if self._module.is_in_src() else ''
+
cmd = ['python', '-c']
cmd += [
'"import sys; '
'from importlib import import_module; '
- 'sys.argv = {!r}; '
+ 'sys.argv = {!r}; {}'
'import_module(\'{}\').{}()"'.format(
- argv, module, callable_
+ argv, src_in_sys_path, module, callable_
)
]
self.venv.run(*cmd, shell=True, call=True)
+ @property
+ def _module(self):
+ poetry = self.poetry
+ package = poetry.package
+ path = poetry.file.parent
+ module = Module(
+ package.name, path.as_posix()
+ )
+ return module
+
def merge_application_definition(self, merge_args=True):
if self._application is None \
or (self._application_definition_merged
| {"golden_diff": "diff --git a/poetry/console/commands/script.py b/poetry/console/commands/script.py\n--- a/poetry/console/commands/script.py\n+++ b/poetry/console/commands/script.py\n@@ -1,5 +1,6 @@\n import sys\n \n+from ...masonry.utils.module import Module\n from .venv_command import VenvCommand\n \n \n@@ -25,19 +26,32 @@\n \n module, callable_ = scripts[script].split(':')\n \n+ src_in_sys_path = 'sys.path.append(\\'src\\'); '\\\n+ if self._module.is_in_src() else ''\n+\n cmd = ['python', '-c']\n \n cmd += [\n '\"import sys; '\n 'from importlib import import_module; '\n- 'sys.argv = {!r}; '\n+ 'sys.argv = {!r}; {}'\n 'import_module(\\'{}\\').{}()\"'.format(\n- argv, module, callable_\n+ argv, src_in_sys_path, module, callable_\n )\n ]\n \n self.venv.run(*cmd, shell=True, call=True)\n \n+ @property\n+ def _module(self):\n+ poetry = self.poetry\n+ package = poetry.package\n+ path = poetry.file.parent\n+ module = Module(\n+ package.name, path.as_posix()\n+ )\n+ return module\n+\n def merge_application_definition(self, merge_args=True):\n if self._application is None \\\n or (self._application_definition_merged\n", "issue": "poetry script: unable to find entry point module, if package resides in src\n`poetry script` is unable to find the entry point module in `src`\r\n\r\n```\r\n[NameError]\r\nname 'Module' is not defined\r\nscript <script-name> [<args>]...\r\n```\n", "before_files": [{"content": "import sys\n\nfrom .venv_command import VenvCommand\n\n\nclass ScriptCommand(VenvCommand):\n \"\"\"\n Executes a script defined in <comment>pyproject.toml</comment>\n\n script\n { script-name : The name of the script to execute }\n { args?* : The command and arguments/options to pass to the script. }\n \"\"\"\n\n def handle(self):\n script = self.argument('script-name')\n argv = [script] + self.argument('args')\n\n scripts = self.poetry.local_config.get('scripts')\n if not scripts:\n raise RuntimeError('No scripts defined in pyproject.toml')\n\n if script not in scripts:\n raise ValueError('Script {} is not defined'.format(script))\n\n module, callable_ = scripts[script].split(':')\n\n cmd = ['python', '-c']\n\n cmd += [\n '\"import sys; '\n 'from importlib import import_module; '\n 'sys.argv = {!r}; '\n 'import_module(\\'{}\\').{}()\"'.format(\n argv, module, callable_\n )\n ]\n\n self.venv.run(*cmd, shell=True, call=True)\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None \\\n or (self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(self._application.get_definition().get_arguments())\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/script.py"}], "after_files": [{"content": "import sys\n\nfrom ...masonry.utils.module import Module\nfrom .venv_command import VenvCommand\n\n\nclass ScriptCommand(VenvCommand):\n \"\"\"\n Executes a script defined in <comment>pyproject.toml</comment>\n\n script\n { script-name : The name of the script to execute }\n { args?* : The command and arguments/options to pass to the script. 
}\n \"\"\"\n\n def handle(self):\n script = self.argument('script-name')\n argv = [script] + self.argument('args')\n\n scripts = self.poetry.local_config.get('scripts')\n if not scripts:\n raise RuntimeError('No scripts defined in pyproject.toml')\n\n if script not in scripts:\n raise ValueError('Script {} is not defined'.format(script))\n\n module, callable_ = scripts[script].split(':')\n\n src_in_sys_path = 'sys.path.append(\\'src\\'); '\\\n if self._module.is_in_src() else ''\n\n cmd = ['python', '-c']\n\n cmd += [\n '\"import sys; '\n 'from importlib import import_module; '\n 'sys.argv = {!r}; {}'\n 'import_module(\\'{}\\').{}()\"'.format(\n argv, src_in_sys_path, module, callable_\n )\n ]\n\n self.venv.run(*cmd, shell=True, call=True)\n\n @property\n def _module(self):\n poetry = self.poetry\n package = poetry.package\n path = poetry.file.parent\n module = Module(\n package.name, path.as_posix()\n )\n return module\n\n def merge_application_definition(self, merge_args=True):\n if self._application is None \\\n or (self._application_definition_merged\n and (self._application_definition_merged_with_args or not merge_args)):\n return\n\n if merge_args:\n current_arguments = self._definition.get_arguments()\n self._definition.set_arguments(self._application.get_definition().get_arguments())\n self._definition.add_arguments(current_arguments)\n\n self._application_definition_merged = True\n if merge_args:\n self._application_definition_merged_with_args = True\n", "path": "poetry/console/commands/script.py"}]} | 785 | 330 |
gh_patches_debug_23167 | rasdani/github-patches | git_diff | onnx__sklearn-onnx-59 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MemoryError when trying to convert TfIdf
Hello
Got an exception when trying to export a pipeline with a TfIdf :
Exception "unhandled MemoryError"
cv = CountVectorizer()
tt = TfidfTransformer()
lsvc = LinearSVC(penalty=penalty, dual=False, tol=1e-3)
text_clf = Pipeline([
('vect', cv),
('tfidf', tt),
('clf', lsvc),
])
text_clf.fit(twenty_train.data, twenty_train.target)
print("Converting text_clf to onnx...")
onnx = convert_sklearn(text_clf, target_opset=9, name='DocClassifierCV-Tfidf-LSVC',
initial_types=[('input', StringTensorType())]
)
Exception "unhandled MemoryError"
The stack is:
convert_sklearn()
convert_topology() :
_registration.get_converter(operator.type)(scope, operator, container)
convert_sklearn_tfidf_transformer()
if not isinstance(cst, numpy.ndarray):
cst = numpy.array(cst.todense())
toDense()
return np.asmatrix(self.toarray(order=order, out=out))
_process_toarray_args()
return np.zeros(self.shape, dtype=self.dtype, order=order)
Could make sens : the input sparse matrix is (strangely) 130000 per 130000, pretty big to be densified.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skl2onnx/operator_converters/TfIdfTransformer.py`
Content:
```
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for
4 # license information.
5 # --------------------------------------------------------------------------
6
7 import numpy
8 import numbers
9 import warnings
10 from ..common._registration import register_converter
11 from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity
12 from ..proto import onnx_proto
13
14
15 def convert_sklearn_tfidf_transformer(scope, operator, container):
16 # TODO: use sparse containers when available
17 op = operator.raw_operator
18 data = operator.input_full_names
19 final = operator.output_full_names
20 C = operator.inputs[0].type.shape[1]
21
22 if op.sublinear_tf:
23 # code scikit-learn
24 # np.log(X.data, X.data) --> does not apply on null coefficient
25 # X.data += 1
26 raise RuntimeError("ONNX does not support sparse tensors, sublinear_tf must be False")
27
28 logged = scope.get_unique_variable_name('logged')
29 apply_log(scope, data, logged, container)
30
31 if not op.use_idf and op.norm is None:
32 loggedplus1 = final
33 else:
34 loggedplus1 = scope.get_unique_variable_name('loggedplus1')
35 ones = scope.get_unique_variable_name('ones')
36 cst = numpy.ones((C,), dtype=numpy.float32)
37 container.add_initializer(ones, onnx_proto.TensorProto.FLOAT, [C], cst.flatten())
38 apply_add(scope, [logged, ones], loggedplus1, container, broadcast=1)
39
40 data = [loggedplus1]
41
42 if op.use_idf:
43 # code scikit-learn
44 # X = X * self._idf_diag
45 cst = op._idf_diag.astype(numpy.float32)
46 if not isinstance(cst, numpy.ndarray):
47 cst = numpy.array(cst.todense())
48 if len(cst.shape) > 1:
49 cst = numpy.diag(cst)
50 cst = cst.ravel().flatten()
51 shape = [len(cst)]
52 idfcst = scope.get_unique_variable_name('idfcst')
53 container.add_initializer(idfcst, onnx_proto.TensorProto.FLOAT, shape, cst)
54 idfed = final if op.norm is None else scope.get_unique_variable_name('idfed')
55 apply_mul(scope, data + [idfcst], idfed, container, broadcast=1)
56 data = [idfed]
57
58 if op.norm is not None:
59 op_type = 'Normalizer'
60 norm_map = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}
61 attrs = {'name': scope.get_unique_operator_name(op_type)}
62 if op.norm in norm_map:
63 attrs['norm'] = norm_map[op.norm]
64 else:
65 raise RuntimeError('Invalid norm: %s' % op.norm)
66
67 container.add_node(op_type, data, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)
68 data = None
69
70 if data == operator.input_full_names:
71 # Nothing happened --> identity
72 apply_identity(scope, data, final, container)
73
74
75 register_converter('SklearnTfidfTransformer', convert_sklearn_tfidf_transformer)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skl2onnx/operator_converters/TfIdfTransformer.py b/skl2onnx/operator_converters/TfIdfTransformer.py
--- a/skl2onnx/operator_converters/TfIdfTransformer.py
+++ b/skl2onnx/operator_converters/TfIdfTransformer.py
@@ -4,9 +4,10 @@
# license information.
# --------------------------------------------------------------------------
-import numpy
import numbers
import warnings
+import numpy
+from scipy.sparse import diags
from ..common._registration import register_converter
from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity
from ..proto import onnx_proto
@@ -44,7 +45,11 @@
# X = X * self._idf_diag
cst = op._idf_diag.astype(numpy.float32)
if not isinstance(cst, numpy.ndarray):
- cst = numpy.array(cst.todense())
+ if len(cst.shape) > 1:
+ n = cst.shape[0]
+ cst = numpy.array([cst[i, i] for i in range(n)])
+ else:
+ cst = numpy.array(cst.todense())
if len(cst.shape) > 1:
cst = numpy.diag(cst)
cst = cst.ravel().flatten()
| {"golden_diff": "diff --git a/skl2onnx/operator_converters/TfIdfTransformer.py b/skl2onnx/operator_converters/TfIdfTransformer.py\n--- a/skl2onnx/operator_converters/TfIdfTransformer.py\n+++ b/skl2onnx/operator_converters/TfIdfTransformer.py\n@@ -4,9 +4,10 @@\n # license information.\n # --------------------------------------------------------------------------\n \n-import numpy\n import numbers\n import warnings\n+import numpy\n+from scipy.sparse import diags\n from ..common._registration import register_converter\n from ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity\n from ..proto import onnx_proto\n@@ -44,7 +45,11 @@\n # X = X * self._idf_diag\n cst = op._idf_diag.astype(numpy.float32)\n if not isinstance(cst, numpy.ndarray):\n- cst = numpy.array(cst.todense())\n+ if len(cst.shape) > 1:\n+ n = cst.shape[0]\n+ cst = numpy.array([cst[i, i] for i in range(n)])\n+ else:\n+ cst = numpy.array(cst.todense())\n if len(cst.shape) > 1:\n cst = numpy.diag(cst)\n cst = cst.ravel().flatten()\n", "issue": "MemoryError when trying to convert TfIdf\nHello\r\nGot an exception when trying to export a pipeline with a TfIdf : \r\nException \"unhandled MemoryError\"\r\n cv = CountVectorizer()\r\n tt = TfidfTransformer()\r\n lsvc = LinearSVC(penalty=penalty, dual=False, tol=1e-3)\r\n text_clf = Pipeline([ \r\n ('vect', cv),\r\n ('tfidf', tt),\r\n ('clf', lsvc),\r\n ])\r\n text_clf.fit(twenty_train.data, twenty_train.target) \r\n print(\"Converting text_clf to onnx...\")\r\n onnx = convert_sklearn(text_clf, target_opset=9, name='DocClassifierCV-Tfidf-LSVC', \r\n initial_types=[('input', StringTensorType())]\r\n )\r\n\r\nException \"unhandled MemoryError\"\r\n\r\nThe stack is:\r\n\r\nconvert_sklearn()\r\n\r\nconvert_topology() : \r\n _registration.get_converter(operator.type)(scope, operator, container)\r\n\r\nconvert_sklearn_tfidf_transformer()\r\n if not isinstance(cst, numpy.ndarray):\r\n cst = numpy.array(cst.todense())\r\n\r\ntoDense()\r\n return np.asmatrix(self.toarray(order=order, out=out))\r\n\r\n_process_toarray_args()\r\n return np.zeros(self.shape, dtype=self.dtype, order=order)\r\n\r\nCould make sens : the input sparse matrix is (strangely) 130000 per 130000, pretty big to be densified.\r\n\n", "before_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport numpy\nimport numbers\nimport warnings\nfrom ..common._registration import register_converter\nfrom ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity\nfrom ..proto import onnx_proto\n\n\ndef convert_sklearn_tfidf_transformer(scope, operator, container):\n # TODO: use sparse containers when available\n op = operator.raw_operator\n data = operator.input_full_names\n final = operator.output_full_names\n C = operator.inputs[0].type.shape[1]\n \n if op.sublinear_tf:\n # code scikit-learn\n # np.log(X.data, X.data) --> does not apply on null coefficient\n # X.data += 1\n raise RuntimeError(\"ONNX does not support sparse tensors, sublinear_tf must be False\")\n \n logged = scope.get_unique_variable_name('logged')\n apply_log(scope, data, logged, container)\n \n if not op.use_idf and op.norm is None:\n loggedplus1 = final\n else:\n loggedplus1 = scope.get_unique_variable_name('loggedplus1')\n ones = scope.get_unique_variable_name('ones')\n cst = numpy.ones((C,), dtype=numpy.float32)\n container.add_initializer(ones, onnx_proto.TensorProto.FLOAT, [C], cst.flatten()) \n apply_add(scope, [logged, ones], loggedplus1, container, broadcast=1)\n \n data = [loggedplus1]\n \n if op.use_idf:\n # code scikit-learn\n # X = X * self._idf_diag\n cst = op._idf_diag.astype(numpy.float32)\n if not isinstance(cst, numpy.ndarray):\n cst = numpy.array(cst.todense())\n if len(cst.shape) > 1:\n cst = numpy.diag(cst)\n cst = cst.ravel().flatten()\n shape = [len(cst)]\n idfcst = scope.get_unique_variable_name('idfcst')\n container.add_initializer(idfcst, onnx_proto.TensorProto.FLOAT, shape, cst)\n idfed = final if op.norm is None else scope.get_unique_variable_name('idfed')\n apply_mul(scope, data + [idfcst], idfed, container, broadcast=1)\n data = [idfed]\n\n if op.norm is not None:\n op_type = 'Normalizer'\n norm_map = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}\n attrs = {'name': scope.get_unique_operator_name(op_type)}\n if op.norm in norm_map:\n attrs['norm'] = norm_map[op.norm]\n else:\n raise RuntimeError('Invalid norm: %s' % op.norm)\n\n container.add_node(op_type, data, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)\n data = None\n \n if data == operator.input_full_names:\n # Nothing happened --> identity\n apply_identity(scope, data, final, container)\n\n\nregister_converter('SklearnTfidfTransformer', convert_sklearn_tfidf_transformer)\n", "path": "skl2onnx/operator_converters/TfIdfTransformer.py"}], "after_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport numbers\nimport warnings\nimport numpy\nfrom scipy.sparse import diags\nfrom ..common._registration import register_converter\nfrom ..common._apply_operation import apply_log, apply_add, apply_mul, apply_identity\nfrom ..proto import onnx_proto\n\n\ndef convert_sklearn_tfidf_transformer(scope, operator, container):\n # TODO: use sparse containers when available\n op = operator.raw_operator\n data = operator.input_full_names\n final = operator.output_full_names\n C = operator.inputs[0].type.shape[1]\n \n if op.sublinear_tf:\n # code scikit-learn\n # np.log(X.data, X.data) --> does not apply on null coefficient\n # X.data += 1\n raise RuntimeError(\"ONNX does not support sparse tensors, sublinear_tf must be False\")\n \n logged = scope.get_unique_variable_name('logged')\n apply_log(scope, data, logged, container)\n \n if not op.use_idf and op.norm is None:\n loggedplus1 = final\n else:\n loggedplus1 = scope.get_unique_variable_name('loggedplus1')\n ones = scope.get_unique_variable_name('ones')\n cst = numpy.ones((C,), dtype=numpy.float32)\n container.add_initializer(ones, onnx_proto.TensorProto.FLOAT, [C], cst.flatten()) \n apply_add(scope, [logged, ones], loggedplus1, container, broadcast=1)\n \n data = [loggedplus1]\n \n if op.use_idf:\n # code scikit-learn\n # X = X * self._idf_diag\n cst = op._idf_diag.astype(numpy.float32)\n if not isinstance(cst, numpy.ndarray):\n if len(cst.shape) > 1:\n n = cst.shape[0]\n cst = numpy.array([cst[i, i] for i in range(n)])\n else:\n cst = numpy.array(cst.todense())\n if len(cst.shape) > 1:\n cst = numpy.diag(cst)\n cst = cst.ravel().flatten()\n shape = [len(cst)]\n idfcst = scope.get_unique_variable_name('idfcst')\n container.add_initializer(idfcst, onnx_proto.TensorProto.FLOAT, shape, cst)\n idfed = final if op.norm is None else scope.get_unique_variable_name('idfed')\n apply_mul(scope, data + [idfcst], idfed, container, broadcast=1)\n data = [idfed]\n\n if op.norm is not None:\n op_type = 'Normalizer'\n norm_map = {'max': 'MAX', 'l1': 'L1', 'l2': 'L2'}\n attrs = {'name': scope.get_unique_operator_name(op_type)}\n if op.norm in norm_map:\n attrs['norm'] = norm_map[op.norm]\n else:\n raise RuntimeError('Invalid norm: %s' % op.norm)\n\n container.add_node(op_type, data, operator.output_full_names, op_domain='ai.onnx.ml', **attrs)\n data = None\n \n if data == operator.input_full_names:\n # Nothing happened --> identity\n apply_identity(scope, data, final, container)\n\n\nregister_converter('SklearnTfidfTransformer', convert_sklearn_tfidf_transformer)\n", "path": "skl2onnx/operator_converters/TfIdfTransformer.py"}]} | 1,419 | 285 |
gh_patches_debug_29507 | rasdani/github-patches | git_diff | translate__pootle-6487 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paths dropdown missing some parent dirs
if a dir contains only directories, not any active stores its not shown in menu (on master)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/paths.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import posixpath
10
11 from pootle.core.decorators import persistent_property
12 from pootle.core.delegate import revision
13
14
15 class Paths(object):
16
17 def __init__(self, context, q, show_all=False):
18 self.context = context
19 self.q = q
20 self.show_all = show_all
21
22 @property
23 def rev_cache_key(self):
24 return revision.get(
25 self.context.directory.__class__)(
26 self.context.directory).get(key="stats")
27
28 @property
29 def cache_key(self):
30 return (
31 "%s.%s.%s"
32 % (self.q,
33 self.rev_cache_key,
34 self.show_all))
35
36 @property
37 def store_qs(self):
38 raise NotImplementedError
39
40 @property
41 def stores(self):
42 stores = self.store_qs.exclude(obsolete=True)
43 if not self.show_all:
44 stores = stores.exclude(
45 translation_project__project__disabled=True)
46 return stores.exclude(is_template=True).filter(
47 tp_path__contains=self.q).order_by()
48
49 @persistent_property
50 def paths(self):
51 stores = set(
52 st[1:]
53 for st
54 in self.stores.values_list("tp_path", flat=True))
55 dirs = set(
56 ("%s/" % posixpath.dirname(path))
57 for path
58 in stores
59 if (path.count("/") > 1
60 and self.q in path))
61 return sorted(
62 dirs | stores,
63 key=lambda path: (posixpath.dirname(path), posixpath.basename(path)))
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/core/paths.py b/pootle/core/paths.py
--- a/pootle/core/paths.py
+++ b/pootle/core/paths.py
@@ -6,7 +6,11 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+import pathlib
import posixpath
+from hashlib import md5
+
+from django.utils.encoding import force_bytes
from pootle.core.decorators import persistent_property
from pootle.core.delegate import revision
@@ -29,7 +33,7 @@
def cache_key(self):
return (
"%s.%s.%s"
- % (self.q,
+ % (md5(force_bytes(self.q)).hexdigest(),
self.rev_cache_key,
self.show_all))
@@ -52,12 +56,17 @@
st[1:]
for st
in self.stores.values_list("tp_path", flat=True))
- dirs = set(
- ("%s/" % posixpath.dirname(path))
- for path
- in stores
- if (path.count("/") > 1
- and self.q in path))
+ dirs = set()
+ for store in stores:
+ if posixpath.dirname(store) in dirs:
+ continue
+ dirs = (
+ dirs
+ | (set(
+ "%s/" % str(p)
+ for p
+ in pathlib.PosixPath(store).parents
+ if str(p) != ".")))
return sorted(
dirs | stores,
key=lambda path: (posixpath.dirname(path), posixpath.basename(path)))
| {"golden_diff": "diff --git a/pootle/core/paths.py b/pootle/core/paths.py\n--- a/pootle/core/paths.py\n+++ b/pootle/core/paths.py\n@@ -6,7 +6,11 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+import pathlib\n import posixpath\n+from hashlib import md5\n+\n+from django.utils.encoding import force_bytes\n \n from pootle.core.decorators import persistent_property\n from pootle.core.delegate import revision\n@@ -29,7 +33,7 @@\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n- % (self.q,\n+ % (md5(force_bytes(self.q)).hexdigest(),\n self.rev_cache_key,\n self.show_all))\n \n@@ -52,12 +56,17 @@\n st[1:]\n for st\n in self.stores.values_list(\"tp_path\", flat=True))\n- dirs = set(\n- (\"%s/\" % posixpath.dirname(path))\n- for path\n- in stores\n- if (path.count(\"/\") > 1\n- and self.q in path))\n+ dirs = set()\n+ for store in stores:\n+ if posixpath.dirname(store) in dirs:\n+ continue\n+ dirs = (\n+ dirs\n+ | (set(\n+ \"%s/\" % str(p)\n+ for p\n+ in pathlib.PosixPath(store).parents\n+ if str(p) != \".\")))\n return sorted(\n dirs | stores,\n key=lambda path: (posixpath.dirname(path), posixpath.basename(path)))\n", "issue": "Paths dropdown missing some parent dirs\nif a dir contains only directories, not any active stores its not shown in menu (on master)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport posixpath\n\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.delegate import revision\n\n\nclass Paths(object):\n\n def __init__(self, context, q, show_all=False):\n self.context = context\n self.q = q\n self.show_all = show_all\n\n @property\n def rev_cache_key(self):\n return revision.get(\n self.context.directory.__class__)(\n self.context.directory).get(key=\"stats\")\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (self.q,\n self.rev_cache_key,\n self.show_all))\n\n @property\n def store_qs(self):\n raise NotImplementedError\n\n @property\n def stores(self):\n stores = self.store_qs.exclude(obsolete=True)\n if not self.show_all:\n stores = stores.exclude(\n translation_project__project__disabled=True)\n return stores.exclude(is_template=True).filter(\n tp_path__contains=self.q).order_by()\n\n @persistent_property\n def paths(self):\n stores = set(\n st[1:]\n for st\n in self.stores.values_list(\"tp_path\", flat=True))\n dirs = set(\n (\"%s/\" % posixpath.dirname(path))\n for path\n in stores\n if (path.count(\"/\") > 1\n and self.q in path))\n return sorted(\n dirs | stores,\n key=lambda path: (posixpath.dirname(path), posixpath.basename(path)))\n", "path": "pootle/core/paths.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport pathlib\nimport posixpath\nfrom hashlib import md5\n\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.delegate import revision\n\n\nclass Paths(object):\n\n def __init__(self, context, q, show_all=False):\n self.context = context\n self.q = q\n self.show_all = show_all\n\n @property\n def rev_cache_key(self):\n return revision.get(\n self.context.directory.__class__)(\n self.context.directory).get(key=\"stats\")\n\n @property\n def cache_key(self):\n return (\n \"%s.%s.%s\"\n % (md5(force_bytes(self.q)).hexdigest(),\n self.rev_cache_key,\n self.show_all))\n\n @property\n def store_qs(self):\n raise NotImplementedError\n\n @property\n def stores(self):\n stores = self.store_qs.exclude(obsolete=True)\n if not self.show_all:\n stores = stores.exclude(\n translation_project__project__disabled=True)\n return stores.exclude(is_template=True).filter(\n tp_path__contains=self.q).order_by()\n\n @persistent_property\n def paths(self):\n stores = set(\n st[1:]\n for st\n in self.stores.values_list(\"tp_path\", flat=True))\n dirs = set()\n for store in stores:\n if posixpath.dirname(store) in dirs:\n continue\n dirs = (\n dirs\n | (set(\n \"%s/\" % str(p)\n for p\n in pathlib.PosixPath(store).parents\n if str(p) != \".\")))\n return sorted(\n dirs | stores,\n key=lambda path: (posixpath.dirname(path), posixpath.basename(path)))\n", "path": "pootle/core/paths.py"}]} | 800 | 370 |
gh_patches_debug_843 | rasdani/github-patches | git_diff | obspy__obspy-2148 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FDSN routing client has a locale dependency
There's a dummy call to `time.strptime` in the module init that uses locale-specific formatting, which fails under locales that don't use the same names (ie. "Nov" for the 11th month of the year).
```
>>> import locale
>>> locale.setlocale(locale.LC_TIME, ('zh_CN', 'UTF-8'))
'zh_CN.UTF-8'
>>> from obspy.clients.fdsn.routing.routing_client import RoutingClient
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/__init__.py", line 242, in <module>
from .routing.routing_client import RoutingClient # NOQA
File "/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/routing/__init__.py", line 25, in <module>
time.strptime("30 Nov 00", "%d %b %y")
File "/workspace/anaconda/envs/django/lib/python2.7/_strptime.py", line 478, in _strptime_time
return _strptime(data_string, format)[0]
File "/workspace/anaconda/envs/django/lib/python2.7/_strptime.py", line 332, in _strptime
(data_string, format))
ValueError: time data u'30 Nov 00' does not match format u'%d %b %y'
```
I believe switching this to an ISO8601-like string would be locale-agnostic:
time.strptime("2000/11/30", "%Y/%m/%d")
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/clients/fdsn/routing/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 obspy.clients.fdsn.routing - Routing services for FDSN web services
5 ===================================================================
6
7 :copyright:
8 The ObsPy Development Team ([email protected])
9 Celso G Reyes, 2017
10 IRIS-DMC
11 :license:
12 GNU Lesser General Public License, Version 3
13 (https://www.gnu.org/copyleft/lesser.html)
14 """
15 from __future__ import (absolute_import, division, print_function,
16 unicode_literals)
17 from future.builtins import * # NOQA
18
19
20 # Extremely ugly way to avoid a race condition the first time strptime is
21 # imported which is not thread safe...
22 #
23 # See https://bugs.python.org/issue7980
24 import time
25 time.strptime("30 Nov 00", "%d %b %y")
26
27
28 if __name__ == '__main__': # pragma: no cover
29 import doctest
30 doctest.testmod(exclude_empty=True)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/clients/fdsn/routing/__init__.py b/obspy/clients/fdsn/routing/__init__.py
--- a/obspy/clients/fdsn/routing/__init__.py
+++ b/obspy/clients/fdsn/routing/__init__.py
@@ -22,7 +22,7 @@
#
# See https://bugs.python.org/issue7980
import time
-time.strptime("30 Nov 00", "%d %b %y")
+time.strptime("2000/11/30", "%Y/%m/%d")
if __name__ == '__main__': # pragma: no cover
| {"golden_diff": "diff --git a/obspy/clients/fdsn/routing/__init__.py b/obspy/clients/fdsn/routing/__init__.py\n--- a/obspy/clients/fdsn/routing/__init__.py\n+++ b/obspy/clients/fdsn/routing/__init__.py\n@@ -22,7 +22,7 @@\n #\n # See https://bugs.python.org/issue7980\n import time\n-time.strptime(\"30 Nov 00\", \"%d %b %y\")\n+time.strptime(\"2000/11/30\", \"%Y/%m/%d\")\n \n \n if __name__ == '__main__': # pragma: no cover\n", "issue": "FDSN routing client has a locale dependency\nThere's a dummy call to `time.strptime` in the module init that uses locale-specific formatting, which fails under locales that don't use the same names (ie. \"Nov\" for the 11th month of the year).\r\n\r\n```\r\n>>> import locale\r\n>>> locale.setlocale(locale.LC_TIME, ('zh_CN', 'UTF-8'))\r\n'zh_CN.UTF-8'\r\n>>> from obspy.clients.fdsn.routing.routing_client import RoutingClient\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/__init__.py\", line 242, in <module>\r\n from .routing.routing_client import RoutingClient # NOQA\r\n File \"/workspace/anaconda/envs/django/lib/python2.7/site-packages/obspy/clients/fdsn/routing/__init__.py\", line 25, in <module>\r\n time.strptime(\"30 Nov 00\", \"%d %b %y\")\r\n File \"/workspace/anaconda/envs/django/lib/python2.7/_strptime.py\", line 478, in _strptime_time\r\n return _strptime(data_string, format)[0]\r\n File \"/workspace/anaconda/envs/django/lib/python2.7/_strptime.py\", line 332, in _strptime\r\n (data_string, format))\r\nValueError: time data u'30 Nov 00' does not match format u'%d %b %y'\r\n```\r\n\r\nI believe switching this to an ISO8601-like string would be locale-agnostic:\r\n\r\n time.strptime(\"2000/11/30\", \"%Y/%m/%d\")\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nobspy.clients.fdsn.routing - Routing services for FDSN web services\n===================================================================\n\n:copyright:\n The ObsPy Development Team ([email protected])\n Celso G Reyes, 2017\n IRIS-DMC\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\n\n# Extremely ugly way to avoid a race condition the first time strptime is\n# imported which is not thread safe...\n#\n# See https://bugs.python.org/issue7980\nimport time\ntime.strptime(\"30 Nov 00\", \"%d %b %y\")\n\n\nif __name__ == '__main__': # pragma: no cover\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/clients/fdsn/routing/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nobspy.clients.fdsn.routing - Routing services for FDSN web services\n===================================================================\n\n:copyright:\n The ObsPy Development Team ([email protected])\n Celso G Reyes, 2017\n IRIS-DMC\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\n\n# Extremely ugly way to avoid a race condition the first time strptime is\n# imported which is not thread safe...\n#\n# See https://bugs.python.org/issue7980\nimport time\ntime.strptime(\"2000/11/30\", \"%Y/%m/%d\")\n\n\nif __name__ == 
'__main__': # pragma: no cover\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/clients/fdsn/routing/__init__.py"}]} | 935 | 150 |
gh_patches_debug_8074 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2456 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider academy is broken
During the global build at 2021-05-21-20-28-08, spider **academy** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/academy.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/academy.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/academy.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/academy.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6 from scrapy.utils.gz import gunzip
7
8 from locations.items import GeojsonPointItem
9 from locations.hours import OpeningHours
10
11
12 class AcademySpider(scrapy.Spider):
13 name = "academy"
14 item_attributes = {'brand': 'Academy Sports + Outdoors', 'brand_wikidata': 'Q4671380'}
15 allowed_domains = []
16 start_urls = [
17 'https://www.academy.com/sitemap_store_1.xml.gz',
18 ]
19
20 def parse(self, response):
21 body = gunzip(response.body)
22 body = scrapy.Selector(text=body)
23 body.remove_namespaces()
24 urls = body.xpath('//url/loc/text()').extract()
25 for path in urls:
26 store_url = re.compile(r'http://www.academy.com/shop/storelocator/.+?/.+?/store-\d+')
27 if re.search(store_url, path):
28 yield scrapy.Request(
29 path.strip(),
30 callback=self.parse_store
31 )
32
33 def parse_hours(self, hours):
34 opening_hours = OpeningHours()
35
36 for elem in hours:
37 day, open_time, close_time = re.search(r'([A-Za-z]+)\s([\d:]+)\s-\s([\d:]+)', elem).groups()
38 opening_hours.add_range(day=day[:2], open_time=open_time, close_time=close_time)
39
40 return opening_hours.as_opening_hours()
41
42 def parse_store(self, response):
43 properties = {
44 'ref': re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1),
45 'name': response.xpath('normalize-space(//h1[@itemprop="name"]//text())').extract_first(),
46 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]//text())').extract_first(),
47 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]//text())').extract_first(),
48 'state': response.xpath('normalize-space(//span[@itemprop="addressRegion"]//text())').extract_first(),
49 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]//text())').extract_first(),
50 'phone': response.xpath('//a[@id="storePhone"]/text()').extract_first(),
51 'website': response.url,
52 'lat': float(response.xpath('//input[@id="params"]/@data-lat').extract_first()),
53 'lon': float(response.xpath('//input[@id="params"]/@data-lng').extract_first()),
54 }
55
56 properties['opening_hours'] = self.parse_hours(
57 response.xpath('//*[@itemprop="openingHours"]/@datetime').extract()
58 )
59
60 yield GeojsonPointItem(**properties)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/academy.py b/locations/spiders/academy.py
--- a/locations/spiders/academy.py
+++ b/locations/spiders/academy.py
@@ -23,7 +23,7 @@
body.remove_namespaces()
urls = body.xpath('//url/loc/text()').extract()
for path in urls:
- store_url = re.compile(r'http://www.academy.com/shop/storelocator/.+?/.+?/store-\d+')
+ store_url = re.compile(r'https://www.academy.com/shop/storelocator/.+?/.+?/store-\d+')
if re.search(store_url, path):
yield scrapy.Request(
path.strip(),
| {"golden_diff": "diff --git a/locations/spiders/academy.py b/locations/spiders/academy.py\n--- a/locations/spiders/academy.py\n+++ b/locations/spiders/academy.py\n@@ -23,7 +23,7 @@\n body.remove_namespaces()\n urls = body.xpath('//url/loc/text()').extract()\n for path in urls:\n- store_url = re.compile(r'http://www.academy.com/shop/storelocator/.+?/.+?/store-\\d+')\n+ store_url = re.compile(r'https://www.academy.com/shop/storelocator/.+?/.+?/store-\\d+')\n if re.search(store_url, path):\n yield scrapy.Request(\n path.strip(),\n", "issue": "Spider academy is broken\nDuring the global build at 2021-05-21-20-28-08, spider **academy** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/academy.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/academy.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/academy.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\nfrom scrapy.utils.gz import gunzip\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass AcademySpider(scrapy.Spider):\n name = \"academy\"\n item_attributes = {'brand': 'Academy Sports + Outdoors', 'brand_wikidata': 'Q4671380'}\n allowed_domains = []\n start_urls = [\n 'https://www.academy.com/sitemap_store_1.xml.gz',\n ]\n\n def parse(self, response):\n body = gunzip(response.body)\n body = scrapy.Selector(text=body)\n body.remove_namespaces()\n urls = body.xpath('//url/loc/text()').extract()\n for path in urls:\n store_url = re.compile(r'http://www.academy.com/shop/storelocator/.+?/.+?/store-\\d+')\n if re.search(store_url, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for elem in hours:\n day, open_time, close_time = re.search(r'([A-Za-z]+)\\s([\\d:]+)\\s-\\s([\\d:]+)', elem).groups()\n opening_hours.add_range(day=day[:2], open_time=open_time, close_time=close_time)\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n properties = {\n 'ref': re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1),\n 'name': response.xpath('normalize-space(//h1[@itemprop=\"name\"]//text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]//text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]//text())').extract_first(),\n 'state': response.xpath('normalize-space(//span[@itemprop=\"addressRegion\"]//text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]//text())').extract_first(),\n 'phone': response.xpath('//a[@id=\"storePhone\"]/text()').extract_first(),\n 'website': response.url,\n 'lat': float(response.xpath('//input[@id=\"params\"]/@data-lat').extract_first()),\n 'lon': float(response.xpath('//input[@id=\"params\"]/@data-lng').extract_first()),\n }\n \n properties['opening_hours'] = self.parse_hours(\n response.xpath('//*[@itemprop=\"openingHours\"]/@datetime').extract()\n )\n \n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/academy.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\nfrom scrapy.utils.gz import gunzip\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import 
OpeningHours\n\n\nclass AcademySpider(scrapy.Spider):\n name = \"academy\"\n item_attributes = {'brand': 'Academy Sports + Outdoors', 'brand_wikidata': 'Q4671380'}\n allowed_domains = []\n start_urls = [\n 'https://www.academy.com/sitemap_store_1.xml.gz',\n ]\n\n def parse(self, response):\n body = gunzip(response.body)\n body = scrapy.Selector(text=body)\n body.remove_namespaces()\n urls = body.xpath('//url/loc/text()').extract()\n for path in urls:\n store_url = re.compile(r'https://www.academy.com/shop/storelocator/.+?/.+?/store-\\d+')\n if re.search(store_url, path):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for elem in hours:\n day, open_time, close_time = re.search(r'([A-Za-z]+)\\s([\\d:]+)\\s-\\s([\\d:]+)', elem).groups()\n opening_hours.add_range(day=day[:2], open_time=open_time, close_time=close_time)\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n properties = {\n 'ref': re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1),\n 'name': response.xpath('normalize-space(//h1[@itemprop=\"name\"]//text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]//text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]//text())').extract_first(),\n 'state': response.xpath('normalize-space(//span[@itemprop=\"addressRegion\"]//text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]//text())').extract_first(),\n 'phone': response.xpath('//a[@id=\"storePhone\"]/text()').extract_first(),\n 'website': response.url,\n 'lat': float(response.xpath('//input[@id=\"params\"]/@data-lat').extract_first()),\n 'lon': float(response.xpath('//input[@id=\"params\"]/@data-lng').extract_first()),\n }\n \n properties['opening_hours'] = self.parse_hours(\n response.xpath('//*[@itemprop=\"openingHours\"]/@datetime').extract()\n )\n \n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/academy.py"}]} | 1,158 | 160 |
gh_patches_debug_14582 | rasdani/github-patches | git_diff | liqd__a4-product-139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Translations incomplete
- partner page
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liqd_product/apps/contrib/management/commands/makemessages.py`
Content:
```
1 from os import path
2
3 from django.conf import settings
4 from django.core.management.commands import makemessages
5
6
7 def get_module_dir(name):
8 module = __import__(name)
9 return path.dirname(module.__file__)
10
11
12 class Command(makemessages.Command):
13 msgmerge_options = (
14 makemessages.Command.msgmerge_options + ['--no-fuzzy-matching']
15 )
16
17 def handle(self, *args, **options):
18 if options['domain'] == 'djangojs':
19 if options['extensions'] is None:
20 options['extensions'] = ['js', 'jsx']
21 return super().handle(*args, **options)
22
23 def find_files(self, root):
24 a4js_paths = super().find_files(path.join(
25 settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'
26 ))
27 a4_paths = super().find_files(get_module_dir('adhocracy4'))
28 liqd_product_paths = super().find_files(
29 path.relpath(get_module_dir('liqd_product'))
30 )
31
32 return a4js_paths + a4_paths + liqd_product_paths
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liqd_product/apps/contrib/management/commands/makemessages.py b/liqd_product/apps/contrib/management/commands/makemessages.py
--- a/liqd_product/apps/contrib/management/commands/makemessages.py
+++ b/liqd_product/apps/contrib/management/commands/makemessages.py
@@ -25,8 +25,15 @@
settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'
))
a4_paths = super().find_files(get_module_dir('adhocracy4'))
+ mbjs_paths = super().find_files(path.join(
+ settings.BASE_DIR, 'node_modules', 'a4-meinberlin', 'meinberlin'
+ ))
+ mb_paths = super().find_files(get_module_dir('meinberlin'))
+
liqd_product_paths = super().find_files(
path.relpath(get_module_dir('liqd_product'))
)
- return a4js_paths + a4_paths + liqd_product_paths
+ return a4js_paths + a4_paths + \
+ mbjs_paths + mb_paths + \
+ liqd_product_paths
| {"golden_diff": "diff --git a/liqd_product/apps/contrib/management/commands/makemessages.py b/liqd_product/apps/contrib/management/commands/makemessages.py\n--- a/liqd_product/apps/contrib/management/commands/makemessages.py\n+++ b/liqd_product/apps/contrib/management/commands/makemessages.py\n@@ -25,8 +25,15 @@\n settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'\n ))\n a4_paths = super().find_files(get_module_dir('adhocracy4'))\n+ mbjs_paths = super().find_files(path.join(\n+ settings.BASE_DIR, 'node_modules', 'a4-meinberlin', 'meinberlin'\n+ ))\n+ mb_paths = super().find_files(get_module_dir('meinberlin'))\n+\n liqd_product_paths = super().find_files(\n path.relpath(get_module_dir('liqd_product'))\n )\n \n- return a4js_paths + a4_paths + liqd_product_paths\n+ return a4js_paths + a4_paths + \\\n+ mbjs_paths + mb_paths + \\\n+ liqd_product_paths\n", "issue": "Translations incomplete\n- partner page\n", "before_files": [{"content": "from os import path\n\nfrom django.conf import settings\nfrom django.core.management.commands import makemessages\n\n\ndef get_module_dir(name):\n module = __import__(name)\n return path.dirname(module.__file__)\n\n\nclass Command(makemessages.Command):\n msgmerge_options = (\n makemessages.Command.msgmerge_options + ['--no-fuzzy-matching']\n )\n\n def handle(self, *args, **options):\n if options['domain'] == 'djangojs':\n if options['extensions'] is None:\n options['extensions'] = ['js', 'jsx']\n return super().handle(*args, **options)\n\n def find_files(self, root):\n a4js_paths = super().find_files(path.join(\n settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'\n ))\n a4_paths = super().find_files(get_module_dir('adhocracy4'))\n liqd_product_paths = super().find_files(\n path.relpath(get_module_dir('liqd_product'))\n )\n\n return a4js_paths + a4_paths + liqd_product_paths\n", "path": "liqd_product/apps/contrib/management/commands/makemessages.py"}], "after_files": [{"content": "from os import path\n\nfrom django.conf import settings\nfrom django.core.management.commands import makemessages\n\n\ndef get_module_dir(name):\n module = __import__(name)\n return path.dirname(module.__file__)\n\n\nclass Command(makemessages.Command):\n msgmerge_options = (\n makemessages.Command.msgmerge_options + ['--no-fuzzy-matching']\n )\n\n def handle(self, *args, **options):\n if options['domain'] == 'djangojs':\n if options['extensions'] is None:\n options['extensions'] = ['js', 'jsx']\n return super().handle(*args, **options)\n\n def find_files(self, root):\n a4js_paths = super().find_files(path.join(\n settings.BASE_DIR, 'node_modules', 'adhocracy4', 'adhocracy4'\n ))\n a4_paths = super().find_files(get_module_dir('adhocracy4'))\n mbjs_paths = super().find_files(path.join(\n settings.BASE_DIR, 'node_modules', 'a4-meinberlin', 'meinberlin'\n ))\n mb_paths = super().find_files(get_module_dir('meinberlin'))\n\n liqd_product_paths = super().find_files(\n path.relpath(get_module_dir('liqd_product'))\n )\n\n return a4js_paths + a4_paths + \\\n mbjs_paths + mb_paths + \\\n liqd_product_paths\n", "path": "liqd_product/apps/contrib/management/commands/makemessages.py"}]} | 571 | 255 |
gh_patches_debug_30492 | rasdani/github-patches | git_diff | holoviz__hvplot-1015 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Python 3.11
Python 3.11 provides major performance improvement that I would like to take advantage of when using the HoloViz ecosystem.
Panel and HoloViews already support python 3.11 according to Pypi.
Please support python 3.11. Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import json
2 import os
3 import sys
4 import shutil
5
6 from setuptools import setup, find_packages
7
8 import pyct.build
9
10 def get_setup_version(reponame):
11 """
12 Helper to get the current version from either git describe or the
13 .version file (if available).
14 """
15 basepath = os.path.split(__file__)[0]
16 version_file_path = os.path.join(basepath, reponame, '.version')
17 try:
18 from param import version
19 except:
20 version = None
21 if version is not None:
22 return version.Version.setup_version(basepath, reponame, archive_commit="$Format:%h$")
23 else:
24 print("WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.")
25 return json.load(open(version_file_path))['version_string']
26
27
28 ########## dependencies ##########
29
30 install_requires = [
31 'bokeh >=1.0.0',
32 'colorcet >=2',
33 'holoviews >=1.11.0',
34 'pandas',
35 'numpy>=1.15',
36 'packaging',
37 'panel >=0.11.0',
38 ]
39
40 _examples = [
41 'geoviews >=1.6.0',
42 'numba >=0.51.0',
43 'geopandas',
44 'xarray >=0.18.2',
45 'networkx >=2.6.3',
46 'streamz >=0.3.0',
47 'intake >=0.6.5',
48 'intake-parquet >=0.2.3',
49 'intake-xarray >=0.5.0',
50 'dask >=2021.3.0',
51 'datashader >=0.6.5',
52 'notebook >=5.4',
53 'rasterio',
54 'cartopy',
55 'pyproj',
56 's3fs >=2022.1.0',
57 'scipy >=1.5.3',
58 'pillow >=8.2.0',
59 'selenium >=3.141.0',
60 'spatialpandas >=0.4.3',
61 'scikit-image >=0.17.2',
62 'python-snappy >=0.6.0',
63 'pooch >=1.6.0',
64 'fiona',
65 'rioxarray',
66 # Extra dependency of cartopy on Python 3.6 only
67 'pyepsg',
68 'matplotlib',
69 'plotly',
70 'pygraphviz',
71 'ipykernel <6.18.0' # temporary
72 ]
73
74 extras_require = {
75 'tests': [
76 'codecov',
77 'flake8',
78 'parameterized',
79 'pytest',
80 'pytest-cov',
81 'nbsmoke >=0.2.0',
82 'numpy >=1.7',
83 'matplotlib',
84 'plotly',
85 'xarray',
86 'pooch',
87 'scipy',
88 'ipywidgets',
89 'pre-commit',
90 ],
91 'examples': _examples,
92 'doc': _examples + [
93 'nbsite >=0.7.2rc2',
94 'pydata-sphinx-theme <0.10',
95 'sphinx-copybutton',
96 'sphinx-design',
97 ]
98 }
99
100 # until pyproject.toml/equivalent is widely supported (setup_requires
101 # doesn't work well with pip)
102 extras_require['build'] = [
103 'param >=1.7.0',
104 'pyct >=0.4.4',
105 'setuptools >=30.3.0' # should make this pip now
106 ]
107
108 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
109
110 ########## metadata for setuptools ##########
111
112 setup_args = dict(
113 name='hvplot',
114 version=get_setup_version("hvplot"),
115 description='A high-level plotting API for the PyData ecosystem built on HoloViews.',
116 long_description=open("README.md", mode="r", encoding="utf-8").read(),
117 long_description_content_type="text/markdown",
118 author= "Philipp Rudiger",
119 author_email= "[email protected]",
120 maintainer="HoloViz developers",
121 maintainer_email="[email protected]",
122 packages=find_packages(),
123 include_package_data=True,
124 platforms=['Windows', 'Mac OS X', 'Linux'],
125 license='BSD',
126 url='https://hvplot.pyviz.org',
127 classifiers = [
128 "License :: OSI Approved :: BSD License",
129 "Development Status :: 5 - Production/Stable",
130 "Programming Language :: Python :: 3.6",
131 "Programming Language :: Python :: 3.7",
132 "Programming Language :: Python :: 3.8",
133 "Programming Language :: Python :: 3.9",
134 "Programming Language :: Python :: 3.10",
135 "Operating System :: OS Independent",
136 "Intended Audience :: Science/Research",
137 "Intended Audience :: Developers",
138 "Natural Language :: English",
139 "Topic :: Scientific/Engineering",
140 "Topic :: Software Development :: Libraries"],
141 python_requires=">=3.6",
142 install_requires=install_requires,
143 extras_require=extras_require,
144 tests_require=extras_require['tests'],
145 entry_points={
146 'console_scripts': [],
147 'pandas_plotting_backends': [
148 'holoviews = hvplot:plotting',
149 ],
150 },
151 )
152
153
154 if __name__ == '__main__':
155 example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
156 'hvplot','examples')
157
158 if 'develop' not in sys.argv and 'egg_info' not in sys.argv:
159 pyct.build.examples(example_path, __file__, force=True)
160
161 setup(**setup_args)
162
163 if os.path.isdir(example_path):
164 shutil.rmtree(example_path)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,8 +38,7 @@
 ]
 
 _examples = [
- 'geoviews >=1.6.0',
- 'numba >=0.51.0',
+ 'geoviews >=1.9.0',
'geopandas',
'xarray >=0.18.2',
'networkx >=2.6.3',
@@ -48,7 +47,6 @@
'intake-parquet >=0.2.3',
'intake-xarray >=0.5.0',
'dask >=2021.3.0',
- 'datashader >=0.6.5',
'notebook >=5.4',
'rasterio',
'cartopy',
@@ -57,7 +55,6 @@
'scipy >=1.5.3',
'pillow >=8.2.0',
'selenium >=3.141.0',
- 'spatialpandas >=0.4.3',
'scikit-image >=0.17.2',
'python-snappy >=0.6.0',
'pooch >=1.6.0',
@@ -71,6 +68,14 @@
'ipykernel <6.18.0' # temporary
 ]
 
+# Packages not working on python 3.11 because of numba
+if sys.version_info < (3, 11):
+ _examples += [
+ 'numba >=0.51.0',
+ 'datashader >=0.6.5',
+ 'spatialpandas >=0.4.3',
+ ]
+
extras_require = {
'tests': [
'codecov',
@@ -132,6 +137,7 @@
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
"Operating System :: OS Independent",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,8 +38,7 @@\n ]\n \n _examples = [\n- 'geoviews >=1.6.0',\n- 'numba >=0.51.0',\n+ 'geoviews >=1.9.0',\n 'geopandas',\n 'xarray >=0.18.2',\n 'networkx >=2.6.3',\n@@ -48,7 +47,6 @@\n 'intake-parquet >=0.2.3',\n 'intake-xarray >=0.5.0',\n 'dask >=2021.3.0',\n- 'datashader >=0.6.5',\n 'notebook >=5.4',\n 'rasterio',\n 'cartopy',\n@@ -57,7 +55,6 @@\n 'scipy >=1.5.3',\n 'pillow >=8.2.0',\n 'selenium >=3.141.0',\n- 'spatialpandas >=0.4.3',\n 'scikit-image >=0.17.2',\n 'python-snappy >=0.6.0',\n 'pooch >=1.6.0',\n@@ -71,6 +68,14 @@\n 'ipykernel <6.18.0' # temporary\n ]\n \n+# Packages not working on python 3.11 because of numba\n+if sys.version_info < (3, 11):\n+ _examples += [\n+ 'numba >=0.51.0',\n+ 'datashader >=0.6.5',\n+ 'spatialpandas >=0.4.3',\n+ ]\n+\n extras_require = {\n 'tests': [\n 'codecov',\n@@ -132,6 +137,7 @@\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n+ \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n", "issue": "Support Python 3.11\nPython 3.11 provides major performance improvement that I would like to take advantage of when using the HoloViz ecosystem.\r\n\r\nPanel and HoloViews already support python 3.11 according to Pypi.\r\n\r\nPlease support python 3.11. Thanks.\n", "before_files": [{"content": "import json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except:\n version = None\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. 
If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path))['version_string']\n\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=1.0.0',\n 'colorcet >=2',\n 'holoviews >=1.11.0',\n 'pandas',\n 'numpy>=1.15',\n 'packaging',\n 'panel >=0.11.0',\n]\n\n_examples = [\n 'geoviews >=1.6.0',\n 'numba >=0.51.0',\n 'geopandas',\n 'xarray >=0.18.2',\n 'networkx >=2.6.3',\n 'streamz >=0.3.0',\n 'intake >=0.6.5',\n 'intake-parquet >=0.2.3',\n 'intake-xarray >=0.5.0',\n 'dask >=2021.3.0',\n 'datashader >=0.6.5',\n 'notebook >=5.4',\n 'rasterio',\n 'cartopy',\n 'pyproj',\n 's3fs >=2022.1.0',\n 'scipy >=1.5.3',\n 'pillow >=8.2.0',\n 'selenium >=3.141.0',\n 'spatialpandas >=0.4.3',\n 'scikit-image >=0.17.2',\n 'python-snappy >=0.6.0',\n 'pooch >=1.6.0',\n 'fiona',\n 'rioxarray',\n # Extra dependency of cartopy on Python 3.6 only\n 'pyepsg',\n 'matplotlib',\n 'plotly',\n 'pygraphviz',\n 'ipykernel <6.18.0' # temporary\n]\n\nextras_require = {\n 'tests': [\n 'codecov',\n 'flake8',\n 'parameterized',\n 'pytest',\n 'pytest-cov',\n 'nbsmoke >=0.2.0',\n 'numpy >=1.7',\n 'matplotlib',\n 'plotly',\n 'xarray',\n 'pooch',\n 'scipy',\n 'ipywidgets',\n 'pre-commit',\n ],\n 'examples': _examples,\n 'doc': _examples + [\n 'nbsite >=0.7.2rc2',\n 'pydata-sphinx-theme <0.10',\n 'sphinx-copybutton',\n 'sphinx-design',\n ]\n}\n\n# until pyproject.toml/equivalent is widely supported (setup_requires\n# doesn't work well with pip)\nextras_require['build'] = [\n 'param >=1.7.0',\n 'pyct >=0.4.4',\n 'setuptools >=30.3.0' # should make this pip now\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n########## metadata for setuptools ##########\n\nsetup_args = dict(\n name='hvplot',\n version=get_setup_version(\"hvplot\"),\n description='A high-level plotting API for the PyData ecosystem built on HoloViews.',\n long_description=open(\"README.md\", mode=\"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n author= \"Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer=\"HoloViz developers\",\n maintainer_email=\"[email protected]\",\n packages=find_packages(),\n include_package_data=True,\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='https://hvplot.pyviz.org',\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=3.6\",\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests'],\n entry_points={\n 'console_scripts': [],\n 'pandas_plotting_backends': [\n 'holoviews = hvplot:plotting',\n ],\n },\n)\n\n\nif __name__ == '__main__':\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'hvplot','examples')\n\n if 'develop' not in sys.argv and 'egg_info' not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n 
shutil.rmtree(example_path)\n", "path": "setup.py"}], "after_files": [{"content": "import json\nimport os\nimport sys\nimport shutil\n\nfrom setuptools import setup, find_packages\n\nimport pyct.build\n\ndef get_setup_version(reponame):\n \"\"\"\n Helper to get the current version from either git describe or the\n .version file (if available).\n \"\"\"\n basepath = os.path.split(__file__)[0]\n version_file_path = os.path.join(basepath, reponame, '.version')\n try:\n from param import version\n except:\n version = None\n if version is not None:\n return version.Version.setup_version(basepath, reponame, archive_commit=\"$Format:%h$\")\n else:\n print(\"WARNING: param>=1.6.0 unavailable. If you are installing a package, this warning can safely be ignored. If you are creating a package or otherwise operating in a git repository, you should install param>=1.6.0.\")\n return json.load(open(version_file_path))['version_string']\n\n\n########## dependencies ##########\n\ninstall_requires = [\n 'bokeh >=1.0.0',\n 'colorcet >=2',\n 'holoviews >=1.11.0',\n 'pandas',\n 'numpy>=1.15',\n 'packaging',\n 'panel >=0.11.0',\n]\n\n_examples = [\n 'geoviews >=1.9.0',\n 'geopandas',\n 'xarray >=0.18.2',\n 'networkx >=2.6.3',\n 'streamz >=0.3.0',\n 'intake >=0.6.5',\n 'intake-parquet >=0.2.3',\n 'intake-xarray >=0.5.0',\n 'dask >=2021.3.0',\n 'notebook >=5.4',\n 'rasterio',\n 'cartopy',\n 'pyproj',\n 's3fs >=2022.1.0',\n 'scipy >=1.5.3',\n 'pillow >=8.2.0',\n 'selenium >=3.141.0',\n 'scikit-image >=0.17.2',\n 'python-snappy >=0.6.0',\n 'pooch >=1.6.0',\n 'fiona',\n 'rioxarray',\n # Extra dependency of cartopy on Python 3.6 only\n 'pyepsg',\n 'matplotlib',\n 'plotly',\n 'pygraphviz',\n 'ipykernel <6.18.0' # temporary\n]\n\n# Packages not working on python 3.11 because of numba\nif sys.version_info < (3, 11):\n _examples += [\n 'numba >=0.51.0',\n 'datashader >=0.6.5',\n 'spatialpandas >=0.4.3',\n ]\n\nextras_require = {\n 'tests': [\n 'codecov',\n 'flake8',\n 'parameterized',\n 'pytest',\n 'pytest-cov',\n 'nbsmoke >=0.2.0',\n 'numpy >=1.7',\n 'matplotlib',\n 'plotly',\n 'xarray',\n 'pooch',\n 'scipy',\n 'ipywidgets',\n 'pre-commit',\n ],\n 'examples': _examples,\n 'doc': _examples + [\n 'nbsite >=0.7.2rc2',\n 'pydata-sphinx-theme <0.10',\n 'sphinx-copybutton',\n 'sphinx-design',\n ]\n}\n\n# until pyproject.toml/equivalent is widely supported (setup_requires\n# doesn't work well with pip)\nextras_require['build'] = [\n 'param >=1.7.0',\n 'pyct >=0.4.4',\n 'setuptools >=30.3.0' # should make this pip now\n]\n\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\n########## metadata for setuptools ##########\n\nsetup_args = dict(\n name='hvplot',\n version=get_setup_version(\"hvplot\"),\n description='A high-level plotting API for the PyData ecosystem built on HoloViews.',\n long_description=open(\"README.md\", mode=\"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n author= \"Philipp Rudiger\",\n author_email= \"[email protected]\",\n maintainer=\"HoloViz developers\",\n maintainer_email=\"[email protected]\",\n packages=find_packages(),\n include_package_data=True,\n platforms=['Windows', 'Mac OS X', 'Linux'],\n license='BSD',\n url='https://hvplot.pyviz.org',\n classifiers = [\n \"License :: OSI Approved :: BSD License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n 
\"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development :: Libraries\"],\n python_requires=\">=3.6\",\n install_requires=install_requires,\n extras_require=extras_require,\n tests_require=extras_require['tests'],\n entry_points={\n 'console_scripts': [],\n 'pandas_plotting_backends': [\n 'holoviews = hvplot:plotting',\n ],\n },\n)\n\n\nif __name__ == '__main__':\n example_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'hvplot','examples')\n\n if 'develop' not in sys.argv and 'egg_info' not in sys.argv:\n pyct.build.examples(example_path, __file__, force=True)\n\n setup(**setup_args)\n\n if os.path.isdir(example_path):\n shutil.rmtree(example_path)\n", "path": "setup.py"}]} | 2,007 | 506 |
gh_patches_debug_8000 | rasdani/github-patches | git_diff | arviz-devs__arviz-203 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing the order of plotting and file load seems to cause netcdf errors
For some reason it seems that changing the order of plotting and loading data causes failures, not entirely sure why yet. This is only occurring for me on Ubuntu 16.04 with the latest version of arviz. It does not occur on my osx laptop
Load, Load, Plot, Plot doesn't work. Stack trace attached
[stack_trace.txt](https://github.com/arviz-devs/arviz/files/2349232/stack_trace.txt)
```
import arviz as az
az.style.use('arviz-darkgrid')
non_centered = az.load_arviz_data('non_centered_eight')
centered = az.load_arviz_data('centered_eight')
az.violintraceplot(non_centered, var_names=["mu", "tau"], textsize=8)
az.violintraceplot(centered, var_names=["mu", "tau"], textsize=8)
```
Load, Plot, Load, Plot works
```
import arviz as az
az.style.use('arviz-darkgrid')
non_centered = az.load_arviz_data('non_centered_eight')
az.violintraceplot(non_centered, var_names=["mu", "tau"], textsize=8)
centered = az.load_arviz_data('centered_eight')
az.violintraceplot(centered, var_names=["mu", "tau"], textsize=8)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/inference_data.py`
Content:
```
1 """Data structure for using netcdf groups with xarray."""
2 import netCDF4 as nc
3 import xarray as xr
4
5
6 class InferenceData():
7 """Container for accessing netCDF files using xarray."""
8
9 def __init__(self, *_, **kwargs):
10 """Initialize InferenceData object from keyword xarray datasets.
11
12 Examples
13 --------
14 InferenceData(posterior=posterior, prior=prior)
15
16 Parameters
17 ----------
18 kwargs :
19 Keyword arguments of xarray datasets
20 """
21 self._groups = []
22 for key, dataset in kwargs.items():
23 if dataset is None:
24 continue
25 elif not isinstance(dataset, xr.Dataset):
26 raise ValueError('Arguments to InferenceData must be xarray Datasets '
27 '(argument "{}" was type "{}")'.format(key, type(dataset)))
28 setattr(self, key, dataset)
29 self._groups.append(key)
30
31 def __repr__(self):
32 """Make string representation of object."""
33 return 'Inference data with groups:\n\t> {options}'.format(
34 options='\n\t> '.join(self._groups)
35 )
36
37 @staticmethod
38 def from_netcdf(filename):
39 """Initialize object from a netcdf file.
40
41 Expects that the file will have groups, each of which can be loaded by xarray.
42
43 Parameters
44 ----------
45 filename : str
46 location of netcdf file
47
48 Returns
49 -------
50 InferenceData object
51 """
52 groups = {}
53 for group in nc.Dataset(filename, mode='r').groups:
54 groups[group] = xr.open_dataset(filename, group=group, autoclose=True)
55 return InferenceData(**groups)
56
57 def to_netcdf(self, filename):
58 """Write InferenceData to file using netcdf4.
59
60 Parameters
61 ----------
62 filename : str
63 Location to write to
64
65 Returns
66 -------
67 str
68 Location of netcdf file
69 """
70 mode = 'w' # overwrite first, then append
71 for group in self._groups:
72 data = getattr(self, group)
73 data.to_netcdf(filename, mode=mode, group=group)
74 data.close()
75 mode = 'a'
76 return filename
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/inference_data.py b/arviz/inference_data.py
--- a/arviz/inference_data.py
+++ b/arviz/inference_data.py
@@ -50,8 +50,12 @@
InferenceData object
"""
groups = {}
- for group in nc.Dataset(filename, mode='r').groups:
- groups[group] = xr.open_dataset(filename, group=group, autoclose=True)
+ with nc.Dataset(filename, mode='r') as data:
+ data_groups = list(data.groups)
+
+ for group in data_groups:
+ with xr.open_dataset(filename, group=group) as data:
+ groups[group] = data
         return InferenceData(**groups)
 
     def to_netcdf(self, filename):
| {"golden_diff": "diff --git a/arviz/inference_data.py b/arviz/inference_data.py\n--- a/arviz/inference_data.py\n+++ b/arviz/inference_data.py\n@@ -50,8 +50,12 @@\n InferenceData object\n \"\"\"\n groups = {}\n- for group in nc.Dataset(filename, mode='r').groups:\n- groups[group] = xr.open_dataset(filename, group=group, autoclose=True)\n+ with nc.Dataset(filename, mode='r') as data:\n+ data_groups = list(data.groups)\n+\n+ for group in data_groups:\n+ with xr.open_dataset(filename, group=group) as data:\n+ groups[group] = data\n return InferenceData(**groups)\n \n def to_netcdf(self, filename):\n", "issue": "Changing the order of plotting and file load seems to cause netcdf errors\nFor some reason it seems that changing the order of plotting and loading data causes failures, not entirely sure why yet. This is only occurring for me on Ubuntu 16.04 with the latest version of arviz. It does not occur on my osx laptop\r\n\r\n\r\nLoad, Load, Plot, Plot doesn't work. Stack trace attached\r\n[stack_trace.txt](https://github.com/arviz-devs/arviz/files/2349232/stack_trace.txt)\r\n\r\n```\r\nimport arviz as az\r\naz.style.use('arviz-darkgrid')\r\n\r\nnon_centered = az.load_arviz_data('non_centered_eight')\r\ncentered = az.load_arviz_data('centered_eight')\r\naz.violintraceplot(non_centered, var_names=[\"mu\", \"tau\"], textsize=8)\r\naz.violintraceplot(centered, var_names=[\"mu\", \"tau\"], textsize=8)\r\n```\r\n\r\nLoad, Plot, Load, Plot works\r\n\r\n```\r\nimport arviz as az\r\naz.style.use('arviz-darkgrid')\r\n\r\nnon_centered = az.load_arviz_data('non_centered_eight')\r\naz.violintraceplot(non_centered, var_names=[\"mu\", \"tau\"], textsize=8)\r\n\r\ncentered = az.load_arviz_data('centered_eight')\r\naz.violintraceplot(centered, var_names=[\"mu\", \"tau\"], textsize=8)\r\n```\n", "before_files": [{"content": "\"\"\"Data structure for using netcdf groups with xarray.\"\"\"\nimport netCDF4 as nc\nimport xarray as xr\n\n\nclass InferenceData():\n \"\"\"Container for accessing netCDF files using xarray.\"\"\"\n\n def __init__(self, *_, **kwargs):\n \"\"\"Initialize InferenceData object from keyword xarray datasets.\n\n Examples\n --------\n InferenceData(posterior=posterior, prior=prior)\n\n Parameters\n ----------\n kwargs :\n Keyword arguments of xarray datasets\n \"\"\"\n self._groups = []\n for key, dataset in kwargs.items():\n if dataset is None:\n continue\n elif not isinstance(dataset, xr.Dataset):\n raise ValueError('Arguments to InferenceData must be xarray Datasets '\n '(argument \"{}\" was type \"{}\")'.format(key, type(dataset)))\n setattr(self, key, dataset)\n self._groups.append(key)\n\n def __repr__(self):\n \"\"\"Make string representation of object.\"\"\"\n return 'Inference data with groups:\\n\\t> {options}'.format(\n options='\\n\\t> '.join(self._groups)\n )\n\n @staticmethod\n def from_netcdf(filename):\n \"\"\"Initialize object from a netcdf file.\n\n Expects that the file will have groups, each of which can be loaded by xarray.\n\n Parameters\n ----------\n filename : str\n location of netcdf file\n\n Returns\n -------\n InferenceData object\n \"\"\"\n groups = {}\n for group in nc.Dataset(filename, mode='r').groups:\n groups[group] = xr.open_dataset(filename, group=group, autoclose=True)\n return InferenceData(**groups)\n\n def to_netcdf(self, filename):\n \"\"\"Write InferenceData to file using netcdf4.\n\n Parameters\n ----------\n filename : str\n Location to write to\n\n Returns\n -------\n str\n Location of netcdf file\n \"\"\"\n mode = 'w' # overwrite first, then 
append\n for group in self._groups:\n data = getattr(self, group)\n data.to_netcdf(filename, mode=mode, group=group)\n data.close()\n mode = 'a'\n return filename\n", "path": "arviz/inference_data.py"}], "after_files": [{"content": "\"\"\"Data structure for using netcdf groups with xarray.\"\"\"\nimport netCDF4 as nc\nimport xarray as xr\n\n\nclass InferenceData():\n \"\"\"Container for accessing netCDF files using xarray.\"\"\"\n\n def __init__(self, *_, **kwargs):\n \"\"\"Initialize InferenceData object from keyword xarray datasets.\n\n Examples\n --------\n InferenceData(posterior=posterior, prior=prior)\n\n Parameters\n ----------\n kwargs :\n Keyword arguments of xarray datasets\n \"\"\"\n self._groups = []\n for key, dataset in kwargs.items():\n if dataset is None:\n continue\n elif not isinstance(dataset, xr.Dataset):\n raise ValueError('Arguments to InferenceData must be xarray Datasets '\n '(argument \"{}\" was type \"{}\")'.format(key, type(dataset)))\n setattr(self, key, dataset)\n self._groups.append(key)\n\n def __repr__(self):\n \"\"\"Make string representation of object.\"\"\"\n return 'Inference data with groups:\\n\\t> {options}'.format(\n options='\\n\\t> '.join(self._groups)\n )\n\n @staticmethod\n def from_netcdf(filename):\n \"\"\"Initialize object from a netcdf file.\n\n Expects that the file will have groups, each of which can be loaded by xarray.\n\n Parameters\n ----------\n filename : str\n location of netcdf file\n\n Returns\n -------\n InferenceData object\n \"\"\"\n groups = {}\n with nc.Dataset(filename, mode='r') as data:\n data_groups = list(data.groups)\n\n for group in data_groups:\n with xr.open_dataset(filename, group=group) as data:\n groups[group] = data\n return InferenceData(**groups)\n\n def to_netcdf(self, filename):\n \"\"\"Write InferenceData to file using netcdf4.\n\n Parameters\n ----------\n filename : str\n Location to write to\n\n Returns\n -------\n str\n Location of netcdf file\n \"\"\"\n mode = 'w' # overwrite first, then append\n for group in self._groups:\n data = getattr(self, group)\n data.to_netcdf(filename, mode=mode, group=group)\n data.close()\n mode = 'a'\n return filename\n", "path": "arviz/inference_data.py"}]} | 1,190 | 169 |
gh_patches_debug_1379 | rasdani/github-patches | git_diff | zulip__zulip-29412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Go to newly created stream (with first-time modal)
Even after #29154, users find it hard to navigate to a newly created stream. To address this, we should:
1. Take the user directly to the stream they just created. To avoid a potentially confusing interleaved view, we should go to the most recent topic in the stream (currently "stream events", but might be "general chat" in the future).
2. The first time that a user creates a stream, show an explanatory modal (wording to be finalized when working on the PR):
----
## Stream **#{stream name}** created!
You will now see the stream you created. If you'd like to go back to stream settings, click on the name of the stream at the top of your Zulip window, or use the **back** button in your browser or desktop app.
[Continue]
---
Since we are changing the behavior, it's fine to show this once to existing users.
[CZO thread](https://chat.zulip.org/#narrow/stream/137-feedback/topic/user.20research.3A.20going.20to.20a.20new.20stream/near/1744305)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/hotspots.py`
Content:
```
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from dataclasses import dataclass
4 from typing import Any, Dict, List, Optional, Union
5
6 from django.conf import settings
7 from django.utils.translation import gettext_lazy
8 from django_stubs_ext import StrPromise
9
10 from zerver.models import OnboardingStep, UserProfile
11
12
13 @dataclass
14 class Hotspot:
15 name: str
16 title: Optional[StrPromise]
17 description: Optional[StrPromise]
18 has_trigger: bool = False
19
20 def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:
21 return {
22 "type": "hotspot",
23 "name": self.name,
24 "title": str(self.title),
25 "description": str(self.description),
26 "delay": delay,
27 "has_trigger": self.has_trigger,
28 }
29
30
31 INTRO_HOTSPOTS: List[Hotspot] = [
32 Hotspot(
33 name="intro_streams",
34 title=gettext_lazy("Catch up on a stream"),
35 description=gettext_lazy(
36 "Messages sent to a stream are seen by everyone subscribed "
37 "to that stream. Try clicking on one of the stream links below."
38 ),
39 ),
40 Hotspot(
41 name="intro_topics",
42 title=gettext_lazy("Topics"),
43 description=gettext_lazy(
44 "Every message has a topic. Topics keep conversations "
45 "easy to follow, and make it easy to reply to conversations that start "
46 "while you are offline."
47 ),
48 ),
49 Hotspot(
50 # In theory, this should be renamed to intro_personal, since
51 # it's no longer attached to the gear menu, but renaming these
52 # requires a migration that is not worth doing at this time.
53 name="intro_gear",
54 title=gettext_lazy("Settings"),
55 description=gettext_lazy("Go to Settings to configure your notifications and preferences."),
56 ),
57 Hotspot(
58 name="intro_compose",
59 title=gettext_lazy("Compose"),
60 description=gettext_lazy(
61 "Click here to start a new conversation. Pick a topic "
62 "(2-3 words is best), and give it a go!"
63 ),
64 ),
65 ]
66
67
68 NON_INTRO_HOTSPOTS: List[Hotspot] = []
69
70
71 @dataclass
72 class OneTimeNotice:
73 name: str
74
75 def to_dict(self) -> Dict[str, str]:
76 return {
77 "type": "one_time_notice",
78 "name": self.name,
79 }
80
81
82 ONE_TIME_NOTICES: List[OneTimeNotice] = [
83 OneTimeNotice(
84 name="visibility_policy_banner",
85 ),
86 OneTimeNotice(
87 name="intro_inbox_view_modal",
88 ),
89 OneTimeNotice(
90 name="intro_recent_view_modal",
91 ),
92 ]
93
94 # We would most likely implement new hotspots in the future that aren't
95 # a part of the initial tutorial. To that end, classifying them into
96 # categories which are aggregated in ALL_HOTSPOTS, seems like a good start.
97 ALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]
98 ALL_ONBOARDING_STEPS: List[Union[Hotspot, OneTimeNotice]] = [*ALL_HOTSPOTS, *ONE_TIME_NOTICES]
99
100
101 def get_next_onboarding_steps(user: UserProfile) -> List[Dict[str, Any]]:
102 # For manual testing, it can be convenient to set
103 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
104 # make it easy to click on all of the hotspots.
105 #
106 # Since this is just for development purposes, it's convenient for us to send
107 # all the hotspots rather than any specific category.
108 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
109 return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]
110
111 # If a Zulip server has disabled the tutorial, never send hotspots.
112 if not settings.TUTORIAL_ENABLED:
113 return []
114
115 seen_onboarding_steps = frozenset(
116 OnboardingStep.objects.filter(user=user).values_list("onboarding_step", flat=True)
117 )
118
119 onboarding_steps: List[Dict[str, Any]] = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]
120
121 for one_time_notice in ONE_TIME_NOTICES:
122 if one_time_notice.name in seen_onboarding_steps:
123 continue
124 onboarding_steps.append(one_time_notice.to_dict())
125
126 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
127 return onboarding_steps
128
129 for hotspot in INTRO_HOTSPOTS:
130 if hotspot.name in seen_onboarding_steps:
131 continue
132
133 onboarding_steps.append(hotspot.to_dict(delay=0.5))
134 return onboarding_steps
135
136 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
137 user.save(update_fields=["tutorial_status"])
138 return onboarding_steps
139
140
141 def copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:
142 for userhotspot in frozenset(OnboardingStep.objects.filter(user=source_profile)):
143 OnboardingStep.objects.create(
144 user=target_profile,
145 onboarding_step=userhotspot.onboarding_step,
146 timestamp=userhotspot.timestamp,
147 )
148
149 target_profile.tutorial_status = source_profile.tutorial_status
150 target_profile.onboarding_steps = source_profile.onboarding_steps
151 target_profile.save(update_fields=["tutorial_status", "onboarding_steps"])
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -89,6 +89,9 @@
OneTimeNotice(
name="intro_recent_view_modal",
),
+ OneTimeNotice(
+ name="first_stream_created_banner",
+ ),
 ]
 
 # We would most likely implement new hotspots in the future that aren't
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -89,6 +89,9 @@\n OneTimeNotice(\n name=\"intro_recent_view_modal\",\n ),\n+ OneTimeNotice(\n+ name=\"first_stream_created_banner\",\n+ ),\n ]\n \n # We would most likely implement new hotspots in the future that aren't\n", "issue": "Go to newly created stream (with first-time modal)\nEven after #29154, users find it hard to navigate to a newly created stream. To address this, we should:\r\n\r\n1. Take the user directly to the stream they just created. To avoid a potentially confusing interleaved view, we should go to the most recent topic in the stream (currently \"stream events\", but might be \"general chat\" in the future).\r\n2. The first time that a user creates a stream, show an explanatory modal (wording to be finalized when working on the PR):\r\n\r\n----\r\n\r\n## Stream **#{stream name}** created!\r\n\r\nYou will now see the stream you created. If you'd like to go back to stream settings, click on the name of the stream at the top of your Zulip window, or use the **back** button in your browser or desktop app.\r\n\r\n[Continue]\r\n\r\n---\r\n\r\nSince we are changing the behavior, it's fine to show this once to existing users.\r\n\r\n[CZO thread](https://chat.zulip.org/#narrow/stream/137-feedback/topic/user.20research.3A.20going.20to.20a.20new.20stream/near/1744305)\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import OnboardingStep, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"type\": \"hotspot\",\n \"name\": self.name,\n \"title\": str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n # In theory, this should be renamed to intro_personal, since\n # it's no longer attached to the gear menu, but renaming these\n # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. 
Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n\n@dataclass\nclass OneTimeNotice:\n name: str\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"type\": \"one_time_notice\",\n \"name\": self.name,\n }\n\n\nONE_TIME_NOTICES: List[OneTimeNotice] = [\n OneTimeNotice(\n name=\"visibility_policy_banner\",\n ),\n OneTimeNotice(\n name=\"intro_inbox_view_modal\",\n ),\n OneTimeNotice(\n name=\"intro_recent_view_modal\",\n ),\n]\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\nALL_ONBOARDING_STEPS: List[Union[Hotspot, OneTimeNotice]] = [*ALL_HOTSPOTS, *ONE_TIME_NOTICES]\n\n\ndef get_next_onboarding_steps(user: UserProfile) -> List[Dict[str, Any]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_onboarding_steps = frozenset(\n OnboardingStep.objects.filter(user=user).values_list(\"onboarding_step\", flat=True)\n )\n\n onboarding_steps: List[Dict[str, Any]] = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n for one_time_notice in ONE_TIME_NOTICES:\n if one_time_notice.name in seen_onboarding_steps:\n continue\n onboarding_steps.append(one_time_notice.to_dict())\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return onboarding_steps\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_onboarding_steps:\n continue\n\n onboarding_steps.append(hotspot.to_dict(delay=0.5))\n return onboarding_steps\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return onboarding_steps\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(OnboardingStep.objects.filter(user=source_profile)):\n OnboardingStep.objects.create(\n user=target_profile,\n onboarding_step=userhotspot.onboarding_step,\n timestamp=userhotspot.timestamp,\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}], "after_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy\nfrom django_stubs_ext import StrPromise\n\nfrom zerver.models import OnboardingStep, UserProfile\n\n\n@dataclass\nclass Hotspot:\n name: str\n title: Optional[StrPromise]\n description: Optional[StrPromise]\n has_trigger: bool = False\n\n def to_dict(self, delay: float = 0) -> Dict[str, Union[str, float, bool]]:\n return {\n \"type\": \"hotspot\",\n \"name\": self.name,\n \"title\": 
str(self.title),\n \"description\": str(self.description),\n \"delay\": delay,\n \"has_trigger\": self.has_trigger,\n }\n\n\nINTRO_HOTSPOTS: List[Hotspot] = [\n Hotspot(\n name=\"intro_streams\",\n title=gettext_lazy(\"Catch up on a stream\"),\n description=gettext_lazy(\n \"Messages sent to a stream are seen by everyone subscribed \"\n \"to that stream. Try clicking on one of the stream links below.\"\n ),\n ),\n Hotspot(\n name=\"intro_topics\",\n title=gettext_lazy(\"Topics\"),\n description=gettext_lazy(\n \"Every message has a topic. Topics keep conversations \"\n \"easy to follow, and make it easy to reply to conversations that start \"\n \"while you are offline.\"\n ),\n ),\n Hotspot(\n # In theory, this should be renamed to intro_personal, since\n # it's no longer attached to the gear menu, but renaming these\n # requires a migration that is not worth doing at this time.\n name=\"intro_gear\",\n title=gettext_lazy(\"Settings\"),\n description=gettext_lazy(\"Go to Settings to configure your notifications and preferences.\"),\n ),\n Hotspot(\n name=\"intro_compose\",\n title=gettext_lazy(\"Compose\"),\n description=gettext_lazy(\n \"Click here to start a new conversation. Pick a topic \"\n \"(2-3 words is best), and give it a go!\"\n ),\n ),\n]\n\n\nNON_INTRO_HOTSPOTS: List[Hotspot] = []\n\n\n@dataclass\nclass OneTimeNotice:\n name: str\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"type\": \"one_time_notice\",\n \"name\": self.name,\n }\n\n\nONE_TIME_NOTICES: List[OneTimeNotice] = [\n OneTimeNotice(\n name=\"visibility_policy_banner\",\n ),\n OneTimeNotice(\n name=\"intro_inbox_view_modal\",\n ),\n OneTimeNotice(\n name=\"intro_recent_view_modal\",\n ),\n OneTimeNotice(\n name=\"first_stream_created_banner\",\n ),\n]\n\n# We would most likely implement new hotspots in the future that aren't\n# a part of the initial tutorial. 
To that end, classifying them into\n# categories which are aggregated in ALL_HOTSPOTS, seems like a good start.\nALL_HOTSPOTS = [*INTRO_HOTSPOTS, *NON_INTRO_HOTSPOTS]\nALL_ONBOARDING_STEPS: List[Union[Hotspot, OneTimeNotice]] = [*ALL_HOTSPOTS, *ONE_TIME_NOTICES]\n\n\ndef get_next_onboarding_steps(user: UserProfile) -> List[Dict[str, Any]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots.\n #\n # Since this is just for development purposes, it's convenient for us to send\n # all the hotspots rather than any specific category.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [hotspot.to_dict() for hotspot in ALL_HOTSPOTS]\n\n # If a Zulip server has disabled the tutorial, never send hotspots.\n if not settings.TUTORIAL_ENABLED:\n return []\n\n seen_onboarding_steps = frozenset(\n OnboardingStep.objects.filter(user=user).values_list(\"onboarding_step\", flat=True)\n )\n\n onboarding_steps: List[Dict[str, Any]] = [hotspot.to_dict() for hotspot in NON_INTRO_HOTSPOTS]\n\n for one_time_notice in ONE_TIME_NOTICES:\n if one_time_notice.name in seen_onboarding_steps:\n continue\n onboarding_steps.append(one_time_notice.to_dict())\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return onboarding_steps\n\n for hotspot in INTRO_HOTSPOTS:\n if hotspot.name in seen_onboarding_steps:\n continue\n\n onboarding_steps.append(hotspot.to_dict(delay=0.5))\n return onboarding_steps\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=[\"tutorial_status\"])\n return onboarding_steps\n\n\ndef copy_hotspots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(OnboardingStep.objects.filter(user=source_profile)):\n OnboardingStep.objects.create(\n user=target_profile,\n onboarding_step=userhotspot.onboarding_step,\n timestamp=userhotspot.timestamp,\n )\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=[\"tutorial_status\", \"onboarding_steps\"])\n", "path": "zerver/lib/hotspots.py"}]} | 2,047 | 103 |
gh_patches_debug_12200 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urllib3 v2.0 compatibility
urllib3 v2.0 came out during PyConUS 2023 and is resulting in hanging tests on windows, and test failures on Linux. We need to update our code/tests to ensure compatibility for this new version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/urllib3.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import itertools
32
33 from elasticapm.conf import constants
34 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
35 from elasticapm.traces import DroppedSpan, capture_span, execution_context
36 from elasticapm.utils import default_ports
37 from elasticapm.utils.disttracing import TracingOptions
38
39
40 def _set_disttracing_headers(headers, trace_parent, transaction):
41 trace_parent_str = trace_parent.to_string()
42 headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str
43 if transaction.tracer.config.use_elastic_traceparent_header:
44 headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str
45 if trace_parent.tracestate:
46 headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate
47
48
49 def update_headers(args, kwargs, instance, transaction, trace_parent):
50 """
51 The headers might be in 3 different places: as 4th positional argument, as "headers" keyword argument,
52 or, if none of the former two are provided, as instance variable on the HTTPConnection object.
53
54 If the headers are in the positional arguments tuple, a new tuple with updated headers will be returned.
55 If they are in the keyword arguments or on the instance, an updated kwargs dict will be returned
56
57 :param args: list of positional arguments
58 :param kwargs: dict of keyword arguments
59 :param instance: the HTTPConnection instance
60 :param transaction: the Transaction object
61 :param trace_parent: the TraceParent object
62 :return: an (args, kwargs) tuple
63 """
64 if len(args) >= 4 and args[3]:
65 headers = args[3].copy()
66 args = tuple(itertools.chain((args[:3]), (headers,), args[4:]))
67 elif "headers" in kwargs and kwargs["headers"]:
68 headers = kwargs["headers"].copy()
69 kwargs["headers"] = headers
70 else:
71 headers = instance.headers.copy() if instance.headers else {}
72 # we don't want to change the instance headers, so we'll cheat and
73 # set the headers as keywords. This slightly changes how the wrapped
74 # method is called compared to uninstrumented code.
75 kwargs["headers"] = headers
76 _set_disttracing_headers(headers, trace_parent, transaction)
77 return args, kwargs
78
79
80 class Urllib3Instrumentation(AbstractInstrumentedModule):
81 name = "urllib3"
82
83 instrument_list = [
84 ("urllib3.connectionpool", "HTTPConnectionPool.urlopen"),
85 # packages that vendor or vendored urllib3 in the past
86 ("requests.packages.urllib3.connectionpool", "HTTPConnectionPool.urlopen"),
87 ("botocore.vendored.requests.packages.urllib3.connectionpool", "HTTPConnectionPool.urlopen"),
88 ]
89
90 def call(self, module, method, wrapped, instance, args, kwargs):
91 if "method" in kwargs:
92 method = kwargs["method"]
93 else:
94 method = args[0]
95
96 host = instance.host
97
98 if instance.port != default_ports.get(instance.scheme):
99 host += ":" + str(instance.port)
100
101 if "url" in kwargs:
102 url = kwargs["url"]
103 else:
104 url = args[1]
105
106 signature = method.upper() + " " + host
107
108 if url.startswith("/"):
109 url = "%s://%s%s" % (instance.scheme, host, url)
110
111 transaction = execution_context.get_transaction()
112
113 with capture_span(
114 signature,
115 span_type="external",
116 span_subtype="http",
117 extra={"http": {"url": url}},
118 leaf=True,
119 ) as span:
120 # if urllib3 has been called in a leaf span, this span might be a DroppedSpan.
121 leaf_span = span
122 while isinstance(leaf_span, DroppedSpan):
123 leaf_span = leaf_span.parent
124
125 parent_id = leaf_span.id if leaf_span else transaction.id
126 trace_parent = transaction.trace_parent.copy_from(
127 span_id=parent_id, trace_options=TracingOptions(recorded=True)
128 )
129 args, kwargs = update_headers(args, kwargs, instance, transaction, trace_parent)
130 if leaf_span:
131 leaf_span.dist_tracing_propagated = True
132 response = wrapped(*args, **kwargs)
133 if response:
134 if span.context:
135 span.context["http"]["status_code"] = response.status
136 span.set_success() if response.status < 400 else span.set_failure()
137 return response
138
139 def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):
140 # since we don't have a span, we set the span id to the transaction id
141 trace_parent = transaction.trace_parent.copy_from(
142 span_id=transaction.id, trace_options=TracingOptions(recorded=False)
143 )
144 return update_headers(args, kwargs, instance, transaction, trace_parent)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/urllib3.py b/elasticapm/instrumentation/packages/urllib3.py
--- a/elasticapm/instrumentation/packages/urllib3.py
+++ b/elasticapm/instrumentation/packages/urllib3.py
@@ -61,7 +61,12 @@
:param trace_parent: the TraceParent object
:return: an (args, kwargs) tuple
"""
- if len(args) >= 4 and args[3]:
+ from urllib3._version import __version__ as urllib3_version
+
+ if urllib3_version.startswith("2") and len(args) >= 5 and args[4]:
+ headers = args[4].copy()
+ args = tuple(itertools.chain((args[:4]), (headers,), args[5:]))
+ elif len(args) >= 4 and args[3]:
headers = args[3].copy()
args = tuple(itertools.chain((args[:3]), (headers,), args[4:]))
elif "headers" in kwargs and kwargs["headers"]:
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/urllib3.py b/elasticapm/instrumentation/packages/urllib3.py\n--- a/elasticapm/instrumentation/packages/urllib3.py\n+++ b/elasticapm/instrumentation/packages/urllib3.py\n@@ -61,7 +61,12 @@\n :param trace_parent: the TraceParent object\n :return: an (args, kwargs) tuple\n \"\"\"\n- if len(args) >= 4 and args[3]:\n+ from urllib3._version import __version__ as urllib3_version\n+\n+ if urllib3_version.startswith(\"2\") and len(args) >= 5 and args[4]:\n+ headers = args[4].copy()\n+ args = tuple(itertools.chain((args[:4]), (headers,), args[5:]))\n+ elif len(args) >= 4 and args[3]:\n headers = args[3].copy()\n args = tuple(itertools.chain((args[:3]), (headers,), args[4:]))\n elif \"headers\" in kwargs and kwargs[\"headers\"]:\n", "issue": "urllib3 v2.0 compatibility\nurllib3 v2.0 came out during PyConUS 2023 and is resulting in hanging tests on windows, and test failures on Linux. We need to update our code/tests to ensure compatibility for this new version.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport itertools\n\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils import default_ports\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\ndef _set_disttracing_headers(headers, trace_parent, transaction):\n trace_parent_str = trace_parent.to_string()\n headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str\n if transaction.tracer.config.use_elastic_traceparent_header:\n headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str\n if trace_parent.tracestate:\n headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate\n\n\ndef update_headers(args, kwargs, instance, transaction, trace_parent):\n \"\"\"\n The headers might be in 3 different places: as 4th positional argument, as \"headers\" keyword argument,\n or, if none of the former two are provided, as instance variable on the HTTPConnection object.\n\n If the headers are in the positional arguments tuple, a new tuple with updated headers will be returned.\n If they are in the keyword arguments or on the instance, an updated kwargs dict will be returned\n\n :param args: list of positional arguments\n :param kwargs: dict of keyword arguments\n :param instance: the HTTPConnection instance\n :param transaction: the Transaction object\n :param trace_parent: the TraceParent object\n :return: an (args, kwargs) tuple\n \"\"\"\n if len(args) >= 4 and args[3]:\n headers = args[3].copy()\n args = tuple(itertools.chain((args[:3]), (headers,), args[4:]))\n elif \"headers\" in kwargs and kwargs[\"headers\"]:\n headers = kwargs[\"headers\"].copy()\n kwargs[\"headers\"] = headers\n else:\n headers = instance.headers.copy() if instance.headers else {}\n # we don't want to change the instance headers, so we'll cheat and\n # set the headers as keywords. 
This slightly changes how the wrapped\n # method is called compared to uninstrumented code.\n kwargs[\"headers\"] = headers\n _set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n\n\nclass Urllib3Instrumentation(AbstractInstrumentedModule):\n name = \"urllib3\"\n\n instrument_list = [\n (\"urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n # packages that vendor or vendored urllib3 in the past\n (\"requests.packages.urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n (\"botocore.vendored.requests.packages.urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"method\" in kwargs:\n method = kwargs[\"method\"]\n else:\n method = args[0]\n\n host = instance.host\n\n if instance.port != default_ports.get(instance.scheme):\n host += \":\" + str(instance.port)\n\n if \"url\" in kwargs:\n url = kwargs[\"url\"]\n else:\n url = args[1]\n\n signature = method.upper() + \" \" + host\n\n if url.startswith(\"/\"):\n url = \"%s://%s%s\" % (instance.scheme, host, url)\n\n transaction = execution_context.get_transaction()\n\n with capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n # if urllib3 has been called in a leaf span, this span might be a DroppedSpan.\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n args, kwargs = update_headers(args, kwargs, instance, transaction, trace_parent)\n if leaf_span:\n leaf_span.dist_tracing_propagated = True\n response = wrapped(*args, **kwargs)\n if response:\n if span.context:\n span.context[\"http\"][\"status_code\"] = response.status\n span.set_success() if response.status < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n return update_headers(args, kwargs, instance, transaction, trace_parent)\n", "path": "elasticapm/instrumentation/packages/urllib3.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport itertools\n\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils import default_ports\nfrom elasticapm.utils.disttracing import TracingOptions\n\n\ndef _set_disttracing_headers(headers, trace_parent, transaction):\n trace_parent_str = trace_parent.to_string()\n headers[constants.TRACEPARENT_HEADER_NAME] = trace_parent_str\n if transaction.tracer.config.use_elastic_traceparent_header:\n headers[constants.TRACEPARENT_LEGACY_HEADER_NAME] = trace_parent_str\n if trace_parent.tracestate:\n headers[constants.TRACESTATE_HEADER_NAME] = trace_parent.tracestate\n\n\ndef update_headers(args, kwargs, instance, transaction, trace_parent):\n \"\"\"\n The headers might be in 3 different places: as 4th positional argument, as \"headers\" keyword argument,\n or, if none of the former two are provided, as instance variable on the HTTPConnection object.\n\n If the headers are in the positional arguments tuple, a new tuple with updated headers will be returned.\n If they are in the keyword arguments or on the instance, an updated kwargs dict will be returned\n\n :param args: list of positional arguments\n :param kwargs: dict of keyword arguments\n :param instance: the HTTPConnection instance\n :param transaction: the Transaction object\n :param trace_parent: the TraceParent object\n :return: an (args, kwargs) tuple\n \"\"\"\n from urllib3._version import __version__ as urllib3_version\n\n if urllib3_version.startswith(\"2\") and len(args) >= 5 and args[4]:\n headers = args[4].copy()\n args = tuple(itertools.chain((args[:4]), (headers,), args[5:]))\n elif len(args) >= 4 and args[3]:\n headers = args[3].copy()\n args = tuple(itertools.chain((args[:3]), (headers,), args[4:]))\n elif \"headers\" in kwargs and kwargs[\"headers\"]:\n headers = kwargs[\"headers\"].copy()\n kwargs[\"headers\"] = headers\n else:\n headers = instance.headers.copy() if instance.headers else {}\n # we don't want to change the instance headers, so we'll cheat and\n # set the headers as keywords. 
This slightly changes how the wrapped\n # method is called compared to uninstrumented code.\n kwargs[\"headers\"] = headers\n _set_disttracing_headers(headers, trace_parent, transaction)\n return args, kwargs\n\n\nclass Urllib3Instrumentation(AbstractInstrumentedModule):\n name = \"urllib3\"\n\n instrument_list = [\n (\"urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n # packages that vendor or vendored urllib3 in the past\n (\"requests.packages.urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n (\"botocore.vendored.requests.packages.urllib3.connectionpool\", \"HTTPConnectionPool.urlopen\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"method\" in kwargs:\n method = kwargs[\"method\"]\n else:\n method = args[0]\n\n host = instance.host\n\n if instance.port != default_ports.get(instance.scheme):\n host += \":\" + str(instance.port)\n\n if \"url\" in kwargs:\n url = kwargs[\"url\"]\n else:\n url = args[1]\n\n signature = method.upper() + \" \" + host\n\n if url.startswith(\"/\"):\n url = \"%s://%s%s\" % (instance.scheme, host, url)\n\n transaction = execution_context.get_transaction()\n\n with capture_span(\n signature,\n span_type=\"external\",\n span_subtype=\"http\",\n extra={\"http\": {\"url\": url}},\n leaf=True,\n ) as span:\n # if urllib3 has been called in a leaf span, this span might be a DroppedSpan.\n leaf_span = span\n while isinstance(leaf_span, DroppedSpan):\n leaf_span = leaf_span.parent\n\n parent_id = leaf_span.id if leaf_span else transaction.id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=parent_id, trace_options=TracingOptions(recorded=True)\n )\n args, kwargs = update_headers(args, kwargs, instance, transaction, trace_parent)\n if leaf_span:\n leaf_span.dist_tracing_propagated = True\n response = wrapped(*args, **kwargs)\n if response:\n if span.context:\n span.context[\"http\"][\"status_code\"] = response.status\n span.set_success() if response.status < 400 else span.set_failure()\n return response\n\n def mutate_unsampled_call_args(self, module, method, wrapped, instance, args, kwargs, transaction):\n # since we don't have a span, we set the span id to the transaction id\n trace_parent = transaction.trace_parent.copy_from(\n span_id=transaction.id, trace_options=TracingOptions(recorded=False)\n )\n return update_headers(args, kwargs, instance, transaction, trace_parent)\n", "path": "elasticapm/instrumentation/packages/urllib3.py"}]} | 2,033 | 241 |
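A minimal sketch of the version-gated header lookup from the patch in this record: on urllib3 2.x the copied headers sit at positional index 4 of the instrumented call, on 1.x at index 3. The index values come from the golden diff above; the function name and scaffolding are illustrative only.

```python
import itertools

def locate_headers(args, kwargs, urllib3_version):
    """Return (headers, args, kwargs) with the headers dict copied out."""
    if urllib3_version.startswith("2") and len(args) >= 5 and args[4]:
        headers = args[4].copy()
        args = tuple(itertools.chain(args[:4], (headers,), args[5:]))
    elif len(args) >= 4 and args[3]:
        headers = args[3].copy()
        args = tuple(itertools.chain(args[:3], (headers,), args[4:]))
    else:
        headers = dict(kwargs.get("headers") or {})
        kwargs["headers"] = headers
    return headers, args, kwargs
```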
gh_patches_debug_6914 | rasdani/github-patches | git_diff | saulpw__visidata-1717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create New Table from Clipboard
It would be nice to have a command which can create a new table directly from the clipboard.
For example, suppose I have tab separated data in my clipboard.
I would like to be able to execute a visidata command to load a table from the clipboard.
The command might ask me what filetype I am loading (tsv in this case).
Then it would load the data directly from the clipboard.
This would be particularly handy for quick file format conversions. e.g. if I copy a table from OneNote and want to convert it to a Markdown table, I could just import it into Visidata form the clipboard, and copy it back to the clipboard as a markdown table.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/features/open_syspaste.py`
Content:
```
1 from visidata import vd, BaseSheet, Path
2
3
4 @BaseSheet.api
5 def open_syspaste(sheet, filetype='tsv'):
6 import io
7
8 v = vd.sysclipValue().strip() or vd.fail('nothing to open')
9
10 p = Path('syspaste'+'.'+filetype, fp=io.BytesIO(v.encode('utf-8')))
11 return vd.openSource(p, filetype=filetype)
12
13
14 BaseSheet.addCommand('', 'open-syspaste', 'vd.push(open_syspaste())', 'open clipboard as tsv')
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/features/open_syspaste.py b/visidata/features/open_syspaste.py
--- a/visidata/features/open_syspaste.py
+++ b/visidata/features/open_syspaste.py
@@ -1,3 +1,7 @@
+'''
+Load new table from system clipboard
+'''
+
from visidata import vd, BaseSheet, Path
@@ -11,4 +15,4 @@
return vd.openSource(p, filetype=filetype)
-BaseSheet.addCommand('', 'open-syspaste', 'vd.push(open_syspaste())', 'open clipboard as tsv')
+BaseSheet.addCommand('gShift+P', 'open-syspaste', 'vd.push(open_syspaste(filetype=vd.input("paste as filetype: ", value="tsv")))', 'open clipboard as filetype')
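For context, the fixed command still funnels the pasted text through `vd.openSource` with a filetype-suffixed in-memory file; the standalone sketch below mimics only the parsing side without any visidata APIs. The helper name and the csv-based handling are illustrative assumptions, not visidata code.

```python
# Standalone illustration (no visidata APIs): parse clipboard-style text as the
# chosen filetype. Only "tsv" is handled; the helper name is made up.
import csv
import io

def rows_from_clipboard_text(text, filetype="tsv"):
    if filetype != "tsv":
        raise NotImplementedError("sketch only handles tsv")
    return list(csv.reader(io.StringIO(text.strip()), delimiter="\t"))

print(rows_from_clipboard_text("a\tb\n1\t2"))  # [['a', 'b'], ['1', '2']]
```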
| {"golden_diff": "diff --git a/visidata/features/open_syspaste.py b/visidata/features/open_syspaste.py\n--- a/visidata/features/open_syspaste.py\n+++ b/visidata/features/open_syspaste.py\n@@ -1,3 +1,7 @@\n+'''\n+Load new table from system clipboard\n+'''\n+\n from visidata import vd, BaseSheet, Path\n \n \n@@ -11,4 +15,4 @@\n return vd.openSource(p, filetype=filetype)\n \n \n-BaseSheet.addCommand('', 'open-syspaste', 'vd.push(open_syspaste())', 'open clipboard as tsv')\n+BaseSheet.addCommand('gShift+P', 'open-syspaste', 'vd.push(open_syspaste(filetype=vd.input(\"paste as filetype: \", value=\"tsv\")))', 'open clipboard as filetype')\n", "issue": "Create New Table from Clipboard\nIt would be nice to have a command which can create a new table directly from the clipboard.\r\n\r\nFor example, suppose I have tab separated data in my clipboard.\r\nI would like to be able to execute a visidata command to load a table from the clipboard.\r\nThe command might ask me what filetype I am loading (tsv in this case).\r\nThen it would load the data directly from the clipboard.\r\n\r\nThis would be particularly handy for quick file format conversions. e.g. if I copy a table from OneNote and want to convert it to a Markdown table, I could just import it into Visidata form the clipboard, and copy it back to the clipboard as a markdown table.\n", "before_files": [{"content": "from visidata import vd, BaseSheet, Path\n\n\[email protected]\ndef open_syspaste(sheet, filetype='tsv'):\n import io\n\n v = vd.sysclipValue().strip() or vd.fail('nothing to open')\n\n p = Path('syspaste'+'.'+filetype, fp=io.BytesIO(v.encode('utf-8')))\n return vd.openSource(p, filetype=filetype)\n\n\nBaseSheet.addCommand('', 'open-syspaste', 'vd.push(open_syspaste())', 'open clipboard as tsv')\n", "path": "visidata/features/open_syspaste.py"}], "after_files": [{"content": "'''\nLoad new table from system clipboard\n'''\n\nfrom visidata import vd, BaseSheet, Path\n\n\[email protected]\ndef open_syspaste(sheet, filetype='tsv'):\n import io\n\n v = vd.sysclipValue().strip() or vd.fail('nothing to open')\n\n p = Path('syspaste'+'.'+filetype, fp=io.BytesIO(v.encode('utf-8')))\n return vd.openSource(p, filetype=filetype)\n\n\nBaseSheet.addCommand('gShift+P', 'open-syspaste', 'vd.push(open_syspaste(filetype=vd.input(\"paste as filetype: \", value=\"tsv\")))', 'open clipboard as filetype')\n", "path": "visidata/features/open_syspaste.py"}]} | 542 | 176 |
gh_patches_debug_9866 | rasdani/github-patches | git_diff | microsoft__ptvsd-882 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyfile adds an additional new line in every line of code
## Environment data
- PTVSD version: master
- OS and version: any
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio:
## Actual behavior
code:
```python
@pyfile
def foo():
print('one')
print('two')
```
The file generated by pyfile, `foo.py`:
```python
print('one')
print('two')
```
## Expected behavior
The file generated by pyfile, `foo.py`:
```python
print('one')
print('two')
```
Having the extra blank lines makes it confusing to set breakpoints in tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytests/conftest.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from __future__ import print_function, with_statement, absolute_import
6
7 import inspect
8 import pytest
9 import threading
10 import types
11
12 from .helpers.session import DebugSession
13
14
15 @pytest.fixture
16 def daemon():
17 """Provides a factory function for daemon threads. The returned thread is
18 started immediately, and it must not be alive by the time the test returns.
19 """
20
21 daemons = []
22
23 def factory(func, name_suffix=''):
24 name = func.__name__ + name_suffix
25 thread = threading.Thread(target=func, name=name)
26 thread.daemon = True
27 daemons.append(thread)
28 thread.start()
29 return thread
30
31 yield factory
32
33 for thread in daemons:
34 assert not thread.is_alive()
35
36
37 @pytest.fixture
38 def pyfile(request, tmpdir):
39 """A fixture providing a factory function that generates .py files.
40
41 The returned factory takes a single function with an empty argument list,
42 generates a temporary file that contains the code corresponding to the
43 function body, and returns the full path to the generated file. Idiomatic
44 use is as a decorator, e.g.:
45
46 @pyfile
47 def script_file():
48 print('fizz')
49 print('buzz')
50
51 will produce a temporary file named script_file.py containing:
52
53 print('fizz')
54 print('buzz')
55
56 and the variable script_file will contain the path to that file.
57
58 In order for the factory to be able to extract the function body properly,
59 function header ("def") must all be on a single line, with nothing after
60 the colon but whitespace.
61 """
62
63 def factory(source):
64 assert isinstance(source, types.FunctionType)
65 name = source.__name__
66 source, _ = inspect.getsourcelines(source)
67
68 # First, find the "def" line.
69 def_lineno = 0
70 for line in source:
71 line = line.strip()
72 if line.startswith('def') and line.endswith(':'):
73 break
74 def_lineno += 1
75 else:
76 raise ValueError('Failed to locate function header.')
77
78 # Remove everything up to and including "def".
79 source = source[def_lineno + 1:]
80 assert source
81
82 # Now we need to adjust indentation. Compute how much the first line of
83 # the body is indented by, then dedent all lines by that amount.
84 line = source[0]
85 indent = len(line) - len(line.lstrip())
86 source = [line[indent:] for line in source]
87 source = '\n'.join(source)
88
89 tmpfile = tmpdir.join(name + '.py')
90 assert not tmpfile.check()
91 tmpfile.write(source)
92 return tmpfile.strpath
93
94 return factory
95
96
97 @pytest.fixture(params=[
98 'launch', 'attach_socket' # 'attach_pid'
99 ])
100 def debug_session(request):
101 session = DebugSession(request.param)
102 yield session
103 try:
104 session.wait_for_exit()
105 finally:
106 session.stop()
107
108
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytests/conftest.py b/pytests/conftest.py
--- a/pytests/conftest.py
+++ b/pytests/conftest.py
@@ -84,7 +84,7 @@
line = source[0]
indent = len(line) - len(line.lstrip())
source = [line[indent:] for line in source]
- source = '\n'.join(source)
+ source = ''.join(source)
tmpfile = tmpdir.join(name + '.py')
assert not tmpfile.check()
@@ -104,5 +104,3 @@
session.wait_for_exit()
finally:
session.stop()
-
-
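The one-character change above works because `inspect.getsourcelines()` returns lines that still carry their trailing newline, so joining them with `'\n'` doubles every line break. A minimal, self-contained check (the sample function is invented; everything else is standard library):

```python
# Demonstrates why ''.join is the right way to reassemble source lines.
import inspect

def sample():
    print('one')
    print('two')

lines, _ = inspect.getsourcelines(sample)
body = lines[1:]              # drop the "def" header, as the fixture does
print(repr('\n'.join(body)))  # "    print('one')\n\n    print('two')\n" -> extra blank line
print(repr(''.join(body)))    # "    print('one')\n    print('two')\n"  -> as written
```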
| {"golden_diff": "diff --git a/pytests/conftest.py b/pytests/conftest.py\n--- a/pytests/conftest.py\n+++ b/pytests/conftest.py\n@@ -84,7 +84,7 @@\n line = source[0]\n indent = len(line) - len(line.lstrip())\n source = [line[indent:] for line in source]\n- source = '\\n'.join(source)\n+ source = ''.join(source)\n \n tmpfile = tmpdir.join(name + '.py')\n assert not tmpfile.check()\n@@ -104,5 +104,3 @@\n session.wait_for_exit()\n finally:\n session.stop()\n-\n-\n", "issue": "pyfile adds an additional new line in every line of code\n## Environment data\r\n\r\n- PTVSD version: master\r\n- OS and version: any\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio:\r\n\r\n## Actual behavior\r\n\r\ncode:\r\n```python\r\n@pyfile\r\ndef foo():\r\n print('one')\r\n print('two')\r\n```\r\nThe file generated by pyfile, `foo.py`:\r\n```python\r\nprint('one')\r\n\r\nprint('two')\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe file generated by pyfile, `foo.py`:\r\n```python\r\nprint('one')\r\nprint('two')\r\n```\r\n\r\nHaving the extra blank lines makes it confusing to set breakpoints in tests.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nimport inspect\nimport pytest\nimport threading\nimport types\n\nfrom .helpers.session import DebugSession\n\n\[email protected]\ndef daemon():\n \"\"\"Provides a factory function for daemon threads. The returned thread is\n started immediately, and it must not be alive by the time the test returns.\n \"\"\"\n\n daemons = []\n\n def factory(func, name_suffix=''):\n name = func.__name__ + name_suffix\n thread = threading.Thread(target=func, name=name)\n thread.daemon = True\n daemons.append(thread)\n thread.start()\n return thread\n\n yield factory\n\n for thread in daemons:\n assert not thread.is_alive()\n\n\[email protected]\ndef pyfile(request, tmpdir):\n \"\"\"A fixture providing a factory function that generates .py files.\n\n The returned factory takes a single function with an empty argument list,\n generates a temporary file that contains the code corresponding to the\n function body, and returns the full path to the generated file. Idiomatic\n use is as a decorator, e.g.:\n\n @pyfile\n def script_file():\n print('fizz')\n print('buzz')\n\n will produce a temporary file named script_file.py containing:\n\n print('fizz')\n print('buzz')\n\n and the variable script_file will contain the path to that file.\n\n In order for the factory to be able to extract the function body properly,\n function header (\"def\") must all be on a single line, with nothing after\n the colon but whitespace.\n \"\"\"\n\n def factory(source):\n assert isinstance(source, types.FunctionType)\n name = source.__name__\n source, _ = inspect.getsourcelines(source)\n\n # First, find the \"def\" line.\n def_lineno = 0\n for line in source:\n line = line.strip()\n if line.startswith('def') and line.endswith(':'):\n break\n def_lineno += 1\n else:\n raise ValueError('Failed to locate function header.')\n\n # Remove everything up to and including \"def\".\n source = source[def_lineno + 1:]\n assert source\n\n # Now we need to adjust indentation. 
Compute how much the first line of\n # the body is indented by, then dedent all lines by that amount.\n line = source[0]\n indent = len(line) - len(line.lstrip())\n source = [line[indent:] for line in source]\n source = '\\n'.join(source)\n\n tmpfile = tmpdir.join(name + '.py')\n assert not tmpfile.check()\n tmpfile.write(source)\n return tmpfile.strpath\n\n return factory\n\n\[email protected](params=[\n 'launch', 'attach_socket' # 'attach_pid'\n])\ndef debug_session(request):\n session = DebugSession(request.param)\n yield session\n try:\n session.wait_for_exit()\n finally:\n session.stop()\n\n\n", "path": "pytests/conftest.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import print_function, with_statement, absolute_import\n\nimport inspect\nimport pytest\nimport threading\nimport types\n\nfrom .helpers.session import DebugSession\n\n\[email protected]\ndef daemon():\n \"\"\"Provides a factory function for daemon threads. The returned thread is\n started immediately, and it must not be alive by the time the test returns.\n \"\"\"\n\n daemons = []\n\n def factory(func, name_suffix=''):\n name = func.__name__ + name_suffix\n thread = threading.Thread(target=func, name=name)\n thread.daemon = True\n daemons.append(thread)\n thread.start()\n return thread\n\n yield factory\n\n for thread in daemons:\n assert not thread.is_alive()\n\n\[email protected]\ndef pyfile(request, tmpdir):\n \"\"\"A fixture providing a factory function that generates .py files.\n\n The returned factory takes a single function with an empty argument list,\n generates a temporary file that contains the code corresponding to the\n function body, and returns the full path to the generated file. Idiomatic\n use is as a decorator, e.g.:\n\n @pyfile\n def script_file():\n print('fizz')\n print('buzz')\n\n will produce a temporary file named script_file.py containing:\n\n print('fizz')\n print('buzz')\n\n and the variable script_file will contain the path to that file.\n\n In order for the factory to be able to extract the function body properly,\n function header (\"def\") must all be on a single line, with nothing after\n the colon but whitespace.\n \"\"\"\n\n def factory(source):\n assert isinstance(source, types.FunctionType)\n name = source.__name__\n source, _ = inspect.getsourcelines(source)\n\n # First, find the \"def\" line.\n def_lineno = 0\n for line in source:\n line = line.strip()\n if line.startswith('def') and line.endswith(':'):\n break\n def_lineno += 1\n else:\n raise ValueError('Failed to locate function header.')\n\n # Remove everything up to and including \"def\".\n source = source[def_lineno + 1:]\n assert source\n\n # Now we need to adjust indentation. Compute how much the first line of\n # the body is indented by, then dedent all lines by that amount.\n line = source[0]\n indent = len(line) - len(line.lstrip())\n source = [line[indent:] for line in source]\n source = ''.join(source)\n\n tmpfile = tmpdir.join(name + '.py')\n assert not tmpfile.check()\n tmpfile.write(source)\n return tmpfile.strpath\n\n return factory\n\n\[email protected](params=[\n 'launch', 'attach_socket' # 'attach_pid'\n])\ndef debug_session(request):\n session = DebugSession(request.param)\n yield session\n try:\n session.wait_for_exit()\n finally:\n session.stop()\n", "path": "pytests/conftest.py"}]} | 1,317 | 147 |
gh_patches_debug_8312 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-672 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: The location specified in bundle-manifest (manifest.yml) are invalid
### Describe the bug
A build manifest is bundled inside the OpenSearch bundle tarball (manifest.yml) which contains all the information about the components used to build the bundle. The bundle manifest contains a key `location` whose value is invalid for all components.
```
build:
architecture: x64
id: '317'
location: https://ci.opensearch.org/ci/bundles/1.1.0/317/opensearch-1.1.0-linux-x64.tar.gz
name: OpenSearch
version: 1.1.0
components:
- commit_id: 15e9f137622d878b79103df8f82d78d782b686a1
location: https://ci.opensearch.org/ci/builds/1.1.0/317/bundle/opensearch-min-1.1.0-linux-x64.tar.gz
name: OpenSearch
ref: '1.1'
repository: https://github.com/opensearch-project/OpenSearch.git
```
### To reproduce
Download the bundle, untar it, and inspect the bundled manifest.yml.
Try accessing a `location` URL to download an individual component; it returns an `Access Denied` error.
### Expected behavior
The URLs should be valid. Each component should be downloadable from the given location url
### Screenshots
_No response_
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/src/assemble_workflow/bundle_recorder.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8 from urllib.parse import urljoin
9
10 from manifests.bundle_manifest import BundleManifest
11
12
13 class BundleRecorder:
14 def __init__(self, build, output_dir, artifacts_dir):
15 self.output_dir = output_dir
16 self.build_id = build.id
17 self.public_url = os.getenv("PUBLIC_ARTIFACT_URL", None)
18 self.version = build.version
19 self.tar_name = self.__get_tar_name(build)
20 self.artifacts_dir = artifacts_dir
21 self.bundle_manifest = self.BundleManifestBuilder(
22 build.id,
23 build.name,
24 build.version,
25 build.architecture,
26 self.__get_tar_location(),
27 )
28
29 def __get_tar_name(self, build):
30 parts = [build.name.lower(), build.version, "linux", build.architecture]
31 return "-".join(parts) + ".tar.gz"
32
33 def __get_public_url_path(self, folder, rel_path):
34 path = "{}/{}/{}/{}".format(folder, self.version, self.build_id, rel_path)
35 return urljoin(self.public_url, path)
36
37 def __get_location(self, folder_name, rel_path, abs_path):
38 if self.public_url:
39 return self.__get_public_url_path(folder_name, rel_path)
40 return abs_path
41
42 # Assembled bundles are expected to be served from a separate "bundles" folder
43 # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
44 def __get_tar_location(self):
45 return self.__get_location(
46 "bundles", self.tar_name, os.path.join(self.output_dir, self.tar_name)
47 )
48
49 # Build artifacts are expected to be served from a "builds" folder
50 # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
51 def __get_component_location(self, component_rel_path):
52 abs_path = os.path.join(self.artifacts_dir, component_rel_path)
53 return self.__get_location("builds", component_rel_path, abs_path)
54
55 def record_component(self, component, rel_path):
56 self.bundle_manifest.append_component(
57 component.name,
58 component.repository,
59 component.ref,
60 component.commit_id,
61 self.__get_component_location(rel_path),
62 )
63
64 def get_manifest(self):
65 return self.bundle_manifest.to_manifest()
66
67 def write_manifest(self, folder):
68 manifest_path = os.path.join(folder, "manifest.yml")
69 self.get_manifest().to_file(manifest_path)
70
71 class BundleManifestBuilder:
72 def __init__(self, build_id, name, version, arch, location):
73 self.data = {}
74 self.data["build"] = {}
75 self.data["build"]["id"] = build_id
76 self.data["build"]["name"] = name
77 self.data["build"]["version"] = str(version)
78 self.data["build"]["architecture"] = arch
79 self.data["build"]["location"] = location
80 self.data["schema-version"] = "1.0"
81 # We need to store components as a hash so that we can append artifacts by component name
82 # When we convert to a BundleManifest this will get converted back into a list
83 self.data["components"] = []
84
85 def append_component(self, name, repository_url, ref, commit_id, location):
86 component = {
87 "name": name,
88 "repository": repository_url,
89 "ref": ref,
90 "commit_id": commit_id,
91 "location": location,
92 }
93 self.data["components"].append(component)
94
95 def to_manifest(self):
96 return BundleManifest(self.data)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/src/assemble_workflow/bundle_recorder.py b/bundle-workflow/src/assemble_workflow/bundle_recorder.py
--- a/bundle-workflow/src/assemble_workflow/bundle_recorder.py
+++ b/bundle-workflow/src/assemble_workflow/bundle_recorder.py
@@ -31,8 +31,8 @@
return "-".join(parts) + ".tar.gz"
def __get_public_url_path(self, folder, rel_path):
- path = "{}/{}/{}/{}".format(folder, self.version, self.build_id, rel_path)
- return urljoin(self.public_url, path)
+ path = "/".join((folder, self.version, self.build_id, rel_path))
+ return urljoin(self.public_url + "/", path)
def __get_location(self, folder_name, rel_path, abs_path):
if self.public_url:
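The underlying cause is `urljoin`'s handling of a base URL without a trailing slash: the last path segment of the base is treated as a document name and replaced by the relative path. A small illustration with a made-up base URL:

```python
from urllib.parse import urljoin

base = "https://example.org/ci"      # no trailing slash
path = "builds/1.1.0/317/opensearch-min-1.1.0-linux-x64.tar.gz"

print(urljoin(base, path))           # https://example.org/builds/...   ("ci" is dropped)
print(urljoin(base + "/", path))     # https://example.org/ci/builds/... (what the fix produces)
```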
| {"golden_diff": "diff --git a/bundle-workflow/src/assemble_workflow/bundle_recorder.py b/bundle-workflow/src/assemble_workflow/bundle_recorder.py\n--- a/bundle-workflow/src/assemble_workflow/bundle_recorder.py\n+++ b/bundle-workflow/src/assemble_workflow/bundle_recorder.py\n@@ -31,8 +31,8 @@\n return \"-\".join(parts) + \".tar.gz\"\n \n def __get_public_url_path(self, folder, rel_path):\n- path = \"{}/{}/{}/{}\".format(folder, self.version, self.build_id, rel_path)\n- return urljoin(self.public_url, path)\n+ path = \"/\".join((folder, self.version, self.build_id, rel_path))\n+ return urljoin(self.public_url + \"/\", path)\n \n def __get_location(self, folder_name, rel_path, abs_path):\n if self.public_url:\n", "issue": "[Bug]: The location specified in bundle-manifest (manifest.yml) are invalid\n### Describe the bug\n\nA build manifest is bundled inside the OpenSearch bundle tarball (manifest.yml) which contains all the information about the components used to build the bundle. The bundle manifest contains a key `location` whose value is invalid for all components.\r\n\r\n```\r\nbuild:\r\n architecture: x64\r\n id: '317'\r\n location: https://ci.opensearch.org/ci/bundles/1.1.0/317/opensearch-1.1.0-linux-x64.tar.gz\r\n name: OpenSearch\r\n version: 1.1.0\r\ncomponents:\r\n- commit_id: 15e9f137622d878b79103df8f82d78d782b686a1\r\n location: https://ci.opensearch.org/ci/builds/1.1.0/317/bundle/opensearch-min-1.1.0-linux-x64.tar.gz\r\n name: OpenSearch\r\n ref: '1.1'\r\n repository: https://github.com/opensearch-project/OpenSearch.git\r\n```\n\n### To reproduce\n\nDownload the bundle. Untar it and see the manifest.yml\r\n\r\nTry accessing the location url to download individual component. It will give `Access Denied` error\n\n### Expected behavior\n\nThe URLs should be valid. 
Each component should be downloadable from the given location url\n\n### Screenshots\n\n_No response_\n\n### Host / Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir):\n self.output_dir = output_dir\n self.build_id = build.id\n self.public_url = os.getenv(\"PUBLIC_ARTIFACT_URL\", None)\n self.version = build.version\n self.tar_name = self.__get_tar_name(build)\n self.artifacts_dir = artifacts_dir\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.architecture,\n self.__get_tar_location(),\n )\n\n def __get_tar_name(self, build):\n parts = [build.name.lower(), build.version, \"linux\", build.architecture]\n return \"-\".join(parts) + \".tar.gz\"\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"{}/{}/{}/{}\".format(folder, self.version, self.build_id, rel_path)\n return urljoin(self.public_url, path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.public_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_tar_location(self):\n return self.__get_location(\n \"bundles\", self.tar_name, os.path.join(self.output_dir, self.tar_name)\n )\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n\n def record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, arch, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"architecture\"] = arch\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.0\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": 
"bundle-workflow/src/assemble_workflow/bundle_recorder.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir):\n self.output_dir = output_dir\n self.build_id = build.id\n self.public_url = os.getenv(\"PUBLIC_ARTIFACT_URL\", None)\n self.version = build.version\n self.tar_name = self.__get_tar_name(build)\n self.artifacts_dir = artifacts_dir\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.architecture,\n self.__get_tar_location(),\n )\n\n def __get_tar_name(self, build):\n parts = [build.name.lower(), build.version, \"linux\", build.architecture]\n return \"-\".join(parts) + \".tar.gz\"\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"/\".join((folder, self.version, self.build_id, rel_path))\n return urljoin(self.public_url + \"/\", path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.public_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_tar_location(self):\n return self.__get_location(\n \"bundles\", self.tar_name, os.path.join(self.output_dir, self.tar_name)\n )\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n\n def record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, arch, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"architecture\"] = arch\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.0\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": "bundle-workflow/src/assemble_workflow/bundle_recorder.py"}]} | 1,609 | 191 |
gh_patches_debug_5840 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-11 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'str' object has no attribute 'update'
```
Traceback (most recent call last):
File "/home/soul/anaconda2/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/home/soul/anaconda2/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/soul/projects/opennmt-tf/OpenNMT-tf/bin/main.py", line 275, in <module>
main()
File "/home/soul/projects/opennmt-tf/OpenNMT-tf/bin/main.py", line 225, in main
config = load_config(args.config)
File "opennmt/config.py", line 48, in load_config
config[section].update(subconfig[section])
AttributeError: 'str' object has no attribute 'update'
```
The key that triggered the error was `model_dir`, whose value is a string.
The config file that I used:
```
# The directory where models and summaries will be saved. It is created if it does not exist.
model_dir: enfr
data:
train_features_file: data/enfr/src-train.txt
train_labels_file: data/enfr/tgt-train.txt
eval_features_file: data/enfr/src-val.txt
eval_labels_file: data/enfr/tgt-val.txt
# (optional) Models may require additional resource files (e.g. vocabularies).
source_words_vocabulary: data/enfr/src-vocab.txt
target_words_vocabulary: data/enfr/tgt-vocab.txt
# Model and optimization parameters.
params:
# The optimizer class name in tf.train or tf.contrib.opt.
optimizer: AdamOptimizer
learning_rate: 0.1
# (optional) Maximum gradients norm (default: None).
clip_gradients: 5.0
# (optional) The type of learning rate decay (default: None). See:
# * https://www.tensorflow.org/versions/master/api_guides/python/train#Decaying_the_learning_rate
# * opennmt/utils/decay.py
# This value may change the semantics of other decay options. See the documentation or the code.
decay_type: exponential_decay
# (optional unless decay_type is set) The learning rate decay rate.
decay_rate: 0.9
# (optional unless decay_type is set) Decay every this many steps.
decay_steps: 10000
# (optional) If true, the learning rate is decayed in a staircase fashion (default: True).
staircase: true
# (optional) After how many steps to start the decay (default: 0).
start_decay_steps: 50000
# (optional) Stop decay when this learning rate value is reached (default: 0).
minimum_learning_rate: 0.0001
# (optional) Width of the beam search (default: 1).
beam_width: 5
# (optional) Length penaly weight to apply on hypotheses (default: 0).
length_penalty: 0.2
# (optional) Maximum decoding iterations before stopping (default: 250).
maximum_iterations: 200
# Training options.
train:
batch_size: 64
# (optional) Save a checkpoint every this many steps.
save_checkpoints_steps: 5000
# (optional) How many checkpoints to keep on disk.
keep_checkpoint_max: 3
# (optional) Save summaries every this many steps.
save_summary_steps: 100
# (optional) Train for this many steps. If not set, train forever.
train_steps: 1000000
# (optional) Evaluate every this many seconds (default: 3600).
eval_delay: 7200
# (optional) Save evaluation predictions in model_dir/eval/.
save_eval_predictions: false
# (optional) The maximum length of feature sequences during training (default: None).
maximum_features_length: 70
# (optional) The maximum length of label sequences during training (default: None).
maximum_labels_length: 70
# (optional) The number of buckets by sequence length to improve training efficiency (default: 5).
num_buckets: 5
# (optional) The number of threads to use for processing data in parallel (default: number of logical cores).
num_parallel_process_calls: 4
# (optional) The data pre-fetch buffer size, e.g. for shuffling examples (default: batch_size * 1000).
buffer_size: 10000
# (optional) Inference options.
infer:
# (optional) The batch size to use (default: 1).
batch_size: 10
# (optional) The number of threads to use for processing data in parallel (default: number of logical cores).
num_parallel_process_calls: 8
# (optional) The data pre-fetch buffer size when processing data in parallel (default: batch_size * 10).
buffer_size: 100
# (optional) For compatible models, the number of hypotheses to output (default: 1).
n_best: 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/config.py`
Content:
```
1 """Defines functions related to configuration files."""
2
3 from importlib import import_module
4
5 import yaml
6
7
8 def load_model_module(path):
9 """Loads a model configuration file.
10
11 Args:
12 path: The relative path to the configuration file.
13
14 Returns:
15 A Python module.
16 """
17 module, _ = path.rsplit(".", 1)
18 module = module.replace("/", ".")
19 module = import_module(module)
20
21 if not hasattr(module, "model"):
22 raise ImportError("No model defined in {}".format(path))
23
24 return module
25
26 def load_config(config_paths, config=None):
27 """Loads configuration files.
28
29 Args:
30 config_paths: A list of configuration files.
31 config: A (possibly non empty) config dictionary to fill.
32
33 Returns:
34 The configuration dictionary.
35 """
36 if config is None:
37 config = {}
38
39 for config_path in config_paths:
40 with open(config_path) as config_file:
41 subconfig = yaml.load(config_file.read())
42
43 # Add or update section in main configuration.
44 for section in subconfig:
45 if section in config:
46 config[section].update(subconfig[section])
47 else:
48 config[section] = subconfig[section]
49
50 return config
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opennmt/config.py b/opennmt/config.py
--- a/opennmt/config.py
+++ b/opennmt/config.py
@@ -43,7 +43,10 @@
# Add or update section in main configuration.
for section in subconfig:
if section in config:
- config[section].update(subconfig[section])
+ if isinstance(config[section], dict):
+ config[section].update(subconfig[section])
+ else:
+ config[section] = subconfig[section]
else:
config[section] = subconfig[section]
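The `isinstance` guard matters because a top-level YAML entry such as `model_dir` loads as a plain string, and strings have no `.update()`. A minimal scalar-aware merge, with invented YAML snippets for illustration:

```python
import yaml

base = yaml.safe_load("model_dir: old_run\nparams: {learning_rate: 0.1}")
override = yaml.safe_load("model_dir: enfr\nparams: {learning_rate: 0.2}")

for section, value in override.items():
    if section in base and isinstance(base[section], dict):
        base[section].update(value)   # merge nested sections
    else:
        base[section] = value         # replace scalars (and add new sections)

print(base)  # {'model_dir': 'enfr', 'params': {'learning_rate': 0.2}}
```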
| {"golden_diff": "diff --git a/opennmt/config.py b/opennmt/config.py\n--- a/opennmt/config.py\n+++ b/opennmt/config.py\n@@ -43,7 +43,10 @@\n # Add or update section in main configuration.\n for section in subconfig:\n if section in config:\n- config[section].update(subconfig[section])\n+ if isinstance(config[section], dict):\n+ config[section].update(subconfig[section])\n+ else:\n+ config[section] = subconfig[section]\n else:\n config[section] = subconfig[section]\n", "issue": "AttributeError: 'str' object has no attribute 'update'\n```\r\nTraceback (most recent call last):\r\n File \"/home/soul/anaconda2/lib/python2.7/runpy.py\", line 174, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"/home/soul/anaconda2/lib/python2.7/runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"/home/soul/projects/opennmt-tf/OpenNMT-tf/bin/main.py\", line 275, in <module>\r\n main()\r\n File \"/home/soul/projects/opennmt-tf/OpenNMT-tf/bin/main.py\", line 225, in main\r\n config = load_config(args.config)\r\n File \"opennmt/config.py\", line 48, in load_config\r\n config[section].update(subconfig[section])\r\nAttributeError: 'str' object has no attribute 'update'\r\n```\r\nThe attribute that caused it was \"model_dir\", where its value was a string.\r\n\r\nThe config file that I used:\r\n```\r\n# The directory where models and summaries will be saved. It is created if it does not exist.\r\nmodel_dir: enfr\r\n\r\ndata:\r\n train_features_file: data/enfr/src-train.txt\r\n train_labels_file: data/enfr/tgt-train.txt\r\n eval_features_file: data/enfr/src-val.txt\r\n eval_labels_file: data/enfr/tgt-val.txt\r\n\r\n # (optional) Models may require additional resource files (e.g. vocabularies).\r\n source_words_vocabulary: data/enfr/src-vocab.txt\r\n target_words_vocabulary: data/enfr/tgt-vocab.txt\r\n\r\n# Model and optimization parameters.\r\nparams:\r\n # The optimizer class name in tf.train or tf.contrib.opt.\r\n optimizer: AdamOptimizer\r\n learning_rate: 0.1\r\n\r\n # (optional) Maximum gradients norm (default: None).\r\n clip_gradients: 5.0\r\n # (optional) The type of learning rate decay (default: None). See:\r\n # * https://www.tensorflow.org/versions/master/api_guides/python/train#Decaying_the_learning_rate\r\n # * opennmt/utils/decay.py\r\n # This value may change the semantics of other decay options. 
See the documentation or the code.\r\n decay_type: exponential_decay\r\n # (optional unless decay_type is set) The learning rate decay rate.\r\n decay_rate: 0.9\r\n # (optional unless decay_type is set) Decay every this many steps.\r\n decay_steps: 10000\r\n # (optional) If true, the learning rate is decayed in a staircase fashion (default: True).\r\n staircase: true\r\n # (optional) After how many steps to start the decay (default: 0).\r\n start_decay_steps: 50000\r\n # (optional) Stop decay when this learning rate value is reached (default: 0).\r\n minimum_learning_rate: 0.0001\r\n # (optional) Width of the beam search (default: 1).\r\n beam_width: 5\r\n # (optional) Length penaly weight to apply on hypotheses (default: 0).\r\n length_penalty: 0.2\r\n # (optional) Maximum decoding iterations before stopping (default: 250).\r\n maximum_iterations: 200\r\n\r\n# Training options.\r\ntrain:\r\n batch_size: 64\r\n\r\n # (optional) Save a checkpoint every this many steps.\r\n save_checkpoints_steps: 5000\r\n # (optional) How many checkpoints to keep on disk.\r\n keep_checkpoint_max: 3\r\n # (optional) Save summaries every this many steps.\r\n save_summary_steps: 100\r\n # (optional) Train for this many steps. If not set, train forever.\r\n train_steps: 1000000\r\n # (optional) Evaluate every this many seconds (default: 3600).\r\n eval_delay: 7200\r\n # (optional) Save evaluation predictions in model_dir/eval/.\r\n save_eval_predictions: false\r\n # (optional) The maximum length of feature sequences during training (default: None).\r\n maximum_features_length: 70\r\n # (optional) The maximum length of label sequences during training (default: None).\r\n maximum_labels_length: 70\r\n # (optional) The number of buckets by sequence length to improve training efficiency (default: 5).\r\n num_buckets: 5\r\n # (optional) The number of threads to use for processing data in parallel (default: number of logical cores).\r\n num_parallel_process_calls: 4\r\n # (optional) The data pre-fetch buffer size, e.g. 
for shuffling examples (default: batch_size * 1000).\r\n buffer_size: 10000\r\n\r\n# (optional) Inference options.\r\ninfer:\r\n # (optional) The batch size to use (default: 1).\r\n batch_size: 10\r\n # (optional) The number of threads to use for processing data in parallel (default: number of logical cores).\r\n num_parallel_process_calls: 8\r\n # (optional) The data pre-fetch buffer size when processing data in parallel (default: batch_size * 10).\r\n buffer_size: 100\r\n # (optional) For compatible models, the number of hypotheses to output (default: 1).\r\n n_best: 1\r\n```\n", "before_files": [{"content": "\"\"\"Defines functions related to configuration files.\"\"\"\n\nfrom importlib import import_module\n\nimport yaml\n\n\ndef load_model_module(path):\n \"\"\"Loads a model configuration file.\n\n Args:\n path: The relative path to the configuration file.\n\n Returns:\n A Python module.\n \"\"\"\n module, _ = path.rsplit(\".\", 1)\n module = module.replace(\"/\", \".\")\n module = import_module(module)\n\n if not hasattr(module, \"model\"):\n raise ImportError(\"No model defined in {}\".format(path))\n\n return module\n\ndef load_config(config_paths, config=None):\n \"\"\"Loads configuration files.\n\n Args:\n config_paths: A list of configuration files.\n config: A (possibly non empty) config dictionary to fill.\n\n Returns:\n The configuration dictionary.\n \"\"\"\n if config is None:\n config = {}\n\n for config_path in config_paths:\n with open(config_path) as config_file:\n subconfig = yaml.load(config_file.read())\n\n # Add or update section in main configuration.\n for section in subconfig:\n if section in config:\n config[section].update(subconfig[section])\n else:\n config[section] = subconfig[section]\n\n return config\n", "path": "opennmt/config.py"}], "after_files": [{"content": "\"\"\"Defines functions related to configuration files.\"\"\"\n\nfrom importlib import import_module\n\nimport yaml\n\n\ndef load_model_module(path):\n \"\"\"Loads a model configuration file.\n\n Args:\n path: The relative path to the configuration file.\n\n Returns:\n A Python module.\n \"\"\"\n module, _ = path.rsplit(\".\", 1)\n module = module.replace(\"/\", \".\")\n module = import_module(module)\n\n if not hasattr(module, \"model\"):\n raise ImportError(\"No model defined in {}\".format(path))\n\n return module\n\ndef load_config(config_paths, config=None):\n \"\"\"Loads configuration files.\n\n Args:\n config_paths: A list of configuration files.\n config: A (possibly non empty) config dictionary to fill.\n\n Returns:\n The configuration dictionary.\n \"\"\"\n if config is None:\n config = {}\n\n for config_path in config_paths:\n with open(config_path) as config_file:\n subconfig = yaml.load(config_file.read())\n\n # Add or update section in main configuration.\n for section in subconfig:\n if section in config:\n if isinstance(config[section], dict):\n config[section].update(subconfig[section])\n else:\n config[section] = subconfig[section]\n else:\n config[section] = subconfig[section]\n\n return config\n", "path": "opennmt/config.py"}]} | 1,800 | 123 |
gh_patches_debug_30228 | rasdani/github-patches | git_diff | netket__netket-1256 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`QGTJacobian***(holomorphic=True)` for R->C models returns wrong output. Should error.
cc @attila-i-szabo
I agree that `holomorphic=True` is wrong in that case, but it should rather throw an error than give wrong results...
It took me a while to notice that this was the cause of some wrong optimisations.
```python
import netket as nk
import jax
L = 20
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
hi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes)
ha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)
ma = nk.models.RBMModPhase(alpha=1, param_dtype=float)
sa = nk.sampler.MetropolisLocal(hi, n_chains=16)
vs = nk.vqs.MCState(sa, ma, n_samples=1000, n_discard_per_chain=100)
qgt_holo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)
qgt_nonholo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=False)
qgt_otf = nk.optimizer.qgt.QGTOnTheFly()
S_holo = vs.quantum_geometric_tensor(qgt_holo)
S_nonholo = vs.quantum_geometric_tensor(qgt_nonholo)
S_otf = vs.quantum_geometric_tensor(qgt_otf)
F = vs.parameters
r_holo = S_holo@F
r_nonholo = S_nonholo@F
r_otf = S_otf@F
jax.tree_map(lambda x,y:x-y, r_holo, r_nonholo)
```
--- END ISSUE ---
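Editor's note: the behaviour the issue asks for is a guard that rejects `holomorphic=True` whenever every parameter leaf is real. The sketch below only illustrates that idea with generic JAX/NumPy calls; the helper names are placeholders and this is not NetKet's actual implementation (the real fix appears in the diff further down in this record).

```python
import numpy as np
import jax


def all_leaves_real(params):
    """Return True when every array in the parameter pytree has a real dtype."""
    leaves = jax.tree_util.tree_leaves(params)
    return all(
        not np.issubdtype(np.asarray(leaf).dtype, np.complexfloating)
        for leaf in leaves
    )


def check_holomorphic_flag(params, holomorphic):
    """Refuse to continue instead of silently computing a wrong geometric tensor."""
    if holomorphic and all_leaves_real(params):
        raise ValueError(
            "A function with real parameters cannot be holomorphic; "
            "remove `holomorphic=True` or use mode='complex'."
        )
```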
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netket/optimizer/qgt/qgt_jacobian_common.py`
Content:
```
1 # Copyright 2021 The NetKet Authors - All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from functools import partial
16 import warnings
17 from textwrap import dedent
18
19 import jax
20
21 import netket.jax as nkjax
22
23
24 @partial(jax.jit, static_argnums=(0, 4, 5))
25 def _choose_jacobian_mode(apply_fun, pars, model_state, samples, mode, holomorphic):
26 homogeneous_vars = nkjax.tree_ishomogeneous(pars)
27
28 if holomorphic is True:
29 if not homogeneous_vars:
30 warnings.warn(
31 dedent(
32 """The ansatz has non homogeneous variables, which might not behave well with the
33 holomorhic implementation.
34 Use `holomorphic=False` or mode='complex' for more accurate results but
35 lower performance.
36 """
37 )
38 )
39 mode = "holomorphic"
40 else:
41 leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)
42 complex_output = nkjax.is_complex(
43 jax.eval_shape(
44 apply_fun,
45 {"params": pars, **model_state},
46 samples.reshape(-1, samples.shape[-1]),
47 )
48 )
49
50 if complex_output:
51 if leaf_iscomplex:
52 if holomorphic is None:
53 warnings.warn(
54 dedent(
55 """
56 Complex-to-Complex model detected. Defaulting to `holomorphic=False` for
57 the implementation of QGTJacobianDense.
58 If your model is holomorphic, specify `holomorphic=True` to use a more
59 performant implementation.
60 To suppress this warning specify `holomorphic`.
61 """
62 ),
63 UserWarning,
64 )
65 mode = "complex"
66 else:
67 mode = "complex"
68 else:
69 mode = "real"
70
71 if mode == "real":
72 return 0
73 elif mode == "complex":
74 return 1
75 elif mode == "holomorphic":
76 return 2
77 else:
78 raise ValueError(f"unknown mode {mode}")
79
80
81 def choose_jacobian_mode(afun, pars, state, samples, *, mode, holomorphic):
82 """
83 Select an implementation of Jacobian
84 """
85 i = _choose_jacobian_mode(afun, pars, state, samples, mode, holomorphic).item()
86 if i == 0:
87 return "real"
88 elif i == 1:
89 return "complex"
90 elif i == 2:
91 return "holomorphic"
92 else:
93 raise ValueError(f"unknown mode {i}")
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netket/optimizer/qgt/qgt_jacobian_common.py b/netket/optimizer/qgt/qgt_jacobian_common.py
--- a/netket/optimizer/qgt/qgt_jacobian_common.py
+++ b/netket/optimizer/qgt/qgt_jacobian_common.py
@@ -24,21 +24,37 @@
@partial(jax.jit, static_argnums=(0, 4, 5))
def _choose_jacobian_mode(apply_fun, pars, model_state, samples, mode, holomorphic):
homogeneous_vars = nkjax.tree_ishomogeneous(pars)
+ leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)
if holomorphic is True:
- if not homogeneous_vars:
+ if homogeneous_vars and leaf_iscomplex:
+ ## all complex parameters
+ mode = "holomorphic"
+ elif homogeneous_vars and not leaf_iscomplex:
+ # all real parameters
+ raise ValueError(
+ dedent(
+ """
+ A function with real parameters cannot be holomorphic.
+
+ Please remove the kw-arg `holomorphic=True`.
+ """
+ )
+ )
+ else:
+ # mixed complex and real parameters
warnings.warn(
dedent(
"""The ansatz has non homogeneous variables, which might not behave well with the
holomorhic implementation.
+
Use `holomorphic=False` or mode='complex' for more accurate results but
lower performance.
"""
)
)
- mode = "holomorphic"
+ mode = "holomorphic"
else:
- leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)
complex_output = nkjax.is_complex(
jax.eval_shape(
apply_fun,
| {"golden_diff": "diff --git a/netket/optimizer/qgt/qgt_jacobian_common.py b/netket/optimizer/qgt/qgt_jacobian_common.py\n--- a/netket/optimizer/qgt/qgt_jacobian_common.py\n+++ b/netket/optimizer/qgt/qgt_jacobian_common.py\n@@ -24,21 +24,37 @@\n @partial(jax.jit, static_argnums=(0, 4, 5))\n def _choose_jacobian_mode(apply_fun, pars, model_state, samples, mode, holomorphic):\n homogeneous_vars = nkjax.tree_ishomogeneous(pars)\n+ leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)\n \n if holomorphic is True:\n- if not homogeneous_vars:\n+ if homogeneous_vars and leaf_iscomplex:\n+ ## all complex parameters\n+ mode = \"holomorphic\"\n+ elif homogeneous_vars and not leaf_iscomplex:\n+ # all real parameters\n+ raise ValueError(\n+ dedent(\n+ \"\"\"\n+ A function with real parameters cannot be holomorphic. \n+ \n+ Please remove the kw-arg `holomorphic=True`.\n+ \"\"\"\n+ )\n+ )\n+ else:\n+ # mixed complex and real parameters\n warnings.warn(\n dedent(\n \"\"\"The ansatz has non homogeneous variables, which might not behave well with the\n holomorhic implementation.\n+\n Use `holomorphic=False` or mode='complex' for more accurate results but\n lower performance.\n \"\"\"\n )\n )\n- mode = \"holomorphic\"\n+ mode = \"holomorphic\"\n else:\n- leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)\n complex_output = nkjax.is_complex(\n jax.eval_shape(\n apply_fun,\n", "issue": "`QGTJacobian***(holomorphic=True)` for R->C models returns wrong output. Should error.\ncc @attila-i-szabo\r\n\r\nI agree that `holomorphic=True`\u00a0is wrong in that case, but it should rather throw an error than give wrong results...\r\nIt took me a while to notice that this was the cause of some wrong optimisations.\r\n\r\n```python\r\nimport netket as nk\r\nimport jax\r\n\r\nL = 20\r\ng = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)\r\nhi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes)\r\nha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)\r\nma = nk.models.RBMModPhase(alpha=1, param_dtype=float)\r\nsa = nk.sampler.MetropolisLocal(hi, n_chains=16)\r\nvs = nk.vqs.MCState(sa, ma, n_samples=1000, n_discard_per_chain=100)\r\n\r\nqgt_holo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)\r\nqgt_nonholo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=False)\r\nqgt_otf = nk.optimizer.qgt.QGTOnTheFly()\r\n\r\nS_holo = vs.quantum_geometric_tensor(qgt_holo)\r\nS_nonholo = vs.quantum_geometric_tensor(qgt_nonholo)\r\nS_otf = vs.quantum_geometric_tensor(qgt_otf)\r\n\r\nF = vs.parameters\r\n\r\nr_holo = S_holo@F\r\nr_nonholo = S_nonholo@F\r\nr_otf = S_otf@F\r\n\r\njax.tree_map(lambda x,y:x-y, r_holo, r_nonholo)\r\n```\n`QGTJacobian***(holomorphic=True)` for R->C models returns wrong output. 
Should error.\ncc @attila-i-szabo\r\n\r\nI agree that `holomorphic=True`\u00a0is wrong in that case, but it should rather throw an error than give wrong results...\r\nIt took me a while to notice that this was the cause of some wrong optimisations.\r\n\r\n```python\r\nimport netket as nk\r\nimport jax\r\n\r\nL = 20\r\ng = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)\r\nhi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes)\r\nha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)\r\nma = nk.models.RBMModPhase(alpha=1, param_dtype=float)\r\nsa = nk.sampler.MetropolisLocal(hi, n_chains=16)\r\nvs = nk.vqs.MCState(sa, ma, n_samples=1000, n_discard_per_chain=100)\r\n\r\nqgt_holo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=True)\r\nqgt_nonholo = nk.optimizer.qgt.QGTJacobianDense(holomorphic=False)\r\nqgt_otf = nk.optimizer.qgt.QGTOnTheFly()\r\n\r\nS_holo = vs.quantum_geometric_tensor(qgt_holo)\r\nS_nonholo = vs.quantum_geometric_tensor(qgt_nonholo)\r\nS_otf = vs.quantum_geometric_tensor(qgt_otf)\r\n\r\nF = vs.parameters\r\n\r\nr_holo = S_holo@F\r\nr_nonholo = S_nonholo@F\r\nr_otf = S_otf@F\r\n\r\njax.tree_map(lambda x,y:x-y, r_holo, r_nonholo)\r\n```\n", "before_files": [{"content": "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport warnings\nfrom textwrap import dedent\n\nimport jax\n\nimport netket.jax as nkjax\n\n\n@partial(jax.jit, static_argnums=(0, 4, 5))\ndef _choose_jacobian_mode(apply_fun, pars, model_state, samples, mode, holomorphic):\n homogeneous_vars = nkjax.tree_ishomogeneous(pars)\n\n if holomorphic is True:\n if not homogeneous_vars:\n warnings.warn(\n dedent(\n \"\"\"The ansatz has non homogeneous variables, which might not behave well with the\n holomorhic implementation.\n Use `holomorphic=False` or mode='complex' for more accurate results but\n lower performance.\n \"\"\"\n )\n )\n mode = \"holomorphic\"\n else:\n leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)\n complex_output = nkjax.is_complex(\n jax.eval_shape(\n apply_fun,\n {\"params\": pars, **model_state},\n samples.reshape(-1, samples.shape[-1]),\n )\n )\n\n if complex_output:\n if leaf_iscomplex:\n if holomorphic is None:\n warnings.warn(\n dedent(\n \"\"\"\n Complex-to-Complex model detected. 
Defaulting to `holomorphic=False` for\n the implementation of QGTJacobianDense.\n If your model is holomorphic, specify `holomorphic=True` to use a more\n performant implementation.\n To suppress this warning specify `holomorphic`.\n \"\"\"\n ),\n UserWarning,\n )\n mode = \"complex\"\n else:\n mode = \"complex\"\n else:\n mode = \"real\"\n\n if mode == \"real\":\n return 0\n elif mode == \"complex\":\n return 1\n elif mode == \"holomorphic\":\n return 2\n else:\n raise ValueError(f\"unknown mode {mode}\")\n\n\ndef choose_jacobian_mode(afun, pars, state, samples, *, mode, holomorphic):\n \"\"\"\n Select an implementation of Jacobian\n \"\"\"\n i = _choose_jacobian_mode(afun, pars, state, samples, mode, holomorphic).item()\n if i == 0:\n return \"real\"\n elif i == 1:\n return \"complex\"\n elif i == 2:\n return \"holomorphic\"\n else:\n raise ValueError(f\"unknown mode {i}\")\n", "path": "netket/optimizer/qgt/qgt_jacobian_common.py"}], "after_files": [{"content": "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport warnings\nfrom textwrap import dedent\n\nimport jax\n\nimport netket.jax as nkjax\n\n\n@partial(jax.jit, static_argnums=(0, 4, 5))\ndef _choose_jacobian_mode(apply_fun, pars, model_state, samples, mode, holomorphic):\n homogeneous_vars = nkjax.tree_ishomogeneous(pars)\n leaf_iscomplex = nkjax.tree_leaf_iscomplex(pars)\n\n if holomorphic is True:\n if homogeneous_vars and leaf_iscomplex:\n ## all complex parameters\n mode = \"holomorphic\"\n elif homogeneous_vars and not leaf_iscomplex:\n # all real parameters\n raise ValueError(\n dedent(\n \"\"\"\n A function with real parameters cannot be holomorphic. \n \n Please remove the kw-arg `holomorphic=True`.\n \"\"\"\n )\n )\n else:\n # mixed complex and real parameters\n warnings.warn(\n dedent(\n \"\"\"The ansatz has non homogeneous variables, which might not behave well with the\n holomorhic implementation.\n\n Use `holomorphic=False` or mode='complex' for more accurate results but\n lower performance.\n \"\"\"\n )\n )\n mode = \"holomorphic\"\n else:\n complex_output = nkjax.is_complex(\n jax.eval_shape(\n apply_fun,\n {\"params\": pars, **model_state},\n samples.reshape(-1, samples.shape[-1]),\n )\n )\n\n if complex_output:\n if leaf_iscomplex:\n if holomorphic is None:\n warnings.warn(\n dedent(\n \"\"\"\n Complex-to-Complex model detected. 
Defaulting to `holomorphic=False` for\n the implementation of QGTJacobianDense.\n If your model is holomorphic, specify `holomorphic=True` to use a more\n performant implementation.\n To suppress this warning specify `holomorphic`.\n \"\"\"\n ),\n UserWarning,\n )\n mode = \"complex\"\n else:\n mode = \"complex\"\n else:\n mode = \"real\"\n\n if mode == \"real\":\n return 0\n elif mode == \"complex\":\n return 1\n elif mode == \"holomorphic\":\n return 2\n else:\n raise ValueError(f\"unknown mode {mode}\")\n\n\ndef choose_jacobian_mode(afun, pars, state, samples, *, mode, holomorphic):\n \"\"\"\n Select an implementation of Jacobian\n \"\"\"\n i = _choose_jacobian_mode(afun, pars, state, samples, mode, holomorphic).item()\n if i == 0:\n return \"real\"\n elif i == 1:\n return \"complex\"\n elif i == 2:\n return \"holomorphic\"\n else:\n raise ValueError(f\"unknown mode {i}\")\n", "path": "netket/optimizer/qgt/qgt_jacobian_common.py"}]} | 1,862 | 380 |
gh_patches_debug_28949 | rasdani/github-patches | git_diff | google__mobly-417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`SnippetEvent` should be loggable
Right now, logging the event object directly does not show the content of the event, which makes debugging difficult.
`logging.info(event)` should log the full content of the event.
--- END ISSUE ---
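Editor's note: the core idea requested here is that the event object should have a readable string form, so that `logging.info(event)` prints its fields. Below is a minimal self-contained sketch of that idea (not necessarily the exact change the project adopted); the event values are made up for the demonstration.

```python
import logging

logging.basicConfig(level=logging.INFO)


class SnippetEvent(object):
    def __init__(self, callback_id, name, creation_time, data):
        self.callback_id = callback_id
        self.name = name
        self.creation_time = creation_time
        self.data = data

    def __repr__(self):
        # A readable representation so logging the object shows its content.
        return ('SnippetEvent(callback_id: %s, name: %s, creation_time: %s, '
                'data: %s)') % (self.callback_id, self.name,
                                self.creation_time, self.data)


event = SnippetEvent('1-0', 'onReceive', 1500000000, {'result': 'ok'})
logging.info(event)  # now logs the full event content instead of an opaque object
```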
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import platform
16 import setuptools
17 from setuptools.command import test
18 import sys
19
20 install_requires = [
21 'future',
22 # mock-1.0.1 is the last version compatible with setuptools <17.1,
23 # which is what comes with Ubuntu 14.04 LTS.
24 'mock<=1.0.1',
25 'portpicker',
26 'psutil',
27 'pytz',
28 'pyyaml',
29 'timeout_decorator',
30 'pyserial'
31 ]
32
33 if sys.version_info < (3, ):
34 install_requires.extend([
35 'enum34',
36 # "futures" is needed for py2 compatibility and it only works in 2.7
37 'futures',
38 ])
39
40 if platform.system() == 'Windows':
41 install_requires.append('pywin32')
42
43
44 class PyTest(test.test):
45 """Class used to execute unit tests using PyTest. This allows us to execute
46 unit tests without having to install the package.
47 """
48
49 def finalize_options(self):
50 test.test.finalize_options(self)
51 self.test_args = ['-x', "tests"]
52 self.test_suite = True
53
54 def run_tests(self):
55 import pytest
56 errno = pytest.main(self.test_args)
57 sys.exit(errno)
58
59
60 def main():
61 setuptools.setup(
62 name='mobly',
63 version='1.7.1',
64 maintainer = 'Ang Li',
65 maintainer_email = '[email protected]',
66 description='Automation framework for special end-to-end test cases',
67 license='Apache2.0',
68 url = 'https://github.com/google/mobly',
69 download_url = 'https://github.com/google/mobly/tarball/1.7.1',
70 packages=setuptools.find_packages(),
71 include_package_data=False,
72 scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
73 tests_require=['pytest'],
74 install_requires=install_requires,
75 cmdclass={'test': PyTest}, )
76
77
78 if __name__ == '__main__':
79 main()
80
```
Path: `mobly/controllers/android_device_lib/snippet_event.py`
Content:
```
1 # Copyright 2017 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def from_dict(event_dict):
17 """Create a SnippetEvent object from a dictionary.
18
19 Args:
20 event_dict: a dictionary representing an event.
21
22 Returns:
23 A SnippetEvent object.
24 """
25 return SnippetEvent(
26 callback_id=event_dict['callbackId'],
27 name=event_dict['name'],
28 creation_time=event_dict['time'],
29 data=event_dict['data'])
30
31
32 class SnippetEvent(object):
33 """The class that represents callback events for mobly snippet library.
34
35 Attributes:
36 callback_id: string, the callback ID associated with the event.
37 name: string, the name of the event.
38 creation_time: int, the epoch time when the event is created on the
39 Rpc server side.
40 data: dictionary, the data held by the event. Can be None.
41 """
42
43 def __init__(self, callback_id, name, creation_time, data):
44 self.callback_id = callback_id
45 self.name = name
46 self.creation_time = creation_time
47 self.data = data
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mobly/controllers/android_device_lib/snippet_event.py b/mobly/controllers/android_device_lib/snippet_event.py
--- a/mobly/controllers/android_device_lib/snippet_event.py
+++ b/mobly/controllers/android_device_lib/snippet_event.py
@@ -3,9 +3,9 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -45,3 +45,8 @@
self.name = name
self.creation_time = creation_time
self.data = data
+
+ def __repr__(self):
+ return ('SnippetEvent(callback_id: %s, name: %s, creation_time: %s, '
+ 'data: %s)') % (self.callback_id, self.name,
+ self.creation_time, self.data)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@
packages=setuptools.find_packages(),
include_package_data=False,
scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],
- tests_require=['pytest'],
+ tests_require=['pytest', 'testfixtures'],
install_requires=install_requires,
cmdclass={'test': PyTest}, )
| {"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_event.py b/mobly/controllers/android_device_lib/snippet_event.py\n--- a/mobly/controllers/android_device_lib/snippet_event.py\n+++ b/mobly/controllers/android_device_lib/snippet_event.py\n@@ -3,9 +3,9 @@\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n-# \n+#\n # http://www.apache.org/licenses/LICENSE-2.0\n-# \n+#\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n@@ -45,3 +45,8 @@\n self.name = name\n self.creation_time = creation_time\n self.data = data\n+\n+ def __repr__(self):\n+ return ('SnippetEvent(callback_id: %s, name: %s, creation_time: %s, '\n+ 'data: %s)') % (self.callback_id, self.name,\n+ self.creation_time, self.data)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,7 +70,7 @@\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n- tests_require=['pytest'],\n+ tests_require=['pytest', 'testfixtures'],\n install_requires=install_requires,\n cmdclass={'test': PyTest}, )\n", "issue": "`SnippetEvent` should be loggable\nRight now logging event object directly does not show the content of the event, which makes debugging difficult.\r\n`logging.info(event)` should log the full content of the event.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'future',\n # mock-1.0.1 is the last version compatible with setuptools <17.1,\n # which is what comes with Ubuntu 14.04 LTS.\n 'mock<=1.0.1',\n 'portpicker',\n 'psutil',\n 'pytz',\n 'pyyaml',\n 'timeout_decorator',\n 'pyserial'\n]\n\nif sys.version_info < (3, ):\n install_requires.extend([\n 'enum34',\n # \"futures\" is needed for py2 compatibility and it only works in 2.7\n 'futures',\n ])\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. 
This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.7.1',\n maintainer = 'Ang Li',\n maintainer_email = '[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url = 'https://github.com/google/mobly',\n download_url = 'https://github.com/google/mobly/tarball/1.7.1',\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=['pytest'],\n install_requires=install_requires,\n cmdclass={'test': PyTest}, )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}, {"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef from_dict(event_dict):\n \"\"\"Create a SnippetEvent object from a dictionary.\n\n Args:\n event_dict: a dictionary representing an event.\n\n Returns:\n A SnippetEvent object.\n \"\"\"\n return SnippetEvent(\n callback_id=event_dict['callbackId'],\n name=event_dict['name'],\n creation_time=event_dict['time'],\n data=event_dict['data'])\n\n\nclass SnippetEvent(object):\n \"\"\"The class that represents callback events for mobly snippet library.\n\n Attributes:\n callback_id: string, the callback ID associated with the event.\n name: string, the name of the event.\n creation_time: int, the epoch time when the event is created on the\n Rpc server side.\n data: dictionary, the data held by the event. 
Can be None.\n \"\"\"\n\n def __init__(self, callback_id, name, creation_time, data):\n self.callback_id = callback_id\n self.name = name\n self.creation_time = creation_time\n self.data = data\n", "path": "mobly/controllers/android_device_lib/snippet_event.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'future',\n # mock-1.0.1 is the last version compatible with setuptools <17.1,\n # which is what comes with Ubuntu 14.04 LTS.\n 'mock<=1.0.1',\n 'portpicker',\n 'psutil',\n 'pytz',\n 'pyyaml',\n 'timeout_decorator',\n 'pyserial'\n]\n\nif sys.version_info < (3, ):\n install_requires.extend([\n 'enum34',\n # \"futures\" is needed for py2 compatibility and it only works in 2.7\n 'futures',\n ])\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.7.1',\n maintainer = 'Ang Li',\n maintainer_email = '[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url = 'https://github.com/google/mobly',\n download_url = 'https://github.com/google/mobly/tarball/1.7.1',\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=['pytest', 'testfixtures'],\n install_requires=install_requires,\n cmdclass={'test': PyTest}, )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}, {"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef from_dict(event_dict):\n \"\"\"Create a SnippetEvent object from a dictionary.\n\n Args:\n event_dict: a dictionary representing an event.\n\n Returns:\n A SnippetEvent object.\n \"\"\"\n return SnippetEvent(\n callback_id=event_dict['callbackId'],\n name=event_dict['name'],\n creation_time=event_dict['time'],\n data=event_dict['data'])\n\n\nclass SnippetEvent(object):\n \"\"\"The class that represents callback events for 
mobly snippet library.\n\n Attributes:\n callback_id: string, the callback ID associated with the event.\n name: string, the name of the event.\n creation_time: int, the epoch time when the event is created on the\n Rpc server side.\n data: dictionary, the data held by the event. Can be None.\n \"\"\"\n\n def __init__(self, callback_id, name, creation_time, data):\n self.callback_id = callback_id\n self.name = name\n self.creation_time = creation_time\n self.data = data\n\n def __repr__(self):\n return ('SnippetEvent(callback_id: %s, name: %s, creation_time: %s, '\n 'data: %s)') % (self.callback_id, self.name,\n self.creation_time, self.data)\n", "path": "mobly/controllers/android_device_lib/snippet_event.py"}]} | 1,484 | 364 |
gh_patches_debug_11487 | rasdani/github-patches | git_diff | facebookresearch__nevergrad-712 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Optimizer always trying the same points
## Steps to reproduce
1. Set up an optimizer with a parametrization
2. Run optimizer.minimize()
3. Print values being tested
## Observed Results
* What happened? This could be a description, log output, etc.
The arguments of the objective function are always the same
## Expected Results
* What did you expect to happen?
The optimizer tries new values
## Relevant Code
```
from concurrent import futures
import nevergrad as ng
import numpy as np
class myobj:
    def __init__(self):
        self.params = ng.p.Dict(
            kernel=ng.p.Log(lower=3, upper=2000).set_integer_casting()
        )

    def get_optimizer(self, parametrization, dictA):
        optimizer = ng.optimizers.NGO(
            parametrization=parametrization,
            budget=dictA["budget"],
            num_workers=dictA["workers"],
        )
        return optimizer

    def fn(self, params, dictA, X, Y):
        print(params)
        kvalue = params['kernel']
        print(kvalue)
        return kvalue * np.mean((Y - X)**2)

    def optimize_params(self, dictA, X, Y):
        # breakpoint()
        parametrization = ng.p.Instrumentation(
            self.params, dictA=dictA, X=X, Y=Y
        )
        optimizer = self.get_optimizer(parametrization, dictA)
        with futures.ThreadPoolExecutor(
            max_workers=optimizer.num_workers
        ) as executor:
            r = optimizer.minimize(
                self.fn, executor=executor, batch_mode=False
            )
        return r


obj = myobj()
dictA = {'budget':5, 'workers':2}
recom = obj.optimize_params(dictA, np.zeros(3), np.ones(3))
print('recommendation: ')
print(*recom.args)
```
I am sure I am doing something wrong, but I cannot detect what. Any help is welcome.
<!--- In any case, don't hesitate to join and ask questions if you need on Nevergrad users Facebook group https://www.facebook.com/groups/nevergradusers/ -->
--- END ISSUE ---
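Editor's note: in isolation, the symptom reported above (the optimizer proposing the same candidate forever) can arise when a discrete mutation is allowed to return the parent unchanged for a one-dimensional parametrization, which is the corner case the diff further down in this record addresses. The toy sketch below reproduces that behaviour with a simplified copy of the mutation loop; it is illustrative only and not nevergrad's exact code path.

```python
import numpy as np

rng = np.random.RandomState(0)


def portfolio_discrete_mutation(parent, u=None):
    # Simplified portfolio-style discrete mutation.
    dimension = len(parent)
    if u is None:
        u = 1 if dimension == 1 else int(rng.randint(1, dimension))
    boolean_vector = [True for _ in parent]
    # With dimension == 1 the guard below exits immediately, so every entry
    # stays True and the parent is returned unchanged: the optimizer keeps
    # re-evaluating the same point.
    while all(boolean_vector) and dimension != 1:
        boolean_vector = [rng.rand() > (float(u) / dimension) for _ in parent]
    return [s if b else rng.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]


print(portfolio_discrete_mutation([0.5]))  # always [0.5], never a new candidate
```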
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nevergrad/optimization/mutations.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from typing import Optional, Any
7 import numpy as np
8 from ..common.typetools import ArrayLike
9 from . import utils
10
11
12 class Mutator:
13 """Class defining mutations, and holding a random state used for random generation.
14 """
15
16 def __init__(self, random_state: np.random.RandomState) -> None:
17 self.random_state = random_state
18
19 def doerr_discrete_mutation(self, parent: ArrayLike) -> ArrayLike:
20 """Mutation as in the fast 1+1-ES, Doerr et al. The exponent is 1.5.
21 """
22 dimension = len(parent)
23 if dimension < 5:
24 return self.discrete_mutation(parent)
25 return self.doubledoerr_discrete_mutation(parent, max_ratio=.5)
26
27 def doubledoerr_discrete_mutation(self, parent: ArrayLike, max_ratio: float = 1.) -> ArrayLike:
28 """Doerr's recommendation above can mutate up to half variables
29 in average.
30 In our high-arity context, we might need more than that.
31
32 Parameters
33 ----------
34 parent: array-like
35 the point to mutate
36 max_ratio: float (between 0 and 1)
37 the maximum mutation ratio (careful: this is not an exact ratio)
38 """
39 assert 0 <= max_ratio <= 1
40 dimension = len(parent)
41 max_mutations = max(2, int(max_ratio * dimension))
42 p = 1. / np.arange(1, max_mutations)**1.5
43 p /= np.sum(p)
44 u = self.random_state.choice(np.arange(1, max_mutations), p=p)
45 return self.portfolio_discrete_mutation(parent, u=u)
46
47 def portfolio_discrete_mutation(self, parent: ArrayLike, u: Optional[int] = None) -> ArrayLike:
48 """Mutation discussed in
49 https://arxiv.org/pdf/1606.05551v1.pdf
50 We mutate a randomly drawn number of variables in average.
51 """
52 dimension = len(parent)
53 if u is None:
54 u = 1 if dimension == 1 else int(self.random_state.randint(1, dimension))
55 boolean_vector = [True for _ in parent]
56 while all(boolean_vector) and dimension != 1:
57 boolean_vector = [self.random_state.rand() > (float(u) / dimension) for _ in parent]
58 return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]
59
60 def discrete_mutation(self, parent: ArrayLike) -> ArrayLike:
61 dimension = len(parent)
62 boolean_vector = [True for _ in parent]
63 while all(boolean_vector):
64 boolean_vector = [self.random_state.rand() > (1. / dimension) for _ in parent]
65 return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]
66
67 def crossover(self, parent: ArrayLike, donor: ArrayLike) -> ArrayLike:
68 mix = [self.random_state.choice([d, p]) for (p, d) in zip(parent, donor)]
69 return self.discrete_mutation(mix)
70
71 def get_roulette(self, archive: utils.Archive[utils.MultiValue], num: Optional[int] = None) -> Any:
72 """Apply a roulette tournament selection.
73 """
74 if num is None:
75 num = int(.999 + np.sqrt(len(archive)))
76 # the following sort makes the line deterministic, and function seedable, at the cost of complexity!
77 my_keys = sorted(archive.bytesdict.keys())
78 my_keys_indices = self.random_state.choice(len(my_keys), size=min(num, len(my_keys)), replace=False)
79 my_keys = [my_keys[i] for i in my_keys_indices]
80 # best pessimistic value in a random set of keys
81 return np.frombuffer(min(my_keys, key=lambda x: archive.bytesdict[x].pessimistic_confidence_bound))
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nevergrad/optimization/mutations.py b/nevergrad/optimization/mutations.py
--- a/nevergrad/optimization/mutations.py
+++ b/nevergrad/optimization/mutations.py
@@ -52,6 +52,8 @@
dimension = len(parent)
if u is None:
u = 1 if dimension == 1 else int(self.random_state.randint(1, dimension))
+ if dimension == 1: # corner case.
+ return self.random_state.normal(0., 1., size=1) # type: ignore
boolean_vector = [True for _ in parent]
while all(boolean_vector) and dimension != 1:
boolean_vector = [self.random_state.rand() > (float(u) / dimension) for _ in parent]
| {"golden_diff": "diff --git a/nevergrad/optimization/mutations.py b/nevergrad/optimization/mutations.py\n--- a/nevergrad/optimization/mutations.py\n+++ b/nevergrad/optimization/mutations.py\n@@ -52,6 +52,8 @@\n dimension = len(parent)\n if u is None:\n u = 1 if dimension == 1 else int(self.random_state.randint(1, dimension))\n+ if dimension == 1: # corner case.\n+ return self.random_state.normal(0., 1., size=1) # type: ignore\n boolean_vector = [True for _ in parent]\n while all(boolean_vector) and dimension != 1:\n boolean_vector = [self.random_state.rand() > (float(u) / dimension) for _ in parent]\n", "issue": "Optimizer always trying the same points \n## Steps to reproduce\r\n\r\n 1. Set up an optimizer with a parametrization\r\n 2. Run optimizer.minimize()\r\n 3. Print values being tested\r\n\r\n## Observed Results\r\n\r\n * What happened? This could be a description, log output, etc.\r\nThe argument of the objective function are always the same\r\n\r\n## Expected Results\r\n\r\n * What did you expect to happen?\r\nThe optimizer tries new values\r\n## Relevant Code\r\n\r\n ```\r\nfrom concurrent import futures\r\n\r\nimport nevergrad as ng\r\nimport numpy as np\r\n\r\n\r\nclass myobj:\r\n def __init__(self):\r\n self.params = ng.p.Dict(\r\n kernel=ng.p.Log(lower=3, upper=2000).set_integer_casting()\r\n )\r\n\r\n def get_optimizer(self, parametrization, dictA):\r\n optimizer = ng.optimizers.NGO(\r\n parametrization=parametrization,\r\n budget=dictA[\"budget\"],\r\n num_workers=dictA[\"workers\"],\r\n )\r\n return optimizer\r\n\r\n def fn(self, params, dictA, X, Y):\r\n print(params)\r\n kvalue = params['kernel']\r\n print(kvalue)\r\n return kvalue * np.mean((Y - X)**2)\r\n\r\n def optimize_params(self, dictA, X, Y):\r\n # breakpoint()\r\n parametrization = ng.p.Instrumentation(\r\n self.params, dictA=dictA, X=X, Y=Y\r\n )\r\n optimizer = self.get_optimizer(parametrization, dictA)\r\n with futures.ThreadPoolExecutor(\r\n max_workers=optimizer.num_workers\r\n ) as executor:\r\n r = optimizer.minimize(\r\n self.fn, executor=executor, batch_mode=False\r\n )\r\n return r\r\n\r\n\r\nobj = myobj()\r\ndictA = {'budget':5, 'workers':2}\r\nrecom = obj.optimize_params(dictA, np.zeros(3), np.ones(3))\r\nprint('recommendation: ')\r\nprint(*recom.args)\r\n\r\n ```\r\n\r\nI am sure I am doing something wrong, but I can not detect what. Any helpful hand is welcome.\r\n\r\n<!--- In any case, don't hesitate to join and ask questions if you need on Nevergrad users Facebook group https://www.facebook.com/groups/nevergradusers/ -->\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional, Any\nimport numpy as np\nfrom ..common.typetools import ArrayLike\nfrom . import utils\n\n\nclass Mutator:\n \"\"\"Class defining mutations, and holding a random state used for random generation.\n \"\"\"\n\n def __init__(self, random_state: np.random.RandomState) -> None:\n self.random_state = random_state\n\n def doerr_discrete_mutation(self, parent: ArrayLike) -> ArrayLike:\n \"\"\"Mutation as in the fast 1+1-ES, Doerr et al. The exponent is 1.5.\n \"\"\"\n dimension = len(parent)\n if dimension < 5:\n return self.discrete_mutation(parent)\n return self.doubledoerr_discrete_mutation(parent, max_ratio=.5)\n\n def doubledoerr_discrete_mutation(self, parent: ArrayLike, max_ratio: float = 1.) 
-> ArrayLike:\n \"\"\"Doerr's recommendation above can mutate up to half variables\n in average.\n In our high-arity context, we might need more than that.\n\n Parameters\n ----------\n parent: array-like\n the point to mutate\n max_ratio: float (between 0 and 1)\n the maximum mutation ratio (careful: this is not an exact ratio)\n \"\"\"\n assert 0 <= max_ratio <= 1\n dimension = len(parent)\n max_mutations = max(2, int(max_ratio * dimension))\n p = 1. / np.arange(1, max_mutations)**1.5\n p /= np.sum(p)\n u = self.random_state.choice(np.arange(1, max_mutations), p=p)\n return self.portfolio_discrete_mutation(parent, u=u)\n\n def portfolio_discrete_mutation(self, parent: ArrayLike, u: Optional[int] = None) -> ArrayLike:\n \"\"\"Mutation discussed in\n https://arxiv.org/pdf/1606.05551v1.pdf\n We mutate a randomly drawn number of variables in average.\n \"\"\"\n dimension = len(parent)\n if u is None:\n u = 1 if dimension == 1 else int(self.random_state.randint(1, dimension))\n boolean_vector = [True for _ in parent]\n while all(boolean_vector) and dimension != 1:\n boolean_vector = [self.random_state.rand() > (float(u) / dimension) for _ in parent]\n return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]\n\n def discrete_mutation(self, parent: ArrayLike) -> ArrayLike:\n dimension = len(parent)\n boolean_vector = [True for _ in parent]\n while all(boolean_vector):\n boolean_vector = [self.random_state.rand() > (1. / dimension) for _ in parent]\n return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]\n\n def crossover(self, parent: ArrayLike, donor: ArrayLike) -> ArrayLike:\n mix = [self.random_state.choice([d, p]) for (p, d) in zip(parent, donor)]\n return self.discrete_mutation(mix)\n\n def get_roulette(self, archive: utils.Archive[utils.MultiValue], num: Optional[int] = None) -> Any:\n \"\"\"Apply a roulette tournament selection.\n \"\"\"\n if num is None:\n num = int(.999 + np.sqrt(len(archive)))\n # the following sort makes the line deterministic, and function seedable, at the cost of complexity!\n my_keys = sorted(archive.bytesdict.keys())\n my_keys_indices = self.random_state.choice(len(my_keys), size=min(num, len(my_keys)), replace=False)\n my_keys = [my_keys[i] for i in my_keys_indices]\n # best pessimistic value in a random set of keys\n return np.frombuffer(min(my_keys, key=lambda x: archive.bytesdict[x].pessimistic_confidence_bound))\n", "path": "nevergrad/optimization/mutations.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Optional, Any\nimport numpy as np\nfrom ..common.typetools import ArrayLike\nfrom . import utils\n\n\nclass Mutator:\n \"\"\"Class defining mutations, and holding a random state used for random generation.\n \"\"\"\n\n def __init__(self, random_state: np.random.RandomState) -> None:\n self.random_state = random_state\n\n def doerr_discrete_mutation(self, parent: ArrayLike) -> ArrayLike:\n \"\"\"Mutation as in the fast 1+1-ES, Doerr et al. The exponent is 1.5.\n \"\"\"\n dimension = len(parent)\n if dimension < 5:\n return self.discrete_mutation(parent)\n return self.doubledoerr_discrete_mutation(parent, max_ratio=.5)\n\n def doubledoerr_discrete_mutation(self, parent: ArrayLike, max_ratio: float = 1.) 
-> ArrayLike:\n \"\"\"Doerr's recommendation above can mutate up to half variables\n in average.\n In our high-arity context, we might need more than that.\n\n Parameters\n ----------\n parent: array-like\n the point to mutate\n max_ratio: float (between 0 and 1)\n the maximum mutation ratio (careful: this is not an exact ratio)\n \"\"\"\n assert 0 <= max_ratio <= 1\n dimension = len(parent)\n max_mutations = max(2, int(max_ratio * dimension))\n p = 1. / np.arange(1, max_mutations)**1.5\n p /= np.sum(p)\n u = self.random_state.choice(np.arange(1, max_mutations), p=p)\n return self.portfolio_discrete_mutation(parent, u=u)\n\n def portfolio_discrete_mutation(self, parent: ArrayLike, u: Optional[int] = None) -> ArrayLike:\n \"\"\"Mutation discussed in\n https://arxiv.org/pdf/1606.05551v1.pdf\n We mutate a randomly drawn number of variables in average.\n \"\"\"\n dimension = len(parent)\n if u is None:\n u = 1 if dimension == 1 else int(self.random_state.randint(1, dimension))\n if dimension == 1: # corner case.\n return self.random_state.normal(0., 1., size=1) # type: ignore\n boolean_vector = [True for _ in parent]\n while all(boolean_vector) and dimension != 1:\n boolean_vector = [self.random_state.rand() > (float(u) / dimension) for _ in parent]\n return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]\n\n def discrete_mutation(self, parent: ArrayLike) -> ArrayLike:\n dimension = len(parent)\n boolean_vector = [True for _ in parent]\n while all(boolean_vector):\n boolean_vector = [self.random_state.rand() > (1. / dimension) for _ in parent]\n return [s if b else self.random_state.normal(0., 1.) for (b, s) in zip(boolean_vector, parent)]\n\n def crossover(self, parent: ArrayLike, donor: ArrayLike) -> ArrayLike:\n mix = [self.random_state.choice([d, p]) for (p, d) in zip(parent, donor)]\n return self.discrete_mutation(mix)\n\n def get_roulette(self, archive: utils.Archive[utils.MultiValue], num: Optional[int] = None) -> Any:\n \"\"\"Apply a roulette tournament selection.\n \"\"\"\n if num is None:\n num = int(.999 + np.sqrt(len(archive)))\n # the following sort makes the line deterministic, and function seedable, at the cost of complexity!\n my_keys = sorted(archive.bytesdict.keys())\n my_keys_indices = self.random_state.choice(len(my_keys), size=min(num, len(my_keys)), replace=False)\n my_keys = [my_keys[i] for i in my_keys_indices]\n # best pessimistic value in a random set of keys\n return np.frombuffer(min(my_keys, key=lambda x: archive.bytesdict[x].pessimistic_confidence_bound))\n", "path": "nevergrad/optimization/mutations.py"}]} | 1,800 | 175 |
gh_patches_debug_5700 | rasdani/github-patches | git_diff | psychopy__psychopy-1325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
psychopyApp won't start with Matplotlib 1.5 installed
See http://discourse.psychopy.org/t/mac-specific-help/1540/3
We need to figure out
- whether this problem is Anaconda-specific (and would require fixing upstream)
- whether this problem is Mac-specific
--- END ISSUE ---
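Editor's note: the patch shown later in this record imports `psychopy.locale_setup` before the application starts, with a comment about fixing an OS X locale bug. Below is a rough sketch of that general idea only (defining `LC_ALL` before GUI/plotting libraries are imported), using nothing but the standard library; it is an assumption-based illustration, not PsychoPy's actual module.

```python
import locale
import os

# Make sure LC_ALL is defined before importing wx / matplotlib; an undefined or
# malformed locale on macOS is a common cause of import-time failures.
os.environ.setdefault('LC_ALL', 'en_US.UTF-8')
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
```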
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/psychopyApp.py`
Content:
```
1 #!/usr/bin/env python2
2
3 # Part of the PsychoPy library
4 # Copyright (C) 2015 Jonathan Peirce
5 # Distributed under the terms of the GNU General Public License (GPL).
6
7 from __future__ import absolute_import, print_function
8
9 import sys
10 from psychopy.app._psychopyApp import PsychoPyApp, __version__
11
12 # NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00
13 # to allow for better upgrading possibilities from the mac app bundle. this
14 # file now used solely as a launcher for the app, not as the app itself.
15
16 if __name__ == '__main__':
17 if '-x' in sys.argv:
18 # run a .py script from the command line using StandAlone python
19 targetScript = sys.argv[sys.argv.index('-x') + 1]
20 from psychopy import core
21 import os
22 core.shellCall([sys.executable, os.path.abspath(targetScript)])
23 sys.exit()
24 if '-v' in sys.argv or '--version' in sys.argv:
25 info = 'PsychoPy2, version %s (c)Jonathan Peirce 2015, GNU GPL license'
26 print(info % __version__)
27 sys.exit()
28 if '-h' in sys.argv or '--help' in sys.argv:
29 print("""Starts the PsychoPy2 application.
30
31 Usage: python PsychoPy.py [options] [file]
32
33 Without options or files provided this starts PsychoPy using prefs to
34 decide on the view(s) to open. If optional [file] is provided action
35 depends on the type of the [file]:
36
37 Python script 'file.py' -- opens coder
38
39 Experiment design 'file.psyexp' -- opens builder
40
41 Options:
42 -c, --coder, coder opens coder view only
43 -b, --builder, builder opens builder view only
44 -x script.py execute script.py using StandAlone python
45
46 -v, --version prints version and exits
47 -h, --help prints this help and exit
48
49 --firstrun launches configuration wizard
50 --no-splash suppresses splash screen
51
52 """)
53 sys.exit()
54
55 else:
56 showSplash = True
57 if '--no-splash' in sys.argv:
58 showSplash = False
59 del sys.argv[sys.argv.index('--no-splash')]
60 app = PsychoPyApp(0, showSplash=showSplash)
61 app.MainLoop()
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/psychopyApp.py b/psychopy/app/psychopyApp.py
--- a/psychopy/app/psychopyApp.py
+++ b/psychopy/app/psychopyApp.py
@@ -8,6 +8,8 @@
import sys
from psychopy.app._psychopyApp import PsychoPyApp, __version__
+# fix OS X locale-bug on startup: sets locale to LC_ALL (must be defined!)
+import psychopy.locale_setup # noqa
# NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00
# to allow for better upgrading possibilities from the mac app bundle. this
| {"golden_diff": "diff --git a/psychopy/app/psychopyApp.py b/psychopy/app/psychopyApp.py\n--- a/psychopy/app/psychopyApp.py\n+++ b/psychopy/app/psychopyApp.py\n@@ -8,6 +8,8 @@\n \n import sys\n from psychopy.app._psychopyApp import PsychoPyApp, __version__\n+# fix OS X locale-bug on startup: sets locale to LC_ALL (must be defined!)\n+import psychopy.locale_setup # noqa\n \n # NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00\n # to allow for better upgrading possibilities from the mac app bundle. this\n", "issue": "psychopyApp won't start with Matplotlib 1.5 installed\nSee http://discourse.psychopy.org/t/mac-specific-help/1540/3\r\n\r\nWe need to figure out\r\n- whether this problem is Anaconda-specific (and would require fixing upstream)\r\n- whether this problem is Mac-specific\n", "before_files": [{"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom __future__ import absolute_import, print_function\n\nimport sys\nfrom psychopy.app._psychopyApp import PsychoPyApp, __version__\n\n# NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00\n# to allow for better upgrading possibilities from the mac app bundle. this\n# file now used solely as a launcher for the app, not as the app itself.\n\nif __name__ == '__main__':\n if '-x' in sys.argv:\n # run a .py script from the command line using StandAlone python\n targetScript = sys.argv[sys.argv.index('-x') + 1]\n from psychopy import core\n import os\n core.shellCall([sys.executable, os.path.abspath(targetScript)])\n sys.exit()\n if '-v' in sys.argv or '--version' in sys.argv:\n info = 'PsychoPy2, version %s (c)Jonathan Peirce 2015, GNU GPL license'\n print(info % __version__)\n sys.exit()\n if '-h' in sys.argv or '--help' in sys.argv:\n print(\"\"\"Starts the PsychoPy2 application.\n\nUsage: python PsychoPy.py [options] [file]\n\nWithout options or files provided this starts PsychoPy using prefs to\ndecide on the view(s) to open. If optional [file] is provided action\ndepends on the type of the [file]:\n\n Python script 'file.py' -- opens coder\n\n Experiment design 'file.psyexp' -- opens builder\n\nOptions:\n -c, --coder, coder opens coder view only\n -b, --builder, builder opens builder view only\n -x script.py execute script.py using StandAlone python\n\n -v, --version prints version and exits\n -h, --help prints this help and exit\n\n --firstrun launches configuration wizard\n --no-splash suppresses splash screen\n\n\"\"\")\n sys.exit()\n\n else:\n showSplash = True\n if '--no-splash' in sys.argv:\n showSplash = False\n del sys.argv[sys.argv.index('--no-splash')]\n app = PsychoPyApp(0, showSplash=showSplash)\n app.MainLoop()\n", "path": "psychopy/app/psychopyApp.py"}], "after_files": [{"content": "#!/usr/bin/env python2\n\n# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom __future__ import absolute_import, print_function\n\nimport sys\nfrom psychopy.app._psychopyApp import PsychoPyApp, __version__\n# fix OS X locale-bug on startup: sets locale to LC_ALL (must be defined!)\nimport psychopy.locale_setup # noqa\n\n# NB the PsychoPyApp classes moved to _psychopyApp.py as of version 1.78.00\n# to allow for better upgrading possibilities from the mac app bundle. 
this\n# file now used solely as a launcher for the app, not as the app itself.\n\nif __name__ == '__main__':\n if '-x' in sys.argv:\n # run a .py script from the command line using StandAlone python\n targetScript = sys.argv[sys.argv.index('-x') + 1]\n from psychopy import core\n import os\n core.shellCall([sys.executable, os.path.abspath(targetScript)])\n sys.exit()\n if '-v' in sys.argv or '--version' in sys.argv:\n info = 'PsychoPy2, version %s (c)Jonathan Peirce 2015, GNU GPL license'\n print(info % __version__)\n sys.exit()\n if '-h' in sys.argv or '--help' in sys.argv:\n print(\"\"\"Starts the PsychoPy2 application.\n\nUsage: python PsychoPy.py [options] [file]\n\nWithout options or files provided this starts PsychoPy using prefs to\ndecide on the view(s) to open. If optional [file] is provided action\ndepends on the type of the [file]:\n\n Python script 'file.py' -- opens coder\n\n Experiment design 'file.psyexp' -- opens builder\n\nOptions:\n -c, --coder, coder opens coder view only\n -b, --builder, builder opens builder view only\n -x script.py execute script.py using StandAlone python\n\n -v, --version prints version and exits\n -h, --help prints this help and exit\n\n --firstrun launches configuration wizard\n --no-splash suppresses splash screen\n\n\"\"\")\n sys.exit()\n\n else:\n showSplash = True\n if '--no-splash' in sys.argv:\n showSplash = False\n del sys.argv[sys.argv.index('--no-splash')]\n app = PsychoPyApp(0, showSplash=showSplash)\n app.MainLoop()\n", "path": "psychopy/app/psychopyApp.py"}]} | 988 | 149 |
gh_patches_debug_9887 | rasdani/github-patches | git_diff | mars-project__mars-151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] mars-* scripts not marked as executable
**Describe the bug**
Scripts in the scripts/ dir are not marked as executable when packed into wheels, so when installed in venvs they cannot be executed.
**To Reproduce**
To help us reproduce this bug, please provide the information below:
1. Your Python version
Python 3.7.1 installed with pyenv
2. The version of Mars you use
0.1.0b1
3. Versions of crucial packages, such as numpy, scipy and protobuf
not relevant
4. Full stack of the error.
NA
5. Minimized code to reproduce the error.
mars-scheduler ......
--- END ISSUE ---
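Before looking at the packaging files below, a quick illustration of how the reported symptom can be confirmed: a wheel stores each file's Unix mode bits in the zip entry's external attributes, so a lost executable bit is directly visible. The wheel filename and data-dir layout here are assumptions for illustration only:

```python
import zipfile

# Hypothetical wheel path; any wheel built from the setup.py below would do.
whl = zipfile.ZipFile("dist/pymars-0.1.0b1-cp37-cp37m-manylinux1_x86_64.whl")
for info in whl.infolist():
    # Files declared via scripts=[...] land in <name>-<version>.data/scripts/
    if "/scripts/" in info.filename:
        mode = (info.external_attr >> 16) & 0o7777
        print(info.filename, oct(mode))  # 0o644 here means the +x bit was dropped
```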
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 1999-2017 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import sys
17 from setuptools import setup, find_packages, Extension
18
19 import numpy as np
20 from Cython.Build import cythonize
21 from Cython.Distutils import build_ext
22
23 repo_root = os.path.dirname(os.path.abspath(__file__))
24
25 try:
26 execfile
27 except NameError:
28 def execfile(fname, globs, locs=None):
29 locs = locs or globs
30 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
31
32 version_file_path = os.path.join(repo_root, 'mars', '_version.py')
33 version_ns = {'__file__': version_file_path}
34 execfile(version_file_path, version_ns)
35
36 requirements = []
37 with open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:
38 requirements.extend(f.read().splitlines())
39
40
41 extra_requirements = []
42 with open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:
43 extra_requirements.extend(f.read().splitlines())
44
45
46 long_description = None
47 if os.path.exists(os.path.join(repo_root, 'README.rst')):
48 with open(os.path.join(repo_root, 'README.rst')) as f:
49 long_description = f.read()
50
51
52 if os.path.exists(os.path.join(repo_root, '.git')):
53 git_info = version_ns['get_git_info']()
54 if git_info:
55 with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:
56 git_file.write('%s %s' % git_info)
57
58 cythonize_kw = dict(language_level=sys.version_info[0])
59 extension_kw = dict()
60 if 'CI_MODE' in os.environ:
61 extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]
62 cythonize_kw['compiler_directives'] = {'linetrace': True, 'binding': True}
63
64 if 'MSC' in sys.version:
65 extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]
66 extension_kw['extra_compile_args'] = extra_compile_args
67 else:
68 extra_compile_args = ['-O3']
69 extension_kw['extra_compile_args'] = extra_compile_args
70
71 extension_kw['include_dirs'] = [np.get_include()]
72 extensions = [
73 Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),
74 Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),
75 Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),
76 Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),
77 Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),
78 Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),
79 Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),
80 Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),
81 Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),
82 Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),
83 Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),
84 Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),
85 Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),
86 ]
87
88
89 setup_options = dict(
90 name='pymars',
91 version=version_ns['__version__'],
92 description='MARS: a tensor-based unified framework for large-scale data computation.',
93 long_description=long_description,
94 author='Qin Xuye',
95 author_email='[email protected]',
96 maintainer='Qin Xuye',
97 maintainer_email='[email protected]',
98 url='http://github.com/mars-project/mars',
99 license='Apache License 2.0',
100 classifiers=[
101 'Operating System :: OS Independent',
102 'Programming Language :: Python',
103 'Programming Language :: Python :: 2',
104 'Programming Language :: Python :: 2.7',
105 'Programming Language :: Python :: 3',
106 'Programming Language :: Python :: 3.5',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Programming Language :: Python :: Implementation :: CPython',
110 'Topic :: Software Development :: Libraries',
111 ],
112 packages=find_packages(exclude=('*.tests.*', '*.tests')),
113 include_package_data=True,
114 scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],
115 install_requires=requirements,
116 cmdclass={'build_ext': build_ext},
117 ext_modules=cythonize(extensions, **cythonize_kw),
118 extras_require={'distributed': extra_requirements}
119 )
120 setup(**setup_options)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -111,7 +111,11 @@
],
packages=find_packages(exclude=('*.tests.*', '*.tests')),
include_package_data=True,
- scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],
+ entry_points={'console_scripts': [
+ 'mars-scheduler = mars.scheduler.__main__:main',
+ 'mars-worker = mars.worker.__main__:main',
+ 'mars-web = mars.web.__main__:main',
+ ]},
install_requires=requirements,
cmdclass={'build_ext': build_ext},
ext_modules=cythonize(extensions, **cythonize_kw),
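The fix above drops the file-based `scripts=` entries in favor of console-script entry points, so the installer generates the `mars-*` launchers itself (with the right permissions) regardless of the mode bits stored in the wheel. Each referenced module then only needs to expose a `main()` callable; the body below is a sketch of the expected shape, not code taken from the Mars sources:

```python
# mars/scheduler/__main__.py (shape only; mars-worker and mars-web follow the same pattern)
def main():
    # parse command-line arguments and start the scheduler service
    ...


if __name__ == "__main__":
    main()
```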
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -111,7 +111,11 @@\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n- scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],\n+ entry_points={'console_scripts': [\n+ 'mars-scheduler = mars.scheduler.__main__:main',\n+ 'mars-worker = mars.worker.__main__:main',\n+ 'mars-web = mars.web.__main__:main',\n+ ]},\n install_requires=requirements,\n cmdclass={'build_ext': build_ext},\n ext_modules=cythonize(extensions, **cythonize_kw),\n", "issue": "[BUG] mars-* scripts not marked as executable\n**Describe the bug**\r\nScripts in scripts/ dir not marked as executable when packed into wheels, thus when installed in venvs, they cannot be installed.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version\r\n Python 3.7.1 installed with pyenv\r\n2. The version of Mars you use\r\n 0.1.0b1\r\n3. Versions of crucial packages, such as numpy, scipy and protobuf\r\n not relevant\r\n4. Full stack of the error.\r\n NA\r\n5. Minimized code to reproduce the error.\r\n mars-scheduler ......\r\n\n", "before_files": [{"content": "# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n execfile\nexcept NameError:\n def execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\nversion_file_path = os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\n\nrequirements = []\nwith open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n requirements.extend(f.read().splitlines())\n\n\nextra_requirements = []\nwith open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n extra_requirements.extend(f.read().splitlines())\n\n\nlong_description = None\nif os.path.exists(os.path.join(repo_root, 'README.rst')):\n with open(os.path.join(repo_root, 'README.rst')) as f:\n long_description = f.read()\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write('%s %s' % git_info)\n\ncythonize_kw = dict(language_level=sys.version_info[0])\nextension_kw = dict()\nif 'CI_MODE' in os.environ:\n extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]\n cythonize_kw['compiler_directives'] = {'linetrace': True, 'binding': True}\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n extension_kw['extra_compile_args'] = extra_compile_args\nelse:\n 
extra_compile_args = ['-O3']\n extension_kw['extra_compile_args'] = extra_compile_args\n\nextension_kw['include_dirs'] = [np.get_include()]\nextensions = [\n Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),\n Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),\n Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),\n Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),\n Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),\n Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),\n Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),\n Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),\n Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),\n Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),\n Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),\n Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),\n Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),\n]\n\n\nsetup_options = dict(\n name='pymars',\n version=version_ns['__version__'],\n description='MARS: a tensor-based unified framework for large-scale data computation.',\n long_description=long_description,\n author='Qin Xuye',\n author_email='[email protected]',\n maintainer='Qin Xuye',\n maintainer_email='[email protected]',\n url='http://github.com/mars-project/mars',\n license='Apache License 2.0',\n classifiers=[\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries',\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n scripts=['scripts/mars-scheduler', 'scripts/mars-worker', 'scripts/mars-web'],\n install_requires=requirements,\n cmdclass={'build_ext': build_ext},\n ext_modules=cythonize(extensions, **cythonize_kw),\n extras_require={'distributed': extra_requirements}\n)\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 1999-2017 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages, Extension\n\nimport numpy as np\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n\nrepo_root = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n execfile\nexcept NameError:\n def execfile(fname, globs, locs=None):\n locs = locs or globs\n exec(compile(open(fname).read(), fname, \"exec\"), globs, locs)\n\nversion_file_path 
= os.path.join(repo_root, 'mars', '_version.py')\nversion_ns = {'__file__': version_file_path}\nexecfile(version_file_path, version_ns)\n\nrequirements = []\nwith open(os.path.join(repo_root, 'requirements.txt'), 'r') as f:\n requirements.extend(f.read().splitlines())\n\n\nextra_requirements = []\nwith open(os.path.join(repo_root, 'requirements-extra.txt'), 'r') as f:\n extra_requirements.extend(f.read().splitlines())\n\n\nlong_description = None\nif os.path.exists(os.path.join(repo_root, 'README.rst')):\n with open(os.path.join(repo_root, 'README.rst')) as f:\n long_description = f.read()\n\n\nif os.path.exists(os.path.join(repo_root, '.git')):\n git_info = version_ns['get_git_info']()\n if git_info:\n with open(os.path.join(repo_root, 'mars', '.git-branch'), 'w') as git_file:\n git_file.write('%s %s' % git_info)\n\ncythonize_kw = dict(language_level=sys.version_info[0])\nextension_kw = dict()\nif 'CI_MODE' in os.environ:\n extension_kw['define_macros'] = [('CYTHON_TRACE_NOGIL', '1'), ('CYTHON_TRACE', '1')]\n cythonize_kw['compiler_directives'] = {'linetrace': True, 'binding': True}\n\nif 'MSC' in sys.version:\n extra_compile_args = ['/Ot', '/I' + os.path.join(repo_root, 'misc')]\n extension_kw['extra_compile_args'] = extra_compile_args\nelse:\n extra_compile_args = ['-O3']\n extension_kw['extra_compile_args'] = extra_compile_args\n\nextension_kw['include_dirs'] = [np.get_include()]\nextensions = [\n Extension('mars.graph', ['mars/graph.pyx'], **extension_kw),\n Extension('mars.fuse', ['mars/fuse.pyx'], **extension_kw),\n Extension('mars.utils_c', ['mars/utils_c.pyx'], **extension_kw),\n Extension('mars.lib.gipc', ['mars/lib/gipc.pyx'], **extension_kw),\n Extension('mars.actors.core', ['mars/actors/core.pyx'], **extension_kw),\n Extension('mars.actors.distributor', ['mars/actors/distributor.pyx'], **extension_kw),\n Extension('mars.actors.cluster', ['mars/actors/cluster.pyx'], **extension_kw),\n Extension('mars.actors.pool.messages', ['mars/actors/pool/messages.pyx'], **extension_kw),\n Extension('mars.actors.pool.utils', ['mars/actors/pool/utils.pyx'], **extension_kw),\n Extension('mars.actors.pool.gevent_pool', ['mars/actors/pool/gevent_pool.pyx'], **extension_kw),\n Extension('mars.serialize.core', ['mars/serialize/core.pyx'], **extension_kw),\n Extension('mars.serialize.pbserializer', ['mars/serialize/pbserializer.pyx'], **extension_kw),\n Extension('mars.serialize.jsonserializer', ['mars/serialize/jsonserializer.pyx'], **extension_kw),\n]\n\n\nsetup_options = dict(\n name='pymars',\n version=version_ns['__version__'],\n description='MARS: a tensor-based unified framework for large-scale data computation.',\n long_description=long_description,\n author='Qin Xuye',\n author_email='[email protected]',\n maintainer='Qin Xuye',\n maintainer_email='[email protected]',\n url='http://github.com/mars-project/mars',\n license='Apache License 2.0',\n classifiers=[\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Libraries',\n ],\n packages=find_packages(exclude=('*.tests.*', '*.tests')),\n include_package_data=True,\n entry_points={'console_scripts': [\n 'mars-scheduler = mars.scheduler.__main__:main',\n 'mars-worker = 
mars.worker.__main__:main',\n 'mars-web = mars.web.__main__:main',\n ]},\n install_requires=requirements,\n cmdclass={'build_ext': build_ext},\n ext_modules=cythonize(extensions, **cythonize_kw),\n extras_require={'distributed': extra_requirements}\n)\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,865 | 164 |
gh_patches_debug_19111 | rasdani/github-patches | git_diff | scrapy__scrapy-3045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python3.3 support and requirements without it
Scrapy still supports py3.3 (at least according to its trove classifiers in setup.py and the CI conf)
but some of its dependencies dropped support some time ago.
https://github.com/pyca/service_identity/blob/master/CHANGELOG.rst#backward-incompatible-changes-1
https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#20---2017-07-17
This caused some problems when testing the Scrapy daemon (scrapyd) with py3.3,
which were resolved by installing the [enum-compat virtual package](https://pypi.python.org/pypi/enum-compat/0.0.2).
There are several options here.
Scrapy 1.5 can drop support for Python 3.3;
Scrapy 1.4 can restrict the maximum versions of those dependencies
and make enum-compat a requirement,
although more things may turn out to be broken.
I didn't figure out why the Python 3.3 build for Scrapy doesn't fail,
but here is a failed scrapyd build: https://travis-ci.org/scrapy/scrapyd/jobs/299029712
--- END ISSUE ---
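One way to picture the "restrict the max versions" option mentioned above is with PEP 508 environment markers in `install_requires`, so the pins only apply to old interpreters. The package names and bounds below are illustrative assumptions, not values taken from the report or from Scrapy's real requirements:

```python
# Hypothetical sketch of pinning dependencies only where Python 3.3 is still in play.
install_requires = [
    'Twisted>=13.1.0',
    'w3lib>=1.17.0',
    # Assumed cut-off versions; the actual last releases supporting 3.3 would need checking.
    'cryptography<2.0; python_version == "3.3"',
    'service_identity<17.0; python_version == "3.3"',
    'enum-compat; python_version < "3.4"',
]
```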
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 extras_require = {}
22
23 if has_environment_marker_platform_impl_support():
24 extras_require[':platform_python_implementation == "PyPy"'] = [
25 'PyPyDispatcher>=2.1.0',
26 ]
27
28
29 setup(
30 name='Scrapy',
31 version=version,
32 url='https://scrapy.org',
33 description='A high-level Web Crawling and Web Scraping framework',
34 long_description=open('README.rst').read(),
35 author='Scrapy developers',
36 maintainer='Pablo Hoffman',
37 maintainer_email='[email protected]',
38 license='BSD',
39 packages=find_packages(exclude=('tests', 'tests.*')),
40 include_package_data=True,
41 zip_safe=False,
42 entry_points={
43 'console_scripts': ['scrapy = scrapy.cmdline:execute']
44 },
45 classifiers=[
46 'Framework :: Scrapy',
47 'Development Status :: 5 - Production/Stable',
48 'Environment :: Console',
49 'Intended Audience :: Developers',
50 'License :: OSI Approved :: BSD License',
51 'Operating System :: OS Independent',
52 'Programming Language :: Python',
53 'Programming Language :: Python :: 2',
54 'Programming Language :: Python :: 2.7',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.3',
57 'Programming Language :: Python :: 3.4',
58 'Programming Language :: Python :: 3.5',
59 'Programming Language :: Python :: 3.6',
60 'Topic :: Internet :: WWW/HTTP',
61 'Topic :: Software Development :: Libraries :: Application Frameworks',
62 'Topic :: Software Development :: Libraries :: Python Modules',
63 ],
64 install_requires=[
65 'Twisted>=13.1.0',
66 'w3lib>=1.17.0',
67 'queuelib',
68 'lxml',
69 'pyOpenSSL',
70 'cssselect>=0.9',
71 'six>=1.5.2',
72 'parsel>=1.1',
73 'PyDispatcher>=2.0.5',
74 'service_identity',
75 ],
76 extras_require=extras_require,
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,6 @@
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
@@ -61,6 +60,7 @@
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
install_requires=[
'Twisted>=13.1.0',
'w3lib>=1.17.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,6 @@\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n@@ -61,6 +60,7 @@\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n", "issue": "Python3.3 support and requirements without it\nScrapy still supports py3.3 (at least according to its trove classifiers in setup.py and the CI conf)\r\nbut some of its dependencies dropped support some time ago.\r\nhttps://github.com/pyca/service_identity/blob/master/CHANGELOG.rst#backward-incompatible-changes-1\r\nhttps://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#20---2017-07-17\r\n\r\nThis caused some problems when testing scrapy daemon with py3.3,\r\nwhich was resolved by installing the [enum-compat virtual package](https://pypi.python.org/pypi/enum-compat/0.0.2)\r\n\r\nThere are several options here.\r\nscrapy1.5 can drop support for python3.3,\r\nscrapy1.4 can restrict the max versions for those dependencies\r\nand enum-compat can become a requirement,\r\nalthough there may be more things broken.\r\n\r\nI didn't figure out why the python3.3 build for scrapy doesn't fail\r\nbut here is a failed scrapyd build https://travis-ci.org/scrapy/scrapyd/jobs/299029712\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python 
:: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.1',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=[\n 'Twisted>=13.1.0',\n 'w3lib>=1.17.0',\n 'queuelib',\n 'lxml',\n 'pyOpenSSL',\n 'cssselect>=0.9',\n 'six>=1.5.2',\n 'parsel>=1.1',\n 'PyDispatcher>=2.0.5',\n 'service_identity',\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]} | 1,276 | 212 |
gh_patches_debug_1326 | rasdani/github-patches | git_diff | iterative__dvc-1757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
typo in docs
super minor typo:
$dvc repro --help
-c CWD, --cwd CWD Directory within your repo to **reroduce** from.
dvc --version
0.30.1
--- END ISSUE ---
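The flagged string is an argparse help message, so the typo surfaces verbatim in `dvc repro --help`. A minimal, self-contained sketch of that mechanism (the prog name and default are assumed for illustration):

```python
import argparse

parser = argparse.ArgumentParser(prog="dvc repro")
parser.add_argument(
    "-c",
    "--cwd",
    default=".",
    help="Directory within your repo to reproduce from.",  # corrected spelling
)
parser.parse_args(["--help"])  # prints the usage text quoted above, then exits
```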
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/repro.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4
5 import dvc.logger as logger
6 from dvc.command.base import CmdBase
7 from dvc.command.status import CmdDataStatus
8 from dvc.exceptions import DvcException
9
10
11 class CmdRepro(CmdBase):
12 def run(self):
13 recursive = not self.args.single_item
14 saved_dir = os.path.realpath(os.curdir)
15 if self.args.cwd:
16 os.chdir(self.args.cwd)
17
18 # Dirty hack so the for loop below can at least enter once
19 if self.args.all_pipelines:
20 self.args.targets = [None]
21 elif not self.args.targets:
22 self.args.targets = self.default_targets
23
24 ret = 0
25 for target in self.args.targets:
26 try:
27 stages = self.repo.reproduce(
28 target,
29 recursive=recursive,
30 force=self.args.force,
31 dry=self.args.dry,
32 interactive=self.args.interactive,
33 pipeline=self.args.pipeline,
34 all_pipelines=self.args.all_pipelines,
35 ignore_build_cache=self.args.ignore_build_cache,
36 no_commit=self.args.no_commit,
37 )
38
39 if len(stages) == 0:
40 logger.info(CmdDataStatus.UP_TO_DATE_MSG)
41
42 if self.args.metrics:
43 self.repo.metrics.show()
44 except DvcException:
45 logger.error()
46 ret = 1
47 break
48
49 os.chdir(saved_dir)
50 return ret
51
52
53 def add_parser(subparsers, parent_parser):
54 REPRO_HELP = "Reproduce DVC file. Default file name - 'Dvcfile'."
55 repro_parser = subparsers.add_parser(
56 "repro",
57 parents=[parent_parser],
58 description=REPRO_HELP,
59 help=REPRO_HELP,
60 )
61 repro_parser.add_argument(
62 "targets", nargs="*", help="DVC file to reproduce."
63 )
64 repro_parser.add_argument(
65 "-f",
66 "--force",
67 action="store_true",
68 default=False,
69 help="Reproduce even if dependencies were not changed.",
70 )
71 repro_parser.add_argument(
72 "-s",
73 "--single-item",
74 action="store_true",
75 default=False,
76 help="Reproduce only single data item without recursive dependencies "
77 "check.",
78 )
79 repro_parser.add_argument(
80 "-c",
81 "--cwd",
82 default=os.path.curdir,
83 help="Directory within your repo to reroduce from.",
84 )
85 repro_parser.add_argument(
86 "-m",
87 "--metrics",
88 action="store_true",
89 default=False,
90 help="Show metrics after reproduction.",
91 )
92 repro_parser.add_argument(
93 "--dry",
94 action="store_true",
95 default=False,
96 help="Only print the commands that would be executed without "
97 "actually executing.",
98 )
99 repro_parser.add_argument(
100 "-i",
101 "--interactive",
102 action="store_true",
103 default=False,
104 help="Ask for confirmation before reproducing each stage.",
105 )
106 repro_parser.add_argument(
107 "-p",
108 "--pipeline",
109 action="store_true",
110 default=False,
111 help="Reproduce the whole pipeline that the specified stage file "
112 "belongs to.",
113 )
114 repro_parser.add_argument(
115 "-P",
116 "--all-pipelines",
117 action="store_true",
118 default=False,
119 help="Reproduce all pipelines in the repo.",
120 )
121 repro_parser.add_argument(
122 "--ignore-build-cache",
123 action="store_true",
124 default=False,
125 help="Reproduce all descendants of a changed stage even if their "
126 "direct dependencies didn't change.",
127 )
128 repro_parser.add_argument(
129 "--no-commit",
130 action="store_true",
131 default=False,
132 help="Don't put files/directories into cache.",
133 )
134 repro_parser.set_defaults(func=CmdRepro)
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/repro.py b/dvc/command/repro.py
--- a/dvc/command/repro.py
+++ b/dvc/command/repro.py
@@ -80,7 +80,7 @@
"-c",
"--cwd",
default=os.path.curdir,
- help="Directory within your repo to reroduce from.",
+ help="Directory within your repo to reproduce from.",
)
repro_parser.add_argument(
"-m",
| {"golden_diff": "diff --git a/dvc/command/repro.py b/dvc/command/repro.py\n--- a/dvc/command/repro.py\n+++ b/dvc/command/repro.py\n@@ -80,7 +80,7 @@\n \"-c\",\n \"--cwd\",\n default=os.path.curdir,\n- help=\"Directory within your repo to reroduce from.\",\n+ help=\"Directory within your repo to reproduce from.\",\n )\n repro_parser.add_argument(\n \"-m\",\n", "issue": "typo in docs\nsuper minor typo:\r\n\r\n$dvc repro --help\r\n -c CWD, --cwd CWD Directory within your repo to **reroduce** from.\r\n\r\ndvc --version\r\n0.30.1\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n # Dirty hack so the for loop below can at least enter once\n if self.args.all_pipelines:\n self.args.targets = [None]\n elif not self.args.targets:\n self.args.targets = self.default_targets\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.repo.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache,\n no_commit=self.args.no_commit,\n )\n\n if len(stages) == 0:\n logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.repo.metrics.show()\n except DvcException:\n logger.error()\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n\n\ndef add_parser(subparsers, parent_parser):\n REPRO_HELP = \"Reproduce DVC file. 
Default file name - 'Dvcfile'.\"\n repro_parser = subparsers.add_parser(\n \"repro\",\n parents=[parent_parser],\n description=REPRO_HELP,\n help=REPRO_HELP,\n )\n repro_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC file to reproduce.\"\n )\n repro_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce even if dependencies were not changed.\",\n )\n repro_parser.add_argument(\n \"-s\",\n \"--single-item\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce only single data item without recursive dependencies \"\n \"check.\",\n )\n repro_parser.add_argument(\n \"-c\",\n \"--cwd\",\n default=os.path.curdir,\n help=\"Directory within your repo to reroduce from.\",\n )\n repro_parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics after reproduction.\",\n )\n repro_parser.add_argument(\n \"--dry\",\n action=\"store_true\",\n default=False,\n help=\"Only print the commands that would be executed without \"\n \"actually executing.\",\n )\n repro_parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=False,\n help=\"Ask for confirmation before reproducing each stage.\",\n )\n repro_parser.add_argument(\n \"-p\",\n \"--pipeline\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce the whole pipeline that the specified stage file \"\n \"belongs to.\",\n )\n repro_parser.add_argument(\n \"-P\",\n \"--all-pipelines\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all pipelines in the repo.\",\n )\n repro_parser.add_argument(\n \"--ignore-build-cache\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all descendants of a changed stage even if their \"\n \"direct dependencies didn't change.\",\n )\n repro_parser.add_argument(\n \"--no-commit\",\n action=\"store_true\",\n default=False,\n help=\"Don't put files/directories into cache.\",\n )\n repro_parser.set_defaults(func=CmdRepro)\n", "path": "dvc/command/repro.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os\n\nimport dvc.logger as logger\nfrom dvc.command.base import CmdBase\nfrom dvc.command.status import CmdDataStatus\nfrom dvc.exceptions import DvcException\n\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n saved_dir = os.path.realpath(os.curdir)\n if self.args.cwd:\n os.chdir(self.args.cwd)\n\n # Dirty hack so the for loop below can at least enter once\n if self.args.all_pipelines:\n self.args.targets = [None]\n elif not self.args.targets:\n self.args.targets = self.default_targets\n\n ret = 0\n for target in self.args.targets:\n try:\n stages = self.repo.reproduce(\n target,\n recursive=recursive,\n force=self.args.force,\n dry=self.args.dry,\n interactive=self.args.interactive,\n pipeline=self.args.pipeline,\n all_pipelines=self.args.all_pipelines,\n ignore_build_cache=self.args.ignore_build_cache,\n no_commit=self.args.no_commit,\n )\n\n if len(stages) == 0:\n logger.info(CmdDataStatus.UP_TO_DATE_MSG)\n\n if self.args.metrics:\n self.repo.metrics.show()\n except DvcException:\n logger.error()\n ret = 1\n break\n\n os.chdir(saved_dir)\n return ret\n\n\ndef add_parser(subparsers, parent_parser):\n REPRO_HELP = \"Reproduce DVC file. 
Default file name - 'Dvcfile'.\"\n repro_parser = subparsers.add_parser(\n \"repro\",\n parents=[parent_parser],\n description=REPRO_HELP,\n help=REPRO_HELP,\n )\n repro_parser.add_argument(\n \"targets\", nargs=\"*\", help=\"DVC file to reproduce.\"\n )\n repro_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce even if dependencies were not changed.\",\n )\n repro_parser.add_argument(\n \"-s\",\n \"--single-item\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce only single data item without recursive dependencies \"\n \"check.\",\n )\n repro_parser.add_argument(\n \"-c\",\n \"--cwd\",\n default=os.path.curdir,\n help=\"Directory within your repo to reproduce from.\",\n )\n repro_parser.add_argument(\n \"-m\",\n \"--metrics\",\n action=\"store_true\",\n default=False,\n help=\"Show metrics after reproduction.\",\n )\n repro_parser.add_argument(\n \"--dry\",\n action=\"store_true\",\n default=False,\n help=\"Only print the commands that would be executed without \"\n \"actually executing.\",\n )\n repro_parser.add_argument(\n \"-i\",\n \"--interactive\",\n action=\"store_true\",\n default=False,\n help=\"Ask for confirmation before reproducing each stage.\",\n )\n repro_parser.add_argument(\n \"-p\",\n \"--pipeline\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce the whole pipeline that the specified stage file \"\n \"belongs to.\",\n )\n repro_parser.add_argument(\n \"-P\",\n \"--all-pipelines\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all pipelines in the repo.\",\n )\n repro_parser.add_argument(\n \"--ignore-build-cache\",\n action=\"store_true\",\n default=False,\n help=\"Reproduce all descendants of a changed stage even if their \"\n \"direct dependencies didn't change.\",\n )\n repro_parser.add_argument(\n \"--no-commit\",\n action=\"store_true\",\n default=False,\n help=\"Don't put files/directories into cache.\",\n )\n repro_parser.set_defaults(func=CmdRepro)\n", "path": "dvc/command/repro.py"}]} | 1,408 | 101 |
gh_patches_debug_10319 | rasdani/github-patches | git_diff | voxel51__fiftyone-102 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need custom FloatField that supports more numeric types
In this code (taken from https://github.com/voxel51/fiftyone/blob/develop/examples/model_inference/README.md), I had to add `float(confidence)`; otherwise I got an error about `confidence` (a numpy float32 or something similar) not being a supported value for a `mongoengine.fields.FloatField`.
```py
for imgs, sample_ids in data_loader:
predictions, confidences = predict(model, imgs)
# Add predictions to your FiftyOne dataset
for sample_id, prediction, confidence in zip(
sample_ids, predictions, confidences
):
sample = dataset[sample_id]
sample[model_name] = fo.Classification(label=labels_map[prediction])
sample["confidence"] = float(confidence) # float() is required here, but shouldn't need to be...
sample.save()
```
Kind of hard to believe that MongoEngine doesn't handle casting a `np.float32` into a float, but, alas, it seems like our wrapper around `mongoengine.fields.FloatField` will need to override the `validate()` function below to cast non-int types with `float()` as well...
https://github.com/MongoEngine/mongoengine/blob/4275c2d7b791f5910308a4815a1ba39324dee373/mongoengine/fields.py#L377-L411
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/core/fields.py`
Content:
```
1 """
2 Fields of dataset sample schemas.
3
4 | Copyright 2017-2020, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 # pragma pylint: disable=redefined-builtin
9 # pragma pylint: disable=unused-wildcard-import
10 # pragma pylint: disable=wildcard-import
11 from __future__ import absolute_import
12 from __future__ import division
13 from __future__ import print_function
14 from __future__ import unicode_literals
15 from builtins import *
16
17 # pragma pylint: enable=redefined-builtin
18 # pragma pylint: enable=unused-wildcard-import
19 # pragma pylint: enable=wildcard-import
20
21 import mongoengine.fields
22
23
24 class Field(mongoengine.fields.BaseField):
25 pass
26
27
28 class BooleanField(mongoengine.BooleanField, Field):
29 pass
30
31
32 class IntField(mongoengine.IntField, Field):
33 pass
34
35
36 class FloatField(mongoengine.FloatField, Field):
37 pass
38
39
40 class StringField(mongoengine.StringField, Field):
41 pass
42
43
44 class ListField(mongoengine.ListField, Field):
45 pass
46
47
48 class DictField(mongoengine.DictField, Field):
49 pass
50
51
52 class EmbeddedDocumentField(mongoengine.EmbeddedDocumentField, Field):
53 pass
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/core/fields.py b/fiftyone/core/fields.py
--- a/fiftyone/core/fields.py
+++ b/fiftyone/core/fields.py
@@ -34,7 +34,19 @@
class FloatField(mongoengine.FloatField, Field):
- pass
+ def validate(self, value):
+ try:
+ value = float(value)
+ except OverflowError:
+ self.error("The value is too large to be converted to float")
+ except (TypeError, ValueError):
+ self.error("%s could not be converted to float" % value)
+
+ if self.min_value is not None and value < self.min_value:
+ self.error("Float value is too small")
+
+ if self.max_value is not None and value > self.max_value:
+ self.error("Float value is too large")
class StringField(mongoengine.StringField, Field):
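With the override above, anything `float()` can coerce (including numpy scalars) passes validation before the min/max checks run, while non-numeric values still raise through `self.error(...)`. A small usage sketch, assuming numpy is installed and the patch is applied:

```python
import numpy as np

from fiftyone.core.fields import FloatField

field = FloatField()
field.validate(np.float32(0.87))  # accepted: coerced via float() before range checks
field.validate(3)                 # accepted: plain ints convert cleanly as well
field.validate("not-a-number")    # raises a ValidationError from self.error(...)
```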
| {"golden_diff": "diff --git a/fiftyone/core/fields.py b/fiftyone/core/fields.py\n--- a/fiftyone/core/fields.py\n+++ b/fiftyone/core/fields.py\n@@ -34,7 +34,19 @@\n \n \n class FloatField(mongoengine.FloatField, Field):\n- pass\n+ def validate(self, value):\n+ try:\n+ value = float(value)\n+ except OverflowError:\n+ self.error(\"The value is too large to be converted to float\")\n+ except (TypeError, ValueError):\n+ self.error(\"%s could not be converted to float\" % value)\n+\n+ if self.min_value is not None and value < self.min_value:\n+ self.error(\"Float value is too small\")\n+\n+ if self.max_value is not None and value > self.max_value:\n+ self.error(\"Float value is too large\")\n \n \n class StringField(mongoengine.StringField, Field):\n", "issue": "Need custom FloatField that supports more numeric types\nIn this code (taken from https://github.com/voxel51/fiftyone/blob/develop/examples/model_inference/README.md), I had to add `float(confidence)` otherwise I got an error about `confidence`, which was a numpy float32 or something similar, not being a supported value for a `mongoengine.fields.FloatField`. \r\n\r\n```py\r\nfor imgs, sample_ids in data_loader:\r\n predictions, confidences = predict(model, imgs)\r\n\r\n # Add predictions to your FiftyOne dataset\r\n for sample_id, prediction, confidence in zip(\r\n sample_ids, predictions, confidences\r\n ):\r\n sample = dataset[sample_id]\r\n sample[model_name] = fo.Classification(label=labels_map[prediction])\r\n sample[\"confidence\"] = float(confidence) # float() is required here, but shouldn't need to be...\r\n sample.save()\r\n```\r\n\r\nKind of hard to believe that MongoEngine doesn't handle casting a `np.float32` into a float, but, alas, it seems like our wrapper around `mongoengine.fields.FloatField` will need to override the `validate()` function below to cast non-int types with `float()` as well...\r\n\r\nhttps://github.com/MongoEngine/mongoengine/blob/4275c2d7b791f5910308a4815a1ba39324dee373/mongoengine/fields.py#L377-L411\r\n\n", "before_files": [{"content": "\"\"\"\nFields of dataset sample schemas.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport mongoengine.fields\n\n\nclass Field(mongoengine.fields.BaseField):\n pass\n\n\nclass BooleanField(mongoengine.BooleanField, Field):\n pass\n\n\nclass IntField(mongoengine.IntField, Field):\n pass\n\n\nclass FloatField(mongoengine.FloatField, Field):\n pass\n\n\nclass StringField(mongoengine.StringField, Field):\n pass\n\n\nclass ListField(mongoengine.ListField, Field):\n pass\n\n\nclass DictField(mongoengine.DictField, Field):\n pass\n\n\nclass EmbeddedDocumentField(mongoengine.EmbeddedDocumentField, Field):\n pass\n", "path": "fiftyone/core/fields.py"}], "after_files": [{"content": "\"\"\"\nFields of dataset sample schemas.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import 
absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport mongoengine.fields\n\n\nclass Field(mongoengine.fields.BaseField):\n pass\n\n\nclass BooleanField(mongoengine.BooleanField, Field):\n pass\n\n\nclass IntField(mongoengine.IntField, Field):\n pass\n\n\nclass FloatField(mongoengine.FloatField, Field):\n def validate(self, value):\n try:\n value = float(value)\n except OverflowError:\n self.error(\"The value is too large to be converted to float\")\n except (TypeError, ValueError):\n self.error(\"%s could not be converted to float\" % value)\n\n if self.min_value is not None and value < self.min_value:\n self.error(\"Float value is too small\")\n\n if self.max_value is not None and value > self.max_value:\n self.error(\"Float value is too large\")\n\n\nclass StringField(mongoengine.StringField, Field):\n pass\n\n\nclass ListField(mongoengine.ListField, Field):\n pass\n\n\nclass DictField(mongoengine.DictField, Field):\n pass\n\n\nclass EmbeddedDocumentField(mongoengine.EmbeddedDocumentField, Field):\n pass\n", "path": "fiftyone/core/fields.py"}]} | 963 | 200 |
gh_patches_debug_12098 | rasdani/github-patches | git_diff | ansible__awx-13080 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[api] Confusing error when trying to sync project set to scm_type Manual
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
Creating a project and accidentally setting it to be `scm_type: manual` (the default in the awx collection), and then trying to do a project update on it, leads to a very confusing error:
```
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Cannot make a request with the POST method to this endpoint /api/v2/projects/8/update/"}
```
Although it is "technically" correct, it would be nice if we could show something nicer in this scenario (e.g. "Unable to trigger a project update because the project scm type (Manual) does not support it.")
### AWX version
devel
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
docker development environment
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
- Create a project, set its type to Manual
- Use the API (or `awx.awx.project_update`) to try to update it
### Expected results
An error explaining the problem
### Actual results
An error telling me that I can't POST to an endpoint that I expect to be able to POST to.
### Additional information
_No response_
--- END ISSUE ---
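Before digging into the module source below, one possible shape for the friendlier message suggested in the issue is a pre-check on the project's `scm_type` before posting to the update endpoint. The empty-string comparison is an assumption about how the API represents Manual projects, and the wording simply reuses the message proposed above:

```python
# Hypothetical guard inside the module, ahead of module.post_endpoint(...).
if not project['scm_type']:  # assumed: '' is how the API encodes a "Manual" project
    module.fail_json(
        msg="Unable to trigger a project update because the project scm type (Manual) "
            "does not support it."
    )
result = module.post_endpoint(project['related']['update'])
```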
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx_collection/plugins/modules/project_update.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5
6 from __future__ import absolute_import, division, print_function
7
8 __metaclass__ = type
9
10 ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'}
11
12 DOCUMENTATION = '''
13 ---
14 module: project_update
15 author: "Sean Sullivan (@sean-m-sullivan)"
16 short_description: Update a Project in Automation Platform Controller
17 description:
18 - Update a Automation Platform Controller Project. See
19 U(https://www.ansible.com/tower) for an overview.
20 options:
21 name:
22 description:
23 - The name or id of the project to update.
24 required: True
25 type: str
26 aliases:
27 - project
28 organization:
29 description:
30 - Organization the project exists in.
31 - Used to help lookup the object, cannot be modified using this module.
32 - If not provided, will lookup by name only, which does not work with duplicates.
33 type: str
34 wait:
35 description:
36 - Wait for the project to update.
37 - If scm revision has not changed module will return not changed.
38 default: True
39 type: bool
40 interval:
41 description:
42 - The interval to request an update from the controller.
43 required: False
44 default: 2
45 type: float
46 timeout:
47 description:
48 - If waiting for the project to update this will abort after this
49 amount of seconds
50 type: int
51 extends_documentation_fragment: awx.awx.auth
52 '''
53
54 RETURN = '''
55 id:
56 description: project id of the updated project
57 returned: success
58 type: int
59 sample: 86
60 status:
61 description: status of the updated project
62 returned: success
63 type: str
64 sample: pending
65 '''
66
67
68 EXAMPLES = '''
69 - name: Launch a project with a timeout of 10 seconds
70 project_update:
71 project: "Networking Project"
72 timeout: 10
73
74 - name: Launch a Project with extra_vars without waiting
75 project_update:
76 project: "Networking Project"
77 wait: False
78 '''
79
80 from ..module_utils.controller_api import ControllerAPIModule
81
82
83 def main():
84 # Any additional arguments that are not fields of the item can be added here
85 argument_spec = dict(
86 name=dict(required=True, aliases=['project']),
87 organization=dict(),
88 wait=dict(default=True, type='bool'),
89 interval=dict(default=2.0, type='float'),
90 timeout=dict(default=None, type='int'),
91 )
92
93 # Create a module for ourselves
94 module = ControllerAPIModule(argument_spec=argument_spec)
95
96 # Extract our parameters
97 name = module.params.get('name')
98 organization = module.params.get('organization')
99 wait = module.params.get('wait')
100 interval = module.params.get('interval')
101 timeout = module.params.get('timeout')
102
103 # Attempt to look up project based on the provided name or id
104 lookup_data = {}
105 if organization:
106 lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)
107 project = module.get_one('projects', name_or_id=name, data=lookup_data)
108 if project is None:
109 module.fail_json(msg="Unable to find project")
110
111 if wait:
112 scm_revision_original = project['scm_revision']
113
114 # Update the project
115 result = module.post_endpoint(project['related']['update'])
116
117 if result['status_code'] != 202:
118 module.fail_json(msg="Failed to update project, see response for details", response=result)
119
120 module.json_output['changed'] = True
121 module.json_output['id'] = result['json']['id']
122 module.json_output['status'] = result['json']['status']
123
124 if not wait:
125 module.exit_json(**module.json_output)
126
127 # Invoke wait function
128 result = module.wait_on_url(
129 url=result['json']['url'], object_name=module.get_item_name(project), object_type='Project Update', timeout=timeout, interval=interval
130 )
131 scm_revision_new = result['json']['scm_revision']
132 if scm_revision_new == scm_revision_original:
133 module.json_output['changed'] = False
134
135 module.exit_json(**module.json_output)
136
137
138 if __name__ == '__main__':
139 main()
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx_collection/plugins/modules/project_update.py b/awx_collection/plugins/modules/project_update.py
--- a/awx_collection/plugins/modules/project_update.py
+++ b/awx_collection/plugins/modules/project_update.py
@@ -114,7 +114,12 @@
# Update the project
result = module.post_endpoint(project['related']['update'])
- if result['status_code'] != 202:
+ if result['status_code'] == 405:
+ module.fail_json(
+ msg="Unable to trigger a project update because the project scm_type ({0}) does not support it.".format(project['scm_type']),
+ response=result
+ )
+ elif result['status_code'] != 202:
module.fail_json(msg="Failed to update project, see response for details", response=result)
module.json_output['changed'] = True
| {"golden_diff": "diff --git a/awx_collection/plugins/modules/project_update.py b/awx_collection/plugins/modules/project_update.py\n--- a/awx_collection/plugins/modules/project_update.py\n+++ b/awx_collection/plugins/modules/project_update.py\n@@ -114,7 +114,12 @@\n # Update the project\n result = module.post_endpoint(project['related']['update'])\n \n- if result['status_code'] != 202:\n+ if result['status_code'] == 405:\n+ module.fail_json(\n+ msg=\"Unable to trigger a project update because the project scm_type ({0}) does not support it.\".format(project['scm_type']),\n+ response=result\n+ )\n+ elif result['status_code'] != 202:\n module.fail_json(msg=\"Failed to update project, see response for details\", response=result)\n \n module.json_output['changed'] = True\n", "issue": "[api] Confusing error when trying to sync project set to scm_type Manual\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Bug Summary\n\nCreating a project and accidentally setting it to be `scm_type: manual` (the default in the awx collection), and then trying to do a project update on it, leads to a very confusing error:\r\n\r\n```\r\nfatal: [localhost]: FAILED! => {\"changed\": false, \"msg\": \"Cannot make a request with the POST method to this endpoint /api/v2/projects/8/update/\"}\r\n```\r\n\r\nAlthough it is \"technically\" correct, it would be nice if we could show something nicer in this scenario (e.g. \"Unable to trigger a project update because the project scm type (Manual) does not support it.\")\n\n### AWX version\n\ndevel\n\n### Select the relevant components\n\n- [ ] UI\n- [X] API\n- [ ] Docs\n- [ ] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\ndocker development environment\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\n- Create a project, set its type to Manual\r\n- Use the API (or `awx.awx.project_update`) to try to update it\n\n### Expected results\n\nAn error explaining the problem\n\n### Actual results\n\nAn error telling me that I can't POST to an endpoint that I expect to be able to POST to.\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: project_update\nauthor: \"Sean Sullivan (@sean-m-sullivan)\"\nshort_description: Update a Project in Automation Platform Controller\ndescription:\n - Update a Automation Platform Controller Project. 
See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name or id of the project to update.\n required: True\n type: str\n aliases:\n - project\n organization:\n description:\n - Organization the project exists in.\n - Used to help lookup the object, cannot be modified using this module.\n - If not provided, will lookup by name only, which does not work with duplicates.\n type: str\n wait:\n description:\n - Wait for the project to update.\n - If scm revision has not changed module will return not changed.\n default: True\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n required: False\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the project to update this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nRETURN = '''\nid:\n description: project id of the updated project\n returned: success\n type: int\n sample: 86\nstatus:\n description: status of the updated project\n returned: success\n type: str\n sample: pending\n'''\n\n\nEXAMPLES = '''\n- name: Launch a project with a timeout of 10 seconds\n project_update:\n project: \"Networking Project\"\n timeout: 10\n\n- name: Launch a Project with extra_vars without waiting\n project_update:\n project: \"Networking Project\"\n wait: False\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True, aliases=['project']),\n organization=dict(),\n wait=dict(default=True, type='bool'),\n interval=dict(default=2.0, type='float'),\n timeout=dict(default=None, type='int'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n organization = module.params.get('organization')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n # Attempt to look up project based on the provided name or id\n lookup_data = {}\n if organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n project = module.get_one('projects', name_or_id=name, data=lookup_data)\n if project is None:\n module.fail_json(msg=\"Unable to find project\")\n\n if wait:\n scm_revision_original = project['scm_revision']\n\n # Update the project\n result = module.post_endpoint(project['related']['update'])\n\n if result['status_code'] != 202:\n module.fail_json(msg=\"Failed to update project, see response for details\", response=result)\n\n module.json_output['changed'] = True\n module.json_output['id'] = result['json']['id']\n module.json_output['status'] = result['json']['status']\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n result = module.wait_on_url(\n url=result['json']['url'], object_name=module.get_item_name(project), object_type='Project Update', timeout=timeout, interval=interval\n )\n scm_revision_new = result['json']['scm_revision']\n if scm_revision_new == scm_revision_original:\n module.json_output['changed'] = False\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/project_update.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: project_update\nauthor: \"Sean Sullivan (@sean-m-sullivan)\"\nshort_description: Update a Project in Automation Platform Controller\ndescription:\n - Update a Automation Platform Controller Project. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name or id of the project to update.\n required: True\n type: str\n aliases:\n - project\n organization:\n description:\n - Organization the project exists in.\n - Used to help lookup the object, cannot be modified using this module.\n - If not provided, will lookup by name only, which does not work with duplicates.\n type: str\n wait:\n description:\n - Wait for the project to update.\n - If scm revision has not changed module will return not changed.\n default: True\n type: bool\n interval:\n description:\n - The interval to request an update from the controller.\n required: False\n default: 2\n type: float\n timeout:\n description:\n - If waiting for the project to update this will abort after this\n amount of seconds\n type: int\nextends_documentation_fragment: awx.awx.auth\n'''\n\nRETURN = '''\nid:\n description: project id of the updated project\n returned: success\n type: int\n sample: 86\nstatus:\n description: status of the updated project\n returned: success\n type: str\n sample: pending\n'''\n\n\nEXAMPLES = '''\n- name: Launch a project with a timeout of 10 seconds\n project_update:\n project: \"Networking Project\"\n timeout: 10\n\n- name: Launch a Project with extra_vars without waiting\n project_update:\n project: \"Networking Project\"\n wait: False\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True, aliases=['project']),\n organization=dict(),\n wait=dict(default=True, type='bool'),\n interval=dict(default=2.0, type='float'),\n timeout=dict(default=None, type='int'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n organization = module.params.get('organization')\n wait = module.params.get('wait')\n interval = module.params.get('interval')\n timeout = module.params.get('timeout')\n\n # Attempt to look up project based on the provided name or id\n lookup_data = {}\n if organization:\n lookup_data['organization'] = module.resolve_name_to_id('organizations', organization)\n project = module.get_one('projects', name_or_id=name, data=lookup_data)\n if project is None:\n module.fail_json(msg=\"Unable to find project\")\n\n if wait:\n scm_revision_original = project['scm_revision']\n\n # Update the project\n result = module.post_endpoint(project['related']['update'])\n\n if result['status_code'] == 405:\n module.fail_json(\n msg=\"Unable to trigger a project update because the project scm_type ({0}) does not support it.\".format(project['scm_type']),\n response=result\n )\n elif result['status_code'] != 202:\n module.fail_json(msg=\"Failed to update project, see response for details\", response=result)\n\n module.json_output['changed'] = True\n module.json_output['id'] = result['json']['id']\n module.json_output['status'] = 
result['json']['status']\n\n if not wait:\n module.exit_json(**module.json_output)\n\n # Invoke wait function\n result = module.wait_on_url(\n url=result['json']['url'], object_name=module.get_item_name(project), object_type='Project Update', timeout=timeout, interval=interval\n )\n scm_revision_new = result['json']['scm_revision']\n if scm_revision_new == scm_revision_original:\n module.json_output['changed'] = False\n\n module.exit_json(**module.json_output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/project_update.py"}]} | 1,933 | 198 |
gh_patches_debug_12410 | rasdani/github-patches | git_diff | sagemath__sage-36176 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
toml package is obsolete
### Steps To Reproduce
_No response_
### Expected Behavior
toml is not installed
### Actual Behavior
toml is installed
### Additional Information
I think our `toml` package is obsolete. The only other package listing it as a requirement is tox, but tox only needs it with ancient pythons: https://github.com/tox-dev/tox/blob/3.27.1/setup.cfg#L45
I think our dependency can be replaced with tomli at which point toml can go.
### Environment
```markdown
Gentoo / git develop
```
### Checklist
- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.
- [X] I have read the documentation and troubleshoot guide
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build/sage_bootstrap/creator.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Package Creator
4 """
5
6 # ****************************************************************************
7 # Copyright (C) 2016 Volker Braun <[email protected]>
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 2 of the License, or
12 # (at your option) any later version.
13 # https://www.gnu.org/licenses/
14 # ****************************************************************************
15
16 import os
17
18 import logging
19 log = logging.getLogger()
20
21 from sage_bootstrap.env import SAGE_ROOT
22
23
24 class PackageCreator(object):
25
26 def __init__(self, package_name):
27 self.package_name = package_name
28 self.path = os.path.join(SAGE_ROOT, 'build', 'pkgs', package_name)
29 try:
30 os.mkdir(self.path)
31 except OSError:
32 pass
33
34 def set_version(self, version):
35 """
36 Write the version to ``package-version.txt``
37 """
38 with open(os.path.join(self.path, 'package-version.txt'), 'w+') as f:
39 f.write(version)
40 f.write('\n')
41
42 def set_type(self, pkg_type):
43 """
44 Write the package type to ``type``
45 """
46 with open(os.path.join(self.path, 'type'), 'w+') as f:
47 f.write(pkg_type)
48 f.write('\n')
49
50 def set_tarball(self, tarball, upstream_url):
51 """
52 Write the tarball name pattern to ``checksums.ini``
53 """
54 with open(os.path.join(self.path, 'checksums.ini'), 'w+') as f:
55 f.write('tarball={0}'.format(tarball))
56 f.write('\n')
57 if upstream_url:
58 f.write('upstream_url={0}'.format(upstream_url))
59 f.write('\n')
60
61 def set_description(self, description, license, upstream_contact):
62 """
63 Write the ``SPKG.rst`` file
64 """
65 with open(os.path.join(self.path, 'SPKG.rst'), 'w+') as f:
66 def heading(title, char='-'):
67 return '{0}\n{1}\n\n'.format(title, char * len(title))
68 if description:
69 title = '{0}: {1}'.format(self.package_name, description)
70 else:
71 title = self.package_name
72 f.write(heading(title, '='))
73 f.write(heading('Description'))
74 if description:
75 f.write('{0}\n\n'.format(description))
76 f.write(heading('License'))
77 if license:
78 f.write('{0}\n\n'.format(license))
79 f.write(heading('Upstream Contact'))
80 if upstream_contact:
81 f.write('{0}\n\n'.format(upstream_contact))
82
83 def set_python_data_and_scripts(self, pypi_package_name=None, source='normal'):
84 """
85 Write the file ``dependencies`` and other files for Python packages.
86
87 If ``source`` is ``"normal"``, write the files ``spkg-install.in`` and
88 ``install-requires.txt``.
89
90 If ``source`` is ``"wheel"``, write the file ``install-requires.txt``.
91
92 If ``source`` is ``"pip"``, write the file ``requirements.txt``.
93 """
94 if pypi_package_name is None:
95 pypi_package_name = self.package_name
96 with open(os.path.join(self.path, 'dependencies'), 'w+') as f:
97 f.write('$(PYTHON) | $(PYTHON_TOOLCHAIN)\n\n')
98 f.write('----------\nAll lines of this file are ignored except the first.\n')
99 if source == 'normal':
100 with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:
101 f.write('cd src\nsdh_pip_install .\n')
102 with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:
103 f.write('{0}\n'.format(pypi_package_name))
104 elif source == 'wheel':
105 with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:
106 f.write('{0}\n'.format(pypi_package_name))
107 elif source == 'pip':
108 with open(os.path.join(self.path, 'requirements.txt'), 'w+') as f:
109 f.write('{0}\n'.format(pypi_package_name))
110 elif source == 'script':
111 pass
112 else:
113 raise ValueError('package source must be one of normal, script, pip, or wheel')
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/build/sage_bootstrap/creator.py b/build/sage_bootstrap/creator.py
--- a/build/sage_bootstrap/creator.py
+++ b/build/sage_bootstrap/creator.py
@@ -94,7 +94,7 @@
if pypi_package_name is None:
pypi_package_name = self.package_name
with open(os.path.join(self.path, 'dependencies'), 'w+') as f:
- f.write('$(PYTHON) | $(PYTHON_TOOLCHAIN)\n\n')
+ f.write(' | $(PYTHON_TOOLCHAIN) $(PYTHON)\n\n')
f.write('----------\nAll lines of this file are ignored except the first.\n')
if source == 'normal':
with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:
| {"golden_diff": "diff --git a/build/sage_bootstrap/creator.py b/build/sage_bootstrap/creator.py\n--- a/build/sage_bootstrap/creator.py\n+++ b/build/sage_bootstrap/creator.py\n@@ -94,7 +94,7 @@\n if pypi_package_name is None:\n pypi_package_name = self.package_name\n with open(os.path.join(self.path, 'dependencies'), 'w+') as f:\n- f.write('$(PYTHON) | $(PYTHON_TOOLCHAIN)\\n\\n')\n+ f.write(' | $(PYTHON_TOOLCHAIN) $(PYTHON)\\n\\n')\n f.write('----------\\nAll lines of this file are ignored except the first.\\n')\n if source == 'normal':\n with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:\n", "issue": "toml package is obsolete\n### Steps To Reproduce\n\n_No response_\n\n### Expected Behavior\n\ntoml is not installed\n\n### Actual Behavior\n\ntoml is installed\n\n### Additional Information\n\nI think our `toml` package is obsolete. The only other package listing it as a requirement is tox, but tox only needs it with ancient pythons: https://github.com/tox-dev/tox/blob/3.27.1/setup.cfg#L45\r\n\r\nI think our dependency can be replaced with tomli at which point toml can go.\r\n\n\n### Environment\n\n```markdown\nGentoo / git develop\n```\n\n\n### Checklist\n\n- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.\n- [X] I have read the documentation and troubleshoot guide\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nPackage Creator\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2016 Volker Braun <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport os\n\nimport logging\nlog = logging.getLogger()\n\nfrom sage_bootstrap.env import SAGE_ROOT\n\n\nclass PackageCreator(object):\n\n def __init__(self, package_name):\n self.package_name = package_name\n self.path = os.path.join(SAGE_ROOT, 'build', 'pkgs', package_name)\n try:\n os.mkdir(self.path)\n except OSError:\n pass\n\n def set_version(self, version):\n \"\"\"\n Write the version to ``package-version.txt``\n \"\"\"\n with open(os.path.join(self.path, 'package-version.txt'), 'w+') as f:\n f.write(version)\n f.write('\\n')\n\n def set_type(self, pkg_type):\n \"\"\"\n Write the package type to ``type``\n \"\"\"\n with open(os.path.join(self.path, 'type'), 'w+') as f:\n f.write(pkg_type)\n f.write('\\n')\n\n def set_tarball(self, tarball, upstream_url):\n \"\"\"\n Write the tarball name pattern to ``checksums.ini``\n \"\"\"\n with open(os.path.join(self.path, 'checksums.ini'), 'w+') as f:\n f.write('tarball={0}'.format(tarball))\n f.write('\\n')\n if upstream_url:\n f.write('upstream_url={0}'.format(upstream_url))\n f.write('\\n')\n\n def set_description(self, description, license, upstream_contact):\n \"\"\"\n Write the ``SPKG.rst`` file\n \"\"\"\n with open(os.path.join(self.path, 'SPKG.rst'), 'w+') as f:\n def heading(title, char='-'):\n return '{0}\\n{1}\\n\\n'.format(title, char * len(title))\n if description:\n title = '{0}: {1}'.format(self.package_name, description)\n else:\n title = self.package_name\n f.write(heading(title, '='))\n f.write(heading('Description'))\n if description:\n f.write('{0}\\n\\n'.format(description))\n 
f.write(heading('License'))\n if license:\n f.write('{0}\\n\\n'.format(license))\n f.write(heading('Upstream Contact'))\n if upstream_contact:\n f.write('{0}\\n\\n'.format(upstream_contact))\n\n def set_python_data_and_scripts(self, pypi_package_name=None, source='normal'):\n \"\"\"\n Write the file ``dependencies`` and other files for Python packages.\n\n If ``source`` is ``\"normal\"``, write the files ``spkg-install.in`` and\n ``install-requires.txt``.\n\n If ``source`` is ``\"wheel\"``, write the file ``install-requires.txt``.\n\n If ``source`` is ``\"pip\"``, write the file ``requirements.txt``.\n \"\"\"\n if pypi_package_name is None:\n pypi_package_name = self.package_name\n with open(os.path.join(self.path, 'dependencies'), 'w+') as f:\n f.write('$(PYTHON) | $(PYTHON_TOOLCHAIN)\\n\\n')\n f.write('----------\\nAll lines of this file are ignored except the first.\\n')\n if source == 'normal':\n with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:\n f.write('cd src\\nsdh_pip_install .\\n')\n with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'wheel':\n with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'pip':\n with open(os.path.join(self.path, 'requirements.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'script':\n pass\n else:\n raise ValueError('package source must be one of normal, script, pip, or wheel')\n", "path": "build/sage_bootstrap/creator.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nPackage Creator\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2016 Volker Braun <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport os\n\nimport logging\nlog = logging.getLogger()\n\nfrom sage_bootstrap.env import SAGE_ROOT\n\n\nclass PackageCreator(object):\n\n def __init__(self, package_name):\n self.package_name = package_name\n self.path = os.path.join(SAGE_ROOT, 'build', 'pkgs', package_name)\n try:\n os.mkdir(self.path)\n except OSError:\n pass\n\n def set_version(self, version):\n \"\"\"\n Write the version to ``package-version.txt``\n \"\"\"\n with open(os.path.join(self.path, 'package-version.txt'), 'w+') as f:\n f.write(version)\n f.write('\\n')\n\n def set_type(self, pkg_type):\n \"\"\"\n Write the package type to ``type``\n \"\"\"\n with open(os.path.join(self.path, 'type'), 'w+') as f:\n f.write(pkg_type)\n f.write('\\n')\n\n def set_tarball(self, tarball, upstream_url):\n \"\"\"\n Write the tarball name pattern to ``checksums.ini``\n \"\"\"\n with open(os.path.join(self.path, 'checksums.ini'), 'w+') as f:\n f.write('tarball={0}'.format(tarball))\n f.write('\\n')\n if upstream_url:\n f.write('upstream_url={0}'.format(upstream_url))\n f.write('\\n')\n\n def set_description(self, description, license, upstream_contact):\n \"\"\"\n Write the ``SPKG.rst`` file\n \"\"\"\n with open(os.path.join(self.path, 'SPKG.rst'), 'w+') as f:\n def heading(title, char='-'):\n return '{0}\\n{1}\\n\\n'.format(title, char * len(title))\n if description:\n title = '{0}: 
{1}'.format(self.package_name, description)\n else:\n title = self.package_name\n f.write(heading(title, '='))\n f.write(heading('Description'))\n if description:\n f.write('{0}\\n\\n'.format(description))\n f.write(heading('License'))\n if license:\n f.write('{0}\\n\\n'.format(license))\n f.write(heading('Upstream Contact'))\n if upstream_contact:\n f.write('{0}\\n\\n'.format(upstream_contact))\n\n def set_python_data_and_scripts(self, pypi_package_name=None, source='normal'):\n \"\"\"\n Write the file ``dependencies`` and other files for Python packages.\n\n If ``source`` is ``\"normal\"``, write the files ``spkg-install.in`` and\n ``install-requires.txt``.\n\n If ``source`` is ``\"wheel\"``, write the file ``install-requires.txt``.\n\n If ``source`` is ``\"pip\"``, write the file ``requirements.txt``.\n \"\"\"\n if pypi_package_name is None:\n pypi_package_name = self.package_name\n with open(os.path.join(self.path, 'dependencies'), 'w+') as f:\n f.write(' | $(PYTHON_TOOLCHAIN) $(PYTHON)\\n\\n')\n f.write('----------\\nAll lines of this file are ignored except the first.\\n')\n if source == 'normal':\n with open(os.path.join(self.path, 'spkg-install.in'), 'w+') as f:\n f.write('cd src\\nsdh_pip_install .\\n')\n with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'wheel':\n with open(os.path.join(self.path, 'install-requires.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'pip':\n with open(os.path.join(self.path, 'requirements.txt'), 'w+') as f:\n f.write('{0}\\n'.format(pypi_package_name))\n elif source == 'script':\n pass\n else:\n raise ValueError('package source must be one of normal, script, pip, or wheel')\n", "path": "build/sage_bootstrap/creator.py"}]} | 1,639 | 171 |
gh_patches_debug_20288 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add UUID objects support for uuid()
- [x] Add parameter `as_object`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/cryptographic.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Cryptographic data provider."""
4
5 import hashlib
6 import secrets
7 from typing import Optional, Union
8 from uuid import UUID, uuid4
9
10 from mimesis.enums import Algorithm
11 from mimesis.providers.base import BaseProvider
12 from mimesis.providers.text import Text
13
14 __all__ = ['Cryptographic']
15
16
17 class Cryptographic(BaseProvider):
18 """Class that provides cryptographic data."""
19
20 def __init__(self, *args, **kwargs) -> None:
21 """Initialize attributes.
22
23 :param seed: Seed.
24 """
25 super().__init__(*args, **kwargs)
26 self.__words = Text('en')._data.get('words', {})
27
28 class Meta:
29 """Class for metadata."""
30
31 name = 'cryptographic'
32
33 @staticmethod
34 def uuid(as_object: bool = False) -> Union[UUID, str]:
35 """Generate random UUID4.
36
37 This method returns string by default,
38 but you can make it return uuid.UUID object using
39 parameter **as_object**
40
41 :param as_object: Returns uuid.UUID.
42 :return: UUID.
43 """
44 _uuid = uuid4()
45
46 if not as_object:
47 return str(_uuid)
48
49 return _uuid
50
51 def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003
52 """Generate random hash.
53
54 To change hashing algorithm, pass parameter ``algorithm``
55 with needed value of the enum object :class:`~mimesis.enums.Algorithm`
56
57 :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.
58 :return: Hash.
59 :raises NonEnumerableError: When algorithm is unsupported.
60 """
61 key = self._validate_enum(algorithm, Algorithm)
62
63 if hasattr(hashlib, key):
64 fn = getattr(hashlib, key)
65 return fn(self.uuid().encode()).hexdigest() # type: ignore
66
67 @staticmethod
68 def token_bytes(entropy: int = 32) -> bytes:
69 """Generate byte string containing ``entropy`` bytes.
70
71 The string has ``entropy`` random bytes, each byte
72 converted to two hex digits.
73
74 .. warning:: Seed is not applicable to this method,
75 because of its cryptographic-safe nature.
76
77 :param entropy: Number of bytes (default: 32).
78 :return: Random bytes.
79 """
80 return secrets.token_bytes(entropy)
81
82 @staticmethod
83 def token_hex(entropy: int = 32) -> str:
84 """Return a random text string, in hexadecimal.
85
86 The string has *entropy* random bytes, each byte converted to two
87 hex digits. If *entropy* is ``None`` or not supplied, a reasonable
88 default is used.
89
90 .. warning:: Seed is not applicable to this method,
91 because of its cryptographic-safe nature.
92
93 :param entropy: Number of bytes (default: 32).
94 :return: Token.
95 """
96 return secrets.token_hex(entropy)
97
98 @staticmethod
99 def token_urlsafe(entropy: int = 32):
100 """Return a random URL-safe text string, in Base64 encoding.
101
102 The string has *entropy* random bytes. If *entropy* is ``None``
103 or not supplied, a reasonable default is used.
104
105 .. warning:: Seed is not applicable to this method,
106 because of its cryptographic-safe nature.
107
108 :param entropy: Number of bytes (default: 32).
109 :return: URL-safe token.
110 """
111 return secrets.token_urlsafe(entropy)
112
113 def mnemonic_phrase(self, length: int = 12,
114 separator: Optional[str] = None) -> str:
115 """Generate pseudo mnemonic phrase.
116
117 Please, keep in mind that this method generates
118 crypto-insecure values.
119
120 :param separator: Separator of phrases (Default is " ").
121 :param length: Number of words.
122 :return: Mnemonic phrase.
123 """
124 if not separator:
125 separator = ' '
126
127 words = self.__words['normal']
128 words_generator = (self.random.choice(words) for _ in range(length))
129 return '{}'.format(separator).join(words_generator)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py
--- a/mimesis/providers/cryptographic.py
+++ b/mimesis/providers/cryptographic.py
@@ -38,6 +38,9 @@
but you can make it return uuid.UUID object using
parameter **as_object**
+ .. warning:: Seed is not applicable to this method,
+ because of its cryptographic-safe nature.
+
:param as_object: Returns uuid.UUID.
:return: UUID.
"""
@@ -54,6 +57,9 @@
To change hashing algorithm, pass parameter ``algorithm``
with needed value of the enum object :class:`~mimesis.enums.Algorithm`
+ .. warning:: Seed is not applicable to this method,
+ because of its cryptographic-safe nature.
+
:param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.
:return: Hash.
:raises NonEnumerableError: When algorithm is unsupported.
| {"golden_diff": "diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py\n--- a/mimesis/providers/cryptographic.py\n+++ b/mimesis/providers/cryptographic.py\n@@ -38,6 +38,9 @@\n but you can make it return uuid.UUID object using\n parameter **as_object**\n \n+ .. warning:: Seed is not applicable to this method,\n+ because of its cryptographic-safe nature.\n+\n :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n@@ -54,6 +57,9 @@\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n \n+ .. warning:: Seed is not applicable to this method,\n+ because of its cryptographic-safe nature.\n+\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n", "issue": "Add UUID objects support for uuid()\n- [x] Add parameter `as_object` \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import Optional, Union\nfrom uuid import UUID, uuid4\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n @staticmethod\n def uuid(as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID4.\n\n This method returns string by default,\n but you can make it return uuid.UUID object using\n parameter **as_object**\n\n :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n _uuid = uuid4()\n\n if not as_object:\n return str(_uuid)\n\n return _uuid\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest() # type: ignore\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. 
warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import Optional, Union\nfrom uuid import UUID, uuid4\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n @staticmethod\n def uuid(as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID4.\n\n This method returns string by default,\n but you can make it return uuid.UUID object using\n parameter **as_object**\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n _uuid = uuid4()\n\n if not as_object:\n return str(_uuid)\n\n return _uuid\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest() # type: ignore\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. 
warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}]} | 1,463 | 216 |
gh_patches_debug_21786 | rasdani/github-patches | git_diff | pre-commit__pre-commit-310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Non-ascii prints in error handler without tty cause stacktrace
```
23:00:13 style runtests: commands[0] | pre-commit run --all-files
23:00:13 [INFO] Installing environment for [email protected]:mirrors/pre-commit/mirrors-jshint.
23:00:13 [INFO] Once installed this environment will be reused.
23:00:13 [INFO] This may take a few minutes...
23:01:33 Traceback (most recent call last):
23:01:33 File ".tox/style/bin/pre-commit", line 11, in <module>
23:01:33 sys.exit(main())
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/main.py", line 157, in main
23:01:33 'Command {0} failed to exit with a returncode'.format(args.command)
23:01:33 File "/usr/lib64/python2.7/contextlib.py", line 35, in __exit__
23:01:33 self.gen.throw(type, value, traceback)
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 41, in error_handler
23:01:33 traceback.format_exc(),
23:01:33 File ".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 21, in _log_and_exit
23:01:33 print_fn(error_msg)
23:01:33 UnicodeEncodeError: 'ascii' codec can't encode characters in position 735-737: ordinal not in range(128)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/error_handler.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3 from __future__ import unicode_literals
4
5 import contextlib
6 import io
7 import os.path
8 import traceback
9
10 from pre_commit.errors import FatalError
11 from pre_commit.store import Store
12
13
14 # For testing purposes
15 class PreCommitSystemExit(SystemExit):
16 pass
17
18
19 def _log_and_exit(msg, exc, formatted, print_fn=print):
20 error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc)
21 print_fn(error_msg)
22 print_fn('Check the log at ~/.pre-commit/pre-commit.log')
23 store = Store()
24 store.require_created()
25 with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log:
26 log.write(error_msg + '\n')
27 log.write(formatted + '\n')
28 raise PreCommitSystemExit(1)
29
30
31 @contextlib.contextmanager
32 def error_handler():
33 try:
34 yield
35 except FatalError as e:
36 _log_and_exit('An error has occurred', e, traceback.format_exc())
37 except Exception as e:
38 _log_and_exit(
39 'An unexpected error has occurred',
40 e,
41 traceback.format_exc(),
42 )
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py
--- a/pre_commit/error_handler.py
+++ b/pre_commit/error_handler.py
@@ -7,7 +7,9 @@
import os.path
import traceback
+from pre_commit import five
from pre_commit.errors import FatalError
+from pre_commit.output import sys_stdout_write_wrapper
from pre_commit.store import Store
@@ -16,15 +18,15 @@
pass
-def _log_and_exit(msg, exc, formatted, print_fn=print):
- error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc)
- print_fn(error_msg)
- print_fn('Check the log at ~/.pre-commit/pre-commit.log')
+def _log_and_exit(msg, exc, formatted, write_fn=sys_stdout_write_wrapper):
+ error_msg = '{0}: {1}: {2}\n'.format(msg, type(exc).__name__, exc)
+ write_fn(error_msg)
+ write_fn('Check the log at ~/.pre-commit/pre-commit.log\n')
store = Store()
store.require_created()
- with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log:
- log.write(error_msg + '\n')
- log.write(formatted + '\n')
+ with io.open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:
+ log.write(five.to_bytes(error_msg))
+ log.write(five.to_bytes(formatted) + b'\n')
raise PreCommitSystemExit(1)
| {"golden_diff": "diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py\n--- a/pre_commit/error_handler.py\n+++ b/pre_commit/error_handler.py\n@@ -7,7 +7,9 @@\n import os.path\n import traceback\n \n+from pre_commit import five\n from pre_commit.errors import FatalError\n+from pre_commit.output import sys_stdout_write_wrapper\n from pre_commit.store import Store\n \n \n@@ -16,15 +18,15 @@\n pass\n \n \n-def _log_and_exit(msg, exc, formatted, print_fn=print):\n- error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc)\n- print_fn(error_msg)\n- print_fn('Check the log at ~/.pre-commit/pre-commit.log')\n+def _log_and_exit(msg, exc, formatted, write_fn=sys_stdout_write_wrapper):\n+ error_msg = '{0}: {1}: {2}\\n'.format(msg, type(exc).__name__, exc)\n+ write_fn(error_msg)\n+ write_fn('Check the log at ~/.pre-commit/pre-commit.log\\n')\n store = Store()\n store.require_created()\n- with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log:\n- log.write(error_msg + '\\n')\n- log.write(formatted + '\\n')\n+ with io.open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:\n+ log.write(five.to_bytes(error_msg))\n+ log.write(five.to_bytes(formatted) + b'\\n')\n raise PreCommitSystemExit(1)\n", "issue": "Non-ascii prints in error handler without tty cause stacktrace\n```\n23:00:13 style runtests: commands[0] | pre-commit run --all-files\n23:00:13 [INFO] Installing environment for [email protected]:mirrors/pre-commit/mirrors-jshint.\n23:00:13 [INFO] Once installed this environment will be reused.\n23:00:13 [INFO] This may take a few minutes...\n23:01:33 Traceback (most recent call last):\n23:01:33 File \".tox/style/bin/pre-commit\", line 11, in <module>\n23:01:33 sys.exit(main())\n23:01:33 File \".../.tox/style/local/lib/python2.7/site-packages/pre_commit/main.py\", line 157, in main\n23:01:33 'Command {0} failed to exit with a returncode'.format(args.command)\n23:01:33 File \"/usr/lib64/python2.7/contextlib.py\", line 35, in __exit__\n23:01:33 self.gen.throw(type, value, traceback)\n23:01:33 File \".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 41, in error_handler\n23:01:33 traceback.format_exc(),\n23:01:33 File \".../.tox/style/local/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 21, in _log_and_exit\n23:01:33 print_fn(error_msg)\n23:01:33 UnicodeEncodeError: 'ascii' codec can't encode characters in position 735-737: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport os.path\nimport traceback\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.store import Store\n\n\n# For testing purposes\nclass PreCommitSystemExit(SystemExit):\n pass\n\n\ndef _log_and_exit(msg, exc, formatted, print_fn=print):\n error_msg = '{0}: {1}: {2}'.format(msg, type(exc).__name__, exc)\n print_fn(error_msg)\n print_fn('Check the log at ~/.pre-commit/pre-commit.log')\n store = Store()\n store.require_created()\n with io.open(os.path.join(store.directory, 'pre-commit.log'), 'w') as log:\n log.write(error_msg + '\\n')\n log.write(formatted + '\\n')\n raise PreCommitSystemExit(1)\n\n\[email protected]\ndef error_handler():\n try:\n yield\n except FatalError as e:\n _log_and_exit('An error has occurred', e, traceback.format_exc())\n except Exception as e:\n _log_and_exit(\n 'An unexpected error has occurred',\n e,\n traceback.format_exc(),\n 
)\n", "path": "pre_commit/error_handler.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport os.path\nimport traceback\n\nfrom pre_commit import five\nfrom pre_commit.errors import FatalError\nfrom pre_commit.output import sys_stdout_write_wrapper\nfrom pre_commit.store import Store\n\n\n# For testing purposes\nclass PreCommitSystemExit(SystemExit):\n pass\n\n\ndef _log_and_exit(msg, exc, formatted, write_fn=sys_stdout_write_wrapper):\n error_msg = '{0}: {1}: {2}\\n'.format(msg, type(exc).__name__, exc)\n write_fn(error_msg)\n write_fn('Check the log at ~/.pre-commit/pre-commit.log\\n')\n store = Store()\n store.require_created()\n with io.open(os.path.join(store.directory, 'pre-commit.log'), 'wb') as log:\n log.write(five.to_bytes(error_msg))\n log.write(five.to_bytes(formatted) + b'\\n')\n raise PreCommitSystemExit(1)\n\n\[email protected]\ndef error_handler():\n try:\n yield\n except FatalError as e:\n _log_and_exit('An error has occurred', e, traceback.format_exc())\n except Exception as e:\n _log_and_exit(\n 'An unexpected error has occurred',\n e,\n traceback.format_exc(),\n )\n", "path": "pre_commit/error_handler.py"}]} | 1,025 | 343 |
gh_patches_debug_7181 | rasdani/github-patches | git_diff | vega__altair-3074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include a suggestion to update frontend (Jupyterlab, ...) in mimetype error
Follow-up that comes out of #2585. Raised by @joelostblom:
> do you think we need to communicate the minimum version of JuptyerLab that support Altair 5 somewhere? I am thinking ideally directly in the error message if possible, but otherwise at least in the docs and release notes, what do you all think?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v5/display.py`
Content:
```
1 import os
2
3 from ...utils.mimebundle import spec_to_mimebundle
4 from ..display import Displayable
5 from ..display import default_renderer_base
6 from ..display import json_renderer_base
7 from ..display import RendererRegistry
8 from ..display import HTMLRenderer
9
10 from .schema import SCHEMA_VERSION
11
12 VEGALITE_VERSION = SCHEMA_VERSION.lstrip("v")
13 VEGA_VERSION = "5"
14 VEGAEMBED_VERSION = "6"
15
16
17 # ==============================================================================
18 # VegaLite v5 renderer logic
19 # ==============================================================================
20
21
22 # The MIME type for Vega-Lite 5.x releases.
23 VEGALITE_MIME_TYPE = "application/vnd.vegalite.v5+json" # type: str
24
25 # The entry point group that can be used by other packages to declare other
26 # renderers that will be auto-detected. Explicit registration is also
27 # allowed by the PluginRegistery API.
28 ENTRY_POINT_GROUP = "altair.vegalite.v5.renderer" # type: str
29
30 # The display message when rendering fails
31 DEFAULT_DISPLAY = """\
32 <VegaLite 5 object>
33
34 If you see this message, it means the renderer has not been properly enabled
35 for the frontend that you are using. For more information, see
36 https://altair-viz.github.io/user_guide/display_frontends.html#troubleshooting
37 """
38
39 renderers = RendererRegistry(entry_point_group=ENTRY_POINT_GROUP)
40
41 here = os.path.dirname(os.path.realpath(__file__))
42
43
44 def mimetype_renderer(spec, **metadata):
45 return default_renderer_base(spec, VEGALITE_MIME_TYPE, DEFAULT_DISPLAY, **metadata)
46
47
48 def json_renderer(spec, **metadata):
49 return json_renderer_base(spec, DEFAULT_DISPLAY, **metadata)
50
51
52 def png_renderer(spec, **metadata):
53 return spec_to_mimebundle(
54 spec,
55 format="png",
56 mode="vega-lite",
57 vega_version=VEGA_VERSION,
58 vegaembed_version=VEGAEMBED_VERSION,
59 vegalite_version=VEGALITE_VERSION,
60 **metadata,
61 )
62
63
64 def svg_renderer(spec, **metadata):
65 return spec_to_mimebundle(
66 spec,
67 format="svg",
68 mode="vega-lite",
69 vega_version=VEGA_VERSION,
70 vegaembed_version=VEGAEMBED_VERSION,
71 vegalite_version=VEGALITE_VERSION,
72 **metadata,
73 )
74
75
76 html_renderer = HTMLRenderer(
77 mode="vega-lite",
78 template="universal",
79 vega_version=VEGA_VERSION,
80 vegaembed_version=VEGAEMBED_VERSION,
81 vegalite_version=VEGALITE_VERSION,
82 )
83
84 renderers.register("default", html_renderer)
85 renderers.register("html", html_renderer)
86 renderers.register("colab", html_renderer)
87 renderers.register("kaggle", html_renderer)
88 renderers.register("zeppelin", html_renderer)
89 renderers.register("mimetype", mimetype_renderer)
90 renderers.register("jupyterlab", mimetype_renderer)
91 renderers.register("nteract", mimetype_renderer)
92 renderers.register("json", json_renderer)
93 renderers.register("png", png_renderer)
94 renderers.register("svg", svg_renderer)
95 renderers.enable("default")
96
97
98 class VegaLite(Displayable):
99 """An IPython/Jupyter display class for rendering VegaLite 5."""
100
101 renderers = renderers
102 schema_path = (__name__, "schema/vega-lite-schema.json")
103
104
105 def vegalite(spec, validate=True):
106 """Render and optionally validate a VegaLite 5 spec.
107
108 This will use the currently enabled renderer to render the spec.
109
110 Parameters
111 ==========
112 spec: dict
113 A fully compliant VegaLite 5 spec, with the data portion fully processed.
114 validate: bool
115 Should the spec be validated against the VegaLite 5 schema?
116 """
117 from IPython.display import display
118
119 display(VegaLite(spec, validate=validate))
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/vegalite/v5/display.py b/altair/vegalite/v5/display.py
--- a/altair/vegalite/v5/display.py
+++ b/altair/vegalite/v5/display.py
@@ -28,8 +28,8 @@
ENTRY_POINT_GROUP = "altair.vegalite.v5.renderer" # type: str
# The display message when rendering fails
-DEFAULT_DISPLAY = """\
-<VegaLite 5 object>
+DEFAULT_DISPLAY = f"""\
+<VegaLite {VEGALITE_VERSION.split('.')[0]} object>
If you see this message, it means the renderer has not been properly enabled
for the frontend that you are using. For more information, see
| {"golden_diff": "diff --git a/altair/vegalite/v5/display.py b/altair/vegalite/v5/display.py\n--- a/altair/vegalite/v5/display.py\n+++ b/altair/vegalite/v5/display.py\n@@ -28,8 +28,8 @@\n ENTRY_POINT_GROUP = \"altair.vegalite.v5.renderer\" # type: str\n \n # The display message when rendering fails\n-DEFAULT_DISPLAY = \"\"\"\\\n-<VegaLite 5 object>\n+DEFAULT_DISPLAY = f\"\"\"\\\n+<VegaLite {VEGALITE_VERSION.split('.')[0]} object>\n \n If you see this message, it means the renderer has not been properly enabled\n for the frontend that you are using. For more information, see\n", "issue": "Include a suggestion to update frontend (Jupyterlab, ...) in mimetype error\nFollow-up that comes out of #2585. Raised by @joelostblom:\r\n\r\n> do you think we need to communicate the minimum version of JuptyerLab that support Altair 5 somewhere? I am thinking ideally directly in the error message if possible, but otherwise at least in the docs and release notes, what do you all think?\n", "before_files": [{"content": "import os\n\nfrom ...utils.mimebundle import spec_to_mimebundle\nfrom ..display import Displayable\nfrom ..display import default_renderer_base\nfrom ..display import json_renderer_base\nfrom ..display import RendererRegistry\nfrom ..display import HTMLRenderer\n\nfrom .schema import SCHEMA_VERSION\n\nVEGALITE_VERSION = SCHEMA_VERSION.lstrip(\"v\")\nVEGA_VERSION = \"5\"\nVEGAEMBED_VERSION = \"6\"\n\n\n# ==============================================================================\n# VegaLite v5 renderer logic\n# ==============================================================================\n\n\n# The MIME type for Vega-Lite 5.x releases.\nVEGALITE_MIME_TYPE = \"application/vnd.vegalite.v5+json\" # type: str\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = \"altair.vegalite.v5.renderer\" # type: str\n\n# The display message when rendering fails\nDEFAULT_DISPLAY = \"\"\"\\\n<VegaLite 5 object>\n\nIf you see this message, it means the renderer has not been properly enabled\nfor the frontend that you are using. 
For more information, see\nhttps://altair-viz.github.io/user_guide/display_frontends.html#troubleshooting\n\"\"\"\n\nrenderers = RendererRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nhere = os.path.dirname(os.path.realpath(__file__))\n\n\ndef mimetype_renderer(spec, **metadata):\n return default_renderer_base(spec, VEGALITE_MIME_TYPE, DEFAULT_DISPLAY, **metadata)\n\n\ndef json_renderer(spec, **metadata):\n return json_renderer_base(spec, DEFAULT_DISPLAY, **metadata)\n\n\ndef png_renderer(spec, **metadata):\n return spec_to_mimebundle(\n spec,\n format=\"png\",\n mode=\"vega-lite\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n **metadata,\n )\n\n\ndef svg_renderer(spec, **metadata):\n return spec_to_mimebundle(\n spec,\n format=\"svg\",\n mode=\"vega-lite\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n **metadata,\n )\n\n\nhtml_renderer = HTMLRenderer(\n mode=\"vega-lite\",\n template=\"universal\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n)\n\nrenderers.register(\"default\", html_renderer)\nrenderers.register(\"html\", html_renderer)\nrenderers.register(\"colab\", html_renderer)\nrenderers.register(\"kaggle\", html_renderer)\nrenderers.register(\"zeppelin\", html_renderer)\nrenderers.register(\"mimetype\", mimetype_renderer)\nrenderers.register(\"jupyterlab\", mimetype_renderer)\nrenderers.register(\"nteract\", mimetype_renderer)\nrenderers.register(\"json\", json_renderer)\nrenderers.register(\"png\", png_renderer)\nrenderers.register(\"svg\", svg_renderer)\nrenderers.enable(\"default\")\n\n\nclass VegaLite(Displayable):\n \"\"\"An IPython/Jupyter display class for rendering VegaLite 5.\"\"\"\n\n renderers = renderers\n schema_path = (__name__, \"schema/vega-lite-schema.json\")\n\n\ndef vegalite(spec, validate=True):\n \"\"\"Render and optionally validate a VegaLite 5 spec.\n\n This will use the currently enabled renderer to render the spec.\n\n Parameters\n ==========\n spec: dict\n A fully compliant VegaLite 5 spec, with the data portion fully processed.\n validate: bool\n Should the spec be validated against the VegaLite 5 schema?\n \"\"\"\n from IPython.display import display\n\n display(VegaLite(spec, validate=validate))\n", "path": "altair/vegalite/v5/display.py"}], "after_files": [{"content": "import os\n\nfrom ...utils.mimebundle import spec_to_mimebundle\nfrom ..display import Displayable\nfrom ..display import default_renderer_base\nfrom ..display import json_renderer_base\nfrom ..display import RendererRegistry\nfrom ..display import HTMLRenderer\n\nfrom .schema import SCHEMA_VERSION\n\nVEGALITE_VERSION = SCHEMA_VERSION.lstrip(\"v\")\nVEGA_VERSION = \"5\"\nVEGAEMBED_VERSION = \"6\"\n\n\n# ==============================================================================\n# VegaLite v5 renderer logic\n# ==============================================================================\n\n\n# The MIME type for Vega-Lite 5.x releases.\nVEGALITE_MIME_TYPE = \"application/vnd.vegalite.v5+json\" # type: str\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. 
Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = \"altair.vegalite.v5.renderer\" # type: str\n\n# The display message when rendering fails\nDEFAULT_DISPLAY = f\"\"\"\\\n<VegaLite {VEGALITE_VERSION.split('.')[0]} object>\n\nIf you see this message, it means the renderer has not been properly enabled\nfor the frontend that you are using. For more information, see\nhttps://altair-viz.github.io/user_guide/display_frontends.html#troubleshooting\n\"\"\"\n\nrenderers = RendererRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nhere = os.path.dirname(os.path.realpath(__file__))\n\n\ndef mimetype_renderer(spec, **metadata):\n return default_renderer_base(spec, VEGALITE_MIME_TYPE, DEFAULT_DISPLAY, **metadata)\n\n\ndef json_renderer(spec, **metadata):\n return json_renderer_base(spec, DEFAULT_DISPLAY, **metadata)\n\n\ndef png_renderer(spec, **metadata):\n return spec_to_mimebundle(\n spec,\n format=\"png\",\n mode=\"vega-lite\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n **metadata,\n )\n\n\ndef svg_renderer(spec, **metadata):\n return spec_to_mimebundle(\n spec,\n format=\"svg\",\n mode=\"vega-lite\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n **metadata,\n )\n\n\nhtml_renderer = HTMLRenderer(\n mode=\"vega-lite\",\n template=\"universal\",\n vega_version=VEGA_VERSION,\n vegaembed_version=VEGAEMBED_VERSION,\n vegalite_version=VEGALITE_VERSION,\n)\n\nrenderers.register(\"default\", html_renderer)\nrenderers.register(\"html\", html_renderer)\nrenderers.register(\"colab\", html_renderer)\nrenderers.register(\"kaggle\", html_renderer)\nrenderers.register(\"zeppelin\", html_renderer)\nrenderers.register(\"mimetype\", mimetype_renderer)\nrenderers.register(\"jupyterlab\", mimetype_renderer)\nrenderers.register(\"nteract\", mimetype_renderer)\nrenderers.register(\"json\", json_renderer)\nrenderers.register(\"png\", png_renderer)\nrenderers.register(\"svg\", svg_renderer)\nrenderers.enable(\"default\")\n\n\nclass VegaLite(Displayable):\n \"\"\"An IPython/Jupyter display class for rendering VegaLite 5.\"\"\"\n\n renderers = renderers\n schema_path = (__name__, \"schema/vega-lite-schema.json\")\n\n\ndef vegalite(spec, validate=True):\n \"\"\"Render and optionally validate a VegaLite 5 spec.\n\n This will use the currently enabled renderer to render the spec.\n\n Parameters\n ==========\n spec: dict\n A fully compliant VegaLite 5 spec, with the data portion fully processed.\n validate: bool\n Should the spec be validated against the VegaLite 5 schema?\n \"\"\"\n from IPython.display import display\n\n display(VegaLite(spec, validate=validate))\n", "path": "altair/vegalite/v5/display.py"}]} | 1,438 | 167 |
gh_patches_debug_7111 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"'function' object has no attribute '_reset'" in `pwndbg/pwndbg/color/syntax_highlight.py`
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
Using the latest dev branch with the `set syntax-highlight-style solarized-light` parameter, the above error is thrown:
```bash
$ gdb ~/matrix-matrix-multiply/build/src/dgemm -ex 'start' -ex ''
pwndbg: loaded 141 pwndbg commands and 42 shell commands. Type pwndbg [--shell | --all] [filter] for a list.
pwndbg: created $rebase, $ida GDB functions (can be used with print/break)
Traceback (most recent call last):
File "/home/czg/pwndbg/pwndbg/gdblib/config.py", line 93, in __get_set_string_gdb_gte_9
trigger()
File "/home/czg/pwndbg/pwndbg/color/syntax_highlight.py", line 37, in check_style
get_highlight_source._reset()
^^^^^^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'function' object has no attribute '_reset'
/home/czg/.gdbinit:19: Error in sourced command file:
Error occurred in Python: 'function' object has no attribute '_reset'
```
When I read `$ git log -p pwndbg/commands/context.py` I found nothing about `_reset`; the error only appeared after I recently upgraded Python from `3.10` to `3.11.3` and updated pwndbg.
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
<!--
### Steps to reproduce
-->
<!--
What do we have to do to reproduce the problem?
If this is connected to particular C/asm code,
please provide the smallest C code that reproduces the issue.
-->
<!--
### My setup
-->
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improving pwndbg and supporting other distros!
This can be displayed in pwndbg through `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/color/syntax_highlight.py`
Content:
```
1 import os.path
2 import re
3 from typing import Any
4 from typing import Dict
5
6 import pygments
7 import pygments.formatters
8 import pygments.lexers
9
10 import pwndbg.gdblib.config
11 from pwndbg.color import disable_colors
12 from pwndbg.color import message
13 from pwndbg.color import theme
14 from pwndbg.color.lexer import PwntoolsLexer
15
16 pwndbg.gdblib.config.add_param("syntax-highlight", True, "Source code / assembly syntax highlight")
17 style = theme.add_param(
18 "syntax-highlight-style",
19 "monokai",
20 "Source code / assembly syntax highlight stylename of pygments module",
21 )
22
23 formatter = pygments.formatters.Terminal256Formatter(style=str(style))
24 pwntools_lexer = PwntoolsLexer()
25 lexer_cache: Dict[str, Any] = {}
26
27
28 @pwndbg.gdblib.config.trigger(style)
29 def check_style() -> None:
30 global formatter
31 try:
32 formatter = pygments.formatters.Terminal256Formatter(style=str(style))
33
34 # Reset the highlighted source cache
35 from pwndbg.commands.context import get_highlight_source
36
37 get_highlight_source._reset()
38 except pygments.util.ClassNotFound:
39 print(
40 message.warn(f"The pygment formatter style '{style}' is not found, restore to default")
41 )
42 style.revert_default()
43
44
45 def syntax_highlight(code, filename=".asm"):
46 # No syntax highlight if pygment is not installed
47 if disable_colors:
48 return code
49
50 filename = os.path.basename(filename)
51
52 lexer = lexer_cache.get(filename, None)
53
54 # If source code is asm, use our customized lexer.
55 # Note: We can not register our Lexer to pygments and use their APIs,
56 # since the pygment only search the lexers installed via setuptools.
57 if not lexer:
58 for glob_pat in PwntoolsLexer.filenames:
59 pat = "^" + glob_pat.replace(".", r"\.").replace("*", r".*") + "$"
60 if re.match(pat, filename):
61 lexer = pwntools_lexer
62 break
63
64 if not lexer:
65 try:
66 lexer = pygments.lexers.guess_lexer_for_filename(filename, code, stripnl=False)
67 except pygments.util.ClassNotFound:
68 # no lexer for this file or invalid style
69 pass
70
71 if lexer:
72 lexer_cache[filename] = lexer
73
74 code = pygments.highlight(code, lexer, formatter).rstrip()
75
76 return code
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/color/syntax_highlight.py b/pwndbg/color/syntax_highlight.py
--- a/pwndbg/color/syntax_highlight.py
+++ b/pwndbg/color/syntax_highlight.py
@@ -34,7 +34,7 @@
# Reset the highlighted source cache
from pwndbg.commands.context import get_highlight_source
- get_highlight_source._reset()
+ get_highlight_source.cache.clear()
except pygments.util.ClassNotFound:
print(
message.warn(f"The pygment formatter style '{style}' is not found, restore to default")
| {"golden_diff": "diff --git a/pwndbg/color/syntax_highlight.py b/pwndbg/color/syntax_highlight.py\n--- a/pwndbg/color/syntax_highlight.py\n+++ b/pwndbg/color/syntax_highlight.py\n@@ -34,7 +34,7 @@\n # Reset the highlighted source cache\n from pwndbg.commands.context import get_highlight_source\n \n- get_highlight_source._reset()\n+ get_highlight_source.cache.clear()\n except pygments.util.ClassNotFound:\n print(\n message.warn(f\"The pygment formatter style '{style}' is not found, restore to default\")\n", "issue": "\"'function' object has no attribute '_reset'\" in `pwndbg/pwndbg/color/syntax_highlight.py`\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\n\r\nusing latest dev branch, with `set syntax-highlight-style solarized-light` param, above error will be thrown\r\n\r\n```bash\r\n$ gdb ~/matrix-matrix-multiply/build/src/dgemm -ex 'start' -ex ''\r\npwndbg: loaded 141 pwndbg commands and 42 shell commands. Type pwndbg [--shell | --all] [filter] for a list.\r\npwndbg: created $rebase, $ida GDB functions (can be used with print/break)\r\nTraceback (most recent call last):\r\n File \"/home/czg/pwndbg/pwndbg/gdblib/config.py\", line 93, in __get_set_string_gdb_gte_9\r\n trigger()\r\n File \"/home/czg/pwndbg/pwndbg/color/syntax_highlight.py\", line 37, in check_style\r\n get_highlight_source._reset()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: 'function' object has no attribute '_reset'\r\n/home/czg/.gdbinit:19: Error in sourced command file:\r\nError occurred in Python: 'function' object has no attribute '_reset'\r\n```\r\n\r\nwhen I read `$ git log -p pwndbg/commands/context.py` I found nothing about `_reset`, but the error only exists after I upgrade python from `3.10` to `3.11.3` and pwndbg recently.\r\n\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n<!--\r\n### Steps to reproduce\r\n-->\r\n\r\n<!--\r\nWhat do we have to do to reproduce the problem?\r\nIf this is connected to particular C/asm code, \r\nplease provide the smallest C code that reproduces the issue.\r\n-->\r\n\r\n<!--\r\n### My setup\r\n-->\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. 
on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n\n", "before_files": [{"content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\n\nimport pygments\nimport pygments.formatters\nimport pygments.lexers\n\nimport pwndbg.gdblib.config\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.color import theme\nfrom pwndbg.color.lexer import PwntoolsLexer\n\npwndbg.gdblib.config.add_param(\"syntax-highlight\", True, \"Source code / assembly syntax highlight\")\nstyle = theme.add_param(\n \"syntax-highlight-style\",\n \"monokai\",\n \"Source code / assembly syntax highlight stylename of pygments module\",\n)\n\nformatter = pygments.formatters.Terminal256Formatter(style=str(style))\npwntools_lexer = PwntoolsLexer()\nlexer_cache: Dict[str, Any] = {}\n\n\[email protected](style)\ndef check_style() -> None:\n global formatter\n try:\n formatter = pygments.formatters.Terminal256Formatter(style=str(style))\n\n # Reset the highlighted source cache\n from pwndbg.commands.context import get_highlight_source\n\n get_highlight_source._reset()\n except pygments.util.ClassNotFound:\n print(\n message.warn(f\"The pygment formatter style '{style}' is not found, restore to default\")\n )\n style.revert_default()\n\n\ndef syntax_highlight(code, filename=\".asm\"):\n # No syntax highlight if pygment is not installed\n if disable_colors:\n return code\n\n filename = os.path.basename(filename)\n\n lexer = lexer_cache.get(filename, None)\n\n # If source code is asm, use our customized lexer.\n # Note: We can not register our Lexer to pygments and use their APIs,\n # since the pygment only search the lexers installed via setuptools.\n if not lexer:\n for glob_pat in PwntoolsLexer.filenames:\n pat = \"^\" + glob_pat.replace(\".\", r\"\\.\").replace(\"*\", r\".*\") + \"$\"\n if re.match(pat, filename):\n lexer = pwntools_lexer\n break\n\n if not lexer:\n try:\n lexer = pygments.lexers.guess_lexer_for_filename(filename, code, stripnl=False)\n except pygments.util.ClassNotFound:\n # no lexer for this file or invalid style\n pass\n\n if lexer:\n lexer_cache[filename] = lexer\n\n code = pygments.highlight(code, lexer, formatter).rstrip()\n\n return code\n", "path": "pwndbg/color/syntax_highlight.py"}], "after_files": [{"content": "import os.path\nimport re\nfrom typing import Any\nfrom typing import Dict\n\nimport pygments\nimport pygments.formatters\nimport pygments.lexers\n\nimport pwndbg.gdblib.config\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.color import theme\nfrom pwndbg.color.lexer import PwntoolsLexer\n\npwndbg.gdblib.config.add_param(\"syntax-highlight\", True, \"Source code / assembly syntax highlight\")\nstyle = theme.add_param(\n \"syntax-highlight-style\",\n \"monokai\",\n \"Source code / assembly syntax highlight stylename of pygments module\",\n)\n\nformatter = pygments.formatters.Terminal256Formatter(style=str(style))\npwntools_lexer = PwntoolsLexer()\nlexer_cache: Dict[str, Any] = {}\n\n\[email protected](style)\ndef check_style() -> None:\n global formatter\n try:\n formatter = pygments.formatters.Terminal256Formatter(style=str(style))\n\n # Reset the highlighted 
source cache\n from pwndbg.commands.context import get_highlight_source\n\n get_highlight_source.cache.clear()\n except pygments.util.ClassNotFound:\n print(\n message.warn(f\"The pygment formatter style '{style}' is not found, restore to default\")\n )\n style.revert_default()\n\n\ndef syntax_highlight(code, filename=\".asm\"):\n # No syntax highlight if pygment is not installed\n if disable_colors:\n return code\n\n filename = os.path.basename(filename)\n\n lexer = lexer_cache.get(filename, None)\n\n # If source code is asm, use our customized lexer.\n # Note: We can not register our Lexer to pygments and use their APIs,\n # since the pygment only search the lexers installed via setuptools.\n if not lexer:\n for glob_pat in PwntoolsLexer.filenames:\n pat = \"^\" + glob_pat.replace(\".\", r\"\\.\").replace(\"*\", r\".*\") + \"$\"\n if re.match(pat, filename):\n lexer = pwntools_lexer\n break\n\n if not lexer:\n try:\n lexer = pygments.lexers.guess_lexer_for_filename(filename, code, stripnl=False)\n except pygments.util.ClassNotFound:\n # no lexer for this file or invalid style\n pass\n\n if lexer:\n lexer_cache[filename] = lexer\n\n code = pygments.highlight(code, lexer, formatter).rstrip()\n\n return code\n", "path": "pwndbg/color/syntax_highlight.py"}]} | 1,560 | 123 |
gh_patches_debug_4793 | rasdani/github-patches | git_diff | iterative__dvc-6688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
http: allow reading proxies from the current environment
Reading proxies from the environment was enabled by default in `requests`, but not in aiohttp, so we have to enable it explicitly to keep the current behavior. https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support. Discord context: https://discord.com/channels/485586884165107732/563406153334128681/891230518992052274
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/fs/http.py`
Content:
```
1 import threading
2
3 from funcy import cached_property, memoize, wrap_with
4
5 from dvc import prompt
6 from dvc.path_info import HTTPURLInfo
7 from dvc.scheme import Schemes
8
9 from .fsspec_wrapper import FSSpecWrapper, NoDirectoriesMixin
10
11
12 @wrap_with(threading.Lock())
13 @memoize
14 def ask_password(host, user):
15 return prompt.password(
16 "Enter a password for "
17 "host '{host}' user '{user}'".format(host=host, user=user)
18 )
19
20
21 def make_context(ssl_verify):
22 if isinstance(ssl_verify, bool) or ssl_verify is None:
23 return ssl_verify
24
25 # If this is a path, then we will create an
26 # SSL context for it, and load the given certificate.
27 import ssl
28
29 context = ssl.create_default_context()
30 context.load_verify_locations(ssl_verify)
31 return context
32
33
34 # pylint: disable=abstract-method
35 class HTTPFileSystem(NoDirectoriesMixin, FSSpecWrapper):
36 scheme = Schemes.HTTP
37 PATH_CLS = HTTPURLInfo
38 PARAM_CHECKSUM = "checksum"
39 REQUIRES = {"aiohttp": "aiohttp", "aiohttp-retry": "aiohttp_retry"}
40 CAN_TRAVERSE = False
41
42 SESSION_RETRIES = 5
43 SESSION_BACKOFF_FACTOR = 0.1
44 REQUEST_TIMEOUT = 60
45
46 def _prepare_credentials(self, **config):
47 import aiohttp
48 from fsspec.asyn import fsspec_loop
49
50 from dvc.config import ConfigError
51
52 credentials = {}
53 client_kwargs = credentials.setdefault("client_kwargs", {})
54
55 if config.get("auth"):
56 user = config.get("user")
57 password = config.get("password")
58 custom_auth_header = config.get("custom_auth_header")
59
60 if password is None and config.get("ask_password"):
61 password = ask_password(config.get("url"), user or "custom")
62
63 auth_method = config["auth"]
64 if auth_method == "basic":
65 if user is None or password is None:
66 raise ConfigError(
67 "HTTP 'basic' authentication require both "
68 "'user' and 'password'"
69 )
70
71 client_kwargs["auth"] = aiohttp.BasicAuth(user, password)
72 elif auth_method == "custom":
73 if custom_auth_header is None or password is None:
74 raise ConfigError(
75 "HTTP 'custom' authentication require both "
76 "'custom_auth_header' and 'password'"
77 )
78 credentials["headers"] = {custom_auth_header: password}
79 else:
80 raise NotImplementedError(
81 f"Auth method {auth_method!r} is not supported."
82 )
83
84 if "ssl_verify" in config:
85 with fsspec_loop():
86 client_kwargs["connector"] = aiohttp.TCPConnector(
87 ssl=make_context(config["ssl_verify"])
88 )
89
90 credentials["get_client"] = self.get_client
91 self.upload_method = config.get("method", "POST")
92 return credentials
93
94 async def get_client(self, **kwargs):
95 import aiohttp
96 from aiohttp_retry import ExponentialRetry, RetryClient
97
98 kwargs["retry_options"] = ExponentialRetry(
99 attempts=self.SESSION_RETRIES,
100 factor=self.SESSION_BACKOFF_FACTOR,
101 max_timeout=self.REQUEST_TIMEOUT,
102 )
103
104 # The default timeout for the aiohttp is 300 seconds
105 # which is too low for DVC's interactions (especially
106 # on the read) when dealing with large data blobs. We
107 # unlimit the total time to read, and only limit the
108 # time that is spent when connecting to the remote server.
109 kwargs["timeout"] = aiohttp.ClientTimeout(
110 total=None,
111 connect=self.REQUEST_TIMEOUT,
112 sock_connect=self.REQUEST_TIMEOUT,
113 sock_read=None,
114 )
115
116 return RetryClient(**kwargs)
117
118 @cached_property
119 def fs(self):
120 from fsspec.implementations.http import (
121 HTTPFileSystem as _HTTPFileSystem,
122 )
123
124 return _HTTPFileSystem(**self.fs_args)
125
126 def _entry_hook(self, entry):
127 entry["checksum"] = entry.get("ETag") or entry.get("Content-MD5")
128 return entry
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/fs/http.py b/dvc/fs/http.py
--- a/dvc/fs/http.py
+++ b/dvc/fs/http.py
@@ -87,6 +87,9 @@
ssl=make_context(config["ssl_verify"])
)
+ # Allow reading proxy configurations from the environment.
+ client_kwargs["trust_env"] = True
+
credentials["get_client"] = self.get_client
self.upload_method = config.get("method", "POST")
return credentials
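`trust_env=True` is an existing `aiohttp.ClientSession` flag: it makes the session read `HTTP_PROXY`/`HTTPS_PROXY`/`NO_PROXY` (and `~/.netrc`) from the environment, which `requests` did by default. A minimal sketch outside of DVC — the URL and proxy address are placeholders, and the call really attempts a network request:

```python
import asyncio
import os

import aiohttp


async def fetch_status(url: str) -> int:
    # trust_env=True tells aiohttp to honor HTTP_PROXY / HTTPS_PROXY / NO_PROXY
    # environment variables, mirroring the old requests-based behavior.
    async with aiohttp.ClientSession(trust_env=True) as session:
        async with session.get(url) as response:
            return response.status


os.environ.setdefault("HTTPS_PROXY", "http://proxy.example.com:3128")  # placeholder
print(asyncio.run(fetch_status("https://example.com")))
```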
| {"golden_diff": "diff --git a/dvc/fs/http.py b/dvc/fs/http.py\n--- a/dvc/fs/http.py\n+++ b/dvc/fs/http.py\n@@ -87,6 +87,9 @@\n ssl=make_context(config[\"ssl_verify\"])\n )\n \n+ # Allow reading proxy configurations from the environment.\n+ client_kwargs[\"trust_env\"] = True\n+\n credentials[\"get_client\"] = self.get_client\n self.upload_method = config.get(\"method\", \"POST\")\n return credentials\n", "issue": "http: allow reading proxies from the current environment\nThis was something enabled as default by the requests, but not on aiohttp. We have to explicitly enable it to keep the current behavior. https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support. Discord context: https://discord.com/channels/485586884165107732/563406153334128681/891230518992052274\n", "before_files": [{"content": "import threading\n\nfrom funcy import cached_property, memoize, wrap_with\n\nfrom dvc import prompt\nfrom dvc.path_info import HTTPURLInfo\nfrom dvc.scheme import Schemes\n\nfrom .fsspec_wrapper import FSSpecWrapper, NoDirectoriesMixin\n\n\n@wrap_with(threading.Lock())\n@memoize\ndef ask_password(host, user):\n return prompt.password(\n \"Enter a password for \"\n \"host '{host}' user '{user}'\".format(host=host, user=user)\n )\n\n\ndef make_context(ssl_verify):\n if isinstance(ssl_verify, bool) or ssl_verify is None:\n return ssl_verify\n\n # If this is a path, then we will create an\n # SSL context for it, and load the given certificate.\n import ssl\n\n context = ssl.create_default_context()\n context.load_verify_locations(ssl_verify)\n return context\n\n\n# pylint: disable=abstract-method\nclass HTTPFileSystem(NoDirectoriesMixin, FSSpecWrapper):\n scheme = Schemes.HTTP\n PATH_CLS = HTTPURLInfo\n PARAM_CHECKSUM = \"checksum\"\n REQUIRES = {\"aiohttp\": \"aiohttp\", \"aiohttp-retry\": \"aiohttp_retry\"}\n CAN_TRAVERSE = False\n\n SESSION_RETRIES = 5\n SESSION_BACKOFF_FACTOR = 0.1\n REQUEST_TIMEOUT = 60\n\n def _prepare_credentials(self, **config):\n import aiohttp\n from fsspec.asyn import fsspec_loop\n\n from dvc.config import ConfigError\n\n credentials = {}\n client_kwargs = credentials.setdefault(\"client_kwargs\", {})\n\n if config.get(\"auth\"):\n user = config.get(\"user\")\n password = config.get(\"password\")\n custom_auth_header = config.get(\"custom_auth_header\")\n\n if password is None and config.get(\"ask_password\"):\n password = ask_password(config.get(\"url\"), user or \"custom\")\n\n auth_method = config[\"auth\"]\n if auth_method == \"basic\":\n if user is None or password is None:\n raise ConfigError(\n \"HTTP 'basic' authentication require both \"\n \"'user' and 'password'\"\n )\n\n client_kwargs[\"auth\"] = aiohttp.BasicAuth(user, password)\n elif auth_method == \"custom\":\n if custom_auth_header is None or password is None:\n raise ConfigError(\n \"HTTP 'custom' authentication require both \"\n \"'custom_auth_header' and 'password'\"\n )\n credentials[\"headers\"] = {custom_auth_header: password}\n else:\n raise NotImplementedError(\n f\"Auth method {auth_method!r} is not supported.\"\n )\n\n if \"ssl_verify\" in config:\n with fsspec_loop():\n client_kwargs[\"connector\"] = aiohttp.TCPConnector(\n ssl=make_context(config[\"ssl_verify\"])\n )\n\n credentials[\"get_client\"] = self.get_client\n self.upload_method = config.get(\"method\", \"POST\")\n return credentials\n\n async def get_client(self, **kwargs):\n import aiohttp\n from aiohttp_retry import ExponentialRetry, RetryClient\n\n kwargs[\"retry_options\"] = ExponentialRetry(\n 
attempts=self.SESSION_RETRIES,\n factor=self.SESSION_BACKOFF_FACTOR,\n max_timeout=self.REQUEST_TIMEOUT,\n )\n\n # The default timeout for the aiohttp is 300 seconds\n # which is too low for DVC's interactions (especially\n # on the read) when dealing with large data blobs. We\n # unlimit the total time to read, and only limit the\n # time that is spent when connecting to the remote server.\n kwargs[\"timeout\"] = aiohttp.ClientTimeout(\n total=None,\n connect=self.REQUEST_TIMEOUT,\n sock_connect=self.REQUEST_TIMEOUT,\n sock_read=None,\n )\n\n return RetryClient(**kwargs)\n\n @cached_property\n def fs(self):\n from fsspec.implementations.http import (\n HTTPFileSystem as _HTTPFileSystem,\n )\n\n return _HTTPFileSystem(**self.fs_args)\n\n def _entry_hook(self, entry):\n entry[\"checksum\"] = entry.get(\"ETag\") or entry.get(\"Content-MD5\")\n return entry\n", "path": "dvc/fs/http.py"}], "after_files": [{"content": "import threading\n\nfrom funcy import cached_property, memoize, wrap_with\n\nfrom dvc import prompt\nfrom dvc.path_info import HTTPURLInfo\nfrom dvc.scheme import Schemes\n\nfrom .fsspec_wrapper import FSSpecWrapper, NoDirectoriesMixin\n\n\n@wrap_with(threading.Lock())\n@memoize\ndef ask_password(host, user):\n return prompt.password(\n \"Enter a password for \"\n \"host '{host}' user '{user}'\".format(host=host, user=user)\n )\n\n\ndef make_context(ssl_verify):\n if isinstance(ssl_verify, bool) or ssl_verify is None:\n return ssl_verify\n\n # If this is a path, then we will create an\n # SSL context for it, and load the given certificate.\n import ssl\n\n context = ssl.create_default_context()\n context.load_verify_locations(ssl_verify)\n return context\n\n\n# pylint: disable=abstract-method\nclass HTTPFileSystem(NoDirectoriesMixin, FSSpecWrapper):\n scheme = Schemes.HTTP\n PATH_CLS = HTTPURLInfo\n PARAM_CHECKSUM = \"checksum\"\n REQUIRES = {\"aiohttp\": \"aiohttp\", \"aiohttp-retry\": \"aiohttp_retry\"}\n CAN_TRAVERSE = False\n\n SESSION_RETRIES = 5\n SESSION_BACKOFF_FACTOR = 0.1\n REQUEST_TIMEOUT = 60\n\n def _prepare_credentials(self, **config):\n import aiohttp\n from fsspec.asyn import fsspec_loop\n\n from dvc.config import ConfigError\n\n credentials = {}\n client_kwargs = credentials.setdefault(\"client_kwargs\", {})\n\n if config.get(\"auth\"):\n user = config.get(\"user\")\n password = config.get(\"password\")\n custom_auth_header = config.get(\"custom_auth_header\")\n\n if password is None and config.get(\"ask_password\"):\n password = ask_password(config.get(\"url\"), user or \"custom\")\n\n auth_method = config[\"auth\"]\n if auth_method == \"basic\":\n if user is None or password is None:\n raise ConfigError(\n \"HTTP 'basic' authentication require both \"\n \"'user' and 'password'\"\n )\n\n client_kwargs[\"auth\"] = aiohttp.BasicAuth(user, password)\n elif auth_method == \"custom\":\n if custom_auth_header is None or password is None:\n raise ConfigError(\n \"HTTP 'custom' authentication require both \"\n \"'custom_auth_header' and 'password'\"\n )\n credentials[\"headers\"] = {custom_auth_header: password}\n else:\n raise NotImplementedError(\n f\"Auth method {auth_method!r} is not supported.\"\n )\n\n if \"ssl_verify\" in config:\n with fsspec_loop():\n client_kwargs[\"connector\"] = aiohttp.TCPConnector(\n ssl=make_context(config[\"ssl_verify\"])\n )\n\n # Allow reading proxy configurations from the environment.\n client_kwargs[\"trust_env\"] = True\n\n credentials[\"get_client\"] = self.get_client\n self.upload_method = config.get(\"method\", \"POST\")\n return 
credentials\n\n async def get_client(self, **kwargs):\n import aiohttp\n from aiohttp_retry import ExponentialRetry, RetryClient\n\n kwargs[\"retry_options\"] = ExponentialRetry(\n attempts=self.SESSION_RETRIES,\n factor=self.SESSION_BACKOFF_FACTOR,\n max_timeout=self.REQUEST_TIMEOUT,\n )\n\n # The default timeout for the aiohttp is 300 seconds\n # which is too low for DVC's interactions (especially\n # on the read) when dealing with large data blobs. We\n # unlimit the total time to read, and only limit the\n # time that is spent when connecting to the remote server.\n kwargs[\"timeout\"] = aiohttp.ClientTimeout(\n total=None,\n connect=self.REQUEST_TIMEOUT,\n sock_connect=self.REQUEST_TIMEOUT,\n sock_read=None,\n )\n\n return RetryClient(**kwargs)\n\n @cached_property\n def fs(self):\n from fsspec.implementations.http import (\n HTTPFileSystem as _HTTPFileSystem,\n )\n\n return _HTTPFileSystem(**self.fs_args)\n\n def _entry_hook(self, entry):\n entry[\"checksum\"] = entry.get(\"ETag\") or entry.get(\"Content-MD5\")\n return entry\n", "path": "dvc/fs/http.py"}]} | 1,568 | 108 |
gh_patches_debug_4485 | rasdani/github-patches | git_diff | goauthentik__authentik-8146 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2023.10.6 - "Please select a username" after Azure AD login
**Describe your question/**
Is it now expected behavior in version 2023.10.6 to ask every user for username input after logging in with Azure AD?

In previous versions it simply authenticated without any prompt, using the email address from Azure AD as the username.
Now it expects the user to enter a username (which leads to duplicated accounts, because users with their email address as the username already exist), and entering an already existing email address as the username shows an error:

I think it can be related to this fix:
https://github.com/goauthentik/authentik/pull/7970
Is it possible somehow to set this username automatically, or revert back to using email address so old user accounts will work again?
**Version and Deployment (please complete the following information):**
- authentik version: 2023.10.6
- Deployment: helm
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/azure_ad.py`
Content:
```
1 """AzureAD OAuth2 Views"""
2 from typing import Any
3
4 from structlog.stdlib import get_logger
5
6 from authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient
7 from authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback
8 from authentik.sources.oauth.types.registry import SourceType, registry
9 from authentik.sources.oauth.views.redirect import OAuthRedirect
10
11 LOGGER = get_logger()
12
13
14 class AzureADOAuthRedirect(OAuthRedirect):
15 """Azure AD OAuth2 Redirect"""
16
17 def get_additional_parameters(self, source): # pragma: no cover
18 return {
19 "scope": ["openid", "https://graph.microsoft.com/User.Read"],
20 }
21
22
23 class AzureADOAuthCallback(OpenIDConnectOAuth2Callback):
24 """AzureAD OAuth2 Callback"""
25
26 client_class = UserprofileHeaderAuthClient
27
28 def get_user_enroll_context(
29 self,
30 info: dict[str, Any],
31 ) -> dict[str, Any]:
32 mail = info.get("mail", None) or info.get("otherMails", [None])[0]
33 return {
34 "username": info.get("userPrincipalName"),
35 "email": mail,
36 "name": info.get("displayName"),
37 }
38
39
40 @registry.register()
41 class AzureADType(SourceType):
42 """Azure AD Type definition"""
43
44 callback_view = AzureADOAuthCallback
45 redirect_view = AzureADOAuthRedirect
46 verbose_name = "Azure AD"
47 name = "azuread"
48
49 urls_customizable = True
50
51 authorization_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
52 access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" # nosec
53 profile_url = "https://graph.microsoft.com/v1.0/me"
54 oidc_well_known_url = (
55 "https://login.microsoftonline.com/common/.well-known/openid-configuration"
56 )
57 oidc_jwks_url = "https://login.microsoftonline.com/common/discovery/keys"
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py
--- a/authentik/sources/oauth/types/azure_ad.py
+++ b/authentik/sources/oauth/types/azure_ad.py
@@ -25,6 +25,11 @@
client_class = UserprofileHeaderAuthClient
+ def get_user_id(self, info: dict[str, str]) -> str:
+ # Default try to get `id` for the Graph API endpoint
+ # fallback to OpenID logic in case the profile URL was changed
+ return info.get("id", super().get_user_id(info))
+
def get_user_enroll_context(
self,
info: dict[str, Any],
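The added `get_user_id` prefers the stable Graph API `id` field and only falls back to the parent class's OpenID logic when `id` is missing, so previously enrolled accounts keep matching. A toy illustration of that fallback — the profile payloads are invented, not real Graph responses, and the plain function stands in for the method on the callback class:

```python
def get_user_id(info: dict) -> str:
    # Prefer the immutable Graph API object id; fall back to the OpenID-style
    # "sub" claim that the parent implementation would otherwise use.
    return info.get("id", info.get("sub", ""))


graph_profile = {"id": "3f2504e0-4f89-11d3-9a0c-0305e82c3301", "userPrincipalName": "jane@example.com"}
oidc_profile = {"sub": "jane@example.com"}

print(get_user_id(graph_profile))  # -> the Graph object id
print(get_user_id(oidc_profile))   # -> falls back to the "sub" claim
```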
| {"golden_diff": "diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py\n--- a/authentik/sources/oauth/types/azure_ad.py\n+++ b/authentik/sources/oauth/types/azure_ad.py\n@@ -25,6 +25,11 @@\n \n client_class = UserprofileHeaderAuthClient\n \n+ def get_user_id(self, info: dict[str, str]) -> str:\n+ # Default try to get `id` for the Graph API endpoint\n+ # fallback to OpenID logic in case the profile URL was changed\n+ return info.get(\"id\", super().get_user_id(info))\n+\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n", "issue": "2023.10.6 - \"Please select a username\" after Azure AD login\n**Describe your question/**\r\n\r\nIs it now a expected behavior in 2023.10.6 version to ask every user for username input after logging in with azure ad?\r\n\r\n\r\nIn previous versions it was simply authenticating without any prompt, using email address from Azure AD as username.\r\n\r\nNow it expects user to input username (and it leads to duplicated accounts, because users with mail as username already exist), and if you enter already existing mail as username it shows error:\r\n\r\n\r\nI think it can be related to this fix:\r\nhttps://github.com/goauthentik/authentik/pull/7970\r\n\r\nIs it possible somehow to set this username automatically, or revert back to using email address so old user accounts will work again?\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.6\r\n- Deployment: helm\r\n\r\n\n", "before_files": [{"content": "\"\"\"AzureAD OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}], "after_files": [{"content": "\"\"\"AzureAD OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom 
authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_id(self, info: dict[str, str]) -> str:\n # Default try to get `id` for the Graph API endpoint\n # fallback to OpenID logic in case the profile URL was changed\n return info.get(\"id\", super().get_user_id(info))\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}]} | 1,131 | 161 |
gh_patches_debug_4482 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-313 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing PyPI classifier for Python 3.6 support
Currently, the PyPI page shows the following as supported:
> Programming Language :: Python
> Programming Language :: Python :: 3
> Programming Language :: Python :: 3.3
> Programming Language :: Python :: 3.4
> Programming Language :: Python :: 3.5
However, Python 3.6 is part of the automated tests, and the README states it is supported,
so I'm presuming this is just an omission.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 import os.path
3 import sys
4 import platform
5 from setuptools import setup, find_packages
6
7
8 install_requires = ['async-timeout']
9 if platform.python_implementation() == 'CPython':
10 install_requires.append('hiredis')
11
12 PY_VER = sys.version_info
13
14 if PY_VER >= (3, 4):
15 pass
16 elif PY_VER >= (3, 3):
17 install_requires.append('asyncio')
18 else:
19 raise RuntimeError("aioredis doesn't support Python version prior 3.3")
20
21
22 def read(*parts):
23 with open(os.path.join(*parts), 'rt') as f:
24 return f.read().strip()
25
26
27 def read_version():
28 regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
29 init_py = os.path.join(os.path.dirname(__file__),
30 'aioredis', '__init__.py')
31 with open(init_py) as f:
32 for line in f:
33 match = regexp.match(line)
34 if match is not None:
35 return match.group(1)
36 else:
37 raise RuntimeError('Cannot find version in aioredis/__init__.py')
38
39
40 classifiers = [
41 'License :: OSI Approved :: MIT License',
42 'Development Status :: 4 - Beta',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.3',
46 'Programming Language :: Python :: 3.4',
47 'Programming Language :: Python :: 3.5',
48 'Operating System :: POSIX',
49 'Environment :: Web Environment',
50 'Intended Audience :: Developers',
51 'Topic :: Software Development',
52 'Topic :: Software Development :: Libraries',
53 'Framework :: AsyncIO',
54 ]
55
56 setup(name='aioredis',
57 version=read_version(),
58 description=("asyncio (PEP 3156) Redis support"),
59 long_description="\n\n".join((read('README.rst'), read('CHANGES.txt'))),
60 classifiers=classifiers,
61 platforms=["POSIX"],
62 author="Alexey Popravka",
63 author_email="[email protected]",
64 url="https://github.com/aio-libs/aioredis",
65 license="MIT",
66 packages=find_packages(exclude=["tests"]),
67 install_requires=install_requires,
68 include_package_data=True,
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,6 +45,7 @@
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Operating System :: POSIX',
'Environment :: Web Environment',
'Intended Audience :: Developers',
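One way to keep such classifier lists from drifting out of sync with the versions actually tested is to generate them from a single list; a small sketch of that idea, assuming the supported versions shown below:

```python
# Illustrative only: derive the per-version classifiers from one list so a newly
# supported interpreter (such as 3.6) cannot be forgotten in setup.py.
SUPPORTED_PYTHONS = ["3.3", "3.4", "3.5", "3.6"]

classifiers = [
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
] + ["Programming Language :: Python :: " + version for version in SUPPORTED_PYTHONS]

print("\n".join(classifiers))
```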
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,6 +45,7 @@\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n", "issue": "Missing PyPI classifier for Python 3.6 support\nCurrently, the PyPI page shows the following as supported:\r\n\r\n> Programming Language :: Python\r\n> Programming Language :: Python :: 3\r\n> Programming Language :: Python :: 3.3\r\n> Programming Language :: Python :: 3.4\r\n> Programming Language :: Python :: 3.5\r\n\r\nHowever, Python 3.6 is part of the automated tests, and the README states it is supported,\r\nso I'm presuming this is just an omission.\n", "before_files": [{"content": "import re\nimport os.path\nimport sys\nimport platform\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = ['async-timeout']\nif platform.python_implementation() == 'CPython':\n install_requires.append('hiredis')\n\nPY_VER = sys.version_info\n\nif PY_VER >= (3, 4):\n pass\nelif PY_VER >= (3, 3):\n install_requires.append('asyncio')\nelse:\n raise RuntimeError(\"aioredis doesn't support Python version prior 3.3\")\n\n\ndef read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n init_py = os.path.join(os.path.dirname(__file__),\n 'aioredis', '__init__.py')\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n else:\n raise RuntimeError('Cannot find version in aioredis/__init__.py')\n\n\nclassifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Framework :: AsyncIO',\n]\n\nsetup(name='aioredis',\n version=read_version(),\n description=(\"asyncio (PEP 3156) Redis support\"),\n long_description=\"\\n\\n\".join((read('README.rst'), read('CHANGES.txt'))),\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n author=\"Alexey Popravka\",\n author_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=install_requires,\n include_package_data=True,\n )\n", "path": "setup.py"}], "after_files": [{"content": "import re\nimport os.path\nimport sys\nimport platform\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = ['async-timeout']\nif platform.python_implementation() == 'CPython':\n install_requires.append('hiredis')\n\nPY_VER = sys.version_info\n\nif PY_VER >= (3, 4):\n pass\nelif PY_VER >= (3, 3):\n install_requires.append('asyncio')\nelse:\n raise RuntimeError(\"aioredis doesn't support Python version prior 3.3\")\n\n\ndef read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n init_py = os.path.join(os.path.dirname(__file__),\n 'aioredis', '__init__.py')\n 
with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n else:\n raise RuntimeError('Cannot find version in aioredis/__init__.py')\n\n\nclassifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Framework :: AsyncIO',\n]\n\nsetup(name='aioredis',\n version=read_version(),\n description=(\"asyncio (PEP 3156) Redis support\"),\n long_description=\"\\n\\n\".join((read('README.rst'), read('CHANGES.txt'))),\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n author=\"Alexey Popravka\",\n author_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=install_requires,\n include_package_data=True,\n )\n", "path": "setup.py"}]} | 1,001 | 103 |
gh_patches_debug_23807 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-860 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement WordPress API endpoint URL patterns
### Motivation
With Django we decided to introduce a new, clean API URL pattern. However, the apps do not use this API yet. We already have exports of the content, and the app team could start end-to-end testing as soon as the app can interface with the CMS.
### Proposed Solution
We need to implement the URL pattern defined in https://github.com/Integreat/cms/wiki/REST-APIv3-Documentation
The WordPress URL pattern should be available in addition to the current (clean) URLs, so that the app team can switch over.
Please test the integration with the web app by [changing the back end URL](https://github.com/Integreat/cms/wiki/REST-APIv3-Documentation#test-api-with-webapp) to the Django system. If you need help finding issues or interpreting error messages, feel free to contact the app team for help.
### Alternatives
None, needs to be done.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/backend/urls.py`
Content:
```
1 """
2 Django URL dispatcher.
3 Delegates the following namespaces:
4
5 * ``/api/`` to :mod:`api.urls`
6
7 * ``/admin/`` to :meth:`django.contrib.admin.ModelAdmin.get_urls`
8
9 * ``/i18n/`` to :mod:`django.conf.urls.i18n`
10
11 * ``/sitemap.xml`` and ``/<region_slug>/<language_slug>/sitemap.xml`` to :mod:`sitemap.urls`
12
13 * ``/`` to :mod:`cms.urls`
14
15 Additionally, the error handlers in :mod:`cms.views.error_handler` are referenced here (see :doc:`ref/urls`).
16
17 For more information on this file, see :doc:`topics/http/urls`.
18 """
19 from django.conf.urls import include, url
20 from django.conf import settings
21 from django.contrib import admin
22
23
24 urlpatterns = [
25 url(r"^api/", include("api.urls")),
26 url(r"^i18n/", include("django.conf.urls.i18n")),
27 ]
28
29 # The admin/endpoint is only activated if the system is in debug mode.
30 if settings.DEBUG:
31 urlpatterns.append(url(r"^admin/", admin.site.urls))
32
33 # Unfortunatly we need to do this in such way, as the admin endpoint needs to be added before the endpoints of the other apps.
34 urlpatterns += [
35 url(r"^", include("sitemap.urls")),
36 url(r"^", include("cms.urls")),
37 ]
38
39 handler400 = "cms.views.error_handler.handler400"
40 handler403 = "cms.views.error_handler.handler403"
41 handler404 = "cms.views.error_handler.handler404"
42 handler500 = "cms.views.error_handler.handler500"
43
```
Path: `src/api/urls.py`
Content:
```
1 """
2 Expansion of API-Endpoints for the CMS
3 """
4 from django.conf.urls import include, url
5
6 from .v3.events import events
7 from .v3.feedback import (
8 page_feedback,
9 search_result_feedback,
10 region_feedback,
11 offer_feedback,
12 offer_list_feedback,
13 event_list_feedback,
14 event_feedback,
15 poi_feedback,
16 map_feedback,
17 imprint_page_feedback,
18 legacy_feedback_endpoint,
19 )
20 from .v3.imprint import imprint
21 from .v3.languages import languages
22 from .v3.locations import locations
23 from .v3.pages import pages
24 from .v3.pdf_export import pdf_export
25 from .v3.push_notifications import sent_push_notifications
26 from .v3.regions import regions, liveregions, hiddenregions, pushnew
27 from .v3.offers import offers
28 from .v3.single_page import single_page
29
30 urlpatterns = [
31 url(r"^regions/?$", regions, name="api_regions"),
32 url(r"^regions/live/?$", liveregions, name="api_regions_live"),
33 url(r"^regions/hidden/?$", hiddenregions, name="api_regions_hidden"),
34 url(r"^regions/pushnew/?$", pushnew, name="api_pushnew"),
35 url(
36 r"^(?P<region_slug>[-\w]+)/",
37 include(
38 [
39 url(r"^languages/?$", languages, name="api_languages"),
40 url(r"^(?:offers|extras)/?$", offers, name="api_offers"),
41 url(
42 r"^(?P<language_slug>[-\w]+)/",
43 include(
44 [
45 url(r"^pages/?$", pages, name="api_pages"),
46 url(r"^locations/?$", locations, name="api_locations"),
47 url(r"^events/?$", events, name="api_events"),
48 url(
49 r"^page/?$",
50 single_page,
51 name="api_single_page",
52 ),
53 url(
54 r"^pdf/?$",
55 pdf_export,
56 name="api_pdf_export",
57 ),
58 url(
59 r"^sent_push_notifications/?$",
60 sent_push_notifications,
61 name="api_sent_push_notifications",
62 ),
63 url(
64 r"^(?:imprint|disclaimer)/?$",
65 imprint,
66 name="api_imprint",
67 ),
68 url(r"^(?:offers|extras)/?$", offers, name="api_offers"),
69 url(
70 r"^feedback/",
71 include(
72 [
73 url(
74 r"^$",
75 legacy_feedback_endpoint.legacy_feedback_endpoint,
76 name="api_legacy_feedback_endpoint",
77 ),
78 url(
79 r"^categories/?$",
80 region_feedback.region_feedback,
81 name="api_region_feedback",
82 ),
83 url(
84 r"^page/?$",
85 page_feedback.page_feedback,
86 name="api_page_feedback",
87 ),
88 url(
89 r"^poi/?$",
90 poi_feedback.poi_feedback,
91 name="api_poi_feedback",
92 ),
93 url(
94 r"^event/?$",
95 event_feedback.event_feedback,
96 name="api_event_feedback",
97 ),
98 url(
99 r"^events/?$",
100 event_list_feedback.event_list_feedback,
101 name="api_event_list_feedback",
102 ),
103 url(
104 r"^imprint-page/?$",
105 imprint_page_feedback.imprint_page_feedback,
106 name="api_imprint_page_feedbacks",
107 ),
108 url(
109 r"^map/?$",
110 map_feedback.map_feedback,
111 name="api_map_feedback",
112 ),
113 url(
114 r"^search/?$",
115 search_result_feedback.search_result_feedback,
116 name="api_search_result_feedback",
117 ),
118 url(
119 r"^(?:extras|offers)/?$",
120 offer_list_feedback.offer_list_feedback,
121 name="api_offer_list_feedback",
122 ),
123 url(
124 r"^(?:extra|offer)/?$",
125 offer_feedback.offer_feedback,
126 name="api_offer_feedback",
127 ),
128 ]
129 ),
130 ),
131 ]
132 ),
133 ),
134 ]
135 ),
136 ),
137 ]
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/api/urls.py b/src/api/urls.py
--- a/src/api/urls.py
+++ b/src/api/urls.py
@@ -27,11 +27,7 @@
from .v3.offers import offers
from .v3.single_page import single_page
-urlpatterns = [
- url(r"^regions/?$", regions, name="api_regions"),
- url(r"^regions/live/?$", liveregions, name="api_regions_live"),
- url(r"^regions/hidden/?$", hiddenregions, name="api_regions_hidden"),
- url(r"^regions/pushnew/?$", pushnew, name="api_pushnew"),
+api_urlpatterns = [
url(
r"^(?P<region_slug>[-\w]+)/",
include(
@@ -135,3 +131,18 @@
),
),
]
+
+region_api_urlpatterns = [
+ url(r"^$", regions, name="api_regions"),
+ url(r"^live/?$", liveregions, name="api_regions_live"),
+ url(r"^hidden/?$", hiddenregions, name="api_regions_hidden"),
+ url(r"^pushnew/?$", pushnew, name="api_pushnew"),
+]
+
+
+urlpatterns = [
+ url(r"^api/", include(api_urlpatterns)),
+ url(r"^api/regions/", include(region_api_urlpatterns)),
+ url(r"^wp-json/extensions/v3/", include(api_urlpatterns)),
+ url(r"^wp-json/extensions/v3/sites/", include(region_api_urlpatterns)),
+]
diff --git a/src/backend/urls.py b/src/backend/urls.py
--- a/src/backend/urls.py
+++ b/src/backend/urls.py
@@ -22,7 +22,7 @@
urlpatterns = [
- url(r"^api/", include("api.urls")),
+ url(r"^", include("api.urls")),
url(r"^i18n/", include("django.conf.urls.i18n")),
]
| {"golden_diff": "diff --git a/src/api/urls.py b/src/api/urls.py\n--- a/src/api/urls.py\n+++ b/src/api/urls.py\n@@ -27,11 +27,7 @@\n from .v3.offers import offers\n from .v3.single_page import single_page\n \n-urlpatterns = [\n- url(r\"^regions/?$\", regions, name=\"api_regions\"),\n- url(r\"^regions/live/?$\", liveregions, name=\"api_regions_live\"),\n- url(r\"^regions/hidden/?$\", hiddenregions, name=\"api_regions_hidden\"),\n- url(r\"^regions/pushnew/?$\", pushnew, name=\"api_pushnew\"),\n+api_urlpatterns = [\n url(\n r\"^(?P<region_slug>[-\\w]+)/\",\n include(\n@@ -135,3 +131,18 @@\n ),\n ),\n ]\n+\n+region_api_urlpatterns = [\n+ url(r\"^$\", regions, name=\"api_regions\"),\n+ url(r\"^live/?$\", liveregions, name=\"api_regions_live\"),\n+ url(r\"^hidden/?$\", hiddenregions, name=\"api_regions_hidden\"),\n+ url(r\"^pushnew/?$\", pushnew, name=\"api_pushnew\"),\n+]\n+\n+\n+urlpatterns = [\n+ url(r\"^api/\", include(api_urlpatterns)),\n+ url(r\"^api/regions/\", include(region_api_urlpatterns)),\n+ url(r\"^wp-json/extensions/v3/\", include(api_urlpatterns)),\n+ url(r\"^wp-json/extensions/v3/sites/\", include(region_api_urlpatterns)),\n+]\ndiff --git a/src/backend/urls.py b/src/backend/urls.py\n--- a/src/backend/urls.py\n+++ b/src/backend/urls.py\n@@ -22,7 +22,7 @@\n \n \n urlpatterns = [\n- url(r\"^api/\", include(\"api.urls\")),\n+ url(r\"^\", include(\"api.urls\")),\n url(r\"^i18n/\", include(\"django.conf.urls.i18n\")),\n ]\n", "issue": "Implement WordPress API endpoint URL patterns\n### Motivation\r\nWith Django we decided to introduce a new, clean API URL pattern. However, the apps do not use this API yet. We already have exports of the content and the app team could start end to end testing, as soon as the app can interface with the CMS.\r\n\r\n\r\n### Proposed Solution\r\nWe need to implement the URL pattern defined in https://github.com/Integreat/cms/wiki/REST-APIv3-Documentation\r\nThe WordPress URL pattern should be available in addition to the current (clean) URLs, so that the app team can switch over.\r\n\r\nPlease test the integration with the web app by [changing the back end URL](https://github.com/Integreat/cms/wiki/REST-APIv3-Documentation#test-api-with-webapp) to the Django system. 
If you need help finding issues or interpreting error messages, feel free to contact the app team for help.\r\n\r\n### Alternatives\r\nNone, needs to be done.\r\n\n", "before_files": [{"content": "\"\"\"\nDjango URL dispatcher.\nDelegates the following namespaces:\n\n* ``/api/`` to :mod:`api.urls`\n\n* ``/admin/`` to :meth:`django.contrib.admin.ModelAdmin.get_urls`\n\n* ``/i18n/`` to :mod:`django.conf.urls.i18n`\n\n* ``/sitemap.xml`` and ``/<region_slug>/<language_slug>/sitemap.xml`` to :mod:`sitemap.urls`\n\n* ``/`` to :mod:`cms.urls`\n\nAdditionally, the error handlers in :mod:`cms.views.error_handler` are referenced here (see :doc:`ref/urls`).\n\nFor more information on this file, see :doc:`topics/http/urls`.\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.contrib import admin\n\n\nurlpatterns = [\n url(r\"^api/\", include(\"api.urls\")),\n url(r\"^i18n/\", include(\"django.conf.urls.i18n\")),\n]\n\n# The admin/endpoint is only activated if the system is in debug mode.\nif settings.DEBUG:\n urlpatterns.append(url(r\"^admin/\", admin.site.urls))\n\n# Unfortunatly we need to do this in such way, as the admin endpoint needs to be added before the endpoints of the other apps.\nurlpatterns += [\n url(r\"^\", include(\"sitemap.urls\")),\n url(r\"^\", include(\"cms.urls\")),\n]\n\nhandler400 = \"cms.views.error_handler.handler400\"\nhandler403 = \"cms.views.error_handler.handler403\"\nhandler404 = \"cms.views.error_handler.handler404\"\nhandler500 = \"cms.views.error_handler.handler500\"\n", "path": "src/backend/urls.py"}, {"content": "\"\"\"\nExpansion of API-Endpoints for the CMS\n\"\"\"\nfrom django.conf.urls import include, url\n\nfrom .v3.events import events\nfrom .v3.feedback import (\n page_feedback,\n search_result_feedback,\n region_feedback,\n offer_feedback,\n offer_list_feedback,\n event_list_feedback,\n event_feedback,\n poi_feedback,\n map_feedback,\n imprint_page_feedback,\n legacy_feedback_endpoint,\n)\nfrom .v3.imprint import imprint\nfrom .v3.languages import languages\nfrom .v3.locations import locations\nfrom .v3.pages import pages\nfrom .v3.pdf_export import pdf_export\nfrom .v3.push_notifications import sent_push_notifications\nfrom .v3.regions import regions, liveregions, hiddenregions, pushnew\nfrom .v3.offers import offers\nfrom .v3.single_page import single_page\n\nurlpatterns = [\n url(r\"^regions/?$\", regions, name=\"api_regions\"),\n url(r\"^regions/live/?$\", liveregions, name=\"api_regions_live\"),\n url(r\"^regions/hidden/?$\", hiddenregions, name=\"api_regions_hidden\"),\n url(r\"^regions/pushnew/?$\", pushnew, name=\"api_pushnew\"),\n url(\n r\"^(?P<region_slug>[-\\w]+)/\",\n include(\n [\n url(r\"^languages/?$\", languages, name=\"api_languages\"),\n url(r\"^(?:offers|extras)/?$\", offers, name=\"api_offers\"),\n url(\n r\"^(?P<language_slug>[-\\w]+)/\",\n include(\n [\n url(r\"^pages/?$\", pages, name=\"api_pages\"),\n url(r\"^locations/?$\", locations, name=\"api_locations\"),\n url(r\"^events/?$\", events, name=\"api_events\"),\n url(\n r\"^page/?$\",\n single_page,\n name=\"api_single_page\",\n ),\n url(\n r\"^pdf/?$\",\n pdf_export,\n name=\"api_pdf_export\",\n ),\n url(\n r\"^sent_push_notifications/?$\",\n sent_push_notifications,\n name=\"api_sent_push_notifications\",\n ),\n url(\n r\"^(?:imprint|disclaimer)/?$\",\n imprint,\n name=\"api_imprint\",\n ),\n url(r\"^(?:offers|extras)/?$\", offers, name=\"api_offers\"),\n url(\n r\"^feedback/\",\n include(\n [\n url(\n r\"^$\",\n 
legacy_feedback_endpoint.legacy_feedback_endpoint,\n name=\"api_legacy_feedback_endpoint\",\n ),\n url(\n r\"^categories/?$\",\n region_feedback.region_feedback,\n name=\"api_region_feedback\",\n ),\n url(\n r\"^page/?$\",\n page_feedback.page_feedback,\n name=\"api_page_feedback\",\n ),\n url(\n r\"^poi/?$\",\n poi_feedback.poi_feedback,\n name=\"api_poi_feedback\",\n ),\n url(\n r\"^event/?$\",\n event_feedback.event_feedback,\n name=\"api_event_feedback\",\n ),\n url(\n r\"^events/?$\",\n event_list_feedback.event_list_feedback,\n name=\"api_event_list_feedback\",\n ),\n url(\n r\"^imprint-page/?$\",\n imprint_page_feedback.imprint_page_feedback,\n name=\"api_imprint_page_feedbacks\",\n ),\n url(\n r\"^map/?$\",\n map_feedback.map_feedback,\n name=\"api_map_feedback\",\n ),\n url(\n r\"^search/?$\",\n search_result_feedback.search_result_feedback,\n name=\"api_search_result_feedback\",\n ),\n url(\n r\"^(?:extras|offers)/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"api_offer_list_feedback\",\n ),\n url(\n r\"^(?:extra|offer)/?$\",\n offer_feedback.offer_feedback,\n name=\"api_offer_feedback\",\n ),\n ]\n ),\n ),\n ]\n ),\n ),\n ]\n ),\n ),\n]\n", "path": "src/api/urls.py"}], "after_files": [{"content": "\"\"\"\nDjango URL dispatcher.\nDelegates the following namespaces:\n\n* ``/api/`` to :mod:`api.urls`\n\n* ``/admin/`` to :meth:`django.contrib.admin.ModelAdmin.get_urls`\n\n* ``/i18n/`` to :mod:`django.conf.urls.i18n`\n\n* ``/sitemap.xml`` and ``/<region_slug>/<language_slug>/sitemap.xml`` to :mod:`sitemap.urls`\n\n* ``/`` to :mod:`cms.urls`\n\nAdditionally, the error handlers in :mod:`cms.views.error_handler` are referenced here (see :doc:`ref/urls`).\n\nFor more information on this file, see :doc:`topics/http/urls`.\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.conf import settings\nfrom django.contrib import admin\n\n\nurlpatterns = [\n url(r\"^\", include(\"api.urls\")),\n url(r\"^i18n/\", include(\"django.conf.urls.i18n\")),\n]\n\n# The admin/endpoint is only activated if the system is in debug mode.\nif settings.DEBUG:\n urlpatterns.append(url(r\"^admin/\", admin.site.urls))\n\n# Unfortunatly we need to do this in such way, as the admin endpoint needs to be added before the endpoints of the other apps.\nurlpatterns += [\n url(r\"^\", include(\"sitemap.urls\")),\n url(r\"^\", include(\"cms.urls\")),\n]\n\nhandler400 = \"cms.views.error_handler.handler400\"\nhandler403 = \"cms.views.error_handler.handler403\"\nhandler404 = \"cms.views.error_handler.handler404\"\nhandler500 = \"cms.views.error_handler.handler500\"\n", "path": "src/backend/urls.py"}, {"content": "\"\"\"\nExpansion of API-Endpoints for the CMS\n\"\"\"\nfrom django.conf.urls import include, url\n\nfrom .v3.events import events\nfrom .v3.feedback import (\n page_feedback,\n search_result_feedback,\n region_feedback,\n offer_feedback,\n offer_list_feedback,\n event_list_feedback,\n event_feedback,\n poi_feedback,\n map_feedback,\n imprint_page_feedback,\n legacy_feedback_endpoint,\n)\nfrom .v3.imprint import imprint\nfrom .v3.languages import languages\nfrom .v3.locations import locations\nfrom .v3.pages import pages\nfrom .v3.pdf_export import pdf_export\nfrom .v3.push_notifications import sent_push_notifications\nfrom .v3.regions import regions, liveregions, hiddenregions, pushnew\nfrom .v3.offers import offers\nfrom .v3.single_page import single_page\n\napi_urlpatterns = [\n url(\n r\"^(?P<region_slug>[-\\w]+)/\",\n include(\n [\n url(r\"^languages/?$\", languages, 
name=\"api_languages\"),\n url(r\"^(?:offers|extras)/?$\", offers, name=\"api_offers\"),\n url(\n r\"^(?P<language_slug>[-\\w]+)/\",\n include(\n [\n url(r\"^pages/?$\", pages, name=\"api_pages\"),\n url(r\"^locations/?$\", locations, name=\"api_locations\"),\n url(r\"^events/?$\", events, name=\"api_events\"),\n url(\n r\"^page/?$\",\n single_page,\n name=\"api_single_page\",\n ),\n url(\n r\"^pdf/?$\",\n pdf_export,\n name=\"api_pdf_export\",\n ),\n url(\n r\"^sent_push_notifications/?$\",\n sent_push_notifications,\n name=\"api_sent_push_notifications\",\n ),\n url(\n r\"^(?:imprint|disclaimer)/?$\",\n imprint,\n name=\"api_imprint\",\n ),\n url(r\"^(?:offers|extras)/?$\", offers, name=\"api_offers\"),\n url(\n r\"^feedback/\",\n include(\n [\n url(\n r\"^$\",\n legacy_feedback_endpoint.legacy_feedback_endpoint,\n name=\"api_legacy_feedback_endpoint\",\n ),\n url(\n r\"^categories/?$\",\n region_feedback.region_feedback,\n name=\"api_region_feedback\",\n ),\n url(\n r\"^page/?$\",\n page_feedback.page_feedback,\n name=\"api_page_feedback\",\n ),\n url(\n r\"^poi/?$\",\n poi_feedback.poi_feedback,\n name=\"api_poi_feedback\",\n ),\n url(\n r\"^event/?$\",\n event_feedback.event_feedback,\n name=\"api_event_feedback\",\n ),\n url(\n r\"^events/?$\",\n event_list_feedback.event_list_feedback,\n name=\"api_event_list_feedback\",\n ),\n url(\n r\"^imprint-page/?$\",\n imprint_page_feedback.imprint_page_feedback,\n name=\"api_imprint_page_feedbacks\",\n ),\n url(\n r\"^map/?$\",\n map_feedback.map_feedback,\n name=\"api_map_feedback\",\n ),\n url(\n r\"^search/?$\",\n search_result_feedback.search_result_feedback,\n name=\"api_search_result_feedback\",\n ),\n url(\n r\"^(?:extras|offers)/?$\",\n offer_list_feedback.offer_list_feedback,\n name=\"api_offer_list_feedback\",\n ),\n url(\n r\"^(?:extra|offer)/?$\",\n offer_feedback.offer_feedback,\n name=\"api_offer_feedback\",\n ),\n ]\n ),\n ),\n ]\n ),\n ),\n ]\n ),\n ),\n]\n\nregion_api_urlpatterns = [\n url(r\"^$\", regions, name=\"api_regions\"),\n url(r\"^live/?$\", liveregions, name=\"api_regions_live\"),\n url(r\"^hidden/?$\", hiddenregions, name=\"api_regions_hidden\"),\n url(r\"^pushnew/?$\", pushnew, name=\"api_pushnew\"),\n]\n\n\nurlpatterns = [\n url(r\"^api/\", include(api_urlpatterns)),\n url(r\"^api/regions/\", include(region_api_urlpatterns)),\n url(r\"^wp-json/extensions/v3/\", include(api_urlpatterns)),\n url(r\"^wp-json/extensions/v3/sites/\", include(region_api_urlpatterns)),\n]\n", "path": "src/api/urls.py"}]} | 2,045 | 424 |
gh_patches_debug_20523 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-507 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"cannot decode v0.3 services payload" error when tracing aiopg
I have a Python application using aiopg and monitored by Datadog. When starting, it sends a bad frame to `trace-agent` and then everything goes fine.
Versions:
- datadog-agent: 6.2.1
- ddtrace-py: 0.12.1
- aiopg: 0.14.0
Here is a minimalist test case:
```python
import asyncio
import aiopg
from ddtrace import patch
patch(aiopg=True)
async def main():
async with aiopg.connect(host=None) as db:
pass
while True:
await asyncio.sleep(0.1)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
This logs the following error:
```
failed_to_send services to Agent: HTTP error status 400, reason Bad Request, message Content-Type: text/plain; charset=utf-8
X-Content-Type-Options: nosniff
Date: Mon, 18 Jun 2018 15:25:18 GMT
Content-Length: 59
```
And then `trace-agent` reports:
```
trace-agent[4437]: 2018-06-18 15:31:16 ERROR (receiver.go:275) - cannot decode v0.3 services payload: msgp: attempted to decode type "nil" with method for "str"
```
I believe this is related to https://github.com/DataDog/datadog-trace-agent/issues/350.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/aiopg/connection.py`
Content:
```
1 import asyncio
2 import wrapt
3
4 from aiopg.utils import _ContextManager
5
6 from .. import dbapi
7 from ...ext import sql
8 from ...pin import Pin
9
10
11 class AIOTracedCursor(wrapt.ObjectProxy):
12 """ TracedCursor wraps a psql cursor and traces it's queries. """
13
14 def __init__(self, cursor, pin):
15 super(AIOTracedCursor, self).__init__(cursor)
16 pin.onto(self)
17 name = pin.app or 'sql'
18 self._datadog_name = '%s.query' % name
19
20 @asyncio.coroutine
21 def _trace_method(self, method, resource, extra_tags, *args, **kwargs):
22 pin = Pin.get_from(self)
23 if not pin or not pin.enabled():
24 result = yield from method(*args, **kwargs) # noqa: E999
25 return result
26 service = pin.service
27
28 with pin.tracer.trace(self._datadog_name, service=service,
29 resource=resource) as s:
30 s.span_type = sql.TYPE
31 s.set_tag(sql.QUERY, resource)
32 s.set_tags(pin.tags)
33 s.set_tags(extra_tags)
34
35 try:
36 result = yield from method(*args, **kwargs)
37 return result
38 finally:
39 s.set_metric("db.rowcount", self.rowcount)
40
41 @asyncio.coroutine
42 def executemany(self, query, *args, **kwargs):
43 # FIXME[matt] properly handle kwargs here. arg names can be different
44 # with different libs.
45 result = yield from self._trace_method(
46 self.__wrapped__.executemany, query, {'sql.executemany': 'true'},
47 query, *args, **kwargs) # noqa: E999
48 return result
49
50 @asyncio.coroutine
51 def execute(self, query, *args, **kwargs):
52 result = yield from self._trace_method(
53 self.__wrapped__.execute, query, {}, query, *args, **kwargs)
54 return result
55
56 @asyncio.coroutine
57 def callproc(self, proc, args):
58 result = yield from self._trace_method(
59 self.__wrapped__.callproc, proc, {}, proc, args) # noqa: E999
60 return result
61
62
63 class AIOTracedConnection(wrapt.ObjectProxy):
64 """ TracedConnection wraps a Connection with tracing code. """
65
66 def __init__(self, conn):
67 super(AIOTracedConnection, self).__init__(conn)
68 name = dbapi._get_vendor(conn)
69 Pin(service=name, app=name).onto(self)
70
71 def cursor(self, *args, **kwargs):
72 # unfortunately we also need to patch this method as otherwise "self"
73 # ends up being the aiopg connection object
74 coro = self._cursor(*args, **kwargs)
75 return _ContextManager(coro)
76
77 @asyncio.coroutine
78 def _cursor(self, *args, **kwargs):
79 cursor = yield from self.__wrapped__._cursor(*args, **kwargs) # noqa: E999
80 pin = Pin.get_from(self)
81 if not pin:
82 return cursor
83 return AIOTracedCursor(cursor, pin)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py
--- a/ddtrace/contrib/aiopg/connection.py
+++ b/ddtrace/contrib/aiopg/connection.py
@@ -4,8 +4,8 @@
from aiopg.utils import _ContextManager
from .. import dbapi
-from ...ext import sql
from ...pin import Pin
+from ...ext import sql, AppTypes
class AIOTracedCursor(wrapt.ObjectProxy):
@@ -63,10 +63,11 @@
class AIOTracedConnection(wrapt.ObjectProxy):
""" TracedConnection wraps a Connection with tracing code. """
- def __init__(self, conn):
+ def __init__(self, conn, pin=None):
super(AIOTracedConnection, self).__init__(conn)
name = dbapi._get_vendor(conn)
- Pin(service=name, app=name).onto(self)
+ db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)
+ db_pin.onto(self)
def cursor(self, *args, **kwargs):
# unfortunately we also need to patch this method as otherwise "self"
| {"golden_diff": "diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py\n--- a/ddtrace/contrib/aiopg/connection.py\n+++ b/ddtrace/contrib/aiopg/connection.py\n@@ -4,8 +4,8 @@\n from aiopg.utils import _ContextManager\n \n from .. import dbapi\n-from ...ext import sql\n from ...pin import Pin\n+from ...ext import sql, AppTypes\n \n \n class AIOTracedCursor(wrapt.ObjectProxy):\n@@ -63,10 +63,11 @@\n class AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n \n- def __init__(self, conn):\n+ def __init__(self, conn, pin=None):\n super(AIOTracedConnection, self).__init__(conn)\n name = dbapi._get_vendor(conn)\n- Pin(service=name, app=name).onto(self)\n+ db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)\n+ db_pin.onto(self)\n \n def cursor(self, *args, **kwargs):\n # unfortunately we also need to patch this method as otherwise \"self\"\n", "issue": "\"cannot decode v0.3 services payload\" error when tracing aiopg\nI have a Python application using aiopg and monitored by Datadog. When starting, it sends a bad frame to `trace-agent` and then everything goes fine.\r\n\r\nVersions:\r\n- datadog-agent: 6.2.1\r\n- ddtrace-py: 0.12.1\r\n- aiopg: 0.14.0\r\n\r\nHere is a minimalist test case:\r\n\r\n```python\r\nimport asyncio\r\nimport aiopg\r\n\r\nfrom ddtrace import patch\r\n\r\npatch(aiopg=True)\r\n\r\n\r\nasync def main():\r\n async with aiopg.connect(host=None) as db:\r\n pass\r\n\r\n while True:\r\n await asyncio.sleep(0.1)\r\n\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(main())\r\n```\r\n\r\nThis logs the following error:\r\n```\r\nfailed_to_send services to Agent: HTTP error status 400, reason Bad Request, message Content-Type: text/plain; charset=utf-8\r\nX-Content-Type-Options: nosniff\r\nDate: Mon, 18 Jun 2018 15:25:18 GMT\r\nContent-Length: 59\r\n```\r\n\r\nAnd then `trace-agent` reports:\r\n```\r\ntrace-agent[4437]: 2018-06-18 15:31:16 ERROR (receiver.go:275) - cannot decode v0.3 services payload: msgp: attempted to decode type \"nil\" with method for \"str\"\r\n```\r\n\r\nI believe this is related to https://github.com/DataDog/datadog-trace-agent/issues/350.\n", "before_files": [{"content": "import asyncio\nimport wrapt\n\nfrom aiopg.utils import _ContextManager\n\nfrom .. import dbapi\nfrom ...ext import sql\nfrom ...pin import Pin\n\n\nclass AIOTracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces it's queries. \"\"\"\n\n def __init__(self, cursor, pin):\n super(AIOTracedCursor, self).__init__(cursor)\n pin.onto(self)\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n @asyncio.coroutine\n def _trace_method(self, method, resource, extra_tags, *args, **kwargs):\n pin = Pin.get_from(self)\n if not pin or not pin.enabled():\n result = yield from method(*args, **kwargs) # noqa: E999\n return result\n service = pin.service\n\n with pin.tracer.trace(self._datadog_name, service=service,\n resource=resource) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, resource)\n s.set_tags(pin.tags)\n s.set_tags(extra_tags)\n\n try:\n result = yield from method(*args, **kwargs)\n return result\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n @asyncio.coroutine\n def executemany(self, query, *args, **kwargs):\n # FIXME[matt] properly handle kwargs here. 
arg names can be different\n # with different libs.\n result = yield from self._trace_method(\n self.__wrapped__.executemany, query, {'sql.executemany': 'true'},\n query, *args, **kwargs) # noqa: E999\n return result\n\n @asyncio.coroutine\n def execute(self, query, *args, **kwargs):\n result = yield from self._trace_method(\n self.__wrapped__.execute, query, {}, query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def callproc(self, proc, args):\n result = yield from self._trace_method(\n self.__wrapped__.callproc, proc, {}, proc, args) # noqa: E999\n return result\n\n\nclass AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n\n def __init__(self, conn):\n super(AIOTracedConnection, self).__init__(conn)\n name = dbapi._get_vendor(conn)\n Pin(service=name, app=name).onto(self)\n\n def cursor(self, *args, **kwargs):\n # unfortunately we also need to patch this method as otherwise \"self\"\n # ends up being the aiopg connection object\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n @asyncio.coroutine\n def _cursor(self, *args, **kwargs):\n cursor = yield from self.__wrapped__._cursor(*args, **kwargs) # noqa: E999\n pin = Pin.get_from(self)\n if not pin:\n return cursor\n return AIOTracedCursor(cursor, pin)\n", "path": "ddtrace/contrib/aiopg/connection.py"}], "after_files": [{"content": "import asyncio\nimport wrapt\n\nfrom aiopg.utils import _ContextManager\n\nfrom .. import dbapi\nfrom ...pin import Pin\nfrom ...ext import sql, AppTypes\n\n\nclass AIOTracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces it's queries. \"\"\"\n\n def __init__(self, cursor, pin):\n super(AIOTracedCursor, self).__init__(cursor)\n pin.onto(self)\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n @asyncio.coroutine\n def _trace_method(self, method, resource, extra_tags, *args, **kwargs):\n pin = Pin.get_from(self)\n if not pin or not pin.enabled():\n result = yield from method(*args, **kwargs) # noqa: E999\n return result\n service = pin.service\n\n with pin.tracer.trace(self._datadog_name, service=service,\n resource=resource) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, resource)\n s.set_tags(pin.tags)\n s.set_tags(extra_tags)\n\n try:\n result = yield from method(*args, **kwargs)\n return result\n finally:\n s.set_metric(\"db.rowcount\", self.rowcount)\n\n @asyncio.coroutine\n def executemany(self, query, *args, **kwargs):\n # FIXME[matt] properly handle kwargs here. arg names can be different\n # with different libs.\n result = yield from self._trace_method(\n self.__wrapped__.executemany, query, {'sql.executemany': 'true'},\n query, *args, **kwargs) # noqa: E999\n return result\n\n @asyncio.coroutine\n def execute(self, query, *args, **kwargs):\n result = yield from self._trace_method(\n self.__wrapped__.execute, query, {}, query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def callproc(self, proc, args):\n result = yield from self._trace_method(\n self.__wrapped__.callproc, proc, {}, proc, args) # noqa: E999\n return result\n\n\nclass AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. 
\"\"\"\n\n def __init__(self, conn, pin=None):\n super(AIOTracedConnection, self).__init__(conn)\n name = dbapi._get_vendor(conn)\n db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)\n db_pin.onto(self)\n\n def cursor(self, *args, **kwargs):\n # unfortunately we also need to patch this method as otherwise \"self\"\n # ends up being the aiopg connection object\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n @asyncio.coroutine\n def _cursor(self, *args, **kwargs):\n cursor = yield from self.__wrapped__._cursor(*args, **kwargs) # noqa: E999\n pin = Pin.get_from(self)\n if not pin:\n return cursor\n return AIOTracedCursor(cursor, pin)\n", "path": "ddtrace/contrib/aiopg/connection.py"}]} | 1,486 | 268 |
gh_patches_debug_13412 | rasdani/github-patches | git_diff | pre-commit__pre-commit-785 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
documentation regarding adding new python based hooks needs improvement
Apparently we need some kind of how-to or mini tutorial on how to add a new hook to pre-commit as the basic documentation does not help someone without previous pre-commit knowledge.
I wanted to add support for `bashate`, a shell script linter written in python, available on pypi and that installs a shell script with the same name that can be used just like other linters.
Initially I went to https://pre-commit.com/#new-hooks which didn't give me enough info. So, I looked for other linters based on python and I found yamllint, which pointed me to https://github.com/adrienverge/yamllint/blob/master/.pre-commit-hooks.yaml
So the idea was to add the hook definition directly to the linter package. In this case I had to fork bashate in order to test the new hook. So I ended up creating https://github.com/ssbarnea/bashate/blob/master/.pre-commit-hooks.yaml -- following the same model used in yamllint.
Now, I wanted to add and test the hook on one of the repos I maintain so I did create https://github.com/pycontribs/jira/blob/feature/pre-commit/.pre-commit-config.yaml#L25
When I tried to run it using `pre-commit run bashate --all`, it failed with this error:
```
Bashate..................................................................An unexpected error has occurred: OSError: [Errno 2] No such file or directory
Check the log at /Users/ssbarnea/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: OSError: [Errno 2] No such file or directory
Traceback (most recent call last):
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/main.py", line 258, in main
return run(runner, args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 270, in run
return _run_hooks(runner.config, repo_hooks, args, environ)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 199, in _run_hooks
retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 110, in _run_single_hook
hook, tuple(filenames) if hook['pass_filenames'] else (),
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/repository.py", line 207, in run_hook
return languages[language_name].run_hook(prefix, hook, file_args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/languages/script.py", line 16, in run_hook
return xargs(cmd, file_args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/xargs.py", line 63, in xargs
*run_cmd, encoding=None, retcode=None
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/util.py", line 167, in cmd_output
proc = subprocess.Popen(cmd, **popen_kwargs)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py", line 390, in __init__
errread, errwrite)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py", line 1025, in _execute_child
raise child_exception
OSError: [Errno 2] No such file or directory
```
At the moment I have no idea what I did wrong, probably something simple. Still, we need to make it easier to integrate new linters into pre-commit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/parse_shebang.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import unicode_literals
3
4 import os.path
5
6 from identify.identify import parse_shebang_from_file
7
8
9 class ExecutableNotFoundError(OSError):
10 def to_output(self):
11 return (1, self.args[0].encode('UTF-8'), b'')
12
13
14 def parse_filename(filename):
15 if not os.path.exists(filename):
16 return ()
17 else:
18 return parse_shebang_from_file(filename)
19
20
21 def find_executable(exe, _environ=None):
22 exe = os.path.normpath(exe)
23 if os.sep in exe:
24 return exe
25
26 environ = _environ if _environ is not None else os.environ
27
28 if 'PATHEXT' in environ:
29 possible_exe_names = tuple(
30 exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)
31 ) + (exe,)
32
33 else:
34 possible_exe_names = (exe,)
35
36 for path in environ.get('PATH', '').split(os.pathsep):
37 for possible_exe_name in possible_exe_names:
38 joined = os.path.join(path, possible_exe_name)
39 if os.path.isfile(joined) and os.access(joined, os.X_OK):
40 return joined
41 else:
42 return None
43
44
45 def normexe(orig_exe):
46 if os.sep not in orig_exe:
47 exe = find_executable(orig_exe)
48 if exe is None:
49 raise ExecutableNotFoundError(
50 'Executable `{}` not found'.format(orig_exe),
51 )
52 return exe
53 else:
54 return orig_exe
55
56
57 def normalize_cmd(cmd):
58 """Fixes for the following issues on windows
59 - https://bugs.python.org/issue8557
60 - windows does not parse shebangs
61
62 This function also makes deep-path shebangs work just fine
63 """
64 # Use PATH to determine the executable
65 exe = normexe(cmd[0])
66
67 # Figure out the shebang from the resulting command
68 cmd = parse_filename(exe) + (exe,) + cmd[1:]
69
70 # This could have given us back another bare executable
71 exe = normexe(cmd[0])
72
73 return (exe,) + cmd[1:]
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py
--- a/pre_commit/parse_shebang.py
+++ b/pre_commit/parse_shebang.py
@@ -42,16 +42,21 @@
return None
-def normexe(orig_exe):
- if os.sep not in orig_exe:
- exe = find_executable(orig_exe)
+def normexe(orig):
+ def _error(msg):
+ raise ExecutableNotFoundError('Executable `{}` {}'.format(orig, msg))
+
+ if os.sep not in orig and (not os.altsep or os.altsep not in orig):
+ exe = find_executable(orig)
if exe is None:
- raise ExecutableNotFoundError(
- 'Executable `{}` not found'.format(orig_exe),
- )
+ _error('not found')
return exe
+ elif not os.access(orig, os.X_OK):
+ _error('not found')
+ elif os.path.isdir(orig):
+ _error('is a directory')
else:
- return orig_exe
+ return orig
def normalize_cmd(cmd):
| {"golden_diff": "diff --git a/pre_commit/parse_shebang.py b/pre_commit/parse_shebang.py\n--- a/pre_commit/parse_shebang.py\n+++ b/pre_commit/parse_shebang.py\n@@ -42,16 +42,21 @@\n return None\n \n \n-def normexe(orig_exe):\n- if os.sep not in orig_exe:\n- exe = find_executable(orig_exe)\n+def normexe(orig):\n+ def _error(msg):\n+ raise ExecutableNotFoundError('Executable `{}` {}'.format(orig, msg))\n+\n+ if os.sep not in orig and (not os.altsep or os.altsep not in orig):\n+ exe = find_executable(orig)\n if exe is None:\n- raise ExecutableNotFoundError(\n- 'Executable `{}` not found'.format(orig_exe),\n- )\n+ _error('not found')\n return exe\n+ elif not os.access(orig, os.X_OK):\n+ _error('not found')\n+ elif os.path.isdir(orig):\n+ _error('is a directory')\n else:\n- return orig_exe\n+ return orig\n \n \n def normalize_cmd(cmd):\n", "issue": "documention regarding adding new python based hooks needs improvement\nApparently we need some kind of how-to or mini tutorial on how to add a new hook to pre-commit as the basic documentation does not help someone without previous pre-commit knowledge.\r\n\r\nI wanted to add support for `bashate`, a shell script linter written in python, available on pypi and that installs a shell script with the same name that can be used just like other linters.\r\n\r\nInitially I went to https://pre-commit.com/#new-hooks which didn't give me enough info. So, i looked for other linters based on python and I found yamllint, which pointed me to https://github.com/adrienverge/yamllint/blob/master/.pre-commit-hooks.yaml\r\n\r\nSo the idea was to add the hook definition directly to the linter package. In this case I had to fork bashate in order to test the new, hook. So I ended up creating https://github.com/ssbarnea/bashate/blob/master/.pre-commit-hooks.yaml -- folowing the same model used in yamllint.\r\n\r\nNow, I wanted to add and test the hook on one of the repos I maintain so I did create https://github.com/pycontribs/jira/blob/feature/pre-commit/.pre-commit-config.yaml#L25\r\n\r\nWhen I tried to run it using `pre-commit run bashate --all`, it failed with this error:\r\n```\r\nBashate..................................................................An unexpected error has occurred: OSError: [Errno 2] No such file or directory\r\nCheck the log at /Users/ssbarnea/.cache/pre-commit/pre-commit.log\r\n\r\nAn unexpected error has occurred: OSError: [Errno 2] No such file or directory\r\nTraceback (most recent call last):\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 47, in error_handler\r\n yield\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/main.py\", line 258, in main\r\n return run(runner, args)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 270, in run\r\n return _run_hooks(runner.config, repo_hooks, args, environ)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 199, in _run_hooks\r\n retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 110, in _run_single_hook\r\n hook, tuple(filenames) if hook['pass_filenames'] else (),\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/repository.py\", line 207, in run_hook\r\n return languages[language_name].run_hook(prefix, 
hook, file_args)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/languages/script.py\", line 16, in run_hook\r\n return xargs(cmd, file_args)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/xargs.py\", line 63, in xargs\r\n *run_cmd, encoding=None, retcode=None\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/util.py\", line 167, in cmd_output\r\n proc = subprocess.Popen(cmd, **popen_kwargs)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py\", line 390, in __init__\r\n errread, errwrite)\r\n File \"/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py\", line 1025, in _execute_child\r\n raise child_exception\r\nOSError: [Errno 2] No such file or directory\r\n```\r\n\r\nAt the moment I have no idea what I did wrong, probably something simple. Still, we need to make it easier to integrate new linters into pre-commit.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport os.path\n\nfrom identify.identify import parse_shebang_from_file\n\n\nclass ExecutableNotFoundError(OSError):\n def to_output(self):\n return (1, self.args[0].encode('UTF-8'), b'')\n\n\ndef parse_filename(filename):\n if not os.path.exists(filename):\n return ()\n else:\n return parse_shebang_from_file(filename)\n\n\ndef find_executable(exe, _environ=None):\n exe = os.path.normpath(exe)\n if os.sep in exe:\n return exe\n\n environ = _environ if _environ is not None else os.environ\n\n if 'PATHEXT' in environ:\n possible_exe_names = tuple(\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n ) + (exe,)\n\n else:\n possible_exe_names = (exe,)\n\n for path in environ.get('PATH', '').split(os.pathsep):\n for possible_exe_name in possible_exe_names:\n joined = os.path.join(path, possible_exe_name)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n else:\n return None\n\n\ndef normexe(orig_exe):\n if os.sep not in orig_exe:\n exe = find_executable(orig_exe)\n if exe is None:\n raise ExecutableNotFoundError(\n 'Executable `{}` not found'.format(orig_exe),\n )\n return exe\n else:\n return orig_exe\n\n\ndef normalize_cmd(cmd):\n \"\"\"Fixes for the following issues on windows\n - https://bugs.python.org/issue8557\n - windows does not parse shebangs\n\n This function also makes deep-path shebangs work just fine\n \"\"\"\n # Use PATH to determine the executable\n exe = normexe(cmd[0])\n\n # Figure out the shebang from the resulting command\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\n\n # This could have given us back another bare executable\n exe = normexe(cmd[0])\n\n return (exe,) + cmd[1:]\n", "path": "pre_commit/parse_shebang.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport os.path\n\nfrom identify.identify import parse_shebang_from_file\n\n\nclass ExecutableNotFoundError(OSError):\n def to_output(self):\n return (1, self.args[0].encode('UTF-8'), b'')\n\n\ndef parse_filename(filename):\n if not os.path.exists(filename):\n return ()\n else:\n return parse_shebang_from_file(filename)\n\n\ndef find_executable(exe, _environ=None):\n exe = os.path.normpath(exe)\n if os.sep in exe:\n return exe\n\n environ = _environ if _environ is not None else os.environ\n\n if 'PATHEXT' in environ:\n possible_exe_names = tuple(\n exe + ext.lower() for ext in environ['PATHEXT'].split(os.pathsep)\n ) + 
(exe,)\n\n else:\n possible_exe_names = (exe,)\n\n for path in environ.get('PATH', '').split(os.pathsep):\n for possible_exe_name in possible_exe_names:\n joined = os.path.join(path, possible_exe_name)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n else:\n return None\n\n\ndef normexe(orig):\n def _error(msg):\n raise ExecutableNotFoundError('Executable `{}` {}'.format(orig, msg))\n\n if os.sep not in orig and (not os.altsep or os.altsep not in orig):\n exe = find_executable(orig)\n if exe is None:\n _error('not found')\n return exe\n elif not os.access(orig, os.X_OK):\n _error('not found')\n elif os.path.isdir(orig):\n _error('is a directory')\n else:\n return orig\n\n\ndef normalize_cmd(cmd):\n \"\"\"Fixes for the following issues on windows\n - https://bugs.python.org/issue8557\n - windows does not parse shebangs\n\n This function also makes deep-path shebangs work just fine\n \"\"\"\n # Use PATH to determine the executable\n exe = normexe(cmd[0])\n\n # Figure out the shebang from the resulting command\n cmd = parse_filename(exe) + (exe,) + cmd[1:]\n\n # This could have given us back another bare executable\n exe = normexe(cmd[0])\n\n return (exe,) + cmd[1:]\n", "path": "pre_commit/parse_shebang.py"}]} | 1,868 | 248 |
gh_patches_debug_12558 | rasdani/github-patches | git_diff | hydroshare__hydroshare-1690 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Web App" needs a space (not "WebApp")
On the open web app button there needs to be a space:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_tools_resource/migrations/0010_auto_20161203_1913.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('hs_tools_resource', '0009_auto_20160929_1543'),
11 ]
12
13 operations = [
14 migrations.RemoveField(
15 model_name='toolicon',
16 name='url',
17 ),
18 migrations.AddField(
19 model_name='toolicon',
20 name='value',
21 field=models.CharField(default=b'', max_length=1024, blank=True),
22 ),
23 migrations.AlterField(
24 model_name='apphomepageurl',
25 name='value',
26 field=models.CharField(default=b'', max_length=1024, blank=True),
27 ),
28 migrations.AlterField(
29 model_name='requesturlbase',
30 name='value',
31 field=models.CharField(default=b'', max_length=1024, blank=True),
32 ),
33 migrations.AlterField(
34 model_name='supportedrestypes',
35 name='supported_res_types',
36 field=models.ManyToManyField(to='hs_tools_resource.SupportedResTypeChoices', blank=True),
37 ),
38 migrations.AlterField(
39 model_name='supportedsharingstatus',
40 name='sharing_status',
41 field=models.ManyToManyField(to='hs_tools_resource.SupportedSharingStatusChoices', blank=True),
42 ),
43 ]
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_tools_resource/migrations/0010_auto_20161203_1913.py b/hs_tools_resource/migrations/0010_auto_20161203_1913.py
--- a/hs_tools_resource/migrations/0010_auto_20161203_1913.py
+++ b/hs_tools_resource/migrations/0010_auto_20161203_1913.py
@@ -11,14 +11,15 @@
]
operations = [
- migrations.RemoveField(
+ migrations.AlterField(
model_name='toolicon',
name='url',
+ field=models.CharField(default=b'', max_length=1024, blank=True),
),
- migrations.AddField(
+ migrations.RenameField(
model_name='toolicon',
- name='value',
- field=models.CharField(default=b'', max_length=1024, blank=True),
+ old_name='url',
+ new_name='value'
),
migrations.AlterField(
model_name='apphomepageurl',
| {"golden_diff": "diff --git a/hs_tools_resource/migrations/0010_auto_20161203_1913.py b/hs_tools_resource/migrations/0010_auto_20161203_1913.py\n--- a/hs_tools_resource/migrations/0010_auto_20161203_1913.py\n+++ b/hs_tools_resource/migrations/0010_auto_20161203_1913.py\n@@ -11,14 +11,15 @@\n ]\n \n operations = [\n- migrations.RemoveField(\n+ migrations.AlterField(\n model_name='toolicon',\n name='url',\n+ field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n- migrations.AddField(\n+ migrations.RenameField(\n model_name='toolicon',\n- name='value',\n- field=models.CharField(default=b'', max_length=1024, blank=True),\n+ old_name='url',\n+ new_name='value'\n ),\n migrations.AlterField(\n model_name='apphomepageurl',\n", "issue": "\"Web App\" needs a space (not \"WebApp\")\nOn the open web app button there needs to be a space:\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hs_tools_resource', '0009_auto_20160929_1543'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='toolicon',\n name='url',\n ),\n migrations.AddField(\n model_name='toolicon',\n name='value',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.AlterField(\n model_name='apphomepageurl',\n name='value',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.AlterField(\n model_name='requesturlbase',\n name='value',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.AlterField(\n model_name='supportedrestypes',\n name='supported_res_types',\n field=models.ManyToManyField(to='hs_tools_resource.SupportedResTypeChoices', blank=True),\n ),\n migrations.AlterField(\n model_name='supportedsharingstatus',\n name='sharing_status',\n field=models.ManyToManyField(to='hs_tools_resource.SupportedSharingStatusChoices', blank=True),\n ),\n ]\n", "path": "hs_tools_resource/migrations/0010_auto_20161203_1913.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hs_tools_resource', '0009_auto_20160929_1543'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='toolicon',\n name='url',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.RenameField(\n model_name='toolicon',\n old_name='url',\n new_name='value'\n ),\n migrations.AlterField(\n model_name='apphomepageurl',\n name='value',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.AlterField(\n model_name='requesturlbase',\n name='value',\n field=models.CharField(default=b'', max_length=1024, blank=True),\n ),\n migrations.AlterField(\n model_name='supportedrestypes',\n name='supported_res_types',\n field=models.ManyToManyField(to='hs_tools_resource.SupportedResTypeChoices', blank=True),\n ),\n migrations.AlterField(\n model_name='supportedsharingstatus',\n name='sharing_status',\n field=models.ManyToManyField(to='hs_tools_resource.SupportedSharingStatusChoices', blank=True),\n ),\n ]\n", "path": "hs_tools_resource/migrations/0010_auto_20161203_1913.py"}]} | 733 | 259 |
gh_patches_debug_14598 | rasdani/github-patches | git_diff | sunpy__sunpy-3076 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add list of constants to docs
Currently, if one wants to know what constants `sunpy.sun` has built in, you have to run `sunpy.sun.constants.print_all()`, but it would be nice to have a table that lists the constants here: https://docs.sunpy.org/en/latest/code_ref/sun.html#module-sunpy.sun.constants (like AstroPy: http://docs.astropy.org/en/stable/constants/index.html#reference-api)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/sun/constants.py`
Content:
```
1 """
2 This module provides fundamental solar physical constants.
3 """
4 from astropy.table import Table
5
6 from sunpy.sun import _constants as _con
7
8 __all__ = [
9 'get', 'find', 'print_all', 'spectral_classification', 'au', 'mass', 'equatorial_radius',
10 'volume', 'surface_area', 'average_density', 'equatorial_surface_gravity',
11 'effective_temperature', 'luminosity', 'mass_conversion_rate', 'escape_velocity', 'sfu',
12 'average_angular_size'
13 ]
14
15 constants = _con.physical_constants
16
17
18 def get(key):
19 """
20 Retrieve a constant by key. This is just a short cut into a dictionary.
21
22 Parameters
23 ----------
24 key : `str`
25 Key in dictionary in ``constants``.
26
27 Returns
28 -------
29 constant : `~astropy.units.Constant`
30
31 See Also
32 --------
33 `sunpy.sun.constants`
34 Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.
35
36 Examples
37 --------
38 >>> from sunpy.sun import constants
39 >>> constants.get('mass')
40 <<class 'astropy.constants.iau2015.IAU2015'> name='Solar mass' value=1.9884754153381438e+30 uncertainty=9.236140093538353e+25 unit='kg' reference='IAU 2015 Resolution B 3 + CODATA 2014'>
41 """
42 return constants[key]
43
44
45 def find(sub=None):
46 """
47 Return list of constants keys containing a given string.
48
49 Parameters
50 ----------
51 sub : `str`, optional
52 Sub-string to search keys for. By default set to `None` and returns all keys.
53
54 Returns
55 -------
56 `None`, `list`
57 The matching keys.
58
59 See Also
60 --------
61 `sunpy.sun.constants`
62 Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.
63 """
64 if sub is None:
65 result = list(constants.keys())
66 else:
67 result = [key for key in constants if sub.lower() in key.lower()]
68
69 result.sort()
70 return result
71
72
73 def print_all():
74 """
75 Provides a table of the complete list of constants.
76
77 Returns
78 -------
79 `astropy.table.Table`
80 """
81 data_rows = []
82 for key, this_constant in constants.items():
83 data_rows.append([
84 key, this_constant.name, this_constant.value, this_constant.uncertainty,
85 str(this_constant.unit), this_constant.reference
86 ])
87
88 t = Table(rows=data_rows, names=('key', 'name', 'value', 'uncertainty', 'unit', 'Reference'))
89 return t
90
91
92 # Spectral class is not included in physical constants since it is not a number
93 spectral_classification = 'G2V'
94 au = astronomical_unit = get('mean distance')
95 # The following variables from _gets are brought out by making them
96 # accessible through a call such as sun.volume
97 mass = get('mass')
98 equatorial_radius = radius = get('radius')
99 volume = get('volume')
100 surface_area = get('surface area')
101 average_density = density = get('average density')
102 equatorial_surface_gravity = surface_gravity = get('surface gravity')
103 effective_temperature = get('effective temperature')
104 luminosity = get('luminosity')
105 mass_conversion_rate = get('mass conversion rate')
106 escape_velocity = get('escape velocity')
107 sfu = get('solar flux unit')
108 # Observable parameters
109 average_angular_size = get('average angular size')
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/sun/constants.py b/sunpy/sun/constants.py
--- a/sunpy/sun/constants.py
+++ b/sunpy/sun/constants.py
@@ -89,6 +89,20 @@
return t
+# Add a list of constants to the docs
+_lines = [
+ 'The following constants are available:\n',
+ '====================== ============== ================ =================================',
+ ' Name Value Unit Description',
+ '====================== ============== ================ =================================',
+]
+for key, const in constants.items():
+ _lines.append('{0:^22} {1:^14.9g} {2:^16} {3}'.format(
+ key, const.value, const._unit_string, const.name))
+_lines.append(_lines[1])
+if __doc__ is not None:
+ __doc__ += '\n'.join(_lines)
+
# Spectral class is not included in physical constants since it is not a number
spectral_classification = 'G2V'
au = astronomical_unit = get('mean distance')
| {"golden_diff": "diff --git a/sunpy/sun/constants.py b/sunpy/sun/constants.py\n--- a/sunpy/sun/constants.py\n+++ b/sunpy/sun/constants.py\n@@ -89,6 +89,20 @@\n return t\n \n \n+# Add a list of constants to the docs\n+_lines = [\n+ 'The following constants are available:\\n',\n+ '====================== ============== ================ =================================',\n+ ' Name Value Unit Description',\n+ '====================== ============== ================ =================================',\n+]\n+for key, const in constants.items():\n+ _lines.append('{0:^22} {1:^14.9g} {2:^16} {3}'.format(\n+ key, const.value, const._unit_string, const.name))\n+_lines.append(_lines[1])\n+if __doc__ is not None:\n+ __doc__ += '\\n'.join(_lines)\n+\n # Spectral class is not included in physical constants since it is not a number\n spectral_classification = 'G2V'\n au = astronomical_unit = get('mean distance')\n", "issue": "Add list of constants to docs\nCurrently if one wants to know what constants `sunpy.sun` has built in you have to run `sunpy.sun.constants.print_all()`, but it would be nice to have a table that lists the constants here: https://docs.sunpy.org/en/latest/code_ref/sun.html#module-sunpy.sun.constants (like AstroPy: http://docs.astropy.org/en/stable/constants/index.html#reference-api)\n", "before_files": [{"content": "\"\"\"\nThis module provides fundamental solar physical constants.\n\"\"\"\nfrom astropy.table import Table\n\nfrom sunpy.sun import _constants as _con\n\n__all__ = [\n 'get', 'find', 'print_all', 'spectral_classification', 'au', 'mass', 'equatorial_radius',\n 'volume', 'surface_area', 'average_density', 'equatorial_surface_gravity',\n 'effective_temperature', 'luminosity', 'mass_conversion_rate', 'escape_velocity', 'sfu',\n 'average_angular_size'\n]\n\nconstants = _con.physical_constants\n\n\ndef get(key):\n \"\"\"\n Retrieve a constant by key. This is just a short cut into a dictionary.\n\n Parameters\n ----------\n key : `str`\n Key in dictionary in ``constants``.\n\n Returns\n -------\n constant : `~astropy.units.Constant`\n\n See Also\n --------\n `sunpy.sun.constants`\n Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.\n\n Examples\n --------\n >>> from sunpy.sun import constants\n >>> constants.get('mass')\n <<class 'astropy.constants.iau2015.IAU2015'> name='Solar mass' value=1.9884754153381438e+30 uncertainty=9.236140093538353e+25 unit='kg' reference='IAU 2015 Resolution B 3 + CODATA 2014'>\n \"\"\"\n return constants[key]\n\n\ndef find(sub=None):\n \"\"\"\n Return list of constants keys containing a given string.\n\n Parameters\n ----------\n sub : `str`, optional\n Sub-string to search keys for. 
By default set to `None` and returns all keys.\n\n Returns\n -------\n `None`, `list`\n The matching keys.\n\n See Also\n --------\n `sunpy.sun.constants`\n Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.\n \"\"\"\n if sub is None:\n result = list(constants.keys())\n else:\n result = [key for key in constants if sub.lower() in key.lower()]\n\n result.sort()\n return result\n\n\ndef print_all():\n \"\"\"\n Provides a table of the complete list of constants.\n\n Returns\n -------\n `astropy.table.Table`\n \"\"\"\n data_rows = []\n for key, this_constant in constants.items():\n data_rows.append([\n key, this_constant.name, this_constant.value, this_constant.uncertainty,\n str(this_constant.unit), this_constant.reference\n ])\n\n t = Table(rows=data_rows, names=('key', 'name', 'value', 'uncertainty', 'unit', 'Reference'))\n return t\n\n\n# Spectral class is not included in physical constants since it is not a number\nspectral_classification = 'G2V'\nau = astronomical_unit = get('mean distance')\n# The following variables from _gets are brought out by making them\n# accessible through a call such as sun.volume\nmass = get('mass')\nequatorial_radius = radius = get('radius')\nvolume = get('volume')\nsurface_area = get('surface area')\naverage_density = density = get('average density')\nequatorial_surface_gravity = surface_gravity = get('surface gravity')\neffective_temperature = get('effective temperature')\nluminosity = get('luminosity')\nmass_conversion_rate = get('mass conversion rate')\nescape_velocity = get('escape velocity')\nsfu = get('solar flux unit')\n# Observable parameters\naverage_angular_size = get('average angular size')\n", "path": "sunpy/sun/constants.py"}], "after_files": [{"content": "\"\"\"\nThis module provides fundamental solar physical constants.\n\"\"\"\nfrom astropy.table import Table\n\nfrom sunpy.sun import _constants as _con\n\n__all__ = [\n 'get', 'find', 'print_all', 'spectral_classification', 'au', 'mass', 'equatorial_radius',\n 'volume', 'surface_area', 'average_density', 'equatorial_surface_gravity',\n 'effective_temperature', 'luminosity', 'mass_conversion_rate', 'escape_velocity', 'sfu',\n 'average_angular_size'\n]\n\nconstants = _con.physical_constants\n\n\ndef get(key):\n \"\"\"\n Retrieve a constant by key. This is just a short cut into a dictionary.\n\n Parameters\n ----------\n key : `str`\n Key in dictionary in ``constants``.\n\n Returns\n -------\n constant : `~astropy.units.Constant`\n\n See Also\n --------\n `sunpy.sun.constants`\n Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.\n\n Examples\n --------\n >>> from sunpy.sun import constants\n >>> constants.get('mass')\n <<class 'astropy.constants.iau2015.IAU2015'> name='Solar mass' value=1.9884754153381438e+30 uncertainty=9.236140093538353e+25 unit='kg' reference='IAU 2015 Resolution B 3 + CODATA 2014'>\n \"\"\"\n return constants[key]\n\n\ndef find(sub=None):\n \"\"\"\n Return list of constants keys containing a given string.\n\n Parameters\n ----------\n sub : `str`, optional\n Sub-string to search keys for. 
By default set to `None` and returns all keys.\n\n Returns\n -------\n `None`, `list`\n The matching keys.\n\n See Also\n --------\n `sunpy.sun.constants`\n Contains the description of ``constants``, which, as a dictionary literal object, does not itself possess a docstring.\n \"\"\"\n if sub is None:\n result = list(constants.keys())\n else:\n result = [key for key in constants if sub.lower() in key.lower()]\n\n result.sort()\n return result\n\n\ndef print_all():\n \"\"\"\n Provides a table of the complete list of constants.\n\n Returns\n -------\n `astropy.table.Table`\n \"\"\"\n data_rows = []\n for key, this_constant in constants.items():\n data_rows.append([\n key, this_constant.name, this_constant.value, this_constant.uncertainty,\n str(this_constant.unit), this_constant.reference\n ])\n\n t = Table(rows=data_rows, names=('key', 'name', 'value', 'uncertainty', 'unit', 'Reference'))\n return t\n\n\n# Add a list of constants to the docs\n_lines = [\n 'The following constants are available:\\n',\n '====================== ============== ================ =================================',\n ' Name Value Unit Description',\n '====================== ============== ================ =================================',\n]\nfor key, const in constants.items():\n _lines.append('{0:^22} {1:^14.9g} {2:^16} {3}'.format(\n key, const.value, const._unit_string, const.name))\n_lines.append(_lines[1])\nif __doc__ is not None:\n __doc__ += '\\n'.join(_lines)\n\n# Spectral class is not included in physical constants since it is not a number\nspectral_classification = 'G2V'\nau = astronomical_unit = get('mean distance')\n# The following variables from _gets are brought out by making them\n# accessible through a call such as sun.volume\nmass = get('mass')\nequatorial_radius = radius = get('radius')\nvolume = get('volume')\nsurface_area = get('surface area')\naverage_density = density = get('average density')\nequatorial_surface_gravity = surface_gravity = get('surface gravity')\neffective_temperature = get('effective temperature')\nluminosity = get('luminosity')\nmass_conversion_rate = get('mass conversion rate')\nescape_velocity = get('escape velocity')\nsfu = get('solar flux unit')\n# Observable parameters\naverage_angular_size = get('average angular size')\n", "path": "sunpy/sun/constants.py"}]} | 1,369 | 240 |
gh_patches_debug_72 | rasdani/github-patches | git_diff | Kinto__kinto-7 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PostgreSQL by default ?
- put `cliquet[postgresql]` in requirements
- put storage_url in config (default postgres:postgres@localhost/postgres)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from setuptools import setup, find_packages
3
4 here = os.path.abspath(os.path.dirname(__file__))
5
6 with open(os.path.join(here, 'README.rst')) as f:
7 README = f.read()
8
9 REQUIREMENTS = [
10 'colander',
11 'cornice',
12 'six',
13 'waitress',
14 'cliquet'
15 ]
16
17 ENTRY_POINTS = {
18 'paste.app_factory': [
19 'main = kinto:main',
20 ]}
21
22 setup(name='kinto',
23 version='0.1.dev0',
24 description='kinto',
25 long_description=README,
26 classifiers=[
27 "Programming Language :: Python",
28 "Topic :: Internet :: WWW/HTTP",
29 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
30 ],
31 keywords="web services",
32 author='Mozilla Services',
33 author_email='[email protected]',
34 url='',
35 packages=find_packages(),
36 include_package_data=True,
37 zip_safe=False,
38 install_requires=REQUIREMENTS,
39 entry_points=ENTRY_POINTS)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
'cornice',
'six',
'waitress',
- 'cliquet'
+ 'cliquet[postgresql]'
]
ENTRY_POINTS = {
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n 'cornice',\n 'six',\n 'waitress',\n- 'cliquet'\n+ 'cliquet[postgresql]'\n ]\n \n ENTRY_POINTS = {\n", "issue": "PostgreSQL by default ?\n- put `cliquet[postgresql]` in requirements\n- put storage_url in config (default postgres:postgres@localhost/postgres)\n\n", "before_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\nREQUIREMENTS = [\n 'colander',\n 'cornice',\n 'six',\n 'waitress',\n 'cliquet'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ]}\n\nsetup(name='kinto',\n version='0.1.dev0',\n description='kinto',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n\nREQUIREMENTS = [\n 'colander',\n 'cornice',\n 'six',\n 'waitress',\n 'cliquet[postgresql]'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ]}\n\nsetup(name='kinto',\n version='0.1.dev0',\n description='kinto',\n long_description=README,\n classifiers=[\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}]} | 583 | 68 |
gh_patches_debug_47989 | rasdani/github-patches | git_diff | mlflow__mlflow-10877 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Git version not logged when executable in current path.
### Issues Policy acknowledgement
- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)
### Where did you encounter this bug?
Local machine
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### MLflow version
- Client: 2.8.0
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04.2 LTS
- **Python version**: 3.11.6
### Describe the problem
The tag ``mlflow.source.git.commit`` is not populated if running a python file that's in the current path.
Imagine a file ``mypath/myfile.py`` with this content:
```python
from mlflow.tracking.context.git_context import _get_source_version
print(_get_source_version())
```
If you run from within ``mypath`` via ``python myfile.py`` this will not produce a version,
but if you run from the parent folder via ``python mypath/myfile.py`` it will produce the correct version.
That is caused by [`get_git_commit`](https://github.com/mlflow/mlflow/blob/73fbc7d857b3f04b823b3fcbc6723efc41070288/mlflow/utils/git_utils.py#L47C1-L49C39)
using the path of the ``argv[1]``, and then passing this to ``Repo``. If the path is the current directory, ``os.path.dirname(path)`` will be the empty string, which will make ``Repo`` raise
```
git.exc.GitCommandError: Cmd('git') failed due to: exit code(128)
cmdline: git check-ignore
stderr: 'fatal: empty string is not a valid pathspec. please use . instead if you meant to match all paths
```
which will be caught, and then an empty string will be returned.
A work-around could be as easy as replacing empty string by ``'.'``, or maybe better using ``os.path.dirname(os.path.abspath(path))``?
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/deployments`: MLflow Deployments client APIs, server, and third-party Deployments integrations
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [ ] `area/server-infra`: MLflow Tracking server backend
- [X] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/utils/git_utils.py`
Content:
```
1 import logging
2 import os
3 from typing import Optional
4
5 _logger = logging.getLogger(__name__)
6
7
8 def get_git_repo_url(path: str) -> Optional[str]:
9 """
10 Obtains the url of the git repository associated with the specified path,
11 returning ``None`` if the path does not correspond to a git repository.
12 """
13 try:
14 from git import Repo
15 except ImportError as e:
16 _logger.warning(
17 "Failed to import Git (the Git executable is probably not on your PATH),"
18 " so Git SHA is not available. Error: %s",
19 e,
20 )
21 return None
22
23 try:
24 repo = Repo(path, search_parent_directories=True)
25 return next((remote.url for remote in repo.remotes), None)
26 except Exception:
27 return None
28
29
30 def get_git_commit(path: str) -> Optional[str]:
31 """
32 Obtains the hash of the latest commit on the current branch of the git repository associated
33 with the specified path, returning ``None`` if the path does not correspond to a git
34 repository.
35 """
36 try:
37 from git import Repo
38 except ImportError as e:
39 _logger.warning(
40 "Failed to import Git (the Git executable is probably not on your PATH),"
41 " so Git SHA is not available. Error: %s",
42 e,
43 )
44 return None
45 try:
46 if os.path.isfile(path):
47 path = os.path.dirname(path)
48 repo = Repo(path, search_parent_directories=True)
49 if path in repo.ignored(path):
50 return None
51 return repo.head.commit.hexsha
52 except Exception:
53 return None
54
55
56 def get_git_branch(path: str) -> Optional[str]:
57 """
58 Obtains the name of the current branch of the git repository associated with the specified
59 path, returning ``None`` if the path does not correspond to a git repository.
60 """
61 try:
62 from git import Repo
63 except ImportError as e:
64 _logger.warning(
65 "Failed to import Git (the Git executable is probably not on your PATH),"
66 " so Git SHA is not available. Error: %s",
67 e,
68 )
69 return None
70
71 try:
72 if os.path.isfile(path):
73 path = os.path.dirname(path)
74 repo = Repo(path, search_parent_directories=True)
75 return repo.active_branch.name
76 except Exception:
77 return None
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/utils/git_utils.py b/mlflow/utils/git_utils.py
--- a/mlflow/utils/git_utils.py
+++ b/mlflow/utils/git_utils.py
@@ -44,7 +44,7 @@
return None
try:
if os.path.isfile(path):
- path = os.path.dirname(path)
+ path = os.path.dirname(os.path.abspath(path))
repo = Repo(path, search_parent_directories=True)
if path in repo.ignored(path):
return None
| {"golden_diff": "diff --git a/mlflow/utils/git_utils.py b/mlflow/utils/git_utils.py\n--- a/mlflow/utils/git_utils.py\n+++ b/mlflow/utils/git_utils.py\n@@ -44,7 +44,7 @@\n return None\n try:\n if os.path.isfile(path):\n- path = os.path.dirname(path)\n+ path = os.path.dirname(os.path.abspath(path))\n repo = Repo(path, search_parent_directories=True)\n if path in repo.ignored(path):\n return None\n", "issue": "[BUG] Git version not logged when executable in current path.\n### Issues Policy acknowledgement\r\n\r\n- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\r\n\r\n### Where did you encounter this bug?\r\n\r\nLocal machine\r\n\r\n### Willingness to contribute\r\n\r\nYes. I can contribute a fix for this bug independently.\r\n\r\n### MLflow version\r\n\r\n- Client: 2.8.0\r\n\r\n### System information\r\n\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04.2 LTS\r\n- **Python version**: 3.11.6\r\n\r\n\r\n### Describe the problem\r\n\r\nThe tag ``mlflow.source.git.commit`` is not populated if running a python file that's in the current path.\r\nImagine a file ``mypath/myfile.py`` with this content:\r\n```python\r\nfrom mlflow.tracking.context.git_context import _get_source_version\r\nprint(_get_source_version())\r\n```\r\nIf you run from within ``mypath`` via ``python myfile.py`` this will not produce a version,\r\nbut if you run from the parent folder via ``python mypath/myfile.py`` it will produce the correct version.\r\n\r\nThat is caused by [`get_git_commit`](https://github.com/mlflow/mlflow/blob/73fbc7d857b3f04b823b3fcbc6723efc41070288/mlflow/utils/git_utils.py#L47C1-L49C39)\r\nusing the path of the ``argv[1]``, and then passing this to ``Repo``. If the path is the current directory, ``os.path.dirname(path)`` will be the empty string, which will make ``Repo`` raise \r\n```\r\ngit.exc.GitCommandError: Cmd('git') failed due to: exit code(128)\r\n cmdline: git check-ignore\r\n stderr: 'fatal: empty string is not a valid pathspec. please use . 
instead if you meant to match all paths\r\n```\r\nwhich will be caught, and then an empty string will be returned.\r\n\r\nA work-around could be as easy as replacing empty string by ``'.'``, or maybe better using ``os.path.dirname(os.path.abspath(path))``?\r\n\r\n\r\n\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/deployments`: MLflow Deployments client APIs, server, and third-party Deployments integrations\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [ ] `area/server-infra`: MLflow Tracking server backend\r\n- [X] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import logging\nimport os\nfrom typing import Optional\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_git_repo_url(path: str) -> Optional[str]:\n \"\"\"\n Obtains the url of the git repository associated with the specified path,\n returning ``None`` if the path does not correspond to a git repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None\n\n\ndef get_git_commit(path: str) -> Optional[str]:\n \"\"\"\n Obtains the hash of the latest commit on the current branch of the git repository associated\n with the specified path, returning ``None`` if the path does not correspond to a git\n repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. 
Error: %s\",\n e,\n )\n return None\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n if path in repo.ignored(path):\n return None\n return repo.head.commit.hexsha\n except Exception:\n return None\n\n\ndef get_git_branch(path: str) -> Optional[str]:\n \"\"\"\n Obtains the name of the current branch of the git repository associated with the specified\n path, returning ``None`` if the path does not correspond to a git repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n return repo.active_branch.name\n except Exception:\n return None\n", "path": "mlflow/utils/git_utils.py"}], "after_files": [{"content": "import logging\nimport os\nfrom typing import Optional\n\n_logger = logging.getLogger(__name__)\n\n\ndef get_git_repo_url(path: str) -> Optional[str]:\n \"\"\"\n Obtains the url of the git repository associated with the specified path,\n returning ``None`` if the path does not correspond to a git repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None\n\n\ndef get_git_commit(path: str) -> Optional[str]:\n \"\"\"\n Obtains the hash of the latest commit on the current branch of the git repository associated\n with the specified path, returning ``None`` if the path does not correspond to a git\n repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n try:\n if os.path.isfile(path):\n path = os.path.dirname(os.path.abspath(path))\n repo = Repo(path, search_parent_directories=True)\n if path in repo.ignored(path):\n return None\n return repo.head.commit.hexsha\n except Exception:\n return None\n\n\ndef get_git_branch(path: str) -> Optional[str]:\n \"\"\"\n Obtains the name of the current branch of the git repository associated with the specified\n path, returning ``None`` if the path does not correspond to a git repository.\n \"\"\"\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n return repo.active_branch.name\n except Exception:\n return None\n", "path": "mlflow/utils/git_utils.py"}]} | 1,829 | 108 |
gh_patches_debug_25562 | rasdani/github-patches | git_diff | pydantic__pydantic-107 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing HISTORY.rst
pydantic installs fine from pip but via a dependency in setup.py it fails with a missing HISTORY.rst due to your long_description in setup.py. Basically, you need a MANIFEST.in that includes that file.
```
Processing pydantic-0.6.2.tar.gz
Writing /var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/setup.cfg
Running pydantic-0.6.2/setup.py -q bdist_egg --dist-dir /var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/egg-dist-tmp-7bd8a1a8
error: [Errno 2] No such file or directory: '/private/var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/HISTORY.rst'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from importlib.machinery import SourceFileLoader
2 from pathlib import Path
3 from setuptools import setup
4
5 THIS_DIR = Path(__file__).resolve().parent
6 long_description = (
7 THIS_DIR.joinpath('README.rst').read_text() +
8 '\n\n' +
9 THIS_DIR.joinpath('HISTORY.rst').read_text()
10 )
11
12 # avoid loading the package before requirements are installed:
13 version = SourceFileLoader('version', 'pydantic/version.py').load_module()
14
15 setup(
16 name='pydantic',
17 version=str(version.VERSION),
18 description='Data validation and settings management using python 3.6 type hinting',
19 long_description=long_description,
20 classifiers=[
21 'Development Status :: 4 - Beta',
22 'Environment :: Console',
23 'Programming Language :: Python',
24 'Programming Language :: Python :: 3',
25 'Programming Language :: Python :: 3 :: Only',
26 'Programming Language :: Python :: 3.6',
27 'Intended Audience :: Developers',
28 'Intended Audience :: Information Technology',
29 'Intended Audience :: System Administrators',
30 'License :: OSI Approved :: MIT License',
31 'Operating System :: Unix',
32 'Operating System :: POSIX :: Linux',
33 'Environment :: MacOS X',
34 'Topic :: Software Development :: Libraries :: Python Modules',
35 'Topic :: Internet',
36 ],
37 author='Samuel Colvin',
38 author_email='[email protected]',
39 url='https://github.com/samuelcolvin/pydantic',
40 license='MIT',
41 packages=['pydantic'],
42 python_requires='>=3.6',
43 zip_safe=True,
44 extras_require={
45 'msgpack': ['msgpack-python>=0.4.8'],
46 'ujson': ['ujson>=1.35'],
47 }
48 )
49
```
Path: `pydantic/version.py`
Content:
```
1 from distutils.version import StrictVersion
2
3 __all__ = ['VERSION']
4
5 VERSION = StrictVersion('0.6.2')
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/version.py b/pydantic/version.py
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -2,4 +2,4 @@
__all__ = ['VERSION']
-VERSION = StrictVersion('0.6.2')
+VERSION = StrictVersion('0.6.3')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,12 +2,15 @@
from pathlib import Path
from setuptools import setup
+description = 'Data validation and settings management using python 3.6 type hinting'
THIS_DIR = Path(__file__).resolve().parent
-long_description = (
- THIS_DIR.joinpath('README.rst').read_text() +
- '\n\n' +
- THIS_DIR.joinpath('HISTORY.rst').read_text()
-)
+try:
+ long_description = '\n\n'.join([
+ THIS_DIR.joinpath('README.rst').read_text(),
+ THIS_DIR.joinpath('HISTORY.rst').read_text()
+ ])
+except FileNotFoundError:
+ long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'
# avoid loading the package before requirements are installed:
version = SourceFileLoader('version', 'pydantic/version.py').load_module()
@@ -15,7 +18,7 @@
setup(
name='pydantic',
version=str(version.VERSION),
- description='Data validation and settings management using python 3.6 type hinting',
+ description=description,
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -2,4 +2,4 @@\n \n __all__ = ['VERSION']\n \n-VERSION = StrictVersion('0.6.2')\n+VERSION = StrictVersion('0.6.3')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,12 +2,15 @@\n from pathlib import Path\n from setuptools import setup\n \n+description = 'Data validation and settings management using python 3.6 type hinting'\n THIS_DIR = Path(__file__).resolve().parent\n-long_description = (\n- THIS_DIR.joinpath('README.rst').read_text() +\n- '\\n\\n' +\n- THIS_DIR.joinpath('HISTORY.rst').read_text()\n-)\n+try:\n+ long_description = '\\n\\n'.join([\n+ THIS_DIR.joinpath('README.rst').read_text(),\n+ THIS_DIR.joinpath('HISTORY.rst').read_text()\n+ ])\n+except FileNotFoundError:\n+ long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n \n # avoid loading the package before requirements are installed:\n version = SourceFileLoader('version', 'pydantic/version.py').load_module()\n@@ -15,7 +18,7 @@\n setup(\n name='pydantic',\n version=str(version.VERSION),\n- description='Data validation and settings management using python 3.6 type hinting',\n+ description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 4 - Beta',\n", "issue": "Missing HISTORY.rst\npydantic installs fine from pip but via a dependency in setup.py it fails with a missing HISTORY.rst due to your long_description in setup.py. Basically, you need a MANIFEST.in that includes that file.\r\n\r\n```\r\nProcessing pydantic-0.6.2.tar.gz\r\nWriting /var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/setup.cfg\r\nRunning pydantic-0.6.2/setup.py -q bdist_egg --dist-dir /var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/egg-dist-tmp-7bd8a1a8\r\nerror: [Errno 2] No such file or directory: '/private/var/folders/4j/00jv8sg138n61hsj6ppf60pm0000gn/T/easy_install-o7rp3h7o/pydantic-0.6.2/HISTORY.rst'\r\n```\n", "before_files": [{"content": "from importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\nTHIS_DIR = Path(__file__).resolve().parent\nlong_description = (\n THIS_DIR.joinpath('README.rst').read_text() +\n '\\n\\n' +\n THIS_DIR.joinpath('HISTORY.rst').read_text()\n)\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description='Data validation and settings management using python 3.6 type hinting',\n long_description=long_description,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n 
extras_require={\n 'msgpack': ['msgpack-python>=0.4.8'],\n 'ujson': ['ujson>=1.35'],\n }\n)\n", "path": "setup.py"}, {"content": "from distutils.version import StrictVersion\n\n__all__ = ['VERSION']\n\nVERSION = StrictVersion('0.6.2')\n", "path": "pydantic/version.py"}], "after_files": [{"content": "from importlib.machinery import SourceFileLoader\nfrom pathlib import Path\nfrom setuptools import setup\n\ndescription = 'Data validation and settings management using python 3.6 type hinting'\nTHIS_DIR = Path(__file__).resolve().parent\ntry:\n long_description = '\\n\\n'.join([\n THIS_DIR.joinpath('README.rst').read_text(),\n THIS_DIR.joinpath('HISTORY.rst').read_text()\n ])\nexcept FileNotFoundError:\n long_description = description + '.\\n\\nSee https://pydantic-docs.helpmanual.io/ for documentation.'\n\n# avoid loading the package before requirements are installed:\nversion = SourceFileLoader('version', 'pydantic/version.py').load_module()\n\nsetup(\n name='pydantic',\n version=str(version.VERSION),\n description=description,\n long_description=long_description,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Unix',\n 'Operating System :: POSIX :: Linux',\n 'Environment :: MacOS X',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet',\n ],\n author='Samuel Colvin',\n author_email='[email protected]',\n url='https://github.com/samuelcolvin/pydantic',\n license='MIT',\n packages=['pydantic'],\n python_requires='>=3.6',\n zip_safe=True,\n extras_require={\n 'msgpack': ['msgpack-python>=0.4.8'],\n 'ujson': ['ujson>=1.35'],\n }\n)\n", "path": "setup.py"}, {"content": "from distutils.version import StrictVersion\n\n__all__ = ['VERSION']\n\nVERSION = StrictVersion('0.6.3')\n", "path": "pydantic/version.py"}]} | 1,058 | 367 |
gh_patches_debug_17096 | rasdani/github-patches | git_diff | inventree__InvenTree-5045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make event test more specific
@matmair [this CI failure](https://github.com/inventree/InvenTree/actions/runs/5259899543/jobs/9506168595?pr=4732) is the last in the current batch of weird ones. I'm seeing it on multiple PRs, about 50% of the time.
Here's the failing line:
https://github.com/inventree/InvenTree/blob/c8365ccd0c9371ea4d127fe616e0029f35b3c19c/InvenTree/plugin/samples/event/test_event_sample.py#L27
Sometimes, `cm.warning.args[0]` returns an "unclosed file object", rather than a string. Any ideas?
_Originally posted by @SchrodingersGat in https://github.com/inventree/InvenTree/issues/4732#issuecomment-1590219025_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/plugin/samples/event/event_sample.py`
Content:
```
1 """Sample plugin which responds to events."""
2
3 import warnings
4
5 from django.conf import settings
6
7 from plugin import InvenTreePlugin
8 from plugin.mixins import EventMixin
9
10
11 class EventPluginSample(EventMixin, InvenTreePlugin):
12 """A sample plugin which provides supports for triggered events."""
13
14 NAME = "EventPlugin"
15 SLUG = "sampleevent"
16 TITLE = "Triggered Events"
17
18 def process_event(self, event, *args, **kwargs):
19 """Custom event processing."""
20 print(f"Processing triggered event: '{event}'")
21 print("args:", str(args))
22 print("kwargs:", str(kwargs))
23
24 # Issue warning that we can test for
25 if settings.PLUGIN_TESTING:
26 warnings.warn(f'Event `{event}` triggered', stacklevel=2)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/plugin/samples/event/event_sample.py b/InvenTree/plugin/samples/event/event_sample.py
--- a/InvenTree/plugin/samples/event/event_sample.py
+++ b/InvenTree/plugin/samples/event/event_sample.py
@@ -1,12 +1,14 @@
"""Sample plugin which responds to events."""
-import warnings
+import logging
from django.conf import settings
from plugin import InvenTreePlugin
from plugin.mixins import EventMixin
+logger = logging.getLogger('inventree')
+
class EventPluginSample(EventMixin, InvenTreePlugin):
"""A sample plugin which provides supports for triggered events."""
@@ -23,4 +25,4 @@
# Issue warning that we can test for
if settings.PLUGIN_TESTING:
- warnings.warn(f'Event `{event}` triggered', stacklevel=2)
+ logger.debug(f'Event `{event}` triggered in sample plugin')
| {"golden_diff": "diff --git a/InvenTree/plugin/samples/event/event_sample.py b/InvenTree/plugin/samples/event/event_sample.py\n--- a/InvenTree/plugin/samples/event/event_sample.py\n+++ b/InvenTree/plugin/samples/event/event_sample.py\n@@ -1,12 +1,14 @@\n \"\"\"Sample plugin which responds to events.\"\"\"\n \n-import warnings\n+import logging\n \n from django.conf import settings\n \n from plugin import InvenTreePlugin\n from plugin.mixins import EventMixin\n \n+logger = logging.getLogger('inventree')\n+\n \n class EventPluginSample(EventMixin, InvenTreePlugin):\n \"\"\"A sample plugin which provides supports for triggered events.\"\"\"\n@@ -23,4 +25,4 @@\n \n # Issue warning that we can test for\n if settings.PLUGIN_TESTING:\n- warnings.warn(f'Event `{event}` triggered', stacklevel=2)\n+ logger.debug(f'Event `{event}` triggered in sample plugin')\n", "issue": "Make event test more specific\n @matmair [this CI failure](https://github.com/inventree/InvenTree/actions/runs/5259899543/jobs/9506168595?pr=4732) is the last in the current batch of weird ones. I'm seeing it on multiple PRs, about 50% of the time.\r\n\r\nHere's the failing line:\r\n\r\nhttps://github.com/inventree/InvenTree/blob/c8365ccd0c9371ea4d127fe616e0029f35b3c19c/InvenTree/plugin/samples/event/test_event_sample.py#L27\r\n\r\nSometimes, `cm.warning.args[0]` returns an \"unclosed file object\", rather than a string. Any ideas?\r\n\r\n_Originally posted by @SchrodingersGat in https://github.com/inventree/InvenTree/issues/4732#issuecomment-1590219025_\r\n \n", "before_files": [{"content": "\"\"\"Sample plugin which responds to events.\"\"\"\n\nimport warnings\n\nfrom django.conf import settings\n\nfrom plugin import InvenTreePlugin\nfrom plugin.mixins import EventMixin\n\n\nclass EventPluginSample(EventMixin, InvenTreePlugin):\n \"\"\"A sample plugin which provides supports for triggered events.\"\"\"\n\n NAME = \"EventPlugin\"\n SLUG = \"sampleevent\"\n TITLE = \"Triggered Events\"\n\n def process_event(self, event, *args, **kwargs):\n \"\"\"Custom event processing.\"\"\"\n print(f\"Processing triggered event: '{event}'\")\n print(\"args:\", str(args))\n print(\"kwargs:\", str(kwargs))\n\n # Issue warning that we can test for\n if settings.PLUGIN_TESTING:\n warnings.warn(f'Event `{event}` triggered', stacklevel=2)\n", "path": "InvenTree/plugin/samples/event/event_sample.py"}], "after_files": [{"content": "\"\"\"Sample plugin which responds to events.\"\"\"\n\nimport logging\n\nfrom django.conf import settings\n\nfrom plugin import InvenTreePlugin\nfrom plugin.mixins import EventMixin\n\nlogger = logging.getLogger('inventree')\n\n\nclass EventPluginSample(EventMixin, InvenTreePlugin):\n \"\"\"A sample plugin which provides supports for triggered events.\"\"\"\n\n NAME = \"EventPlugin\"\n SLUG = \"sampleevent\"\n TITLE = \"Triggered Events\"\n\n def process_event(self, event, *args, **kwargs):\n \"\"\"Custom event processing.\"\"\"\n print(f\"Processing triggered event: '{event}'\")\n print(\"args:\", str(args))\n print(\"kwargs:\", str(kwargs))\n\n # Issue warning that we can test for\n if settings.PLUGIN_TESTING:\n logger.debug(f'Event `{event}` triggered in sample plugin')\n", "path": "InvenTree/plugin/samples/event/event_sample.py"}]} | 713 | 205 |
gh_patches_debug_32092 | rasdani/github-patches | git_diff | bokeh__bokeh-10229 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Simplify/refactor filters in bokehjs
ref https://github.com/bokeh/bokeh/pull/10084#discussion_r432112063
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/filters.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7
8 #-----------------------------------------------------------------------------
9 # Boilerplate
10 #-----------------------------------------------------------------------------
11 import logging # isort:skip
12 log = logging.getLogger(__name__)
13
14 #-----------------------------------------------------------------------------
15 # Imports
16 #-----------------------------------------------------------------------------
17
18 # Bokeh imports
19 from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String
20 from ..model import Model
21
22 #-----------------------------------------------------------------------------
23 # Globals and constants
24 #-----------------------------------------------------------------------------
25
26 __all__ = (
27 'BooleanFilter',
28 'CustomJSFilter',
29 'Filter',
30 'GroupFilter',
31 'IndexFilter',
32 )
33
34 #-----------------------------------------------------------------------------
35 # General API
36 #-----------------------------------------------------------------------------
37
38 class Filter(Model):
39 ''' A Filter model represents a filtering operation that returns a row-wise subset of
40 data when applied to a ``ColumnDataSource``.
41 '''
42
43 filter = Either(Seq(Int), Seq(Bool), help="""
44 A list that can be either integer indices or booleans representing a row-wise subset of data.
45 """)
46
47 def __init__(self, *args, **kw):
48 if len(args) == 1 and "filter" not in kw:
49 kw["filter"] = args[0]
50
51 super().__init__(**kw)
52
53 class IndexFilter(Filter):
54 ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.
55 '''
56
57 indices = Seq(Int, help="""
58 A list of integer indices representing the subset of data to select.
59 """)
60
61 def __init__(self, *args, **kw):
62 if len(args) == 1 and "indices" not in kw:
63 kw["indices"] = args[0]
64
65 super().__init__(**kw)
66
67 class BooleanFilter(Filter):
68 ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices
69 where the values of the booleans array is True.
70 '''
71
72 booleans = Seq(Bool, help="""
73 A list of booleans indicating which rows of data to select.
74 """)
75
76 def __init__(self, *args, **kw):
77 if len(args) == 1 and "booleans" not in kw:
78 kw["booleans"] = args[0]
79
80 super().__init__(**kw)
81
82 class GroupFilter(Filter):
83 ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical
84 column column_name match the group variable.
85 '''
86
87 column_name = String(help="""
88 The name of the column to perform the group filtering operation on.
89 """)
90
91 group = String(help="""
92 The value of the column indicating the rows of data to keep.
93 """)
94
95 def __init__(self, *args, **kw):
96 if len(args) == 2 and "column_name" not in kw and "group" not in kw:
97 kw["column_name"] = args[0]
98 kw["group"] = args[1]
99
100 super().__init__(**kw)
101
102 class CustomJSFilter(Filter):
103 ''' Filter data sources with a custom defined JavaScript function.
104
105 .. warning::
106 The explicit purpose of this Bokeh Model is to embed *raw JavaScript
107 code* for a browser to execute. If any part of the code is derived
108 from untrusted user inputs, then you must take appropriate care to
109 sanitize the user input prior to passing to Bokeh.
110
111 '''
112
113 args = Dict(String, AnyRef, help="""
114 A mapping of names to Python objects. In particular those can be bokeh's models.
115 These objects are made available to the callback's code snippet as the values of
116 named parameters to the callback.
117 """)
118
119 code = String(default="", help="""
120 A snippet of JavaScript code to filter data contained in a columnar data source.
121 The code is made into the body of a function, and all of of the named objects in
122 ``args`` are available as parameters that the code can use. The variable
123 ``source`` will contain the data source that is associated with the ``CDSView`` this
124 filter is added to.
125
126 The code should either return the indices of the subset or an array of booleans
127 to use to subset data source rows.
128
129 Example:
130
131 .. code-block
132
133 code = '''
134 const indices = []
135 for (var i = 0; i <= source.data['some_column'].length; i++) {
136 if (source.data['some_column'][i] == 'some_value') {
137 indices.push(i)
138 }
139 }
140 return indices
141 '''
142
143 """)
144
145 #-----------------------------------------------------------------------------
146 # Dev API
147 #-----------------------------------------------------------------------------
148
149 #-----------------------------------------------------------------------------
150 # Private API
151 #-----------------------------------------------------------------------------
152
153 #-----------------------------------------------------------------------------
154 # Code
155 #-----------------------------------------------------------------------------
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/filters.py b/bokeh/models/filters.py
--- a/bokeh/models/filters.py
+++ b/bokeh/models/filters.py
@@ -16,7 +16,8 @@
#-----------------------------------------------------------------------------
# Bokeh imports
-from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String
+from ..core.has_props import abstract
+from ..core.properties import AnyRef, Bool, Dict, Int, Seq, String
from ..model import Model
#-----------------------------------------------------------------------------
@@ -35,21 +36,12 @@
# General API
#-----------------------------------------------------------------------------
+@abstract
class Filter(Model):
''' A Filter model represents a filtering operation that returns a row-wise subset of
data when applied to a ``ColumnDataSource``.
'''
- filter = Either(Seq(Int), Seq(Bool), help="""
- A list that can be either integer indices or booleans representing a row-wise subset of data.
- """)
-
- def __init__(self, *args, **kw):
- if len(args) == 1 and "filter" not in kw:
- kw["filter"] = args[0]
-
- super().__init__(**kw)
-
class IndexFilter(Filter):
''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.
'''
@@ -132,7 +124,7 @@
code = '''
const indices = []
- for (var i = 0; i <= source.data['some_column'].length; i++) {
+ for (let i = 0; i <= source.data['some_column'].length; i++) {
if (source.data['some_column'][i] == 'some_value') {
indices.push(i)
}
| {"golden_diff": "diff --git a/bokeh/models/filters.py b/bokeh/models/filters.py\n--- a/bokeh/models/filters.py\n+++ b/bokeh/models/filters.py\n@@ -16,7 +16,8 @@\n #-----------------------------------------------------------------------------\n \n # Bokeh imports\n-from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String\n+from ..core.has_props import abstract\n+from ..core.properties import AnyRef, Bool, Dict, Int, Seq, String\n from ..model import Model\n \n #-----------------------------------------------------------------------------\n@@ -35,21 +36,12 @@\n # General API\n #-----------------------------------------------------------------------------\n \n+@abstract\n class Filter(Model):\n ''' A Filter model represents a filtering operation that returns a row-wise subset of\n data when applied to a ``ColumnDataSource``.\n '''\n \n- filter = Either(Seq(Int), Seq(Bool), help=\"\"\"\n- A list that can be either integer indices or booleans representing a row-wise subset of data.\n- \"\"\")\n-\n- def __init__(self, *args, **kw):\n- if len(args) == 1 and \"filter\" not in kw:\n- kw[\"filter\"] = args[0]\n-\n- super().__init__(**kw)\n-\n class IndexFilter(Filter):\n ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.\n '''\n@@ -132,7 +124,7 @@\n \n code = '''\n const indices = []\n- for (var i = 0; i <= source.data['some_column'].length; i++) {\n+ for (let i = 0; i <= source.data['some_column'].length; i++) {\n if (source.data['some_column'][i] == 'some_value') {\n indices.push(i)\n }\n", "issue": "Simplify/refactor filters in bokehjs\nref https://github.com/bokeh/bokeh/pull/10084#discussion_r432112063\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Bokeh imports\nfrom ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String\nfrom ..model import Model\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'BooleanFilter',\n 'CustomJSFilter',\n 'Filter',\n 'GroupFilter',\n 'IndexFilter',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass Filter(Model):\n ''' A Filter model represents a filtering operation that returns a row-wise subset of\n data when applied to a ``ColumnDataSource``.\n '''\n\n filter = Either(Seq(Int), Seq(Bool), help=\"\"\"\n A list that can be either integer indices or booleans representing a row-wise subset of data.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"filter\" not in kw:\n kw[\"filter\"] = args[0]\n\n super().__init__(**kw)\n\nclass IndexFilter(Filter):\n 
''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.\n '''\n\n indices = Seq(Int, help=\"\"\"\n A list of integer indices representing the subset of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"indices\" not in kw:\n kw[\"indices\"] = args[0]\n\n super().__init__(**kw)\n\nclass BooleanFilter(Filter):\n ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices\n where the values of the booleans array is True.\n '''\n\n booleans = Seq(Bool, help=\"\"\"\n A list of booleans indicating which rows of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"booleans\" not in kw:\n kw[\"booleans\"] = args[0]\n\n super().__init__(**kw)\n\nclass GroupFilter(Filter):\n ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical\n column column_name match the group variable.\n '''\n\n column_name = String(help=\"\"\"\n The name of the column to perform the group filtering operation on.\n \"\"\")\n\n group = String(help=\"\"\"\n The value of the column indicating the rows of data to keep.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 2 and \"column_name\" not in kw and \"group\" not in kw:\n kw[\"column_name\"] = args[0]\n kw[\"group\"] = args[1]\n\n super().__init__(**kw)\n\nclass CustomJSFilter(Filter):\n ''' Filter data sources with a custom defined JavaScript function.\n\n .. warning::\n The explicit purpose of this Bokeh Model is to embed *raw JavaScript\n code* for a browser to execute. If any part of the code is derived\n from untrusted user inputs, then you must take appropriate care to\n sanitize the user input prior to passing to Bokeh.\n\n '''\n\n args = Dict(String, AnyRef, help=\"\"\"\n A mapping of names to Python objects. In particular those can be bokeh's models.\n These objects are made available to the callback's code snippet as the values of\n named parameters to the callback.\n \"\"\")\n\n code = String(default=\"\", help=\"\"\"\n A snippet of JavaScript code to filter data contained in a columnar data source.\n The code is made into the body of a function, and all of of the named objects in\n ``args`` are available as parameters that the code can use. The variable\n ``source`` will contain the data source that is associated with the ``CDSView`` this\n filter is added to.\n\n The code should either return the indices of the subset or an array of booleans\n to use to subset data source rows.\n\n Example:\n\n .. 
code-block\n\n code = '''\n const indices = []\n for (var i = 0; i <= source.data['some_column'].length; i++) {\n if (source.data['some_column'][i] == 'some_value') {\n indices.push(i)\n }\n }\n return indices\n '''\n\n \"\"\")\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/models/filters.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Bokeh imports\nfrom ..core.has_props import abstract\nfrom ..core.properties import AnyRef, Bool, Dict, Int, Seq, String\nfrom ..model import Model\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'BooleanFilter',\n 'CustomJSFilter',\n 'Filter',\n 'GroupFilter',\n 'IndexFilter',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n@abstract\nclass Filter(Model):\n ''' A Filter model represents a filtering operation that returns a row-wise subset of\n data when applied to a ``ColumnDataSource``.\n '''\n\nclass IndexFilter(Filter):\n ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.\n '''\n\n indices = Seq(Int, help=\"\"\"\n A list of integer indices representing the subset of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"indices\" not in kw:\n kw[\"indices\"] = args[0]\n\n super().__init__(**kw)\n\nclass BooleanFilter(Filter):\n ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices\n where the values of the booleans array is True.\n '''\n\n booleans = Seq(Bool, help=\"\"\"\n A list of booleans indicating which rows of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"booleans\" not in kw:\n kw[\"booleans\"] = args[0]\n\n super().__init__(**kw)\n\nclass GroupFilter(Filter):\n ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical\n column column_name match the group variable.\n '''\n\n column_name = String(help=\"\"\"\n The name of the column to perform the group filtering operation on.\n \"\"\")\n\n group = String(help=\"\"\"\n The value of the column 
indicating the rows of data to keep.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 2 and \"column_name\" not in kw and \"group\" not in kw:\n kw[\"column_name\"] = args[0]\n kw[\"group\"] = args[1]\n\n super().__init__(**kw)\n\nclass CustomJSFilter(Filter):\n ''' Filter data sources with a custom defined JavaScript function.\n\n .. warning::\n The explicit purpose of this Bokeh Model is to embed *raw JavaScript\n code* for a browser to execute. If any part of the code is derived\n from untrusted user inputs, then you must take appropriate care to\n sanitize the user input prior to passing to Bokeh.\n\n '''\n\n args = Dict(String, AnyRef, help=\"\"\"\n A mapping of names to Python objects. In particular those can be bokeh's models.\n These objects are made available to the callback's code snippet as the values of\n named parameters to the callback.\n \"\"\")\n\n code = String(default=\"\", help=\"\"\"\n A snippet of JavaScript code to filter data contained in a columnar data source.\n The code is made into the body of a function, and all of of the named objects in\n ``args`` are available as parameters that the code can use. The variable\n ``source`` will contain the data source that is associated with the ``CDSView`` this\n filter is added to.\n\n The code should either return the indices of the subset or an array of booleans\n to use to subset data source rows.\n\n Example:\n\n .. code-block\n\n code = '''\n const indices = []\n for (let i = 0; i <= source.data['some_column'].length; i++) {\n if (source.data['some_column'][i] == 'some_value') {\n indices.push(i)\n }\n }\n return indices\n '''\n\n \"\"\")\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/models/filters.py"}]} | 1,723 | 396 |
gh_patches_debug_15868 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2479 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyInstaller should not check for modules specific to platforms other than the current one
**Python extensions for Windows**
We should not be checking for these modules on OS X and Linux.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/pre_safe_import_module/hook-win32com.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2017, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 """
11 PyWin32 package 'win32com' extends it's __path__ attribute with win32comext
12 directory and thus PyInstaller is not able to find modules in it. For example
13 module 'win32com.shell' is in reality 'win32comext.shell'.
14
15 >>> win32com.__path__
16 ['win32com', 'C:\\Python27\\Lib\\site-packages\\win32comext']
17
18 """
19
20
21 import os
22
23 from PyInstaller.utils.hooks import logger, get_module_file_attribute
24
25
26 def pre_safe_import_module(api):
27 win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
28 comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
29 logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
30 # Append the __path__ where PyInstaller will look for 'win32com' modules.'
31 api.append_package_path(comext_dir)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
--- a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
+++ b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
@@ -21,11 +21,13 @@
import os
from PyInstaller.utils.hooks import logger, get_module_file_attribute
+from PyInstaller.compat import is_win
def pre_safe_import_module(api):
- win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
- comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
- logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
- # Append the __path__ where PyInstaller will look for 'win32com' modules.'
- api.append_package_path(comext_dir)
+ if is_win:
+ win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
+ comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
+ logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
+ # Append the __path__ where PyInstaller will look for 'win32com' modules.'
+ api.append_package_path(comext_dir)
| {"golden_diff": "diff --git a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n--- a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n+++ b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n@@ -21,11 +21,13 @@\n import os\n \n from PyInstaller.utils.hooks import logger, get_module_file_attribute\n+from PyInstaller.compat import is_win\n \n \n def pre_safe_import_module(api):\n- win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n- comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n- logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n- # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n- api.append_package_path(comext_dir)\n+ if is_win:\n+ win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n+ comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n+ logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n+ # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n+ api.append_package_path(comext_dir)\n", "issue": "PyInstaller should not check for modules specific for other than the current platform\n**Python extensions for Windows**\r\n\r\nWe should not be checking for these modules in os x and linux. \n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2017, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\nPyWin32 package 'win32com' extends it's __path__ attribute with win32comext\ndirectory and thus PyInstaller is not able to find modules in it. For example\nmodule 'win32com.shell' is in reality 'win32comext.shell'.\n\n>>> win32com.__path__\n['win32com', 'C:\\\\Python27\\\\Lib\\\\site-packages\\\\win32comext']\n\n\"\"\"\n\n\nimport os\n\nfrom PyInstaller.utils.hooks import logger, get_module_file_attribute\n\n\ndef pre_safe_import_module(api):\n win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n api.append_package_path(comext_dir)\n", "path": "PyInstaller/hooks/pre_safe_import_module/hook-win32com.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2017, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\nPyWin32 package 'win32com' extends it's __path__ attribute with win32comext\ndirectory and thus PyInstaller is not able to find modules in it. 
For example\nmodule 'win32com.shell' is in reality 'win32comext.shell'.\n\n>>> win32com.__path__\n['win32com', 'C:\\\\Python27\\\\Lib\\\\site-packages\\\\win32comext']\n\n\"\"\"\n\n\nimport os\n\nfrom PyInstaller.utils.hooks import logger, get_module_file_attribute\nfrom PyInstaller.compat import is_win\n\n\ndef pre_safe_import_module(api):\n if is_win:\n win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n api.append_package_path(comext_dir)\n", "path": "PyInstaller/hooks/pre_safe_import_module/hook-win32com.py"}]} | 648 | 330 |
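The patch above guards a Windows-only hook so that macOS and Linux builds never probe for PyWin32 modules. As a minimal sketch under the same assumptions — `is_win`, `get_module_file_attribute`, and `logger` are exactly the names the patched file imports — the guard can equivalently be written with an early return:

```python
# Sketch: skip the win32com path extension entirely on non-Windows platforms.
import os

from PyInstaller.compat import is_win
from PyInstaller.utils.hooks import get_module_file_attribute, logger


def pre_safe_import_module(api):
    if not is_win:
        # win32com (PyWin32) only exists on Windows, so there is nothing to extend.
        return
    win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
    comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
    logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
    # Append the __path__ where PyInstaller will look for 'win32com' modules.
    api.append_package_path(comext_dir)
```

Behaviourally this matches the golden diff; the early return is only a readability choice.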
gh_patches_debug_6606 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-2669 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
modprobe: Call 'modprobe -r' instead of 'rmmod' for absent?
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
modprobe
##### ANSIBLE VERSION
```
ansible 2.0.1.0
config file =
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
N/A (linux target)
##### SUMMARY
Current implementation of modprobe module uses `rmmod` command to remove kernel module.
https://github.com/ansible/ansible-modules-extras/blob/stable-2.1/system/modprobe.py#L114
Why don't we use `modprobe -r` instead of `rmmod` here?
`modprobe -r` would be better because:
1. It will also unload unused modules
2. It follows straightforwardly from the module name
##### STEPS TO REPRODUCE
I was trying to unload the sb_edac module from my server (since it conflicts with some hardware monitoring on the server); the module depends on edac_core, and edac_core was loaded only for sb_edac.
Before applying playbook, on the target server.
```
server# lsmod | grep edac
sb_edac 28672 0
edac_core 53248 1 sb_edac
```
playbook (snippet)
```
- name: unload edac modules
modprobe:
name: sb_edac
state: absent
```
##### EXPECTED RESULTS
The edac_core module should also be unloaded, since it is no longer used.
##### ACTUAL RESULTS
After applying playbook, on the target server.
```
server# lsmod | grep edac
edac_core 53248 0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `system/modprobe.py`
Content:
```
1 #!/usr/bin/python
2 #coding: utf-8 -*-
3
4 # (c) 2013, David Stygstra <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # This module is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This software is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this software. If not, see <http://www.gnu.org/licenses/>.
20
21
22 DOCUMENTATION = '''
23 ---
24 module: modprobe
25 short_description: Add or remove kernel modules
26 requirements: []
27 version_added: 1.4
28 author:
29 - "David Stygstra (@stygstra)"
30 - "Julien Dauphant"
31 - "Matt Jeffery"
32 description:
33 - Add or remove kernel modules.
34 options:
35 name:
36 required: true
37 description:
38 - Name of kernel module to manage.
39 state:
40 required: false
41 default: "present"
42 choices: [ present, absent ]
43 description:
44 - Whether the module should be present or absent.
45 params:
46 required: false
47 default: ""
48 version_added: "1.6"
49 description:
50 - Modules parameters.
51 '''
52
53 EXAMPLES = '''
54 # Add the 802.1q module
55 - modprobe: name=8021q state=present
56 # Add the dummy module
57 - modprobe: name=dummy state=present params="numdummies=2"
58 '''
59
60 from ansible.module_utils.basic import *
61 from ansible.module_utils.pycompat24 import get_exception
62 import shlex
63
64
65 def main():
66 module = AnsibleModule(
67 argument_spec={
68 'name': {'required': True},
69 'state': {'default': 'present', 'choices': ['present', 'absent']},
70 'params': {'default': ''},
71 },
72 supports_check_mode=True,
73 )
74 args = {
75 'changed': False,
76 'failed': False,
77 'name': module.params['name'],
78 'state': module.params['state'],
79 'params': module.params['params'],
80 }
81
82 # Check if module is present
83 try:
84 modules = open('/proc/modules')
85 present = False
86 module_name = args['name'].replace('-', '_') + ' '
87 for line in modules:
88 if line.startswith(module_name):
89 present = True
90 break
91 modules.close()
92 except IOError:
93 e = get_exception()
94 module.fail_json(msg=str(e), **args)
95
96 # Check only; don't modify
97 if module.check_mode:
98 if args['state'] == 'present' and not present:
99 changed = True
100 elif args['state'] == 'absent' and present:
101 changed = True
102 else:
103 changed = False
104 module.exit_json(changed=changed)
105
106 # Add/remove module as needed
107 if args['state'] == 'present':
108 if not present:
109 command = [module.get_bin_path('modprobe', True), args['name']]
110 command.extend(shlex.split(args['params']))
111 rc, _, err = module.run_command(command)
112 if rc != 0:
113 module.fail_json(msg=err, **args)
114 args['changed'] = True
115 elif args['state'] == 'absent':
116 if present:
117 rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
118 if rc != 0:
119 module.fail_json(msg=err, **args)
120 args['changed'] = True
121
122 module.exit_json(**args)
123
124 main()
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/system/modprobe.py b/system/modprobe.py
--- a/system/modprobe.py
+++ b/system/modprobe.py
@@ -114,7 +114,7 @@
args['changed'] = True
elif args['state'] == 'absent':
if present:
- rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
| {"golden_diff": "diff --git a/system/modprobe.py b/system/modprobe.py\n--- a/system/modprobe.py\n+++ b/system/modprobe.py\n@@ -114,7 +114,7 @@\n args['changed'] = True\n elif args['state'] == 'absent':\n if present:\n- rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])\n+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n", "issue": "modprobe: Call 'modprobe -r' instasd of 'rmmod' for absent?\n##### ISSUE TYPE\n- Feature Idea\n##### COMPONENT NAME\n\nmodprobe\n##### ANSIBLE VERSION\n\n```\nansible 2.0.1.0\n config file =\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nN/A (linux target)\n##### SUMMARY\n\nCurrent implementation of modprobe module uses `rmmod` command to remove kernel module.\nhttps://github.com/ansible/ansible-modules-extras/blob/stable-2.1/system/modprobe.py#L114\n\nWhy don't we use `modprobe -r` instead of `rmmod` here?\n\n`modprobe -r` would be better because; \n1. It will also unload unused modules\n2. Straight forward from module name\n##### STEPS TO REPRODUCE\n\nI was trying to unload sb_edac module from my server (since it conflict with some hardware monitoring of server), the module depends on edac_core and edac_core was loaded only for sb_edac.\n\nBefore applying playbook, on the target server.\n\n```\nserver# lsmod | grep edac\nsb_edac 28672 0\nedac_core 53248 1 sb_edac\n```\n\nplaybook (snippet)\n\n```\n- name: unload edac modules\n modprobe:\n name: sb_edac\n state: absent\n```\n##### EXPECTED RESULTS\n\nedac_core module unloaded, since it no longer be used.\n##### ACTUAL RESULTS\n\nAfter applying playbook, on the target server.\n\n```\nserver# lsmod | grep edac\nedac_core 53248 0\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013, David Stygstra <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nDOCUMENTATION = '''\n---\nmodule: modprobe\nshort_description: Add or remove kernel modules\nrequirements: []\nversion_added: 1.4\nauthor:\n - \"David Stygstra (@stygstra)\" \n - \"Julien Dauphant\"\n - \"Matt Jeffery\"\ndescription:\n - Add or remove kernel modules.\noptions:\n name:\n required: true\n description:\n - Name of kernel module to manage.\n state:\n required: false\n default: \"present\"\n choices: [ present, absent ]\n description:\n - Whether the module should be present or absent.\n params:\n required: false\n default: \"\"\n version_added: \"1.6\"\n description:\n - Modules parameters.\n'''\n\nEXAMPLES = '''\n# Add the 802.1q module\n- modprobe: name=8021q state=present\n# Add the dummy module\n- modprobe: name=dummy state=present params=\"numdummies=2\"\n'''\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.pycompat24 import get_exception\nimport shlex\n\n\ndef main():\n module = AnsibleModule(\n argument_spec={\n 'name': {'required': True},\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'params': {'default': ''},\n },\n supports_check_mode=True,\n )\n args = {\n 'changed': False,\n 'failed': False,\n 'name': module.params['name'],\n 'state': module.params['state'],\n 'params': module.params['params'],\n }\n\n # Check if module is present\n try:\n modules = open('/proc/modules')\n present = False\n module_name = args['name'].replace('-', '_') + ' '\n for line in modules:\n if line.startswith(module_name):\n present = True\n break\n modules.close()\n except IOError:\n e = get_exception()\n module.fail_json(msg=str(e), **args)\n\n # Check only; don't modify\n if module.check_mode:\n if args['state'] == 'present' and not present:\n changed = True\n elif args['state'] == 'absent' and present:\n changed = True\n else:\n changed = False\n module.exit_json(changed=changed)\n\n # Add/remove module as needed\n if args['state'] == 'present':\n if not present:\n command = [module.get_bin_path('modprobe', True), args['name']]\n command.extend(shlex.split(args['params']))\n rc, _, err = module.run_command(command)\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n elif args['state'] == 'absent':\n if present:\n rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n\n module.exit_json(**args)\n\nmain()\n", "path": "system/modprobe.py"}], "after_files": [{"content": "#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013, David Stygstra <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nDOCUMENTATION = '''\n---\nmodule: modprobe\nshort_description: Add or remove kernel modules\nrequirements: []\nversion_added: 1.4\nauthor:\n - \"David Stygstra (@stygstra)\" \n - \"Julien Dauphant\"\n - \"Matt Jeffery\"\ndescription:\n - Add or remove kernel modules.\noptions:\n name:\n required: true\n description:\n - Name of kernel module to manage.\n state:\n required: false\n default: \"present\"\n choices: [ present, absent ]\n description:\n - Whether the module should be present or absent.\n params:\n required: false\n default: \"\"\n version_added: \"1.6\"\n description:\n - Modules parameters.\n'''\n\nEXAMPLES = '''\n# Add the 802.1q module\n- modprobe: name=8021q state=present\n# Add the dummy module\n- modprobe: name=dummy state=present params=\"numdummies=2\"\n'''\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.pycompat24 import get_exception\nimport shlex\n\n\ndef main():\n module = AnsibleModule(\n argument_spec={\n 'name': {'required': True},\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'params': {'default': ''},\n },\n supports_check_mode=True,\n )\n args = {\n 'changed': False,\n 'failed': False,\n 'name': module.params['name'],\n 'state': module.params['state'],\n 'params': module.params['params'],\n }\n\n # Check if module is present\n try:\n modules = open('/proc/modules')\n present = False\n module_name = args['name'].replace('-', '_') + ' '\n for line in modules:\n if line.startswith(module_name):\n present = True\n break\n modules.close()\n except IOError:\n e = get_exception()\n module.fail_json(msg=str(e), **args)\n\n # Check only; don't modify\n if module.check_mode:\n if args['state'] == 'present' and not present:\n changed = True\n elif args['state'] == 'absent' and present:\n changed = True\n else:\n changed = False\n module.exit_json(changed=changed)\n\n # Add/remove module as needed\n if args['state'] == 'present':\n if not present:\n command = [module.get_bin_path('modprobe', True), args['name']]\n command.extend(shlex.split(args['params']))\n rc, _, err = module.run_command(command)\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n elif args['state'] == 'absent':\n if present:\n rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n\n module.exit_json(**args)\n\nmain()\n", "path": "system/modprobe.py"}]} | 1,768 | 144 |
gh_patches_debug_28590 | rasdani/github-patches | git_diff | biopython__biopython-1827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ExPASy.sprot_search_ful and ExPASy.sprot_search_de do not work
Both just return pages stating "Please update your links/bookmarks", with the correct link at the end of the page.
The functions ultimately use http://www.expasy.ch/cgi-bin/sprot-search-ful or http://www.expasy.ch/cgi-bin/sprot-search-de, pages which no longer exist.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Bio/ExPASy/__init__.py`
Content:
```
1 # Copyright 2000 by Jeffrey Chang. All rights reserved.
2 # This code is part of the Biopython distribution and governed by its
3 # license. Please see the LICENSE file that should have been included
4 # as part of this package.
5
6 """Code to access resources at ExPASy over the WWW.
7
8 See https://www.expasy.org/
9
10
11 Functions:
12 - get_prodoc_entry Interface to the get-prodoc-entry CGI script.
13 - get_prosite_entry Interface to the get-prosite-entry CGI script.
14 - get_prosite_raw Interface to the get-prosite-raw CGI script.
15 - get_sprot_raw Interface to the get-sprot-raw CGI script.
16 - sprot_search_ful Interface to the sprot-search-ful CGI script.
17 - sprot_search_de Interface to the sprot-search-de CGI script.
18
19 """
20
21 # Importing these functions with leading underscore as not intended for reuse
22 from Bio._py3k import urlopen as _urlopen
23 from Bio._py3k import urlencode as _urlencode
24 from Bio._py3k import _binary_to_string_handle
25
26
27 def get_prodoc_entry(id,
28 cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prodoc-entry'):
29 """Get a text handle to a PRODOC entry at ExPASy in HTML format.
30
31 >>> from Bio import ExPASy
32 >>> with ExPASy.get_prodoc_entry('PDOC00001') as in_handle:
33 ... html = in_handle.read()
34 ...
35 >>> with open("myprodocrecord.html", "w") as out_handle:
36 ... out_handle.write(html)
37 ...
38
39 For a non-existing key XXX, ExPASy returns an HTML-formatted page
40 containing this text: 'There is currently no PROSITE entry for'
41 """
42 return _binary_to_string_handle(_urlopen("%s?%s" % (cgi, id)))
43
44
45 def get_prosite_entry(id,
46 cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prosite-entry'):
47 """Get a text handle to a PROSITE entry at ExPASy in HTML format.
48
49 >>> from Bio import ExPASy
50 >>> with ExPASy.get_prosite_entry('PS00001') as in_handle:
51 ... html = in_handle.read()
52 ...
53 >>> with open("myprositerecord.html", "w") as out_handle:
54 ... out_handle.write(html)
55 ...
56
57 For a non-existing key XXX, ExPASy returns an HTML-formatted page
58 containing this text: 'There is currently no PROSITE entry for'
59 """
60 return _binary_to_string_handle(_urlopen("%s?%s" % (cgi, id)))
61
62
63 def get_prosite_raw(id, cgi=None):
64 """Get a text handle to a raw PROSITE or PRODOC record at ExPASy.
65
66 The cgi argument is deprecated due to changes in the ExPASy
67 website.
68
69 For a non-existing key, ExPASy returns nothing.
70
71 >>> from Bio import ExPASy
72 >>> from Bio.ExPASy import Prosite
73 >>> with ExPASy.get_prosite_raw('PS00001') as handle:
74 ... record = Prosite.read(handle)
75 ...
76 >>> print(record.accession)
77 PS00001
78
79
80 For a non-existing key, ExPASy returns an error:
81
82 >>> handle = get_prosite_raw("does_not_exist")
83 Traceback (most recent call last):
84 ...
85 urllib.error.HTTPError: HTTP Error 404: Not Found
86
87 """
88 url = "https://prosite.expasy.org/%s.txt" % id
89 return _binary_to_string_handle(_urlopen(url))
90
91
92 def get_sprot_raw(id):
93 """Get a text handle to a raw SwissProt entry at ExPASy.
94
95 For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt
96 (as per the https://www.expasy.org/expasy_urls.html documentation).
97
98 >>> from Bio import ExPASy
99 >>> from Bio import SwissProt
100 >>> with ExPASy.get_sprot_raw("O23729") as handle:
101 ... record = SwissProt.read(handle)
102 ...
103 >>> print(record.entry_name)
104 CHS3_BROFI
105
106 For a non-existing identifier, UniProt returns an error:
107
108 >>> ExPASy.get_sprot_raw("DOES_NOT_EXIST")
109 Traceback (most recent call last):
110 ...
111 urllib.error.HTTPError: HTTP Error 404: Not Found
112
113 """
114 url = "http://www.uniprot.org/uniprot/%s.txt" % id
115 return _binary_to_string_handle(_urlopen(url))
116
117
118 def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
119 cgi='https://www.expasy.org/cgi-bin/sprot-search-ful'):
120 """Search SwissProt by full text (BROKEN)."""
121 variables = {'SEARCH': text}
122 if make_wild:
123 variables['makeWild'] = 'on'
124 if swissprot:
125 variables['S'] = 'on'
126 if trembl:
127 variables['T'] = 'on'
128 options = _urlencode(variables)
129 fullcgi = "%s?%s" % (cgi, options)
130 handle = _binary_to_string_handle(_urlopen(fullcgi))
131 return handle
132
133
134 def sprot_search_de(text, swissprot=1, trembl=None,
135 cgi='https://www.expasy.org/cgi-bin/sprot-search-de'):
136 """Search SwissProt (BROKEN).
137
138 Search by name, description, gene name, species, or organelle.
139 """
140 variables = {'SEARCH': text}
141 if swissprot:
142 variables['S'] = 'on'
143 if trembl:
144 variables['T'] = 'on'
145 options = _urlencode(variables)
146 fullcgi = "%s?%s" % (cgi, options)
147 return _binary_to_string_handle(_urlopen(fullcgi))
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Bio/ExPASy/__init__.py b/Bio/ExPASy/__init__.py
--- a/Bio/ExPASy/__init__.py
+++ b/Bio/ExPASy/__init__.py
@@ -13,8 +13,6 @@
- get_prosite_entry Interface to the get-prosite-entry CGI script.
- get_prosite_raw Interface to the get-prosite-raw CGI script.
- get_sprot_raw Interface to the get-sprot-raw CGI script.
- - sprot_search_ful Interface to the sprot-search-ful CGI script.
- - sprot_search_de Interface to the sprot-search-de CGI script.
"""
@@ -113,35 +111,3 @@
"""
url = "http://www.uniprot.org/uniprot/%s.txt" % id
return _binary_to_string_handle(_urlopen(url))
-
-
-def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,
- cgi='https://www.expasy.org/cgi-bin/sprot-search-ful'):
- """Search SwissProt by full text (BROKEN)."""
- variables = {'SEARCH': text}
- if make_wild:
- variables['makeWild'] = 'on'
- if swissprot:
- variables['S'] = 'on'
- if trembl:
- variables['T'] = 'on'
- options = _urlencode(variables)
- fullcgi = "%s?%s" % (cgi, options)
- handle = _binary_to_string_handle(_urlopen(fullcgi))
- return handle
-
-
-def sprot_search_de(text, swissprot=1, trembl=None,
- cgi='https://www.expasy.org/cgi-bin/sprot-search-de'):
- """Search SwissProt (BROKEN).
-
- Search by name, description, gene name, species, or organelle.
- """
- variables = {'SEARCH': text}
- if swissprot:
- variables['S'] = 'on'
- if trembl:
- variables['T'] = 'on'
- options = _urlencode(variables)
- fullcgi = "%s?%s" % (cgi, options)
- return _binary_to_string_handle(_urlopen(fullcgi))
| {"golden_diff": "diff --git a/Bio/ExPASy/__init__.py b/Bio/ExPASy/__init__.py\n--- a/Bio/ExPASy/__init__.py\n+++ b/Bio/ExPASy/__init__.py\n@@ -13,8 +13,6 @@\n - get_prosite_entry Interface to the get-prosite-entry CGI script.\n - get_prosite_raw Interface to the get-prosite-raw CGI script.\n - get_sprot_raw Interface to the get-sprot-raw CGI script.\n- - sprot_search_ful Interface to the sprot-search-ful CGI script.\n- - sprot_search_de Interface to the sprot-search-de CGI script.\n \n \"\"\"\n \n@@ -113,35 +111,3 @@\n \"\"\"\n url = \"http://www.uniprot.org/uniprot/%s.txt\" % id\n return _binary_to_string_handle(_urlopen(url))\n-\n-\n-def sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,\n- cgi='https://www.expasy.org/cgi-bin/sprot-search-ful'):\n- \"\"\"Search SwissProt by full text (BROKEN).\"\"\"\n- variables = {'SEARCH': text}\n- if make_wild:\n- variables['makeWild'] = 'on'\n- if swissprot:\n- variables['S'] = 'on'\n- if trembl:\n- variables['T'] = 'on'\n- options = _urlencode(variables)\n- fullcgi = \"%s?%s\" % (cgi, options)\n- handle = _binary_to_string_handle(_urlopen(fullcgi))\n- return handle\n-\n-\n-def sprot_search_de(text, swissprot=1, trembl=None,\n- cgi='https://www.expasy.org/cgi-bin/sprot-search-de'):\n- \"\"\"Search SwissProt (BROKEN).\n-\n- Search by name, description, gene name, species, or organelle.\n- \"\"\"\n- variables = {'SEARCH': text}\n- if swissprot:\n- variables['S'] = 'on'\n- if trembl:\n- variables['T'] = 'on'\n- options = _urlencode(variables)\n- fullcgi = \"%s?%s\" % (cgi, options)\n- return _binary_to_string_handle(_urlopen(fullcgi))\n", "issue": "ExPASy.sprot_search_ful and ExPASy.sprot_search_de do not work\nBoth will return just pages stating \"Please update your links/bookmarks\" and eventually correct link at the end of page.\nThe functions use http://www.expasy.ch/cgi-bin/sprot-search-ful or http://www.expasy.ch/cgi-bin/sprot-search-de eventually, which pages do not exist anymore.\n\n", "before_files": [{"content": "# Copyright 2000 by Jeffrey Chang. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Code to access resources at ExPASy over the WWW.\n\nSee https://www.expasy.org/\n\n\nFunctions:\n - get_prodoc_entry Interface to the get-prodoc-entry CGI script.\n - get_prosite_entry Interface to the get-prosite-entry CGI script.\n - get_prosite_raw Interface to the get-prosite-raw CGI script.\n - get_sprot_raw Interface to the get-sprot-raw CGI script.\n - sprot_search_ful Interface to the sprot-search-ful CGI script.\n - sprot_search_de Interface to the sprot-search-de CGI script.\n\n\"\"\"\n\n# Importing these functions with leading underscore as not intended for reuse\nfrom Bio._py3k import urlopen as _urlopen\nfrom Bio._py3k import urlencode as _urlencode\nfrom Bio._py3k import _binary_to_string_handle\n\n\ndef get_prodoc_entry(id,\n cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prodoc-entry'):\n \"\"\"Get a text handle to a PRODOC entry at ExPASy in HTML format.\n\n >>> from Bio import ExPASy\n >>> with ExPASy.get_prodoc_entry('PDOC00001') as in_handle:\n ... html = in_handle.read()\n ...\n >>> with open(\"myprodocrecord.html\", \"w\") as out_handle:\n ... 
out_handle.write(html)\n ...\n\n For a non-existing key XXX, ExPASy returns an HTML-formatted page\n containing this text: 'There is currently no PROSITE entry for'\n \"\"\"\n return _binary_to_string_handle(_urlopen(\"%s?%s\" % (cgi, id)))\n\n\ndef get_prosite_entry(id,\n cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prosite-entry'):\n \"\"\"Get a text handle to a PROSITE entry at ExPASy in HTML format.\n\n >>> from Bio import ExPASy\n >>> with ExPASy.get_prosite_entry('PS00001') as in_handle:\n ... html = in_handle.read()\n ...\n >>> with open(\"myprositerecord.html\", \"w\") as out_handle:\n ... out_handle.write(html)\n ...\n\n For a non-existing key XXX, ExPASy returns an HTML-formatted page\n containing this text: 'There is currently no PROSITE entry for'\n \"\"\"\n return _binary_to_string_handle(_urlopen(\"%s?%s\" % (cgi, id)))\n\n\ndef get_prosite_raw(id, cgi=None):\n \"\"\"Get a text handle to a raw PROSITE or PRODOC record at ExPASy.\n\n The cgi argument is deprecated due to changes in the ExPASy\n website.\n\n For a non-existing key, ExPASy returns nothing.\n\n >>> from Bio import ExPASy\n >>> from Bio.ExPASy import Prosite\n >>> with ExPASy.get_prosite_raw('PS00001') as handle:\n ... record = Prosite.read(handle)\n ...\n >>> print(record.accession)\n PS00001\n\n\n For a non-existing key, ExPASy returns an error:\n\n >>> handle = get_prosite_raw(\"does_not_exist\")\n Traceback (most recent call last):\n ...\n urllib.error.HTTPError: HTTP Error 404: Not Found\n\n \"\"\"\n url = \"https://prosite.expasy.org/%s.txt\" % id\n return _binary_to_string_handle(_urlopen(url))\n\n\ndef get_sprot_raw(id):\n \"\"\"Get a text handle to a raw SwissProt entry at ExPASy.\n\n For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt\n (as per the https://www.expasy.org/expasy_urls.html documentation).\n\n >>> from Bio import ExPASy\n >>> from Bio import SwissProt\n >>> with ExPASy.get_sprot_raw(\"O23729\") as handle:\n ... record = SwissProt.read(handle)\n ...\n >>> print(record.entry_name)\n CHS3_BROFI\n\n For a non-existing identifier, UniProt returns an error:\n\n >>> ExPASy.get_sprot_raw(\"DOES_NOT_EXIST\")\n Traceback (most recent call last):\n ...\n urllib.error.HTTPError: HTTP Error 404: Not Found\n\n \"\"\"\n url = \"http://www.uniprot.org/uniprot/%s.txt\" % id\n return _binary_to_string_handle(_urlopen(url))\n\n\ndef sprot_search_ful(text, make_wild=None, swissprot=1, trembl=None,\n cgi='https://www.expasy.org/cgi-bin/sprot-search-ful'):\n \"\"\"Search SwissProt by full text (BROKEN).\"\"\"\n variables = {'SEARCH': text}\n if make_wild:\n variables['makeWild'] = 'on'\n if swissprot:\n variables['S'] = 'on'\n if trembl:\n variables['T'] = 'on'\n options = _urlencode(variables)\n fullcgi = \"%s?%s\" % (cgi, options)\n handle = _binary_to_string_handle(_urlopen(fullcgi))\n return handle\n\n\ndef sprot_search_de(text, swissprot=1, trembl=None,\n cgi='https://www.expasy.org/cgi-bin/sprot-search-de'):\n \"\"\"Search SwissProt (BROKEN).\n\n Search by name, description, gene name, species, or organelle.\n \"\"\"\n variables = {'SEARCH': text}\n if swissprot:\n variables['S'] = 'on'\n if trembl:\n variables['T'] = 'on'\n options = _urlencode(variables)\n fullcgi = \"%s?%s\" % (cgi, options)\n return _binary_to_string_handle(_urlopen(fullcgi))\n", "path": "Bio/ExPASy/__init__.py"}], "after_files": [{"content": "# Copyright 2000 by Jeffrey Chang. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. 
Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"Code to access resources at ExPASy over the WWW.\n\nSee https://www.expasy.org/\n\n\nFunctions:\n - get_prodoc_entry Interface to the get-prodoc-entry CGI script.\n - get_prosite_entry Interface to the get-prosite-entry CGI script.\n - get_prosite_raw Interface to the get-prosite-raw CGI script.\n - get_sprot_raw Interface to the get-sprot-raw CGI script.\n\n\"\"\"\n\n# Importing these functions with leading underscore as not intended for reuse\nfrom Bio._py3k import urlopen as _urlopen\nfrom Bio._py3k import urlencode as _urlencode\nfrom Bio._py3k import _binary_to_string_handle\n\n\ndef get_prodoc_entry(id,\n cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prodoc-entry'):\n \"\"\"Get a text handle to a PRODOC entry at ExPASy in HTML format.\n\n >>> from Bio import ExPASy\n >>> with ExPASy.get_prodoc_entry('PDOC00001') as in_handle:\n ... html = in_handle.read()\n ...\n >>> with open(\"myprodocrecord.html\", \"w\") as out_handle:\n ... out_handle.write(html)\n ...\n\n For a non-existing key XXX, ExPASy returns an HTML-formatted page\n containing this text: 'There is currently no PROSITE entry for'\n \"\"\"\n return _binary_to_string_handle(_urlopen(\"%s?%s\" % (cgi, id)))\n\n\ndef get_prosite_entry(id,\n cgi='https://prosite.expasy.org/cgi-bin/prosite/get-prosite-entry'):\n \"\"\"Get a text handle to a PROSITE entry at ExPASy in HTML format.\n\n >>> from Bio import ExPASy\n >>> with ExPASy.get_prosite_entry('PS00001') as in_handle:\n ... html = in_handle.read()\n ...\n >>> with open(\"myprositerecord.html\", \"w\") as out_handle:\n ... out_handle.write(html)\n ...\n\n For a non-existing key XXX, ExPASy returns an HTML-formatted page\n containing this text: 'There is currently no PROSITE entry for'\n \"\"\"\n return _binary_to_string_handle(_urlopen(\"%s?%s\" % (cgi, id)))\n\n\ndef get_prosite_raw(id, cgi=None):\n \"\"\"Get a text handle to a raw PROSITE or PRODOC record at ExPASy.\n\n The cgi argument is deprecated due to changes in the ExPASy\n website.\n\n For a non-existing key, ExPASy returns nothing.\n\n >>> from Bio import ExPASy\n >>> from Bio.ExPASy import Prosite\n >>> with ExPASy.get_prosite_raw('PS00001') as handle:\n ... record = Prosite.read(handle)\n ...\n >>> print(record.accession)\n PS00001\n\n\n For a non-existing key, ExPASy returns an error:\n\n >>> handle = get_prosite_raw(\"does_not_exist\")\n Traceback (most recent call last):\n ...\n urllib.error.HTTPError: HTTP Error 404: Not Found\n\n \"\"\"\n url = \"https://prosite.expasy.org/%s.txt\" % id\n return _binary_to_string_handle(_urlopen(url))\n\n\ndef get_sprot_raw(id):\n \"\"\"Get a text handle to a raw SwissProt entry at ExPASy.\n\n For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt\n (as per the https://www.expasy.org/expasy_urls.html documentation).\n\n >>> from Bio import ExPASy\n >>> from Bio import SwissProt\n >>> with ExPASy.get_sprot_raw(\"O23729\") as handle:\n ... record = SwissProt.read(handle)\n ...\n >>> print(record.entry_name)\n CHS3_BROFI\n\n For a non-existing identifier, UniProt returns an error:\n\n >>> ExPASy.get_sprot_raw(\"DOES_NOT_EXIST\")\n Traceback (most recent call last):\n ...\n urllib.error.HTTPError: HTTP Error 404: Not Found\n\n \"\"\"\n url = \"http://www.uniprot.org/uniprot/%s.txt\" % id\n return _binary_to_string_handle(_urlopen(url))\n", "path": "Bio/ExPASy/__init__.py"}]} | 2,040 | 524 |
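The fix above only removes the two search helpers whose ExPASy CGI endpoints are gone; fetching individual records is unaffected. A short usage sketch of the surviving `get_sprot_raw`, based on the doctest shown in the record (the accession `O23729` is the doctest's own example):

```python
from Bio import ExPASy
from Bio import SwissProt

# Fetch one SwissProt entry by accession and parse it with Bio.SwissProt.
with ExPASy.get_sprot_raw("O23729") as handle:
    record = SwissProt.read(handle)

print(record.entry_name)  # CHS3_BROFI, per the doctest above
```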
gh_patches_debug_21870 | rasdani/github-patches | git_diff | scrapy__scrapy-4298 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SCRAPY_CHECK is not set while running contract
### Description
Hi, it seems that #3739 is not doing what the [documentation describes](https://docs.scrapy.org/en/latest/topics/contracts.html#detecting-check-runs):
`os.environ.get('SCRAPY_CHECK')` is returning `None` in my contract check.
### Steps to Reproduce
1. Create a project from scratch
2. Add a random spider
3. Contract code is as follows
```
def parse(self, response):
"""
@url http://www.amazon.com/s?field-keywords=selfish+gene
@returns requests 1 1
"""
print("test", os.environ.get('SCRAPY_CHECK'))
if os.environ.get('SCRAPY_CHECK'):
yield scrapy.Request(url="next_url")
```
**Expected behavior:** Request should be yielded as per the documentation
**Actual behavior:** Nothing happens
**Reproduces how often:** In my local project and with a fresh project
### Versions
Windows
```
(globenv) C:\Users\johnl>scrapy version --verbose
Scrapy : 1.8.0
lxml : 4.4.1.0
libxml2 : 2.9.5
cssselect : 1.1.0
parsel : 1.5.2
w3lib : 1.21.0
Twisted : 19.10.0
Python : 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)]
pyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)
cryptography : 2.7
Platform : Windows-10-10.0.18362-SP0
```
Linux
```
scrapy version --verbose
Scrapy : 1.8.0
lxml : 4.4.1.0
libxml2 : 2.9.9
cssselect : 1.1.0
parsel : 1.5.2
w3lib : 1.21.0
Twisted : 19.7.0
Python : 3.6.8 (default, Oct 7 2019, 12:59:55) - [GCC 8.3.0]
pyOpenSSL : 19.0.0 (OpenSSL 1.1.1d 10 Sep 2019)
cryptography : 2.8
Platform : Linux-4.4.0-18362-Microsoft-x86_64-with-Ubuntu-18.04-bionic
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/commands/check.py`
Content:
```
1 import time
2 from collections import defaultdict
3 from unittest import TextTestRunner, TextTestResult as _TextTestResult
4
5 from scrapy.commands import ScrapyCommand
6 from scrapy.contracts import ContractsManager
7 from scrapy.utils.misc import load_object, set_environ
8 from scrapy.utils.conf import build_component_list
9
10
11 class TextTestResult(_TextTestResult):
12 def printSummary(self, start, stop):
13 write = self.stream.write
14 writeln = self.stream.writeln
15
16 run = self.testsRun
17 plural = "s" if run != 1 else ""
18
19 writeln(self.separator2)
20 writeln("Ran %d contract%s in %.3fs" % (run, plural, stop - start))
21 writeln()
22
23 infos = []
24 if not self.wasSuccessful():
25 write("FAILED")
26 failed, errored = map(len, (self.failures, self.errors))
27 if failed:
28 infos.append("failures=%d" % failed)
29 if errored:
30 infos.append("errors=%d" % errored)
31 else:
32 write("OK")
33
34 if infos:
35 writeln(" (%s)" % (", ".join(infos),))
36 else:
37 write("\n")
38
39
40 class Command(ScrapyCommand):
41 requires_project = True
42 default_settings = {'LOG_ENABLED': False}
43
44 def syntax(self):
45 return "[options] <spider>"
46
47 def short_desc(self):
48 return "Check spider contracts"
49
50 def add_options(self, parser):
51 ScrapyCommand.add_options(self, parser)
52 parser.add_option("-l", "--list", dest="list", action="store_true",
53 help="only list contracts, without checking them")
54 parser.add_option("-v", "--verbose", dest="verbose", default=False, action='store_true',
55 help="print contract tests for all spiders")
56
57 def run(self, args, opts):
58 # load contracts
59 contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))
60 conman = ContractsManager(load_object(c) for c in contracts)
61 runner = TextTestRunner(verbosity=2 if opts.verbose else 1)
62 result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)
63
64 # contract requests
65 contract_reqs = defaultdict(list)
66
67 spider_loader = self.crawler_process.spider_loader
68
69 with set_environ(SCRAPY_CHECK='true'):
70 for spidername in args or spider_loader.list():
71 spidercls = spider_loader.load(spidername)
72 spidercls.start_requests = lambda s: conman.from_spider(s, result)
73
74 tested_methods = conman.tested_methods_from_spidercls(spidercls)
75 if opts.list:
76 for method in tested_methods:
77 contract_reqs[spidercls.name].append(method)
78 elif tested_methods:
79 self.crawler_process.crawl(spidercls)
80
81 # start checks
82 if opts.list:
83 for spider, methods in sorted(contract_reqs.items()):
84 if not methods and not opts.verbose:
85 continue
86 print(spider)
87 for method in sorted(methods):
88 print(' * %s' % method)
89 else:
90 start = time.time()
91 self.crawler_process.start()
92 stop = time.time()
93
94 result.printErrors()
95 result.printSummary(start, stop)
96 self.exitcode = int(not result.wasSuccessful())
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py
--- a/scrapy/commands/check.py
+++ b/scrapy/commands/check.py
@@ -78,19 +78,19 @@
elif tested_methods:
self.crawler_process.crawl(spidercls)
- # start checks
- if opts.list:
- for spider, methods in sorted(contract_reqs.items()):
- if not methods and not opts.verbose:
- continue
- print(spider)
- for method in sorted(methods):
- print(' * %s' % method)
- else:
- start = time.time()
- self.crawler_process.start()
- stop = time.time()
-
- result.printErrors()
- result.printSummary(start, stop)
- self.exitcode = int(not result.wasSuccessful())
+ # start checks
+ if opts.list:
+ for spider, methods in sorted(contract_reqs.items()):
+ if not methods and not opts.verbose:
+ continue
+ print(spider)
+ for method in sorted(methods):
+ print(' * %s' % method)
+ else:
+ start = time.time()
+ self.crawler_process.start()
+ stop = time.time()
+
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.exitcode = int(not result.wasSuccessful())
| {"golden_diff": "diff --git a/scrapy/commands/check.py b/scrapy/commands/check.py\n--- a/scrapy/commands/check.py\n+++ b/scrapy/commands/check.py\n@@ -78,19 +78,19 @@\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n \n- # start checks\n- if opts.list:\n- for spider, methods in sorted(contract_reqs.items()):\n- if not methods and not opts.verbose:\n- continue\n- print(spider)\n- for method in sorted(methods):\n- print(' * %s' % method)\n- else:\n- start = time.time()\n- self.crawler_process.start()\n- stop = time.time()\n-\n- result.printErrors()\n- result.printSummary(start, stop)\n- self.exitcode = int(not result.wasSuccessful())\n+ # start checks\n+ if opts.list:\n+ for spider, methods in sorted(contract_reqs.items()):\n+ if not methods and not opts.verbose:\n+ continue\n+ print(spider)\n+ for method in sorted(methods):\n+ print(' * %s' % method)\n+ else:\n+ start = time.time()\n+ self.crawler_process.start()\n+ stop = time.time()\n+\n+ result.printErrors()\n+ result.printSummary(start, stop)\n+ self.exitcode = int(not result.wasSuccessful())\n", "issue": "SCRAPY_CHECK is not set while running contract\n### Description\r\n\r\nHi, it seems that #3739 is not doing what the [documentation describe](https://docs.scrapy.org/en/latest/topics/contracts.html#detecting-check-runs):\r\n\r\n`os.environ.get('SCRAPY_CHECK')` is returning `None` in my contract check.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a project from scratch\r\n2. Add a random spider\r\n3. Contract code is as follow\r\n```\r\n def parse(self, response):\r\n \"\"\"\r\n @url http://www.amazon.com/s?field-keywords=selfish+gene\r\n @returns requests 1 1\r\n \"\"\"\r\n print(\"test\", os.environ.get('SCRAPY_CHECK'))\r\n if os.environ.get('SCRAPY_CHECK'):\r\n yield scrapy.Request(url=\"next_url\")\r\n```\r\n\r\n**Expected behavior:** Request should be yielded as per the documentation\r\n\r\n**Actual behavior:** Nothing happen\r\n\r\n**Reproduces how often:** In my local project and with fresh project\r\n\r\n### Versions\r\n\r\nWindows\r\n```\r\n(globenv) C:\\Users\\johnl>scrapy version --verbose\r\nScrapy : 1.8.0\r\nlxml : 4.4.1.0\r\nlibxml2 : 2.9.5\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.21.0\r\nTwisted : 19.10.0\r\nPython : 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)\r\ncryptography : 2.7\r\nPlatform : Windows-10-10.0.18362-SP0\r\n```\r\n\r\nLinux\r\n```\r\nscrapy version --verbose\r\nScrapy : 1.8.0\r\nlxml : 4.4.1.0\r\nlibxml2 : 2.9.9\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.21.0\r\nTwisted : 19.7.0\r\nPython : 3.6.8 (default, Oct 7 2019, 12:59:55) - [GCC 8.3.0]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1d 10 Sep 2019)\r\ncryptography : 2.8\r\nPlatform : Linux-4.4.0-18362-Microsoft-x86_64-with-Ubuntu-18.04-bionic\r\n```\r\n\n", "before_files": [{"content": "import time\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object, set_environ\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if 
not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n with set_environ(SCRAPY_CHECK='true'):\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n self.exitcode = int(not result.wasSuccessful())\n", "path": "scrapy/commands/check.py"}], "after_files": [{"content": "import time\nfrom collections import defaultdict\nfrom unittest import TextTestRunner, TextTestResult as _TextTestResult\n\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.contracts import ContractsManager\nfrom scrapy.utils.misc import load_object, set_environ\nfrom scrapy.utils.conf import build_component_list\n\n\nclass TextTestResult(_TextTestResult):\n def printSummary(self, start, stop):\n write = self.stream.write\n writeln = self.stream.writeln\n\n run = self.testsRun\n plural = \"s\" if run != 1 else \"\"\n\n writeln(self.separator2)\n writeln(\"Ran %d contract%s in %.3fs\" % (run, plural, stop - start))\n writeln()\n\n infos = []\n if not self.wasSuccessful():\n write(\"FAILED\")\n failed, errored = map(len, (self.failures, self.errors))\n if failed:\n infos.append(\"failures=%d\" % failed)\n if errored:\n infos.append(\"errors=%d\" % errored)\n else:\n write(\"OK\")\n\n if infos:\n writeln(\" (%s)\" % (\", \".join(infos),))\n else:\n write(\"\\n\")\n\n\nclass Command(ScrapyCommand):\n requires_project = True\n default_settings = {'LOG_ENABLED': False}\n\n def syntax(self):\n return \"[options] <spider>\"\n\n def short_desc(self):\n return \"Check spider contracts\"\n\n def add_options(self, parser):\n ScrapyCommand.add_options(self, parser)\n 
parser.add_option(\"-l\", \"--list\", dest=\"list\", action=\"store_true\",\n help=\"only list contracts, without checking them\")\n parser.add_option(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action='store_true',\n help=\"print contract tests for all spiders\")\n\n def run(self, args, opts):\n # load contracts\n contracts = build_component_list(self.settings.getwithbase('SPIDER_CONTRACTS'))\n conman = ContractsManager(load_object(c) for c in contracts)\n runner = TextTestRunner(verbosity=2 if opts.verbose else 1)\n result = TextTestResult(runner.stream, runner.descriptions, runner.verbosity)\n\n # contract requests\n contract_reqs = defaultdict(list)\n\n spider_loader = self.crawler_process.spider_loader\n\n with set_environ(SCRAPY_CHECK='true'):\n for spidername in args or spider_loader.list():\n spidercls = spider_loader.load(spidername)\n spidercls.start_requests = lambda s: conman.from_spider(s, result)\n\n tested_methods = conman.tested_methods_from_spidercls(spidercls)\n if opts.list:\n for method in tested_methods:\n contract_reqs[spidercls.name].append(method)\n elif tested_methods:\n self.crawler_process.crawl(spidercls)\n\n # start checks\n if opts.list:\n for spider, methods in sorted(contract_reqs.items()):\n if not methods and not opts.verbose:\n continue\n print(spider)\n for method in sorted(methods):\n print(' * %s' % method)\n else:\n start = time.time()\n self.crawler_process.start()\n stop = time.time()\n\n result.printErrors()\n result.printSummary(start, stop)\n self.exitcode = int(not result.wasSuccessful())\n", "path": "scrapy/commands/check.py"}]} | 1,832 | 309 |
gh_patches_debug_27118 | rasdani/github-patches | git_diff | google__flax-1570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PReLU activation implementation
I wanted to gauge interest on adding a PReLU activation. I noticed that `flax.linen.activations` simply aliases the `jax.nn` activation functions, and `jax.nn` also doesn't have a PReLU implementation.
To add some background, PReLU is simply Leaky ReLU where the alpha (slope) parameter is trainable and not fixed. This makes it simple to implement as a Module if desired.
Here's an example implementation from another [project](https://github.com/isaaccorley/jax-enhance) of mine.
```python
from functools import partial
from typing import Any, Sequence
import jax.numpy as jnp
import flax.linen as nn
# This is nearly identical to jnp.ones however multiplies the output of jnp.ones by the constant value
def constant(key, shape: Sequence[int], value: Any, dtype: Any = jnp.float32) -> jnp.ndarray:
value = jnp.asarray(value, dtype)
return jnp.ones(shape, dtype) * value
class PReLU(nn.Module):
negative_slope_init: float = 0.01
dtype: Any = jnp.float32
@nn.compact
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
x = jnp.asarray(x, self.dtype)
negative_slope = self.param(
"negative_slope",
partial(constant, value=self.negative_slope_init, dtype=self.dtype),
(1,)
)
return jnp.where(x >= 0, x, negative_slope * x)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/linen/__init__.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import
19 # re-export commonly used modules and functions
20 from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
21 log_softmax, relu, sigmoid, soft_sign, softmax,
22 softplus, swish, silu, tanh)
23 from .attention import (MultiHeadDotProductAttention, SelfAttention,
24 dot_product_attention, make_attention_mask,
25 make_causal_mask, combine_masks)
26 from ..core import broadcast, DenyList, FrozenDict
27 from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
28 from .module import (Module, compact, nowrap, enable_named_call,
29 disable_named_call, override_named_call, Variable, init,
30 init_with_output, apply, merge_param)
31 from .normalization import BatchNorm, GroupNorm, LayerNorm
32 from .pooling import avg_pool, max_pool
33 from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
34 from .stochastic import Dropout
35 from .transforms import jit, named_call, checkpoint, remat, remat_scan, scan, vmap, map_variables, vjp, jvp
36 from .initializers import zeros, ones
37
38 # pylint: enable=g-multiple-import
39
```
Path: `flax/linen/activation.py`
Content:
```
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Activation functions.
16 """
17
18 # pylint: disable=unused-import
19 # re-export activation functions from jax.nn
20 from jax.nn import celu
21 from jax.nn import elu
22 from jax.nn import gelu
23 from jax.nn import glu
24 from jax.nn import leaky_relu
25 from jax.nn import log_sigmoid
26 from jax.nn import log_softmax
27 from jax.nn import normalize
28 from jax.nn import relu
29 from jax.nn import sigmoid
30 from jax.nn import soft_sign
31 from jax.nn import softmax
32 from jax.nn import softplus
33 from jax.nn import swish
34 from jax.nn import silu
35 from jax.nn import selu
36 from jax.nn import hard_tanh
37 from jax.nn import relu6
38 from jax.nn import hard_sigmoid
39 from jax.nn import hard_swish
40
41 from jax.numpy import tanh
42 # pylint: enable=unused-import
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -19,7 +19,7 @@
# re-export commonly used modules and functions
from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
log_softmax, relu, sigmoid, soft_sign, softmax,
- softplus, swish, silu, tanh)
+ softplus, swish, silu, tanh, PReLU)
from .attention import (MultiHeadDotProductAttention, SelfAttention,
dot_product_attention, make_attention_mask,
make_causal_mask, combine_masks)
diff --git a/flax/linen/activation.py b/flax/linen/activation.py
--- a/flax/linen/activation.py
+++ b/flax/linen/activation.py
@@ -40,3 +40,35 @@
from jax.numpy import tanh
# pylint: enable=unused-import
+
+from typing import Any
+
+from flax.linen.module import Module, compact
+import jax.numpy as jnp
+
+
+Array = Any
+
+
+class PReLU(Module):
+ """Parametric Rectified Linear Unit (PReLU) activation function.
+
+ Attributes:
+ negative_slope_init: the value to initialize the negative slope.
+ """
+ negative_slope_init: float = 0.01
+ @compact
+ def __call__(self, inputs: Array) -> Array:
+ """Applies an activation to the inputs.
+
+ Args:
+ inputs: the nd-array to apply the activation function to.
+
+ Returns:
+ The transformed input.
+ """
+ negative_slope = self.param(
+ 'negative_slope',
+ lambda k: jnp.asarray(self.negative_slope_init, jnp.float32)
+ )
+ return jnp.where(inputs >= 0, inputs, jnp.asarray(negative_slope, inputs.dtype) * inputs)
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -19,7 +19,7 @@\n # re-export commonly used modules and functions\n from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n- softplus, swish, silu, tanh)\n+ softplus, swish, silu, tanh, PReLU)\n from .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\ndiff --git a/flax/linen/activation.py b/flax/linen/activation.py\n--- a/flax/linen/activation.py\n+++ b/flax/linen/activation.py\n@@ -40,3 +40,35 @@\n \n from jax.numpy import tanh\n # pylint: enable=unused-import\n+\n+from typing import Any\n+\n+from flax.linen.module import Module, compact\n+import jax.numpy as jnp\n+\n+\n+Array = Any\n+\n+\n+class PReLU(Module):\n+ \"\"\"Parametric Rectified Linear Unit (PReLU) activation function.\n+\n+ Attributes:\n+ negative_slope_init: the value to initialize the negative slope.\n+ \"\"\"\n+ negative_slope_init: float = 0.01\n+ @compact\n+ def __call__(self, inputs: Array) -> Array:\n+ \"\"\"Applies an activation to the inputs.\n+\n+ Args:\n+ inputs: the nd-array to apply the activation function to.\n+\n+ Returns:\n+ The transformed input.\n+ \"\"\"\n+ negative_slope = self.param(\n+ 'negative_slope',\n+ lambda k: jnp.asarray(self.negative_slope_init, jnp.float32)\n+ )\n+ return jnp.where(inputs >= 0, inputs, jnp.asarray(negative_slope, inputs.dtype) * inputs)\n", "issue": "PReLU activation implementation\nI wanted to gauge interest on adding a PReLU activation. I noticed that `flax.linen.activations` are simply aliasing `jax.nn` activation functions which also doesn't have a PReLU implementation.\r\n\r\nTo add some background, PReLU is simply Leaky ReLU where the alpha (slope) parameter is trainable and not fixed. This makes it simple to implement as a Module if desired.\r\n\r\nHere's an example implementation from another [project](https://github.com/isaaccorley/jax-enhance) of mine. 
\r\n\r\n```python\r\nfrom functools import partial\r\nfrom typing import Any, Sequence\r\n\r\nimport jax.numpy as jnp\r\nimport flax.linen as nn\r\n\r\n\r\n# This is nearly identical to jnp.ones however multiplies the output of jnp.ones by the constant value\r\ndef constant(key, shape: Sequence[int], value: Any, dtype: Any = jnp.float32) -> jnp.ndarray:\r\n value = jnp.asarray(value, dtype)\r\n return jnp.ones(shape, dtype) * value\r\n\r\n\r\nclass PReLU(nn.Module):\r\n negative_slope_init: float = 0.01\r\n dtype: Any = jnp.float32\r\n\r\n @nn.compact\r\n def __call__(self, x: jnp.ndarray) -> jnp.ndarray:\r\n x = jnp.asarray(x, self.dtype)\r\n negative_slope = self.param(\r\n \"negative_slope\",\r\n partial(constant, value=self.negative_slope_init, dtype=self.dtype),\r\n (1,)\r\n )\r\n return jnp.where(x >= 0, x, negative_slope * x)\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList, FrozenDict\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import (Module, compact, nowrap, enable_named_call,\n disable_named_call, override_named_call, Variable, init,\n init_with_output, apply, merge_param)\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, checkpoint, remat, remat_scan, scan, vmap, map_variables, vjp, jvp\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}, {"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Activation functions.\n\"\"\"\n\n# pylint: disable=unused-import\n# re-export activation functions from jax.nn\nfrom jax.nn import celu\nfrom jax.nn import elu\nfrom jax.nn import gelu\nfrom jax.nn import glu\nfrom jax.nn import leaky_relu\nfrom jax.nn import log_sigmoid\nfrom jax.nn import log_softmax\nfrom 
jax.nn import normalize\nfrom jax.nn import relu\nfrom jax.nn import sigmoid\nfrom jax.nn import soft_sign\nfrom jax.nn import softmax\nfrom jax.nn import softplus\nfrom jax.nn import swish\nfrom jax.nn import silu\nfrom jax.nn import selu\nfrom jax.nn import hard_tanh\nfrom jax.nn import relu6\nfrom jax.nn import hard_sigmoid\nfrom jax.nn import hard_swish\n\nfrom jax.numpy import tanh\n# pylint: enable=unused-import\n", "path": "flax/linen/activation.py"}], "after_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh, PReLU)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList, FrozenDict\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import (Module, compact, nowrap, enable_named_call,\n disable_named_call, override_named_call, Variable, init,\n init_with_output, apply, merge_param)\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, checkpoint, remat, remat_scan, scan, vmap, map_variables, vjp, jvp\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}, {"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Activation functions.\n\"\"\"\n\n# pylint: disable=unused-import\n# re-export activation functions from jax.nn\nfrom jax.nn import celu\nfrom jax.nn import elu\nfrom jax.nn import gelu\nfrom jax.nn import glu\nfrom jax.nn import leaky_relu\nfrom jax.nn import log_sigmoid\nfrom jax.nn import log_softmax\nfrom jax.nn import normalize\nfrom jax.nn import relu\nfrom jax.nn import sigmoid\nfrom jax.nn import soft_sign\nfrom jax.nn import softmax\nfrom jax.nn import softplus\nfrom jax.nn import swish\nfrom jax.nn import silu\nfrom jax.nn import selu\nfrom jax.nn import hard_tanh\nfrom jax.nn import relu6\nfrom jax.nn import hard_sigmoid\nfrom jax.nn import hard_swish\n\nfrom jax.numpy import 
tanh\n# pylint: enable=unused-import\n\nfrom typing import Any\n\nfrom flax.linen.module import Module, compact\nimport jax.numpy as jnp\n\n\nArray = Any\n\n\nclass PReLU(Module):\n \"\"\"Parametric Rectified Linear Unit (PReLU) activation function.\n\n Attributes:\n negative_slope_init: the value to initialize the negative slope.\n \"\"\"\n negative_slope_init: float = 0.01\n @compact\n def __call__(self, inputs: Array) -> Array:\n \"\"\"Applies an activation to the inputs.\n\n Args:\n inputs: the nd-array to apply the activation function to.\n\n Returns:\n The transformed input.\n \"\"\"\n negative_slope = self.param(\n 'negative_slope',\n lambda k: jnp.asarray(self.negative_slope_init, jnp.float32)\n )\n return jnp.where(inputs >= 0, inputs, jnp.asarray(negative_slope, inputs.dtype) * inputs)\n", "path": "flax/linen/activation.py"}]} | 1,513 | 466 |
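A minimal sketch of exercising a PReLU module of the kind added by the patch above. The module body is repeated so the snippet stands alone; the input values, PRNG seed, and 0.05 slope are illustrative assumptions rather than part of the issue or the patch.

```python
import jax
import jax.numpy as jnp
from flax import linen as nn


class PReLU(nn.Module):
    negative_slope_init: float = 0.01

    @nn.compact
    def __call__(self, inputs):
        # The slope is registered as a parameter, so it is trainable.
        negative_slope = self.param(
            'negative_slope',
            lambda key: jnp.asarray(self.negative_slope_init, jnp.float32))
        return jnp.where(inputs >= 0, inputs,
                         jnp.asarray(negative_slope, inputs.dtype) * inputs)


x = jnp.array([-2.0, -0.5, 0.0, 1.5])
model = PReLU(negative_slope_init=0.05)
params = model.init(jax.random.PRNGKey(0), x)   # creates the trainable slope
y = model.apply(params, x)                      # [-0.1, -0.025, 0.0, 1.5]
```

Because the slope is created with `self.param`, it shows up in the returned parameter tree and is updated by the optimizer like any other weight.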
gh_patches_debug_12 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Shrink the map and related divs

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/version.py`
Content:
```
1 hdx_version = 'v0.4.8'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.4.8'
+hdx_version = 'v0.4.9'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.4.8'\n+hdx_version = 'v0.4.9'\n", "issue": "Shrink the map and related divs\n\n\n", "before_files": [{"content": "hdx_version = 'v0.4.8'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}], "after_files": [{"content": "hdx_version = 'v0.4.9'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 353 | 106 |
gh_patches_debug_13118 | rasdani/github-patches | git_diff | netbox-community__netbox-3858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom link in group does not render Jinja2 code
<!--
NOTE: This form is only for reproducible bugs. If you need assistance with
NetBox installation, or if you have a general question, DO NOT open an
issue. Instead, post to our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please describe the environment in which you are running NetBox. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report.
-->
### Environment
* Python version: 3.6.8
* NetBox version: 2.6.11
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of NetBox (or the current beta
release where applicable). Begin with the creation of any necessary
database objects and call out every operation being performed explicitly.
If reporting a bug in the REST API, be sure to reconstruct the raw HTTP
request(s) being made: Don't rely on a wrapper like pynetbox.
-->
### Steps to Reproduce
1. Create a custom link and add it to a group. Make the name use Jinja style code - example {% if True %} Custom Link 1{% endif %}
2. Open a device and look at the custom link
<!-- What did you expect to happen? -->
### Expected Behavior
Expected the link to use the Jinja2 code and show "Custom Link 1" as name.
<!-- What happened instead? -->
### Observed Behavior
The Jinja code is shown as the name. So the name is "{% if True %} Custom Link 1{% endif %}" instead of "Custom Link 1". The link itself is also not rendering Jinja code.
This seems to come from #3461 and is due to `text_rendered` and `link_rendered` not being used in the format statement at https://github.com/netbox-community/netbox/blob/b5455ed882c47273064933f0120b0e368af93a1f/netbox/extras/templatetags/custom_links.py#L72
I did a quick check and changed
```try:
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
links_rendered.append(
GROUP_LINK.format(cl.url, link_target, cl.text)
)
```
to
```python
try:
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
link_rendered = render_jinja2(cl.url, context)
links_rendered.append(
GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
```
This made it work as expected again.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/extras/templatetags/custom_links.py`
Content:
```
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.utils.safestring import mark_safe
6
7 from extras.models import CustomLink
8 from utilities.utils import render_jinja2
9
10
11 register = template.Library()
12
13 LINK_BUTTON = '<a href="{}"{} class="btn btn-sm btn-{}">{}</a>\n'
14 GROUP_BUTTON = '<div class="btn-group">\n' \
15 '<button type="button" class="btn btn-sm btn-{} dropdown-toggle" data-toggle="dropdown">\n' \
16 '{} <span class="caret"></span>\n' \
17 '</button>\n' \
18 '<ul class="dropdown-menu pull-right">\n' \
19 '{}</ul></div>'
20 GROUP_LINK = '<li><a href="{}"{}>{}</a></li>\n'
21
22
23 @register.simple_tag()
24 def custom_links(obj):
25 """
26 Render all applicable links for the given object.
27 """
28 content_type = ContentType.objects.get_for_model(obj)
29 custom_links = CustomLink.objects.filter(content_type=content_type)
30 if not custom_links:
31 return ''
32
33 context = {
34 'obj': obj,
35 }
36 template_code = ''
37 group_names = OrderedDict()
38
39 for cl in custom_links:
40
41 # Organize custom links by group
42 if cl.group_name and cl.group_name in group_names:
43 group_names[cl.group_name].append(cl)
44 elif cl.group_name:
45 group_names[cl.group_name] = [cl]
46
47 # Add non-grouped links
48 else:
49 try:
50 text_rendered = render_jinja2(cl.text, context)
51 if text_rendered:
52 link_rendered = render_jinja2(cl.url, context)
53 link_target = ' target="_blank"' if cl.new_window else ''
54 template_code += LINK_BUTTON.format(
55 link_rendered, link_target, cl.button_class, text_rendered
56 )
57 except Exception as e:
58 template_code += '<a class="btn btn-sm btn-default" disabled="disabled" title="{}">' \
59 '<i class="fa fa-warning"></i> {}</a>\n'.format(e, cl.name)
60
61 # Add grouped links to template
62 for group, links in group_names.items():
63
64 links_rendered = []
65
66 for cl in links:
67 try:
68 text_rendered = render_jinja2(cl.text, context)
69 if text_rendered:
70 link_target = ' target="_blank"' if cl.new_window else ''
71 links_rendered.append(
72 GROUP_LINK.format(cl.url, link_target, cl.text)
73 )
74 except Exception as e:
75 links_rendered.append(
76 '<li><a disabled="disabled" title="{}"><span class="text-muted">'
77 '<i class="fa fa-warning"></i> {}</span></a></li>'.format(e, cl.name)
78 )
79
80 if links_rendered:
81 template_code += GROUP_BUTTON.format(
82 links[0].button_class, group, ''.join(links_rendered)
83 )
84
85 return mark_safe(template_code)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/extras/templatetags/custom_links.py b/netbox/extras/templatetags/custom_links.py
--- a/netbox/extras/templatetags/custom_links.py
+++ b/netbox/extras/templatetags/custom_links.py
@@ -68,8 +68,9 @@
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
+ link_rendered = render_jinja2(cl.url, context)
links_rendered.append(
- GROUP_LINK.format(cl.url, link_target, cl.text)
+ GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
except Exception as e:
links_rendered.append(
| {"golden_diff": "diff --git a/netbox/extras/templatetags/custom_links.py b/netbox/extras/templatetags/custom_links.py\n--- a/netbox/extras/templatetags/custom_links.py\n+++ b/netbox/extras/templatetags/custom_links.py\n@@ -68,8 +68,9 @@\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n+ link_rendered = render_jinja2(cl.url, context)\n links_rendered.append(\n- GROUP_LINK.format(cl.url, link_target, cl.text)\n+ GROUP_LINK.format(link_rendered, link_target, text_rendered)\n )\n except Exception as e:\n links_rendered.append(\n", "issue": "Custom link in group does not render Jinja2 code\n<!--\r\n NOTE: This form is only for reproducible bugs. If you need assistance with\r\n NetBox installation, or if you have a general question, DO NOT open an\r\n issue. Instead, post to our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please describe the environment in which you are running NetBox. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report.\r\n-->\r\n### Environment\r\n* Python version: 3.6.8\r\n* NetBox version: 2.6.11\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of NetBox (or the current beta\r\n release where applicable). Begin with the creation of any necessary\r\n database objects and call out every operation being performed explicitly.\r\n If reporting a bug in the REST API, be sure to reconstruct the raw HTTP\r\n request(s) being made: Don't rely on a wrapper like pynetbox.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a custom link and add it to a group. Make the name use Jinja style code - example {% if True %} Custom Link 1{% endif %}\r\n2. Open a device and look at the custom link\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nExpected the link to use the Jinja2 code and show \"Custom Link 1\" as name.\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nThe Jinja code is shown as the name. So the name is \"{% if True %} Custom Link 1{% endif %}\" instead of \"Custom Link 1\". 
The link itself is also not rendering Jinja code.\r\n\r\nThis seems to come from #3461 and due to the `text_rendered` and `link_rendered` not being used in the format statement on https://github.com/netbox-community/netbox/blob/b5455ed882c47273064933f0120b0e368af93a1f/netbox/extras/templatetags/custom_links.py#L72\r\n\r\nI did a quick check and changed \r\n```try:\r\n text_rendered = render_jinja2(cl.text, context)\r\n if text_rendered:\r\n link_target = ' target=\"_blank\"' if cl.new_window else ''\r\n links_rendered.append(\r\n GROUP_LINK.format(cl.url, link_target, cl.text)\r\n )\r\n```\r\nto\r\n\r\n```try:\r\n text_rendered = render_jinja2(cl.text, context)\r\n if text_rendered:\r\n link_target = ' target=\"_blank\"' if cl.new_window else ''\r\n link_rendered = render_jinja2(cl.url, context)\r\n links_rendered.append(\r\n GROUP_LINK.format(link_rendered, link_target, text_rendered)\r\n )\r\n```\r\n\r\nThis made it work as expected again.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\n\nfrom extras.models import CustomLink\nfrom utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nLINK_BUTTON = '<a href=\"{}\"{} class=\"btn btn-sm btn-{}\">{}</a>\\n'\nGROUP_BUTTON = '<div class=\"btn-group\">\\n' \\\n '<button type=\"button\" class=\"btn btn-sm btn-{} dropdown-toggle\" data-toggle=\"dropdown\">\\n' \\\n '{} <span class=\"caret\"></span>\\n' \\\n '</button>\\n' \\\n '<ul class=\"dropdown-menu pull-right\">\\n' \\\n '{}</ul></div>'\nGROUP_LINK = '<li><a href=\"{}\"{}>{}</a></li>\\n'\n\n\[email protected]_tag()\ndef custom_links(obj):\n \"\"\"\n Render all applicable links for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n custom_links = CustomLink.objects.filter(content_type=content_type)\n if not custom_links:\n return ''\n\n context = {\n 'obj': obj,\n }\n template_code = ''\n group_names = OrderedDict()\n\n for cl in custom_links:\n\n # Organize custom links by group\n if cl.group_name and cl.group_name in group_names:\n group_names[cl.group_name].append(cl)\n elif cl.group_name:\n group_names[cl.group_name] = [cl]\n\n # Add non-grouped links\n else:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_rendered = render_jinja2(cl.url, context)\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n template_code += LINK_BUTTON.format(\n link_rendered, link_target, cl.button_class, text_rendered\n )\n except Exception as e:\n template_code += '<a class=\"btn btn-sm btn-default\" disabled=\"disabled\" title=\"{}\">' \\\n '<i class=\"fa fa-warning\"></i> {}</a>\\n'.format(e, cl.name)\n\n # Add grouped links to template\n for group, links in group_names.items():\n\n links_rendered = []\n\n for cl in links:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n links_rendered.append(\n GROUP_LINK.format(cl.url, link_target, cl.text)\n )\n except Exception as e:\n links_rendered.append(\n '<li><a disabled=\"disabled\" title=\"{}\"><span class=\"text-muted\">'\n '<i class=\"fa fa-warning\"></i> {}</span></a></li>'.format(e, cl.name)\n )\n\n if links_rendered:\n template_code += GROUP_BUTTON.format(\n links[0].button_class, group, ''.join(links_rendered)\n )\n\n return mark_safe(template_code)\n", "path": "netbox/extras/templatetags/custom_links.py"}], 
"after_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\n\nfrom extras.models import CustomLink\nfrom utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nLINK_BUTTON = '<a href=\"{}\"{} class=\"btn btn-sm btn-{}\">{}</a>\\n'\nGROUP_BUTTON = '<div class=\"btn-group\">\\n' \\\n '<button type=\"button\" class=\"btn btn-sm btn-{} dropdown-toggle\" data-toggle=\"dropdown\">\\n' \\\n '{} <span class=\"caret\"></span>\\n' \\\n '</button>\\n' \\\n '<ul class=\"dropdown-menu pull-right\">\\n' \\\n '{}</ul></div>'\nGROUP_LINK = '<li><a href=\"{}\"{}>{}</a></li>\\n'\n\n\[email protected]_tag()\ndef custom_links(obj):\n \"\"\"\n Render all applicable links for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n custom_links = CustomLink.objects.filter(content_type=content_type)\n if not custom_links:\n return ''\n\n context = {\n 'obj': obj,\n }\n template_code = ''\n group_names = OrderedDict()\n\n for cl in custom_links:\n\n # Organize custom links by group\n if cl.group_name and cl.group_name in group_names:\n group_names[cl.group_name].append(cl)\n elif cl.group_name:\n group_names[cl.group_name] = [cl]\n\n # Add non-grouped links\n else:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_rendered = render_jinja2(cl.url, context)\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n template_code += LINK_BUTTON.format(\n link_rendered, link_target, cl.button_class, text_rendered\n )\n except Exception as e:\n template_code += '<a class=\"btn btn-sm btn-default\" disabled=\"disabled\" title=\"{}\">' \\\n '<i class=\"fa fa-warning\"></i> {}</a>\\n'.format(e, cl.name)\n\n # Add grouped links to template\n for group, links in group_names.items():\n\n links_rendered = []\n\n for cl in links:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n link_rendered = render_jinja2(cl.url, context)\n links_rendered.append(\n GROUP_LINK.format(link_rendered, link_target, text_rendered)\n )\n except Exception as e:\n links_rendered.append(\n '<li><a disabled=\"disabled\" title=\"{}\"><span class=\"text-muted\">'\n '<i class=\"fa fa-warning\"></i> {}</span></a></li>'.format(e, cl.name)\n )\n\n if links_rendered:\n template_code += GROUP_BUTTON.format(\n links[0].button_class, group, ''.join(links_rendered)\n )\n\n return mark_safe(template_code)\n", "path": "netbox/extras/templatetags/custom_links.py"}]} | 1,724 | 172 |
gh_patches_debug_2899 | rasdani/github-patches | git_diff | hydroshare__hydroshare-4798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change Mezzanine Form to Disallow Username Changes
**Describe the feature you'd like and what it will do**
The internal mezzanine form for the admin account should be altered to no longer allow username changes if possible, since doing so breaks the resource.
**Why is this feature important?**
The policy moving forward is that usernames cannot be changed.
I will work with Scott to see if this change is feasible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_core/admin.py`
Content:
```
1 from django import forms
2 from django.contrib.auth.admin import UserAdmin
3 from django.contrib.auth.forms import UserCreationForm
4 from django.contrib.gis import admin
5 from django.contrib.contenttypes.admin import GenericTabularInline
6 from django.utils.translation import ugettext_lazy as _
7
8 from .models import *
9
10
11 class UserCreationFormExtended(UserCreationForm):
12 def __init__(self, *args, **kwargs):
13 super(UserCreationFormExtended, self).__init__(*args, **kwargs)
14 self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
15
16 UserAdmin.add_form = UserCreationFormExtended
17 UserAdmin.add_fieldsets = (
18 (None, {
19 'classes': ('wide',),
20 'fields': ('email', 'username', 'password1', 'password2',)
21 }),
22 )
23 UserAdmin.list_display = [
24 'username', 'email', 'first_name', 'last_name', 'is_staff',
25 'is_active', 'date_joined', 'last_login'
26 ]
27
28 class InlineResourceFiles(GenericTabularInline):
29 model = ResourceFile
30
31 admin.site.unregister(User)
32 admin.site.register(User, UserAdmin)
33 admin.site.unregister(GenericResource)
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_core/admin.py b/hs_core/admin.py
--- a/hs_core/admin.py
+++ b/hs_core/admin.py
@@ -14,6 +14,7 @@
self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
UserAdmin.add_form = UserCreationFormExtended
+UserAdmin.readonly_fields = ('username',)
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
| {"golden_diff": "diff --git a/hs_core/admin.py b/hs_core/admin.py\n--- a/hs_core/admin.py\n+++ b/hs_core/admin.py\n@@ -14,6 +14,7 @@\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n \n UserAdmin.add_form = UserCreationFormExtended\n+UserAdmin.readonly_fields = ('username',)\n UserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n", "issue": "Change Mezzanine Form to Disallow Username Changes\n**Describe the feature you'd like and what it will do**\r\nThe internal mezzanine form for the admin account should be altered to no longer allow username changes if possible, since doing so breaks the resource. \r\n\r\n\r\n**Why is this feature important?**\r\nThis will be policy moving forward, that usernames cannot be changed. \r\nI will work with Scott to see if this change is feasible. \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\nUserAdmin.list_display = [\n 'username', 'email', 'first_name', 'last_name', 'is_staff',\n 'is_active', 'date_joined', 'last_login'\n]\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(GenericResource)\n", "path": "hs_core/admin.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.readonly_fields = ('username',)\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\nUserAdmin.list_display = [\n 'username', 'email', 'first_name', 'last_name', 'is_staff',\n 'is_active', 'date_joined', 'last_login'\n]\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(GenericResource)\n", "path": "hs_core/admin.py"}]} | 661 | 106 |
gh_patches_debug_37672 | rasdani/github-patches | git_diff | open-mmlab__mmpose-1139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'ConcatDataset' is not available in MMPOSE
My config is something like this. I want to use the 'ConcatDataset' just like it in the MMDETECTION and MMSEGMENT. However, the 'ConcatDataset' is not available in MMPOSE
```
# myconfig.py
data = dict(
train=dict(
type = 'ConcatDataset',
datasets = [dict(
type='BottomUpCocoDataset',
ann_file='data/keypoints/2021-11-02-bwrat_side6-kp_val.json',
img_prefix='data/keypoints/2021-11-02-bwrat_side6-kp/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info=dataset_info),
dict(
type='BottomUpCocoDataset',
ann_file='data/keypoints/2021-11-24-wrat_side6-kp_val.json',
img_prefix='data/keypoints/2021-11-24-wrat_side6-kp/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info=dataset_info)
])
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/datasets/builder.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import platform
3 import random
4 from functools import partial
5
6 import numpy as np
7 from mmcv.parallel import collate
8 from mmcv.runner import get_dist_info
9 from mmcv.utils import Registry, build_from_cfg
10 from mmcv.utils.parrots_wrapper import _get_dataloader
11
12 from .samplers import DistributedSampler
13
14 if platform.system() != 'Windows':
15 # https://github.com/pytorch/pytorch/issues/973
16 import resource
17 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
18 base_soft_limit = rlimit[0]
19 hard_limit = rlimit[1]
20 soft_limit = min(max(4096, base_soft_limit), hard_limit)
21 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
22
23 DATASETS = Registry('dataset')
24 PIPELINES = Registry('pipeline')
25
26
27 def build_dataset(cfg, default_args=None):
28 """Build a dataset from config dict.
29
30 Args:
31 cfg (dict): Config dict. It should at least contain the key "type".
32 default_args (dict, optional): Default initialization arguments.
33 Default: None.
34
35 Returns:
36 Dataset: The constructed dataset.
37 """
38 from .dataset_wrappers import RepeatDataset
39
40 if cfg['type'] == 'RepeatDataset':
41 dataset = RepeatDataset(
42 build_dataset(cfg['dataset'], default_args), cfg['times'])
43 else:
44 dataset = build_from_cfg(cfg, DATASETS, default_args)
45 return dataset
46
47
48 def build_dataloader(dataset,
49 samples_per_gpu,
50 workers_per_gpu,
51 num_gpus=1,
52 dist=True,
53 shuffle=True,
54 seed=None,
55 drop_last=True,
56 pin_memory=True,
57 **kwargs):
58 """Build PyTorch DataLoader.
59
60 In distributed training, each GPU/process has a dataloader.
61 In non-distributed training, there is only one dataloader for all GPUs.
62
63 Args:
64 dataset (Dataset): A PyTorch dataset.
65 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
66 batch size of each GPU.
67 workers_per_gpu (int): How many subprocesses to use for data loading
68 for each GPU.
69 num_gpus (int): Number of GPUs. Only used in non-distributed training.
70 dist (bool): Distributed training/test or not. Default: True.
71 shuffle (bool): Whether to shuffle the data at every epoch.
72 Default: True.
73 drop_last (bool): Whether to drop the last incomplete batch in epoch.
74 Default: True
75 pin_memory (bool): Whether to use pin_memory in DataLoader.
76 Default: True
77 kwargs: any keyword argument to be used to initialize DataLoader
78
79 Returns:
80 DataLoader: A PyTorch dataloader.
81 """
82 rank, world_size = get_dist_info()
83 if dist:
84 sampler = DistributedSampler(
85 dataset, world_size, rank, shuffle=shuffle, seed=seed)
86 shuffle = False
87 batch_size = samples_per_gpu
88 num_workers = workers_per_gpu
89 else:
90 sampler = None
91 batch_size = num_gpus * samples_per_gpu
92 num_workers = num_gpus * workers_per_gpu
93
94 init_fn = partial(
95 worker_init_fn, num_workers=num_workers, rank=rank,
96 seed=seed) if seed is not None else None
97
98 _, DataLoader = _get_dataloader()
99 data_loader = DataLoader(
100 dataset,
101 batch_size=batch_size,
102 sampler=sampler,
103 num_workers=num_workers,
104 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
105 pin_memory=pin_memory,
106 shuffle=shuffle,
107 worker_init_fn=init_fn,
108 drop_last=drop_last,
109 **kwargs)
110
111 return data_loader
112
113
114 def worker_init_fn(worker_id, num_workers, rank, seed):
115 """Init the random seed for various workers."""
116 # The seed of each worker equals to
117 # num_worker * rank + worker_id + user_seed
118 worker_seed = num_workers * rank + worker_id + seed
119 np.random.seed(worker_seed)
120 random.seed(worker_seed)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -1,4 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import copy
import platform
import random
from functools import partial
@@ -6,8 +7,9 @@
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
-from mmcv.utils import Registry, build_from_cfg
+from mmcv.utils import Registry, build_from_cfg, is_seq_of
from mmcv.utils.parrots_wrapper import _get_dataloader
+from torch.utils.data.dataset import ConcatDataset
from .samplers import DistributedSampler
@@ -24,6 +26,39 @@
PIPELINES = Registry('pipeline')
+def _concat_dataset(cfg, default_args=None):
+ types = cfg['type']
+ ann_files = cfg['ann_file']
+ img_prefixes = cfg.get('img_prefix', None)
+ dataset_infos = cfg.get('dataset_info', None)
+
+ num_joints = cfg['data_cfg'].get('num_joints', None)
+ dataset_channel = cfg['data_cfg'].get('dataset_channel', None)
+
+ datasets = []
+ num_dset = len(ann_files)
+ for i in range(num_dset):
+ cfg_copy = copy.deepcopy(cfg)
+ cfg_copy['ann_file'] = ann_files[i]
+
+ if isinstance(types, (list, tuple)):
+ cfg_copy['type'] = types[i]
+ if isinstance(img_prefixes, (list, tuple)):
+ cfg_copy['img_prefix'] = img_prefixes[i]
+ if isinstance(dataset_infos, (list, tuple)):
+ cfg_copy['dataset_info'] = dataset_infos[i]
+
+ if isinstance(num_joints, (list, tuple)):
+ cfg_copy['data_cfg']['num_joints'] = num_joints[i]
+
+ if is_seq_of(dataset_channel, list):
+ cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i]
+
+ datasets.append(build_dataset(cfg_copy, default_args))
+
+ return ConcatDataset(datasets)
+
+
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
@@ -37,9 +72,16 @@
"""
from .dataset_wrappers import RepeatDataset
- if cfg['type'] == 'RepeatDataset':
+ if isinstance(cfg, (list, tuple)):
+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
+ elif cfg['type'] == 'ConcatDataset':
+ dataset = ConcatDataset(
+ [build_dataset(c, default_args) for c in cfg['datasets']])
+ elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
+ elif isinstance(cfg.get('ann_file'), (list, tuple)):
+ dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -1,4 +1,5 @@\n # Copyright (c) OpenMMLab. All rights reserved.\n+import copy\n import platform\n import random\n from functools import partial\n@@ -6,8 +7,9 @@\n import numpy as np\n from mmcv.parallel import collate\n from mmcv.runner import get_dist_info\n-from mmcv.utils import Registry, build_from_cfg\n+from mmcv.utils import Registry, build_from_cfg, is_seq_of\n from mmcv.utils.parrots_wrapper import _get_dataloader\n+from torch.utils.data.dataset import ConcatDataset\n \n from .samplers import DistributedSampler\n \n@@ -24,6 +26,39 @@\n PIPELINES = Registry('pipeline')\n \n \n+def _concat_dataset(cfg, default_args=None):\n+ types = cfg['type']\n+ ann_files = cfg['ann_file']\n+ img_prefixes = cfg.get('img_prefix', None)\n+ dataset_infos = cfg.get('dataset_info', None)\n+\n+ num_joints = cfg['data_cfg'].get('num_joints', None)\n+ dataset_channel = cfg['data_cfg'].get('dataset_channel', None)\n+\n+ datasets = []\n+ num_dset = len(ann_files)\n+ for i in range(num_dset):\n+ cfg_copy = copy.deepcopy(cfg)\n+ cfg_copy['ann_file'] = ann_files[i]\n+\n+ if isinstance(types, (list, tuple)):\n+ cfg_copy['type'] = types[i]\n+ if isinstance(img_prefixes, (list, tuple)):\n+ cfg_copy['img_prefix'] = img_prefixes[i]\n+ if isinstance(dataset_infos, (list, tuple)):\n+ cfg_copy['dataset_info'] = dataset_infos[i]\n+\n+ if isinstance(num_joints, (list, tuple)):\n+ cfg_copy['data_cfg']['num_joints'] = num_joints[i]\n+\n+ if is_seq_of(dataset_channel, list):\n+ cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i]\n+\n+ datasets.append(build_dataset(cfg_copy, default_args))\n+\n+ return ConcatDataset(datasets)\n+\n+\n def build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n \n@@ -37,9 +72,16 @@\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n \n- if cfg['type'] == 'RepeatDataset':\n+ if isinstance(cfg, (list, tuple)):\n+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n+ elif cfg['type'] == 'ConcatDataset':\n+ dataset = ConcatDataset(\n+ [build_dataset(c, default_args) for c in cfg['datasets']])\n+ elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n+ elif isinstance(cfg.get('ann_file'), (list, tuple)):\n+ dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n", "issue": "'ConcatDataset' is not available in MMPOSE\nMy config is something like this. I want to use the 'ConcatDataset' just like it in the MMDETECTION and MMSEGMENT. However, the 'ConcatDataset' is not available in MMPOSE\r\n```\r\n# myconfig.py\r\ndata = dict(\r\n train=dict(\r\n type = 'ConcatDataset',\r\n datasets = [dict(\r\n type='BottomUpCocoDataset',\r\n ann_file='data/keypoints/2021-11-02-bwrat_side6-kp_val.json',\r\n img_prefix='data/keypoints/2021-11-02-bwrat_side6-kp/',\r\n data_cfg=data_cfg,\r\n pipeline=test_pipeline,\r\n dataset_info=dataset_info),\r\n dict(\r\n type='BottomUpCocoDataset',\r\n ann_file='data/keypoints/2021-11-24-wrat_side6-kp_val.json',\r\n img_prefix='data/keypoints/2021-11-24-wrat_side6-kp/',\r\n data_cfg=data_cfg,\r\n pipeline=val_pipeline,\r\n dataset_info=dataset_info)\r\n ])\r\n)\r\n```\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg, is_seq_of\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\nfrom torch.utils.data.dataset import ConcatDataset\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef _concat_dataset(cfg, default_args=None):\n types = cfg['type']\n ann_files = cfg['ann_file']\n img_prefixes = cfg.get('img_prefix', None)\n dataset_infos = cfg.get('dataset_info', None)\n\n num_joints = cfg['data_cfg'].get('num_joints', None)\n dataset_channel = cfg['data_cfg'].get('dataset_channel', None)\n\n datasets = []\n num_dset = len(ann_files)\n for i in range(num_dset):\n cfg_copy = copy.deepcopy(cfg)\n cfg_copy['ann_file'] = ann_files[i]\n\n if isinstance(types, (list, tuple)):\n cfg_copy['type'] = types[i]\n if isinstance(img_prefixes, (list, tuple)):\n cfg_copy['img_prefix'] = img_prefixes[i]\n if isinstance(dataset_infos, (list, tuple)):\n cfg_copy['dataset_info'] = dataset_infos[i]\n\n if isinstance(num_joints, (list, tuple)):\n cfg_copy['data_cfg']['num_joints'] = num_joints[i]\n\n if is_seq_of(dataset_channel, list):\n cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i]\n\n datasets.append(build_dataset(cfg_copy, default_args))\n\n return ConcatDataset(datasets)\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. 
It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if isinstance(cfg, (list, tuple)):\n dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n elif cfg['type'] == 'ConcatDataset':\n dataset = ConcatDataset(\n [build_dataset(c, default_args) for c in cfg['datasets']])\n elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n elif isinstance(cfg.get('ann_file'), (list, tuple)):\n dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 1,669 | 699 |
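The `worker_init_fn` at the end of the builder.py content in the record above derives each dataloader worker's seed as `num_workers * rank + worker_id + seed`. Below is a minimal standalone sketch of that formula; the rank, worker, and seed values are illustrative only and nothing here imports mmpose or torch.

```py
# Standalone sketch of the per-worker seeding formula used in builder.py above.
# The rank/worker/seed values are illustrative; no mmpose or torch import is needed.
import random
import numpy as np

def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int) -> int:
    # Each (rank, worker_id) pair maps to a distinct offset from the user seed,
    # so no two dataloader workers draw the same random stream.
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
    return worker_seed

if __name__ == "__main__":
    # Two ranks (GPUs) with two workers each and user seed 42 -> 42, 43, 44, 45.
    seeds = [worker_init_fn(w, 2, r, 42) for r in range(2) for w in range(2)]
    print(seeds)
```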
gh_patches_debug_37181 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
__call__ using ClassWrapper does not update wrapped metric
## 🐛 Bug
Using __call__ with ClasswiseWrapper does not update the Metric within, such that subsequent calls to compute are only using the last batch input.
### To Reproduce
See code sample
#### Code sample
```py
import numpy as np
import torch
import torchmetrics
np.random.seed(42)
m1 = torchmetrics.Precision(num_classes=3, average=None)
m2 = torchmetrics.Precision(num_classes=3, average=None)
mcw1 = torchmetrics.ClasswiseWrapper(torchmetrics.Precision(num_classes=3, average=None))
mcw2 = torchmetrics.ClasswiseWrapper(torchmetrics.Precision(num_classes=3, average=None))
for i in range(2):
print(f"Epoch: {i}")
preds = torch.Tensor(np.random.uniform(0, 1, (100, 3)))
targets = torch.Tensor(np.random.randint(0, 2, (100, 3))).int()
m1(preds, targets)
m2.update(preds, targets)
mcw1(preds, targets)
mcw2.update(preds, targets)
print(f"Prec: {m1.compute().numpy()}")
print(f"Prec update: {m2.compute().numpy()}")
print(f"Prec wrap: {np.asarray([v.numpy() for v in mcw1.compute().values()])}")
print(f"Prec wrap update: {np.asarray([v.numpy() for v in mcw2.compute().values()])}")
```
Output is:
```
Epoch: 0
Prec: [0.42857143 0.537037 0.509434 ]
Prec update: [0.42857143 0.537037 0.509434 ]
Prec wrap: [0.42857143 0.537037 0.509434 ]
Prec wrap update: [0.42857143 0.537037 0.509434 ]
Epoch: 1
Prec: [0.46464646 0.5247525 0.48039216]
Prec update: [0.46464646 0.5247525 0.48039216]
Prec wrap: [0.5 0.5106383 0.4489796]
Prec wrap update: [0.46464646 0.5247525 0.48039216]
```
### Expected behavior
The values should be the same whether calling `update` or `__call__`
### Environment
- TorchMetrics version: 0.9.3
- Python & PyTorch Version (e.g., 1.0): 1.12.0
- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/wrappers/classwise.py`
Content:
```
1 from typing import Any, Dict, List, Optional
2
3 from torch import Tensor
4
5 from torchmetrics import Metric
6
7
8 class ClasswiseWrapper(Metric):
9 """Wrapper class for altering the output of classification metrics that returns multiple values to include
10 label information.
11
12 Args:
13 metric: base metric that should be wrapped. It is assumed that the metric outputs a single
14 tensor that is split along the first dimension.
15 labels: list of strings indicating the different classes.
16
17 Example:
18 >>> import torch
19 >>> _ = torch.manual_seed(42)
20 >>> from torchmetrics import Accuracy, ClasswiseWrapper
21 >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))
22 >>> preds = torch.randn(10, 3).softmax(dim=-1)
23 >>> target = torch.randint(3, (10,))
24 >>> metric(preds, target)
25 {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}
26
27 Example (labels as list of strings):
28 >>> import torch
29 >>> from torchmetrics import Accuracy, ClasswiseWrapper
30 >>> metric = ClasswiseWrapper(
31 ... Accuracy(num_classes=3, average=None),
32 ... labels=["horse", "fish", "dog"]
33 ... )
34 >>> preds = torch.randn(10, 3).softmax(dim=-1)
35 >>> target = torch.randint(3, (10,))
36 >>> metric(preds, target)
37 {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}
38
39 Example (in metric collection):
40 >>> import torch
41 >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall
42 >>> labels = ["horse", "fish", "dog"]
43 >>> metric = MetricCollection(
44 ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),
45 ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}
46 ... )
47 >>> preds = torch.randn(10, 3).softmax(dim=-1)
48 >>> target = torch.randint(3, (10,))
49 >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE
50 {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),
51 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
52 """
53
54 full_state_update: Optional[bool] = True
55
56 def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
57 super().__init__()
58 if not isinstance(metric, Metric):
59 raise ValueError(f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}")
60 if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):
61 raise ValueError(f"Expected argument `labels` to either be `None` or a list of strings but got {labels}")
62 self.metric = metric
63 self.labels = labels
64
65 def _convert(self, x: Tensor) -> Dict[str, Any]:
66 name = self.metric.__class__.__name__.lower()
67 if self.labels is None:
68 return {f"{name}_{i}": val for i, val in enumerate(x)}
69 return {f"{name}_{lab}": val for lab, val in zip(self.labels, x)}
70
71 def update(self, *args: Any, **kwargs: Any) -> None:
72 self.metric.update(*args, **kwargs)
73
74 def compute(self) -> Dict[str, Tensor]:
75 return self._convert(self.metric.compute())
76
77 def reset(self) -> None:
78 self.metric.reset()
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py
--- a/src/torchmetrics/wrappers/classwise.py
+++ b/src/torchmetrics/wrappers/classwise.py
@@ -1,4 +1,17 @@
-from typing import Any, Dict, List, Optional
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Any, Callable, Dict, List, Optional
from torch import Tensor
@@ -51,8 +64,6 @@
'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}
"""
- full_state_update: Optional[bool] = True
-
def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:
super().__init__()
if not isinstance(metric, Metric):
@@ -61,6 +72,7 @@
raise ValueError(f"Expected argument `labels` to either be `None` or a list of strings but got {labels}")
self.metric = metric
self.labels = labels
+ self._update_count = 1
def _convert(self, x: Tensor) -> Dict[str, Any]:
name = self.metric.__class__.__name__.lower()
@@ -68,6 +80,9 @@
return {f"{name}_{i}": val for i, val in enumerate(x)}
return {f"{name}_{lab}": val for lab, val in zip(self.labels, x)}
+ def forward(self, *args: Any, **kwargs: Any) -> Any:
+ return self._convert(self.metric(*args, **kwargs))
+
def update(self, *args: Any, **kwargs: Any) -> None:
self.metric.update(*args, **kwargs)
@@ -76,3 +91,11 @@
def reset(self) -> None:
self.metric.reset()
+
+ def _wrap_update(self, update: Callable) -> Callable:
+ """Overwrite to do nothing."""
+ return update
+
+ def _wrap_compute(self, compute: Callable) -> Callable:
+ """Overwrite to do nothing."""
+ return compute
| {"golden_diff": "diff --git a/src/torchmetrics/wrappers/classwise.py b/src/torchmetrics/wrappers/classwise.py\n--- a/src/torchmetrics/wrappers/classwise.py\n+++ b/src/torchmetrics/wrappers/classwise.py\n@@ -1,4 +1,17 @@\n-from typing import Any, Dict, List, Optional\n+# Copyright The PyTorch Lightning team.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+from typing import Any, Callable, Dict, List, Optional\n \n from torch import Tensor\n \n@@ -51,8 +64,6 @@\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n \n- full_state_update: Optional[bool] = True\n-\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n@@ -61,6 +72,7 @@\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n+ self._update_count = 1\n \n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n@@ -68,6 +80,9 @@\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n \n+ def forward(self, *args: Any, **kwargs: Any) -> Any:\n+ return self._convert(self.metric(*args, **kwargs))\n+\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n \n@@ -76,3 +91,11 @@\n \n def reset(self) -> None:\n self.metric.reset()\n+\n+ def _wrap_update(self, update: Callable) -> Callable:\n+ \"\"\"Overwrite to do nothing.\"\"\"\n+ return update\n+\n+ def _wrap_compute(self, compute: Callable) -> Callable:\n+ \"\"\"Overwrite to do nothing.\"\"\"\n+ return compute\n", "issue": "__call__ using ClassWrapper does not update wrapped metric\n## \ud83d\udc1b Bug\r\n\r\nUsing __call__ with ClasswiseWrapper does not update the Metric within, such that subsequent calls to compute are only using the last batch input.\r\n\r\n### To Reproduce\r\n\r\nSee code sample\r\n\r\n#### Code sample\r\n\r\n```py\r\nimport numpy as np\r\nimport torch\r\nimport torchmetrics\r\n\r\nnp.random.seed(42)\r\n\r\nm1 = torchmetrics.Precision(num_classes=3, average=None)\r\nm2 = torchmetrics.Precision(num_classes=3, average=None)\r\nmcw1 = torchmetrics.ClasswiseWrapper(torchmetrics.Precision(num_classes=3, average=None))\r\nmcw2 = torchmetrics.ClasswiseWrapper(torchmetrics.Precision(num_classes=3, average=None))\r\n\r\nfor i in range(2):\r\n print(f\"Epoch: {i}\")\r\n preds = torch.Tensor(np.random.uniform(0, 1, (100, 3)))\r\n targets = torch.Tensor(np.random.randint(0, 2, (100, 3))).int()\r\n m1(preds, targets)\r\n m2.update(preds, targets)\r\n mcw1(preds, targets)\r\n mcw2.update(preds, targets)\r\n print(f\"Prec: {m1.compute().numpy()}\")\r\n print(f\"Prec update: {m2.compute().numpy()}\")\r\n print(f\"Prec wrap: {np.asarray([v.numpy() for v in mcw1.compute().values()])}\")\r\n print(f\"Prec wrap update: {np.asarray([v.numpy() for v in 
mcw2.compute().values()])}\")\r\n```\r\n\r\nOutput is:\r\n\r\n```\r\nEpoch: 0\r\nPrec: [0.42857143 0.537037 0.509434 ]\r\nPrec update: [0.42857143 0.537037 0.509434 ]\r\nPrec wrap: [0.42857143 0.537037 0.509434 ]\r\nPrec wrap update: [0.42857143 0.537037 0.509434 ]\r\nEpoch: 1\r\nPrec: [0.46464646 0.5247525 0.48039216]\r\nPrec update: [0.46464646 0.5247525 0.48039216]\r\nPrec wrap: [0.5 0.5106383 0.4489796]\r\nPrec wrap update: [0.46464646 0.5247525 0.48039216]\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe values should be the same whether calling `update` or `__call__`\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 0.9.3\r\n- Python & PyTorch Version (e.g., 1.0): 1.12.0\r\n- Any other relevant information such as OS (e.g., Linux): Ubuntu 20.04\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional\n\nfrom torch import Tensor\n\nfrom torchmetrics import Metric\n\n\nclass ClasswiseWrapper(Metric):\n \"\"\"Wrapper class for altering the output of classification metrics that returns multiple values to include\n label information.\n\n Args:\n metric: base metric that should be wrapped. It is assumed that the metric outputs a single\n tensor that is split along the first dimension.\n labels: list of strings indicating the different classes.\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}\n\n Example (labels as list of strings):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(\n ... Accuracy(num_classes=3, average=None),\n ... labels=[\"horse\", \"fish\", \"dog\"]\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}\n\n Example (in metric collection):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall\n >>> labels = [\"horse\", \"fish\", \"dog\"]\n >>> metric = MetricCollection(\n ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),\n ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}\n ... 
)\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n\n full_state_update: Optional[bool] = True\n\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n raise ValueError(f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}\")\n if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n\n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n if self.labels is None:\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n\n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n\n def reset(self) -> None:\n self.metric.reset()\n", "path": "src/torchmetrics/wrappers/classwise.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Optional\n\nfrom torch import Tensor\n\nfrom torchmetrics import Metric\n\n\nclass ClasswiseWrapper(Metric):\n \"\"\"Wrapper class for altering the output of classification metrics that returns multiple values to include\n label information.\n\n Args:\n metric: base metric that should be wrapped. It is assumed that the metric outputs a single\n tensor that is split along the first dimension.\n labels: list of strings indicating the different classes.\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(Accuracy(num_classes=3, average=None))\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_0': tensor(0.5000), 'accuracy_1': tensor(0.7500), 'accuracy_2': tensor(0.)}\n\n Example (labels as list of strings):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper\n >>> metric = ClasswiseWrapper(\n ... Accuracy(num_classes=3, average=None),\n ... labels=[\"horse\", \"fish\", \"dog\"]\n ... 
)\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target)\n {'accuracy_horse': tensor(0.3333), 'accuracy_fish': tensor(0.6667), 'accuracy_dog': tensor(0.)}\n\n Example (in metric collection):\n >>> import torch\n >>> from torchmetrics import Accuracy, ClasswiseWrapper, MetricCollection, Recall\n >>> labels = [\"horse\", \"fish\", \"dog\"]\n >>> metric = MetricCollection(\n ... {'accuracy': ClasswiseWrapper(Accuracy(num_classes=3, average=None), labels),\n ... 'recall': ClasswiseWrapper(Recall(num_classes=3, average=None), labels)}\n ... )\n >>> preds = torch.randn(10, 3).softmax(dim=-1)\n >>> target = torch.randint(3, (10,))\n >>> metric(preds, target) # doctest: +NORMALIZE_WHITESPACE\n {'accuracy_horse': tensor(0.), 'accuracy_fish': tensor(0.3333), 'accuracy_dog': tensor(0.4000),\n 'recall_horse': tensor(0.), 'recall_fish': tensor(0.3333), 'recall_dog': tensor(0.4000)}\n \"\"\"\n\n def __init__(self, metric: Metric, labels: Optional[List[str]] = None) -> None:\n super().__init__()\n if not isinstance(metric, Metric):\n raise ValueError(f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {metric}\")\n if labels is not None and not (isinstance(labels, list) and all(isinstance(lab, str) for lab in labels)):\n raise ValueError(f\"Expected argument `labels` to either be `None` or a list of strings but got {labels}\")\n self.metric = metric\n self.labels = labels\n self._update_count = 1\n\n def _convert(self, x: Tensor) -> Dict[str, Any]:\n name = self.metric.__class__.__name__.lower()\n if self.labels is None:\n return {f\"{name}_{i}\": val for i, val in enumerate(x)}\n return {f\"{name}_{lab}\": val for lab, val in zip(self.labels, x)}\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n return self._convert(self.metric(*args, **kwargs))\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n self.metric.update(*args, **kwargs)\n\n def compute(self) -> Dict[str, Tensor]:\n return self._convert(self.metric.compute())\n\n def reset(self) -> None:\n self.metric.reset()\n\n def _wrap_update(self, update: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return update\n\n def _wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return compute\n", "path": "src/torchmetrics/wrappers/classwise.py"}]} | 2,017 | 620 |
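A note on the ClasswiseWrapper record above: the golden diff makes `forward` delegate to the wrapped metric (and turns `_wrap_update`/`_wrap_compute` into no-ops), so `__call__` accumulates state exactly like `update`. The sketch below restates the issue's expected behaviour as a self-contained check; it assumes torchmetrics 0.9.x, matching the `Precision(num_classes=..., average=None)` API used in the reproduction, and is illustrative rather than part of the project's test suite.

```py
# Illustrative check of the behaviour the patch above restores: with the fix,
# the __call__ path and the update() path must leave ClasswiseWrapper in the
# same state. Assumes torchmetrics 0.9.x, as in the issue's reproduction.
import torch
import torchmetrics

def call_matches_update(num_batches: int = 3) -> bool:
    torch.manual_seed(0)
    via_call = torchmetrics.ClasswiseWrapper(
        torchmetrics.Precision(num_classes=3, average=None))
    via_update = torchmetrics.ClasswiseWrapper(
        torchmetrics.Precision(num_classes=3, average=None))
    for _ in range(num_batches):
        preds = torch.rand(100, 3)
        targets = torch.randint(0, 2, (100, 3)).int()
        via_call(preds, targets)           # forward path (broken before the patch)
        via_update.update(preds, targets)  # update path (reference behaviour)
    a = {k: float(v) for k, v in via_call.compute().items()}
    b = {k: float(v) for k, v in via_update.compute().items()}
    return a == b

if __name__ == "__main__":
    print(call_matches_update())  # expected True once the patch is applied
```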
gh_patches_debug_37878 | rasdani/github-patches | git_diff | opsdroid__opsdroid-34 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check for base classes instead of name when importing connectors and databases
Currently you must give your connector or database class a specific name for them to be imported.
Now that these classes should inherit from the base classes we can test for `isinstance(baseclass)` instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/core.py`
Content:
```
1 """Core components of OpsDroid."""
2
3 import logging
4 import sys
5 import weakref
6 from multiprocessing import Process
7 from opsdroid.helper import match
8 from opsdroid.memory import Memory
9
10
11 class OpsDroid():
12 """Root object for opsdroid."""
13
14 instances = []
15
16 def __init__(self):
17 """Start opsdroid."""
18 self.bot_name = 'opsdroid'
19 self.sys_status = 0
20 self.connectors = []
21 self.connector_jobs = []
22 self.skills = []
23 self.memory = Memory()
24 logging.info("Created main opsdroid object")
25
26 def __enter__(self):
27 """Add self to existing instances."""
28 if len(self.__class__.instances) == 0:
29 self.__class__.instances.append(weakref.proxy(self))
30 else:
31 self.critical("opsdroid has already been started", 1)
32 return self
33
34 def __exit__(self, exc_type, exc_value, traceback):
35 """Remove self from existing instances."""
36 self.__class__.instances = []
37
38 def exit(self):
39 """Exit application."""
40 logging.info("Exiting application with return code " +
41 str(self.sys_status))
42 sys.exit(self.sys_status)
43
44 def critical(self, error, code):
45 """Exit due to unrecoverable error."""
46 self.sys_status = code
47 logging.critical(error)
48 print("Error: " + error)
49 self.exit()
50
51 def start_connectors(self, connectors):
52 """Start the connectors."""
53 if len(connectors) == 0:
54 self.critical("All connectors failed to load", 1)
55 elif len(connectors) == 1:
56 for name, cls in connectors[0]["module"].__dict__.items():
57 if isinstance(cls, type) and "Connector" in name:
58 connectors[0]["config"]["bot-name"] = self.bot_name
59 connector = cls(connectors[0]["config"])
60 self.connectors.append(connector)
61 connector.connect(self)
62 else:
63 for connector_module in connectors:
64 for name, cls in connector_module["module"].__dict__.items():
65 if isinstance(cls, type) and "Connector" in name:
66 connector_module["config"]["bot-name"] = self.bot_name
67 connector = cls(connector_module["config"])
68 self.connectors.append(connector)
69 job = Process(target=connector.connect, args=(self,))
70 job.start()
71 self.connector_jobs.append(job)
72 for job in self.connector_jobs:
73 job.join()
74
75 def start_databases(self, databases):
76 """Start the databases."""
77 if len(databases) == 0:
78 logging.warning("All databases failed to load")
79 for database_module in databases:
80 for name, cls in database_module["module"].__dict__.items():
81 if isinstance(cls, type) and "Database" in name:
82 logging.debug("Adding database: " + name)
83 database = cls(database_module["config"])
84 self.memory.databases.append(database)
85 database.connect()
86
87 def load_regex_skill(self, regex, skill):
88 """Load skills."""
89 self.skills.append({"regex": regex, "skill": skill})
90
91 def parse(self, message):
92 """Parse a string against all skills."""
93 if message.text.strip() != "":
94 logging.debug("Parsing input: " + message.text)
95 for skill in self.skills:
96 if "regex" in skill:
97 regex = match(skill["regex"], message.text)
98 if regex:
99 message.regex = regex
100 skill["skill"](self, message)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -6,6 +6,8 @@
from multiprocessing import Process
from opsdroid.helper import match
from opsdroid.memory import Memory
+from opsdroid.connector import Connector
+from opsdroid.database import Database
class OpsDroid():
@@ -54,7 +56,9 @@
self.critical("All connectors failed to load", 1)
elif len(connectors) == 1:
for name, cls in connectors[0]["module"].__dict__.items():
- if isinstance(cls, type) and "Connector" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Connector):
+ logging.debug("Adding connector: " + name)
connectors[0]["config"]["bot-name"] = self.bot_name
connector = cls(connectors[0]["config"])
self.connectors.append(connector)
@@ -62,7 +66,8 @@
else:
for connector_module in connectors:
for name, cls in connector_module["module"].__dict__.items():
- if isinstance(cls, type) and "Connector" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Connector):
connector_module["config"]["bot-name"] = self.bot_name
connector = cls(connector_module["config"])
self.connectors.append(connector)
@@ -78,11 +83,12 @@
logging.warning("All databases failed to load")
for database_module in databases:
for name, cls in database_module["module"].__dict__.items():
- if isinstance(cls, type) and "Database" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Database):
logging.debug("Adding database: " + name)
database = cls(database_module["config"])
self.memory.databases.append(database)
- database.connect()
+ database.connect(self)
def load_regex_skill(self, regex, skill):
"""Load skills."""
| {"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -6,6 +6,8 @@\n from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n+from opsdroid.connector import Connector\n+from opsdroid.database import Database\n \n \n class OpsDroid():\n@@ -54,7 +56,9 @@\n self.critical(\"All connectors failed to load\", 1)\n elif len(connectors) == 1:\n for name, cls in connectors[0][\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Connector\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Connector):\n+ logging.debug(\"Adding connector: \" + name)\n connectors[0][\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connectors[0][\"config\"])\n self.connectors.append(connector)\n@@ -62,7 +66,8 @@\n else:\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Connector\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Connector):\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n@@ -78,11 +83,12 @@\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Database\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Database):\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n- database.connect()\n+ database.connect(self)\n \n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n", "issue": "Check for base classes instead of name when importing connectors and databases\nCurrently you must give your connector or database class a specific name for them to be imported.\n\nNow that these classes should inherit from the base classes we can test for `isinstance(baseclass)` instead.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom multiprocessing import Process\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n elif 
len(connectors) == 1:\n for name, cls in connectors[0][\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connectors[0][\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connectors[0][\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n else:\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n job = Process(target=connector.connect, args=(self,))\n job.start()\n self.connector_jobs.append(job)\n for job in self.connector_jobs:\n job.join()\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}], "after_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom multiprocessing import Process\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\nfrom opsdroid.connector import Connector\nfrom opsdroid.database import Database\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n elif len(connectors) == 1:\n for name, cls in connectors[0][\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n isinstance(cls({}), Connector):\n logging.debug(\"Adding connector: \" + name)\n connectors[0][\"config\"][\"bot-name\"] = self.bot_name\n connector = 
cls(connectors[0][\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n else:\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n isinstance(cls({}), Connector):\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n job = Process(target=connector.connect, args=(self,))\n job.start()\n self.connector_jobs.append(job)\n for job in self.connector_jobs:\n job.join()\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n isinstance(cls({}), Database):\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect(self)\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]} | 1,259 | 462 |
gh_patches_debug_2287 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1205 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Google Auth test_with_scopes_scoped test failing with google-auth==1.25.0 or greater
The GoogleAuth unittest [test_with_scopes_scoped](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L79) is failing at [this line](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L92).
With `google-auth==1.25.0`, the test fails.
```
tests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped FAILED
```
With `google-auth==1.24.0` , the same test passes.
```
tests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped PASSED
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 7):
25 print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
26 sys.exit(1)
27 if (3, 1) <= sys.version_info < (3, 4):
28 print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
29 sys.exit(1)
30
31 import io
32 import os
33 from setuptools import setup
34
35 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
36
37 install_requires = [
38 "httplib2>=0.15.0,<1dev",
39 "google-auth>=1.16.0",
40 "google-auth-httplib2>=0.0.3",
41 "google-api-core>=1.21.0,<2dev",
42 "six>=1.13.0,<2dev",
43 "uritemplate>=3.0.0,<4dev",
44 ]
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 readme_filename = os.path.join(package_root, "README.md")
49 with io.open(readme_filename, encoding="utf-8") as readme_file:
50 readme = readme_file.read()
51
52 version = "1.12.8"
53
54 setup(
55 name="google-api-python-client",
56 version=version,
57 description="Google API Client Library for Python",
58 long_description=readme,
59 long_description_content_type='text/markdown',
60 author="Google LLC",
61 author_email="[email protected]",
62 url="https://github.com/googleapis/google-api-python-client/",
63 install_requires=install_requires,
64 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
65 packages=packages,
66 package_data={},
67 license="Apache 2.0",
68 keywords="google api client",
69 classifiers=[
70 "Programming Language :: Python :: 2",
71 "Programming Language :: Python :: 2.7",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.5",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: OS Independent",
80 "Topic :: Internet :: WWW/HTTP",
81 ],
82 )
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,8 @@
"google-auth>=1.16.0",
"google-auth-httplib2>=0.0.3",
"google-api-core>=1.21.0,<2dev",
+ # rsa version 4.5 is the last version that is compatible with Python 2.7
+ "rsa==4.5;python_version<'3'",
"six>=1.13.0,<2dev",
"uritemplate>=3.0.0,<4dev",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,6 +39,8 @@\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n+ # rsa version 4.5 is the last version that is compatible with Python 2.7\n+ \"rsa==4.5;python_version<'3'\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n ]\n", "issue": "Google Auth test_with_scopes_scoped test failing with google-auth==1.25.0 or greater\nThe GoogleAuth unittest [test_with_scopes_scoped](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L79) is failing at [this line](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L92). \r\n\r\nWith `google-auth==1.25.0`, the test fails.\r\n```\r\ntests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped FAILED \r\n```\r\n\r\nWith `google-auth==1.24.0` , the same test passes.\r\n```\r\ntests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped PASSED \r\n```\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 4):\n print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"1.12.8\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n 
\"Programming Language :: Python :: 3.7\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 4):\n print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n # rsa version 4.5 is the last version that is compatible with Python 2.7\n \"rsa==4.5;python_version<'3'\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"1.12.8\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,293 | 142 |
gh_patches_debug_4499 | rasdani/github-patches | git_diff | ckan__ckan-4158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Datastore dump results are not the same as data in database
### CKAN Version if known (or site URL)
2.7.2
### Please describe the expected behaviour
Downloaded dataset is not consistent with the actual data!
### Please describe the actual behaviour
We've upserted data using datastore api. The data is shown correctly in data explorer, Table view, API call or even querying postgres, But the data in downloaded csv file is different! Total number of records are the same but in csv we have random number of duplicates and missing data.
### What steps can be taken to reproduce the issue?
Upsert data using API. Download the CSV file using URL. Filter and compare downloaded data and data shown in data explorer or table view.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext/datastore/controller.py`
Content:
```
1 # encoding: utf-8
2
3 from ckan.plugins.toolkit import (
4 Invalid,
5 ObjectNotFound,
6 NotAuthorized,
7 get_action,
8 get_validator,
9 _,
10 request,
11 response,
12 BaseController,
13 abort,
14 render,
15 c,
16 h,
17 )
18 from ckanext.datastore.writer import (
19 csv_writer,
20 tsv_writer,
21 json_writer,
22 xml_writer,
23 )
24 from ckan.logic import (
25 tuplize_dict,
26 parse_params,
27 )
28 import ckan.lib.navl.dictization_functions as dict_fns
29
30 from itertools import izip_longest
31
32 int_validator = get_validator('int_validator')
33 boolean_validator = get_validator('boolean_validator')
34
35 DUMP_FORMATS = 'csv', 'tsv', 'json', 'xml'
36 PAGINATE_BY = 32000
37
38
39 class DatastoreController(BaseController):
40 def dump(self, resource_id):
41 try:
42 offset = int_validator(request.GET.get('offset', 0), {})
43 except Invalid as e:
44 abort(400, u'offset: ' + e.error)
45 try:
46 limit = int_validator(request.GET.get('limit'), {})
47 except Invalid as e:
48 abort(400, u'limit: ' + e.error)
49 bom = boolean_validator(request.GET.get('bom'), {})
50 fmt = request.GET.get('format', 'csv')
51
52 if fmt not in DUMP_FORMATS:
53 abort(400, _(
54 u'format: must be one of %s') % u', '.join(DUMP_FORMATS))
55
56 try:
57 dump_to(
58 resource_id,
59 response,
60 fmt=fmt,
61 offset=offset,
62 limit=limit,
63 options={u'bom': bom})
64 except ObjectNotFound:
65 abort(404, _('DataStore resource not found'))
66
67 def dictionary(self, id, resource_id):
68 u'''data dictionary view: show/edit field labels and descriptions'''
69
70 try:
71 # resource_edit_base template uses these
72 c.pkg_dict = get_action('package_show')(
73 None, {'id': id})
74 c.resource = get_action('resource_show')(
75 None, {'id': resource_id})
76 rec = get_action('datastore_search')(None, {
77 'resource_id': resource_id,
78 'limit': 0})
79 except (ObjectNotFound, NotAuthorized):
80 abort(404, _('Resource not found'))
81
82 fields = [f for f in rec['fields'] if not f['id'].startswith('_')]
83
84 if request.method == 'POST':
85 data = dict_fns.unflatten(tuplize_dict(parse_params(
86 request.params)))
87 info = data.get(u'info')
88 if not isinstance(info, list):
89 info = []
90 info = info[:len(fields)]
91
92 get_action('datastore_create')(None, {
93 'resource_id': resource_id,
94 'force': True,
95 'fields': [{
96 'id': f['id'],
97 'type': f['type'],
98 'info': fi if isinstance(fi, dict) else {}
99 } for f, fi in izip_longest(fields, info)]})
100
101 h.flash_success(_('Data Dictionary saved. Any type overrides will '
102 'take effect when the resource is next uploaded '
103 'to DataStore'))
104 h.redirect_to(
105 controller='ckanext.datastore.controller:DatastoreController',
106 action='dictionary',
107 id=id,
108 resource_id=resource_id)
109
110 return render(
111 'datastore/dictionary.html',
112 extra_vars={'fields': fields})
113
114
115 def dump_to(resource_id, output, fmt, offset, limit, options):
116 if fmt == 'csv':
117 writer_factory = csv_writer
118 records_format = 'csv'
119 elif fmt == 'tsv':
120 writer_factory = tsv_writer
121 records_format = 'tsv'
122 elif fmt == 'json':
123 writer_factory = json_writer
124 records_format = 'lists'
125 elif fmt == 'xml':
126 writer_factory = xml_writer
127 records_format = 'objects'
128
129 def start_writer(fields):
130 bom = options.get(u'bom', False)
131 return writer_factory(output, fields, resource_id, bom)
132
133 def result_page(offs, lim):
134 return get_action('datastore_search')(None, {
135 'resource_id': resource_id,
136 'limit':
137 PAGINATE_BY if limit is None
138 else min(PAGINATE_BY, lim),
139 'offset': offs,
140 'records_format': records_format,
141 'include_total': 'false', # XXX: default() is broken
142 })
143
144 result = result_page(offset, limit)
145
146 with start_writer(result['fields']) as wr:
147 while True:
148 if limit is not None and limit <= 0:
149 break
150
151 records = result['records']
152
153 wr.write_records(records)
154
155 if records_format == 'objects' or records_format == 'lists':
156 if len(records) < PAGINATE_BY:
157 break
158 elif not records:
159 break
160
161 offset += PAGINATE_BY
162 if limit is not None:
163 limit -= PAGINATE_BY
164 if limit <= 0:
165 break
166
167 result = result_page(offset, limit)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext/datastore/controller.py b/ckanext/datastore/controller.py
--- a/ckanext/datastore/controller.py
+++ b/ckanext/datastore/controller.py
@@ -137,6 +137,7 @@
PAGINATE_BY if limit is None
else min(PAGINATE_BY, lim),
'offset': offs,
+ 'sort': '_id',
'records_format': records_format,
'include_total': 'false', # XXX: default() is broken
})
| {"golden_diff": "diff --git a/ckanext/datastore/controller.py b/ckanext/datastore/controller.py\n--- a/ckanext/datastore/controller.py\n+++ b/ckanext/datastore/controller.py\n@@ -137,6 +137,7 @@\n PAGINATE_BY if limit is None\n else min(PAGINATE_BY, lim),\n 'offset': offs,\n+ 'sort': '_id',\n 'records_format': records_format,\n 'include_total': 'false', # XXX: default() is broken\n })\n", "issue": "Datastore dump results are not the same as data in database\n### CKAN Version if known (or site URL)\r\n2.7.2\r\n\r\n### Please describe the expected behaviour\r\nDownloaded dataset is not consistent with the actual data! \r\n\r\n### Please describe the actual behaviour\r\nWe've upserted data using datastore api. The data is shown correctly in data explorer, Table view, API call or even querying postgres, But the data in downloaded csv file is different! Total number of records are the same but in csv we have random number of duplicates and missing data.\r\n\r\n### What steps can be taken to reproduce the issue? \r\nUpsert data using API. Download the CSV file using URL. Filter and compare downloaded data and data shown in data explorer or table view.\n", "before_files": [{"content": "# encoding: utf-8\n\nfrom ckan.plugins.toolkit import (\n Invalid,\n ObjectNotFound,\n NotAuthorized,\n get_action,\n get_validator,\n _,\n request,\n response,\n BaseController,\n abort,\n render,\n c,\n h,\n)\nfrom ckanext.datastore.writer import (\n csv_writer,\n tsv_writer,\n json_writer,\n xml_writer,\n)\nfrom ckan.logic import (\n tuplize_dict,\n parse_params,\n)\nimport ckan.lib.navl.dictization_functions as dict_fns\n\nfrom itertools import izip_longest\n\nint_validator = get_validator('int_validator')\nboolean_validator = get_validator('boolean_validator')\n\nDUMP_FORMATS = 'csv', 'tsv', 'json', 'xml'\nPAGINATE_BY = 32000\n\n\nclass DatastoreController(BaseController):\n def dump(self, resource_id):\n try:\n offset = int_validator(request.GET.get('offset', 0), {})\n except Invalid as e:\n abort(400, u'offset: ' + e.error)\n try:\n limit = int_validator(request.GET.get('limit'), {})\n except Invalid as e:\n abort(400, u'limit: ' + e.error)\n bom = boolean_validator(request.GET.get('bom'), {})\n fmt = request.GET.get('format', 'csv')\n\n if fmt not in DUMP_FORMATS:\n abort(400, _(\n u'format: must be one of %s') % u', '.join(DUMP_FORMATS))\n\n try:\n dump_to(\n resource_id,\n response,\n fmt=fmt,\n offset=offset,\n limit=limit,\n options={u'bom': bom})\n except ObjectNotFound:\n abort(404, _('DataStore resource not found'))\n\n def dictionary(self, id, resource_id):\n u'''data dictionary view: show/edit field labels and descriptions'''\n\n try:\n # resource_edit_base template uses these\n c.pkg_dict = get_action('package_show')(\n None, {'id': id})\n c.resource = get_action('resource_show')(\n None, {'id': resource_id})\n rec = get_action('datastore_search')(None, {\n 'resource_id': resource_id,\n 'limit': 0})\n except (ObjectNotFound, NotAuthorized):\n abort(404, _('Resource not found'))\n\n fields = [f for f in rec['fields'] if not f['id'].startswith('_')]\n\n if request.method == 'POST':\n data = dict_fns.unflatten(tuplize_dict(parse_params(\n request.params)))\n info = data.get(u'info')\n if not isinstance(info, list):\n info = []\n info = info[:len(fields)]\n\n get_action('datastore_create')(None, {\n 'resource_id': resource_id,\n 'force': True,\n 'fields': [{\n 'id': f['id'],\n 'type': f['type'],\n 'info': fi if isinstance(fi, dict) else {}\n } for f, fi in izip_longest(fields, info)]})\n\n 
h.flash_success(_('Data Dictionary saved. Any type overrides will '\n 'take effect when the resource is next uploaded '\n 'to DataStore'))\n h.redirect_to(\n controller='ckanext.datastore.controller:DatastoreController',\n action='dictionary',\n id=id,\n resource_id=resource_id)\n\n return render(\n 'datastore/dictionary.html',\n extra_vars={'fields': fields})\n\n\ndef dump_to(resource_id, output, fmt, offset, limit, options):\n if fmt == 'csv':\n writer_factory = csv_writer\n records_format = 'csv'\n elif fmt == 'tsv':\n writer_factory = tsv_writer\n records_format = 'tsv'\n elif fmt == 'json':\n writer_factory = json_writer\n records_format = 'lists'\n elif fmt == 'xml':\n writer_factory = xml_writer\n records_format = 'objects'\n\n def start_writer(fields):\n bom = options.get(u'bom', False)\n return writer_factory(output, fields, resource_id, bom)\n\n def result_page(offs, lim):\n return get_action('datastore_search')(None, {\n 'resource_id': resource_id,\n 'limit':\n PAGINATE_BY if limit is None\n else min(PAGINATE_BY, lim),\n 'offset': offs,\n 'records_format': records_format,\n 'include_total': 'false', # XXX: default() is broken\n })\n\n result = result_page(offset, limit)\n\n with start_writer(result['fields']) as wr:\n while True:\n if limit is not None and limit <= 0:\n break\n\n records = result['records']\n\n wr.write_records(records)\n\n if records_format == 'objects' or records_format == 'lists':\n if len(records) < PAGINATE_BY:\n break\n elif not records:\n break\n\n offset += PAGINATE_BY\n if limit is not None:\n limit -= PAGINATE_BY\n if limit <= 0:\n break\n\n result = result_page(offset, limit)\n", "path": "ckanext/datastore/controller.py"}], "after_files": [{"content": "# encoding: utf-8\n\nfrom ckan.plugins.toolkit import (\n Invalid,\n ObjectNotFound,\n NotAuthorized,\n get_action,\n get_validator,\n _,\n request,\n response,\n BaseController,\n abort,\n render,\n c,\n h,\n)\nfrom ckanext.datastore.writer import (\n csv_writer,\n tsv_writer,\n json_writer,\n xml_writer,\n)\nfrom ckan.logic import (\n tuplize_dict,\n parse_params,\n)\nimport ckan.lib.navl.dictization_functions as dict_fns\n\nfrom itertools import izip_longest\n\nint_validator = get_validator('int_validator')\nboolean_validator = get_validator('boolean_validator')\n\nDUMP_FORMATS = 'csv', 'tsv', 'json', 'xml'\nPAGINATE_BY = 32000\n\n\nclass DatastoreController(BaseController):\n def dump(self, resource_id):\n try:\n offset = int_validator(request.GET.get('offset', 0), {})\n except Invalid as e:\n abort(400, u'offset: ' + e.error)\n try:\n limit = int_validator(request.GET.get('limit'), {})\n except Invalid as e:\n abort(400, u'limit: ' + e.error)\n bom = boolean_validator(request.GET.get('bom'), {})\n fmt = request.GET.get('format', 'csv')\n\n if fmt not in DUMP_FORMATS:\n abort(400, _(\n u'format: must be one of %s') % u', '.join(DUMP_FORMATS))\n\n try:\n dump_to(\n resource_id,\n response,\n fmt=fmt,\n offset=offset,\n limit=limit,\n options={u'bom': bom})\n except ObjectNotFound:\n abort(404, _('DataStore resource not found'))\n\n def dictionary(self, id, resource_id):\n u'''data dictionary view: show/edit field labels and descriptions'''\n\n try:\n # resource_edit_base template uses these\n c.pkg_dict = get_action('package_show')(\n None, {'id': id})\n c.resource = get_action('resource_show')(\n None, {'id': resource_id})\n rec = get_action('datastore_search')(None, {\n 'resource_id': resource_id,\n 'limit': 0})\n except (ObjectNotFound, NotAuthorized):\n abort(404, _('Resource not found'))\n\n 
fields = [f for f in rec['fields'] if not f['id'].startswith('_')]\n\n if request.method == 'POST':\n data = dict_fns.unflatten(tuplize_dict(parse_params(\n request.params)))\n info = data.get(u'info')\n if not isinstance(info, list):\n info = []\n info = info[:len(fields)]\n\n get_action('datastore_create')(None, {\n 'resource_id': resource_id,\n 'force': True,\n 'fields': [{\n 'id': f['id'],\n 'type': f['type'],\n 'info': fi if isinstance(fi, dict) else {}\n } for f, fi in izip_longest(fields, info)]})\n\n h.flash_success(_('Data Dictionary saved. Any type overrides will '\n 'take effect when the resource is next uploaded '\n 'to DataStore'))\n h.redirect_to(\n controller='ckanext.datastore.controller:DatastoreController',\n action='dictionary',\n id=id,\n resource_id=resource_id)\n\n return render(\n 'datastore/dictionary.html',\n extra_vars={'fields': fields})\n\n\ndef dump_to(resource_id, output, fmt, offset, limit, options):\n if fmt == 'csv':\n writer_factory = csv_writer\n records_format = 'csv'\n elif fmt == 'tsv':\n writer_factory = tsv_writer\n records_format = 'tsv'\n elif fmt == 'json':\n writer_factory = json_writer\n records_format = 'lists'\n elif fmt == 'xml':\n writer_factory = xml_writer\n records_format = 'objects'\n\n def start_writer(fields):\n bom = options.get(u'bom', False)\n return writer_factory(output, fields, resource_id, bom)\n\n def result_page(offs, lim):\n return get_action('datastore_search')(None, {\n 'resource_id': resource_id,\n 'limit':\n PAGINATE_BY if limit is None\n else min(PAGINATE_BY, lim),\n 'offset': offs,\n 'sort': '_id',\n 'records_format': records_format,\n 'include_total': 'false', # XXX: default() is broken\n })\n\n result = result_page(offset, limit)\n\n with start_writer(result['fields']) as wr:\n while True:\n if limit is not None and limit <= 0:\n break\n\n records = result['records']\n\n wr.write_records(records)\n\n if records_format == 'objects' or records_format == 'lists':\n if len(records) < PAGINATE_BY:\n break\n elif not records:\n break\n\n offset += PAGINATE_BY\n if limit is not None:\n limit -= PAGINATE_BY\n if limit <= 0:\n break\n\n result = result_page(offset, limit)\n", "path": "ckanext/datastore/controller.py"}]} | 1,946 | 121 |
gh_patches_debug_1003 | rasdani/github-patches | git_diff | ipython__ipython-3556 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_margv for macros seems to be missing
At one point in time, arguments to macros could be obtained from _margv, but this seems to be missing now (https://github.com/ipython/ipython/wiki/Cookbook:-Macro-arguments).
I searched the entire ipython folder and only found _margv in the documentation in the macro.py file.
Just wondering if this is still supported.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/core/macro.py`
Content:
```
1 """Support for interactive macros in IPython"""
2
3 #*****************************************************************************
4 # Copyright (C) 2001-2005 Fernando Perez <[email protected]>
5 #
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
8 #*****************************************************************************
9
10 import re
11
12 from IPython.utils import py3compat
13 from IPython.utils.encoding import DEFAULT_ENCODING
14
15 coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
16
17 class Macro(object):
18 """Simple class to store the value of macros as strings.
19
20 Macro is just a callable that executes a string of IPython
21 input when called.
22
23 Args to macro are available in _margv list if you need them.
24 """
25
26 def __init__(self,code):
27 """store the macro value, as a single string which can be executed"""
28 lines = []
29 enc = None
30 for line in code.splitlines():
31 coding_match = coding_declaration.match(line)
32 if coding_match:
33 enc = coding_match.group(1)
34 else:
35 lines.append(line)
36 code = "\n".join(lines)
37 if isinstance(code, bytes):
38 code = code.decode(enc or DEFAULT_ENCODING)
39 self.value = code + '\n'
40
41 def __str__(self):
42 return py3compat.unicode_to_str(self.value)
43
44 def __unicode__(self):
45 return self.value
46
47 def __repr__(self):
48 return 'IPython.macro.Macro(%s)' % repr(self.value)
49
50 def __getstate__(self):
51 """ needed for safe pickling via %store """
52 return {'value': self.value}
53
54 def __add__(self, other):
55 if isinstance(other, Macro):
56 return Macro(self.value + other.value)
57 elif isinstance(other, basestring):
58 return Macro(self.value + other)
59 raise TypeError
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/core/macro.py b/IPython/core/macro.py
--- a/IPython/core/macro.py
+++ b/IPython/core/macro.py
@@ -19,8 +19,6 @@
Macro is just a callable that executes a string of IPython
input when called.
-
- Args to macro are available in _margv list if you need them.
"""
def __init__(self,code):
| {"golden_diff": "diff --git a/IPython/core/macro.py b/IPython/core/macro.py\n--- a/IPython/core/macro.py\n+++ b/IPython/core/macro.py\n@@ -19,8 +19,6 @@\n \n Macro is just a callable that executes a string of IPython\n input when called.\n- \n- Args to macro are available in _margv list if you need them.\n \"\"\"\n \n def __init__(self,code):\n", "issue": " _margv for macros seems to be missing\nAt one point in time, arguments to macro's could be obtained from _margv , but this seems to be missing now ( https://github.com/ipython/ipython/wiki/Cookbook:-Macro-arguments ). \n\nI searched the entire ipython folder and only found _margv in the documentation in the macro.py file. \n\nJust wondering if this is still supported. \n\n", "before_files": [{"content": "\"\"\"Support for interactive macros in IPython\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2001-2005 Fernando Perez <[email protected]>\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#*****************************************************************************\n\nimport re\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\ncoding_declaration = re.compile(r\"#\\s*coding[:=]\\s*([-\\w.]+)\")\n\nclass Macro(object):\n \"\"\"Simple class to store the value of macros as strings.\n\n Macro is just a callable that executes a string of IPython\n input when called.\n \n Args to macro are available in _margv list if you need them.\n \"\"\"\n\n def __init__(self,code):\n \"\"\"store the macro value, as a single string which can be executed\"\"\"\n lines = []\n enc = None\n for line in code.splitlines():\n coding_match = coding_declaration.match(line)\n if coding_match:\n enc = coding_match.group(1)\n else:\n lines.append(line)\n code = \"\\n\".join(lines)\n if isinstance(code, bytes):\n code = code.decode(enc or DEFAULT_ENCODING)\n self.value = code + '\\n'\n \n def __str__(self):\n return py3compat.unicode_to_str(self.value)\n \n def __unicode__(self):\n return self.value\n\n def __repr__(self):\n return 'IPython.macro.Macro(%s)' % repr(self.value)\n \n def __getstate__(self):\n \"\"\" needed for safe pickling via %store \"\"\"\n return {'value': self.value}\n \n def __add__(self, other):\n if isinstance(other, Macro):\n return Macro(self.value + other.value)\n elif isinstance(other, basestring):\n return Macro(self.value + other)\n raise TypeError\n", "path": "IPython/core/macro.py"}], "after_files": [{"content": "\"\"\"Support for interactive macros in IPython\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2001-2005 Fernando Perez <[email protected]>\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#*****************************************************************************\n\nimport re\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\ncoding_declaration = re.compile(r\"#\\s*coding[:=]\\s*([-\\w.]+)\")\n\nclass Macro(object):\n \"\"\"Simple class to store the value of macros as strings.\n\n Macro is just a callable that executes a string of IPython\n input when called.\n \"\"\"\n\n def __init__(self,code):\n \"\"\"store the macro value, as a single string which can be executed\"\"\"\n lines = []\n enc = None\n for line in code.splitlines():\n coding_match = coding_declaration.match(line)\n if coding_match:\n enc = coding_match.group(1)\n else:\n lines.append(line)\n code = \"\\n\".join(lines)\n if isinstance(code, bytes):\n code = code.decode(enc or DEFAULT_ENCODING)\n self.value = code + '\\n'\n \n def __str__(self):\n return py3compat.unicode_to_str(self.value)\n \n def __unicode__(self):\n return self.value\n\n def __repr__(self):\n return 'IPython.macro.Macro(%s)' % repr(self.value)\n \n def __getstate__(self):\n \"\"\" needed for safe pickling via %store \"\"\"\n return {'value': self.value}\n \n def __add__(self, other):\n if isinstance(other, Macro):\n return Macro(self.value + other.value)\n elif isinstance(other, basestring):\n return Macro(self.value + other)\n raise TypeError\n", "path": "IPython/core/macro.py"}]} | 878 | 99 |
gh_patches_debug_2751 | rasdani/github-patches | git_diff | abey79__vpype-607 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default to QT_QPA_PLATFORM=xcb on Linux/Wayland
If we detect a linux box running on wayland, we should force Qt to use the xcb platform as the wayland backend doesn't work properly with moderngl.
This may be a good way to detect wayland:
```
XDG_SESSION_TYPE=wayland
```
Relevant discussions:
- https://github.com/abey79/vsketch/issues/353
- https://discord.com/channels/550302843777712148/696045774970028062/1072436292798926868
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vpype_viewer/qtviewer/__init__.py`
Content:
```
1 from .viewer import *
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vpype_viewer/qtviewer/__init__.py b/vpype_viewer/qtviewer/__init__.py
--- a/vpype_viewer/qtviewer/__init__.py
+++ b/vpype_viewer/qtviewer/__init__.py
@@ -1 +1,18 @@
+def _check_wayland():
+ """Fix QT env variable on Wayland-based systems.
+
+ See https://github.com/abey79/vpype/issues/596
+ """
+ import os
+ import sys
+
+ if sys.platform.startswith("linux"):
+ if os.environ.get("XDG_SESSION_TYPE", "") == "wayland":
+ if "QT_QPA_PLATFORM" not in os.environ:
+ os.environ["QT_QPA_PLATFORM"] = "xcb"
+
+
+_check_wayland()
+
+
from .viewer import *
| {"golden_diff": "diff --git a/vpype_viewer/qtviewer/__init__.py b/vpype_viewer/qtviewer/__init__.py\n--- a/vpype_viewer/qtviewer/__init__.py\n+++ b/vpype_viewer/qtviewer/__init__.py\n@@ -1 +1,18 @@\n+def _check_wayland():\n+ \"\"\"Fix QT env variable on Wayland-based systems.\n+\n+ See https://github.com/abey79/vpype/issues/596\n+ \"\"\"\n+ import os\n+ import sys\n+\n+ if sys.platform.startswith(\"linux\"):\n+ if os.environ.get(\"XDG_SESSION_TYPE\", \"\") == \"wayland\":\n+ if \"QT_QPA_PLATFORM\" not in os.environ:\n+ os.environ[\"QT_QPA_PLATFORM\"] = \"xcb\"\n+\n+\n+_check_wayland()\n+\n+\n from .viewer import *\n", "issue": "Default to QT_QPA_PLATFORM=xcb on Linux/Wayland\nIf we detect a linux box running on wayland, we should force Qt to use the xcb platform as the wayland backend doesn't work properly with moderngl.\r\n\r\nThis maybe a good way to detect wayland:\r\n```\r\nXDG_SESSION_TYPE=wayland\r\n```\r\n\r\nRelevant discussions:\r\n- https://github.com/abey79/vsketch/issues/353\r\n- https://discord.com/channels/550302843777712148/696045774970028062/1072436292798926868\n", "before_files": [{"content": "from .viewer import *\n", "path": "vpype_viewer/qtviewer/__init__.py"}], "after_files": [{"content": "def _check_wayland():\n \"\"\"Fix QT env variable on Wayland-based systems.\n\n See https://github.com/abey79/vpype/issues/596\n \"\"\"\n import os\n import sys\n\n if sys.platform.startswith(\"linux\"):\n if os.environ.get(\"XDG_SESSION_TYPE\", \"\") == \"wayland\":\n if \"QT_QPA_PLATFORM\" not in os.environ:\n os.environ[\"QT_QPA_PLATFORM\"] = \"xcb\"\n\n\n_check_wayland()\n\n\nfrom .viewer import *\n", "path": "vpype_viewer/qtviewer/__init__.py"}]} | 428 | 186 |
gh_patches_debug_16213 | rasdani/github-patches | git_diff | conan-io__conan-center-index-925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] asio/1.13.0: broken on Android
Commit: https://github.com/conan-io/conan-center-index/commit/804be2ad15b2139960fe10efcd6667d1f2dd2e98 breaks Android because there is no `-lpthread`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/asio/all/conanfile.py`
Content:
```
1 import os
2 from conans import ConanFile, tools
3
4
5 class Asio(ConanFile):
6 name = "asio"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "http://think-async.com/Asio"
9 description = "Asio is a cross-platform C++ library for network and low-level I/O"
10 topics = ("conan", "asio", "network", "io", "low-level")
11 license = "BSL-1.0"
12
13 no_copy_source = True
14 _source_subfolder = "source_subfolder"
15
16 def source(self):
17 tools.get(**self.conan_data["sources"][self.version])
18 archive_name = "asio-" + self.version.replace(".", "-")
19 extracted_name = "asio-" + archive_name
20 os.rename(extracted_name, self._source_subfolder)
21
22 def package(self):
23 root_dir = os.path.join(self._source_subfolder, self.name)
24 include_dir = os.path.join(root_dir, "include")
25 self.copy(pattern="LICENSE_1_0.txt", dst="licenses", src=root_dir)
26 self.copy(pattern="*.hpp", dst="include", src=include_dir)
27 self.copy(pattern="*.ipp", dst="include", src=include_dir)
28
29 def package_info(self):
30 self.cpp_info.defines.append('ASIO_STANDALONE')
31 if tools.os_info.is_linux:
32 self.cpp_info.libs.append('pthread')
33
34 def package_id(self):
35 self.info.header_only()
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/asio/all/conanfile.py b/recipes/asio/all/conanfile.py
--- a/recipes/asio/all/conanfile.py
+++ b/recipes/asio/all/conanfile.py
@@ -8,6 +8,7 @@
homepage = "http://think-async.com/Asio"
description = "Asio is a cross-platform C++ library for network and low-level I/O"
topics = ("conan", "asio", "network", "io", "low-level")
+ settings = "os"
license = "BSL-1.0"
no_copy_source = True
@@ -28,7 +29,7 @@
def package_info(self):
self.cpp_info.defines.append('ASIO_STANDALONE')
- if tools.os_info.is_linux:
+ if str(self.settings.os) in ["Linux", "Android"]:
self.cpp_info.libs.append('pthread')
def package_id(self):
| {"golden_diff": "diff --git a/recipes/asio/all/conanfile.py b/recipes/asio/all/conanfile.py\n--- a/recipes/asio/all/conanfile.py\n+++ b/recipes/asio/all/conanfile.py\n@@ -8,6 +8,7 @@\n homepage = \"http://think-async.com/Asio\"\n description = \"Asio is a cross-platform C++ library for network and low-level I/O\"\n topics = (\"conan\", \"asio\", \"network\", \"io\", \"low-level\")\n+ settings = \"os\"\n license = \"BSL-1.0\"\n \n no_copy_source = True\n@@ -28,7 +29,7 @@\n \n def package_info(self):\n self.cpp_info.defines.append('ASIO_STANDALONE')\n- if tools.os_info.is_linux:\n+ if str(self.settings.os) in [\"Linux\", \"Android\"]:\n self.cpp_info.libs.append('pthread')\n \n def package_id(self):\n", "issue": "[package] asio/1.13.0: broken on Andriod\nCommit: https://github.com/conan-io/conan-center-index/commit/804be2ad15b2139960fe10efcd6667d1f2dd2e98 breaks Android because there is no `-lpthread` \n", "before_files": [{"content": "import os\nfrom conans import ConanFile, tools\n\n\nclass Asio(ConanFile):\n name = \"asio\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://think-async.com/Asio\"\n description = \"Asio is a cross-platform C++ library for network and low-level I/O\"\n topics = (\"conan\", \"asio\", \"network\", \"io\", \"low-level\")\n license = \"BSL-1.0\"\n\n no_copy_source = True\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n archive_name = \"asio-\" + self.version.replace(\".\", \"-\")\n extracted_name = \"asio-\" + archive_name\n os.rename(extracted_name, self._source_subfolder)\n\n def package(self):\n root_dir = os.path.join(self._source_subfolder, self.name)\n include_dir = os.path.join(root_dir, \"include\")\n self.copy(pattern=\"LICENSE_1_0.txt\", dst=\"licenses\", src=root_dir)\n self.copy(pattern=\"*.hpp\", dst=\"include\", src=include_dir)\n self.copy(pattern=\"*.ipp\", dst=\"include\", src=include_dir)\n\n def package_info(self):\n self.cpp_info.defines.append('ASIO_STANDALONE')\n if tools.os_info.is_linux:\n self.cpp_info.libs.append('pthread')\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/asio/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conans import ConanFile, tools\n\n\nclass Asio(ConanFile):\n name = \"asio\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://think-async.com/Asio\"\n description = \"Asio is a cross-platform C++ library for network and low-level I/O\"\n topics = (\"conan\", \"asio\", \"network\", \"io\", \"low-level\")\n settings = \"os\"\n license = \"BSL-1.0\"\n\n no_copy_source = True\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n archive_name = \"asio-\" + self.version.replace(\".\", \"-\")\n extracted_name = \"asio-\" + archive_name\n os.rename(extracted_name, self._source_subfolder)\n\n def package(self):\n root_dir = os.path.join(self._source_subfolder, self.name)\n include_dir = os.path.join(root_dir, \"include\")\n self.copy(pattern=\"LICENSE_1_0.txt\", dst=\"licenses\", src=root_dir)\n self.copy(pattern=\"*.hpp\", dst=\"include\", src=include_dir)\n self.copy(pattern=\"*.ipp\", dst=\"include\", src=include_dir)\n\n def package_info(self):\n self.cpp_info.defines.append('ASIO_STANDALONE')\n if str(self.settings.os) in [\"Linux\", \"Android\"]:\n self.cpp_info.libs.append('pthread')\n\n def package_id(self):\n self.info.header_only()\n", "path": 
"recipes/asio/all/conanfile.py"}]} | 733 | 213 |
gh_patches_debug_18946 | rasdani/github-patches | git_diff | aws__aws-cli-6730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[v2] `aws sso login` should not require a fully-configured profile
Currently, `aws sso login` operates on a particular profile, even [requiring that `sso_account_id` and `sso_role_name` be present in the profile](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/login.py#L32) even though it does not use them, [only fetching the token](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/utils.py#L45) (as it should, because AWS SSO-capable SDKs can use the token to get credentials for the appropriate account and role).
At the very least, `sso_account_id` and `sso_role_name` should be removed from the list of required config variables, which would allow a profile like:
```ini
[profile login]
sso_start_url = https://d-2e69cb2b10.awsapps.com/start
sso_region = us-east-2
```
and then `aws sso login --profile login` would just work without requiring a specific account and role that won't be used anyway.
This matters because not all users in an organization have the same permissions, so there's not a good way to provide them all with a single working config file to start from.
A better alternative would be to have AWS SSO configuration be explicit in the config file, perhaps with a new section type:
```ini
[sso default]
sso_start_url = https://d-2e69cb2b10.awsapps.com/start
sso_region = us-east-2
```
Or, `aws sso login` should check the configured profiles and if there's only one AWS SSO configuration (i.e., they all use the same start URL and region), it should just use that.
I've implemented the latter in [`aws-sso-util login`](https://github.com/benkehoe/aws-sso-util#logging-in-and-out).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/sso/login.py`
Content:
```
1 # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 from awscli.customizations.commands import BasicCommand
14 from awscli.customizations.sso.utils import do_sso_login
15 from awscli.customizations.utils import uni_print
16 from awscli.customizations.exceptions import ConfigurationError
17
18
19 class InvalidSSOConfigError(ConfigurationError):
20 pass
21
22
23 class LoginCommand(BasicCommand):
24 NAME = 'login'
25 DESCRIPTION = (
26 'Retrieves and caches an AWS SSO access token to exchange for AWS '
27 'credentials. To login, the requested profile must have first been '
28 'setup using ``aws configure sso``. Each time the ``login`` command '
29 'is called, a new SSO access token will be retrieved.'
30 )
31 ARG_TABLE = []
32 _REQUIRED_SSO_CONFIG_VARS = [
33 'sso_start_url',
34 'sso_region',
35 'sso_role_name',
36 'sso_account_id',
37 ]
38
39 def _run_main(self, parsed_args, parsed_globals):
40 sso_config = self._get_sso_config()
41 do_sso_login(
42 session=self._session,
43 sso_region=sso_config['sso_region'],
44 start_url=sso_config['sso_start_url'],
45 force_refresh=True
46 )
47 success_msg = 'Successully logged into Start URL: %s\n'
48 uni_print(success_msg % sso_config['sso_start_url'])
49 return 0
50
51 def _get_sso_config(self):
52 scoped_config = self._session.get_scoped_config()
53 sso_config = {}
54 missing_vars = []
55 for config_var in self._REQUIRED_SSO_CONFIG_VARS:
56 if config_var not in scoped_config:
57 missing_vars.append(config_var)
58 else:
59 sso_config[config_var] = scoped_config[config_var]
60 if missing_vars:
61 raise InvalidSSOConfigError(
62 'Missing the following required SSO configuration values: %s. '
63 'To make sure this profile is properly configured to use SSO, '
64 'please run: aws configure sso' % ', '.join(missing_vars)
65 )
66 return sso_config
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py
--- a/awscli/customizations/sso/login.py
+++ b/awscli/customizations/sso/login.py
@@ -26,14 +26,15 @@
'Retrieves and caches an AWS SSO access token to exchange for AWS '
'credentials. To login, the requested profile must have first been '
'setup using ``aws configure sso``. Each time the ``login`` command '
- 'is called, a new SSO access token will be retrieved.'
+ 'is called, a new SSO access token will be retrieved. Please note '
+ 'that only one login session can be active for a given SSO Start URL '
+ 'and creating multiple profiles does not allow for multiple users to '
+ 'be authenticated against the same SSO Start URL.'
)
ARG_TABLE = []
_REQUIRED_SSO_CONFIG_VARS = [
'sso_start_url',
'sso_region',
- 'sso_role_name',
- 'sso_account_id',
]
def _run_main(self, parsed_args, parsed_globals):
| {"golden_diff": "diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py\n--- a/awscli/customizations/sso/login.py\n+++ b/awscli/customizations/sso/login.py\n@@ -26,14 +26,15 @@\n 'Retrieves and caches an AWS SSO access token to exchange for AWS '\n 'credentials. To login, the requested profile must have first been '\n 'setup using ``aws configure sso``. Each time the ``login`` command '\n- 'is called, a new SSO access token will be retrieved.'\n+ 'is called, a new SSO access token will be retrieved. Please note '\n+ 'that only one login session can be active for a given SSO Start URL '\n+ 'and creating multiple profiles does not allow for multiple users to '\n+ 'be authenticated against the same SSO Start URL.'\n )\n ARG_TABLE = []\n _REQUIRED_SSO_CONFIG_VARS = [\n 'sso_start_url',\n 'sso_region',\n- 'sso_role_name',\n- 'sso_account_id',\n ]\n \n def _run_main(self, parsed_args, parsed_globals):\n", "issue": "[v2] `aws sso login` should not require a fully-configured profile\nCurrently, `aws sso login` operates on a particular profile, even [requiring that `sso_account_id` and `sso_role_name` be present in the profile](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/login.py#L32) even though it does not use them, [only fetching the token](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/utils.py#L45) (as it should, because AWS SSO-capable SDKs can use the token to get credentials for the appropriate account and role).\r\n\r\nAt the very least, `sso_account_id` and `sso_role_name` should be removed from the list of required config variables, which would allow a profile like:\r\n```ini\r\n[profile login]\r\nsso_start_url = https://d-2e69cb2b10.awsapps.com/start\r\nsso_region = us-east-2\r\n```\r\nand then `aws sso login --profile login` would just work without requiring a specific account and role that won't be used anyway.\r\n\r\nThis matters because not all users in an organization have the same permissions, so there's not a good way to provide them all with a single working config file to start from.\r\n\r\nA better alternative would be to have AWS SSO configuration be explicit in the config file, perhaps with a new section type:\r\n```ini\r\n[sso default]\r\nsso_start_url = https://d-2e69cb2b10.awsapps.com/start\r\nsso_region = us-east-2\r\n```\r\n\r\nOr, `aws sso login` should check the configured profiles and if there's only one AWS SSO configuration (i.e., they all use the same start URL and region), it should just use that.\r\n\r\nI've implemented the latter in [`aws-sso-util login`](https://github.com/benkehoe/aws-sso-util#logging-in-and-out).\n", "before_files": [{"content": "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.sso.utils import do_sso_login\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.exceptions import ConfigurationError\n\n\nclass InvalidSSOConfigError(ConfigurationError):\n pass\n\n\nclass LoginCommand(BasicCommand):\n NAME = 'login'\n DESCRIPTION = (\n 'Retrieves and caches an AWS SSO access token to exchange for AWS '\n 'credentials. To login, the requested profile must have first been '\n 'setup using ``aws configure sso``. Each time the ``login`` command '\n 'is called, a new SSO access token will be retrieved.'\n )\n ARG_TABLE = []\n _REQUIRED_SSO_CONFIG_VARS = [\n 'sso_start_url',\n 'sso_region',\n 'sso_role_name',\n 'sso_account_id',\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n sso_config = self._get_sso_config()\n do_sso_login(\n session=self._session,\n sso_region=sso_config['sso_region'],\n start_url=sso_config['sso_start_url'],\n force_refresh=True\n )\n success_msg = 'Successully logged into Start URL: %s\\n'\n uni_print(success_msg % sso_config['sso_start_url'])\n return 0\n\n def _get_sso_config(self):\n scoped_config = self._session.get_scoped_config()\n sso_config = {}\n missing_vars = []\n for config_var in self._REQUIRED_SSO_CONFIG_VARS:\n if config_var not in scoped_config:\n missing_vars.append(config_var)\n else:\n sso_config[config_var] = scoped_config[config_var]\n if missing_vars:\n raise InvalidSSOConfigError(\n 'Missing the following required SSO configuration values: %s. '\n 'To make sure this profile is properly configured to use SSO, '\n 'please run: aws configure sso' % ', '.join(missing_vars)\n )\n return sso_config\n", "path": "awscli/customizations/sso/login.py"}], "after_files": [{"content": "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.sso.utils import do_sso_login\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.exceptions import ConfigurationError\n\n\nclass InvalidSSOConfigError(ConfigurationError):\n pass\n\n\nclass LoginCommand(BasicCommand):\n NAME = 'login'\n DESCRIPTION = (\n 'Retrieves and caches an AWS SSO access token to exchange for AWS '\n 'credentials. To login, the requested profile must have first been '\n 'setup using ``aws configure sso``. Each time the ``login`` command '\n 'is called, a new SSO access token will be retrieved. 
Please note '\n 'that only one login session can be active for a given SSO Start URL '\n 'and creating multiple profiles does not allow for multiple users to '\n 'be authenticated against the same SSO Start URL.'\n )\n ARG_TABLE = []\n _REQUIRED_SSO_CONFIG_VARS = [\n 'sso_start_url',\n 'sso_region',\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n sso_config = self._get_sso_config()\n do_sso_login(\n session=self._session,\n sso_region=sso_config['sso_region'],\n start_url=sso_config['sso_start_url'],\n force_refresh=True\n )\n success_msg = 'Successully logged into Start URL: %s\\n'\n uni_print(success_msg % sso_config['sso_start_url'])\n return 0\n\n def _get_sso_config(self):\n scoped_config = self._session.get_scoped_config()\n sso_config = {}\n missing_vars = []\n for config_var in self._REQUIRED_SSO_CONFIG_VARS:\n if config_var not in scoped_config:\n missing_vars.append(config_var)\n else:\n sso_config[config_var] = scoped_config[config_var]\n if missing_vars:\n raise InvalidSSOConfigError(\n 'Missing the following required SSO configuration values: %s. '\n 'To make sure this profile is properly configured to use SSO, '\n 'please run: aws configure sso' % ', '.join(missing_vars)\n )\n return sso_config\n", "path": "awscli/customizations/sso/login.py"}]} | 1,456 | 253 |
gh_patches_debug_19864 | rasdani/github-patches | git_diff | watchdogpolska__feder-433 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How attachments sent to us are displayed
I don't know why it is that when I open the correspondence with a given municipality in a given monitoring, I can see the attachments next to the emails:

But once I open a specific message, they are not there:

Can this be changed so that the attachments are also visible when viewing a specific message?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `feder/letters/factories.py`
Content:
```
1 from email.mime.text import MIMEText
2
3 import factory
4 import factory.fuzzy
5 from django.core.mail import EmailMessage
6 from factory.django import FileField
7
8 from feder.cases.factories import CaseFactory
9 from feder.institutions.factories import InstitutionFactory
10 from feder.records.factories import RecordFactory
11 from feder.users.factories import UserFactory
12 from .models import Letter
13
14
15 class MailField(FileField):
16 DEFAULT_FILENAME = 'data.eml'
17
18 def _make_data(self, params):
19 msg = MIMEText("Lorem ipsum")
20 msg['Subject'] = "Example message"
21 msg['From'] = "[email protected]"
22 msg['To'] = "[email protected]"
23
24 return params.get('data', msg.as_string().encode('utf-8'))
25
26
27 class LetterFactory(factory.django.DjangoModelFactory):
28 record = factory.SubFactory(RecordFactory)
29 title = factory.Sequence('title-letter-{0}'.format)
30 body = factory.Sequence('body-{0}'.format)
31 quote = factory.Sequence('quote-{0}'.format)
32
33 class Meta:
34 model = Letter
35
36
37 class IncomingLetterFactory(LetterFactory):
38 author_institution = factory.SubFactory(InstitutionFactory)
39 email = factory.Sequence('xxx-{0}@example.com'.format)
40 note = factory.fuzzy.FuzzyText()
41 eml = MailField()
42
43
44 class OutgoingLetterFactory(LetterFactory):
45 author_user = factory.SubFactory(UserFactory)
46 is_draft = False
47 eml = MailField()
48
49
50 class DraftLetterFactory(OutgoingLetterFactory):
51 is_draft = True
52
53
54 class SendOutgoingLetterFactory(LetterFactory):
55 author_user = factory.SubFactory(UserFactory)
56
57 is_send_yes = factory.PostGenerationMethodCall('send')
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/feder/letters/factories.py b/feder/letters/factories.py
--- a/feder/letters/factories.py
+++ b/feder/letters/factories.py
@@ -1,15 +1,12 @@
from email.mime.text import MIMEText
-import factory
import factory.fuzzy
-from django.core.mail import EmailMessage
from factory.django import FileField
-from feder.cases.factories import CaseFactory
from feder.institutions.factories import InstitutionFactory
from feder.records.factories import RecordFactory
from feder.users.factories import UserFactory
-from .models import Letter
+from .models import Letter, Attachment
class MailField(FileField):
@@ -55,3 +52,11 @@
author_user = factory.SubFactory(UserFactory)
is_send_yes = factory.PostGenerationMethodCall('send')
+
+
+class AttachmentFactory(factory.django.DjangoModelFactory):
+ letter = factory.SubFactory(InstitutionFactory)
+ attachment = factory.django.FileField()
+
+ class Meta:
+ model = Attachment
| {"golden_diff": "diff --git a/feder/letters/factories.py b/feder/letters/factories.py\n--- a/feder/letters/factories.py\n+++ b/feder/letters/factories.py\n@@ -1,15 +1,12 @@\n from email.mime.text import MIMEText\n \n-import factory\n import factory.fuzzy\n-from django.core.mail import EmailMessage\n from factory.django import FileField\n \n-from feder.cases.factories import CaseFactory\n from feder.institutions.factories import InstitutionFactory\n from feder.records.factories import RecordFactory\n from feder.users.factories import UserFactory\n-from .models import Letter\n+from .models import Letter, Attachment\n \n \n class MailField(FileField):\n@@ -55,3 +52,11 @@\n author_user = factory.SubFactory(UserFactory)\n \n is_send_yes = factory.PostGenerationMethodCall('send')\n+\n+\n+class AttachmentFactory(factory.django.DjangoModelFactory):\n+ letter = factory.SubFactory(InstitutionFactory)\n+ attachment = factory.django.FileField()\n+\n+ class Meta:\n+ model = Attachment\n", "issue": "spos\u00f3b widzenia za\u0142\u0105cznik\u00f3w do nas wys\u0142anych\nNie wiem czemu jest tak, \u017ce ja wchodz\u0119 w korespondencj\u0119 z dan\u0105 gmin\u0105 w danym monitoringu, to przy mailach widz\u0119 za\u0142\u0105czniki:\r\n\r\n\r\n\r\nA jak ju\u017c wejd\u0119 z konkretn\u0105 wiadomo\u015b\u0107, to ich nie ma:\r\n\r\n\r\n\r\nCzy to si\u0119 da zmieni\u0107, \u017ceby po wej\u015bciu z konkretn\u0105 wiadomo\u015b\u0107 te\u017c by\u0142o wida\u0107 te za\u0142\u0105czniki?\n", "before_files": [{"content": "from email.mime.text import MIMEText\n\nimport factory\nimport factory.fuzzy\nfrom django.core.mail import EmailMessage\nfrom factory.django import FileField\n\nfrom feder.cases.factories import CaseFactory\nfrom feder.institutions.factories import InstitutionFactory\nfrom feder.records.factories import RecordFactory\nfrom feder.users.factories import UserFactory\nfrom .models import Letter\n\n\nclass MailField(FileField):\n DEFAULT_FILENAME = 'data.eml'\n\n def _make_data(self, params):\n msg = MIMEText(\"Lorem ipsum\")\n msg['Subject'] = \"Example message\"\n msg['From'] = \"[email protected]\"\n msg['To'] = \"[email protected]\"\n\n return params.get('data', msg.as_string().encode('utf-8'))\n\n\nclass LetterFactory(factory.django.DjangoModelFactory):\n record = factory.SubFactory(RecordFactory)\n title = factory.Sequence('title-letter-{0}'.format)\n body = factory.Sequence('body-{0}'.format)\n quote = factory.Sequence('quote-{0}'.format)\n\n class Meta:\n model = Letter\n\n\nclass IncomingLetterFactory(LetterFactory):\n author_institution = factory.SubFactory(InstitutionFactory)\n email = factory.Sequence('xxx-{0}@example.com'.format)\n note = factory.fuzzy.FuzzyText()\n eml = MailField()\n\n\nclass OutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n is_draft = False\n eml = MailField()\n\n\nclass DraftLetterFactory(OutgoingLetterFactory):\n is_draft = True\n\n\nclass SendOutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n\n is_send_yes = factory.PostGenerationMethodCall('send')\n", "path": "feder/letters/factories.py"}], "after_files": [{"content": "from email.mime.text import MIMEText\n\nimport factory.fuzzy\nfrom factory.django import FileField\n\nfrom feder.institutions.factories import InstitutionFactory\nfrom feder.records.factories import RecordFactory\nfrom feder.users.factories import UserFactory\nfrom .models import Letter, Attachment\n\n\nclass MailField(FileField):\n DEFAULT_FILENAME = 'data.eml'\n\n 
def _make_data(self, params):\n msg = MIMEText(\"Lorem ipsum\")\n msg['Subject'] = \"Example message\"\n msg['From'] = \"[email protected]\"\n msg['To'] = \"[email protected]\"\n\n return params.get('data', msg.as_string().encode('utf-8'))\n\n\nclass LetterFactory(factory.django.DjangoModelFactory):\n record = factory.SubFactory(RecordFactory)\n title = factory.Sequence('title-letter-{0}'.format)\n body = factory.Sequence('body-{0}'.format)\n quote = factory.Sequence('quote-{0}'.format)\n\n class Meta:\n model = Letter\n\n\nclass IncomingLetterFactory(LetterFactory):\n author_institution = factory.SubFactory(InstitutionFactory)\n email = factory.Sequence('xxx-{0}@example.com'.format)\n note = factory.fuzzy.FuzzyText()\n eml = MailField()\n\n\nclass OutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n is_draft = False\n eml = MailField()\n\n\nclass DraftLetterFactory(OutgoingLetterFactory):\n is_draft = True\n\n\nclass SendOutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n\n is_send_yes = factory.PostGenerationMethodCall('send')\n\n\nclass AttachmentFactory(factory.django.DjangoModelFactory):\n letter = factory.SubFactory(InstitutionFactory)\n attachment = factory.django.FileField()\n\n class Meta:\n model = Attachment\n", "path": "feder/letters/factories.py"}]} | 979 | 228 |
gh_patches_debug_13983 | rasdani/github-patches | git_diff | Kinto__kinto-1620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IntegrityError if you try to create two records with the same last_modified value in a batch
```
demo:
collections:
demo:
records:
abc:
data:
last_modified: 123
efg:
data:
last_modified: 123
```
```
$ kinto-wizard load -s https://kinto.dev.mozaws.net/v1 -a admin:admin demo.yaml --force -b demo -c demo
kinto_http.exceptions.KintoException: POST /v1/batch - 503 503 -
{'message': 'Service temporary unavailable due to overloading or maintenance, please retry later.',
'code': 503, 'errno': 201, 'error': 'Service Unavailable'}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/views/batch.py`
Content:
```
1 import logging
2
3 import colander
4 from cornice.validators import colander_validator
5 from pyramid import httpexceptions
6 from pyramid.security import NO_PERMISSION_REQUIRED
7
8 from kinto.core import errors
9 from kinto.core import Service
10 from kinto.core.errors import ErrorSchema
11 from kinto.core.utils import merge_dicts, build_request, build_response
12
13
14 subrequest_logger = logging.getLogger('subrequest.summary')
15
16 valid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',
17 'POST', 'PUT', 'PATCH'))
18
19
20 def string_values(node, cstruct):
21 """Validate that a ``colander.Mapping`` only has strings in its values.
22
23 .. warning::
24
25 Should be associated to a ``colander.Mapping`` schema node.
26 """
27 are_strings = [isinstance(v, str) for v in cstruct.values()]
28 if not all(are_strings):
29 error_msg = '{} contains non string value'.format(cstruct)
30 raise colander.Invalid(node, error_msg)
31
32
33 class BatchRequestSchema(colander.MappingSchema):
34 method = colander.SchemaNode(colander.String(),
35 validator=valid_http_method,
36 missing=colander.drop)
37 path = colander.SchemaNode(colander.String(),
38 validator=colander.Regex('^/'))
39 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
40 validator=string_values,
41 missing=colander.drop)
42 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
43 missing=colander.drop)
44
45 @staticmethod
46 def schema_type():
47 return colander.Mapping(unknown='raise')
48
49
50 class BatchPayloadSchema(colander.MappingSchema):
51 defaults = BatchRequestSchema(missing=colander.drop).clone()
52 requests = colander.SchemaNode(colander.Sequence(),
53 BatchRequestSchema())
54
55 @staticmethod
56 def schema_type():
57 return colander.Mapping(unknown='raise')
58
59 def __init__(self, *args, **kwargs):
60 super().__init__(*args, **kwargs)
61 # On defaults, path is not mandatory.
62 self.get('defaults').get('path').missing = colander.drop
63
64 def deserialize(self, cstruct=colander.null):
65 """Preprocess received data to carefully merge defaults.
66 """
67 if cstruct is not colander.null:
68 defaults = cstruct.get('defaults')
69 requests = cstruct.get('requests')
70 if isinstance(defaults, dict) and isinstance(requests, list):
71 for request in requests:
72 if isinstance(request, dict):
73 merge_dicts(request, defaults)
74 return super().deserialize(cstruct)
75
76
77 class BatchRequest(colander.MappingSchema):
78 body = BatchPayloadSchema()
79
80
81 class BatchResponseSchema(colander.MappingSchema):
82 status = colander.SchemaNode(colander.Integer())
83 path = colander.SchemaNode(colander.String())
84 headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),
85 validator=string_values,
86 missing=colander.drop)
87 body = colander.SchemaNode(colander.Mapping(unknown='preserve'),
88 missing=colander.drop)
89
90
91 class BatchResponseBodySchema(colander.MappingSchema):
92 responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))
93
94
95 class BatchResponse(colander.MappingSchema):
96 body = BatchResponseBodySchema()
97
98
99 class ErrorResponseSchema(colander.MappingSchema):
100 body = ErrorSchema()
101
102
103 batch_responses = {
104 '200': BatchResponse(description='Return a list of operation responses.'),
105 '400': ErrorResponseSchema(description='The request was badly formatted.'),
106 'default': ErrorResponseSchema(description='an unknown error occurred.')
107 }
108
109 batch = Service(name='batch', path='/batch',
110 description='Batch operations')
111
112
113 @batch.post(schema=BatchRequest,
114 validators=(colander_validator,),
115 permission=NO_PERMISSION_REQUIRED,
116 tags=['Batch'], operation_id='batch',
117 response_schemas=batch_responses)
118 def post_batch(request):
119 requests = request.validated['body']['requests']
120
121 request.log_context(batch_size=len(requests))
122
123 limit = request.registry.settings['batch_max_requests']
124 if limit and len(requests) > int(limit):
125 error_msg = 'Number of requests is limited to {}'.format(limit)
126 request.errors.add('body', 'requests', error_msg)
127 return
128
129 if any([batch.path in req['path'] for req in requests]):
130 error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)
131 request.errors.add('body', 'requests', error_msg)
132 return
133
134 responses = []
135
136 for subrequest_spec in requests:
137 subrequest = build_request(request, subrequest_spec)
138
139 log_context = {**request.log_context(),
140 'path': subrequest.path,
141 'method': subrequest.method}
142 try:
143 # Invoke subrequest without individual transaction.
144 resp, subrequest = request.follow_subrequest(subrequest,
145 use_tweens=False)
146 except httpexceptions.HTTPException as e:
147 if e.content_type == 'application/json':
148 resp = e
149 else:
150 # JSONify raw Pyramid errors.
151 resp = errors.http_error(e)
152
153 subrequest_logger.info('subrequest.summary', extra=log_context)
154
155 dict_resp = build_response(resp, subrequest)
156 responses.append(dict_resp)
157
158 return {
159 'responses': responses
160 }
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py
--- a/kinto/core/views/batch.py
+++ b/kinto/core/views/batch.py
@@ -144,6 +144,13 @@
resp, subrequest = request.follow_subrequest(subrequest,
use_tweens=False)
except httpexceptions.HTTPException as e:
+ # Since some request in the batch failed, we need to stop the parent request
+ # through Pyramid's transaction manager. 5XX errors are already caught by
+ # pyramid_tm's commit_veto
+ # https://github.com/Kinto/kinto/issues/624
+ if e.status_code == 409:
+ request.tm.abort()
+
if e.content_type == 'application/json':
resp = e
else:
| {"golden_diff": "diff --git a/kinto/core/views/batch.py b/kinto/core/views/batch.py\n--- a/kinto/core/views/batch.py\n+++ b/kinto/core/views/batch.py\n@@ -144,6 +144,13 @@\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n+ # Since some request in the batch failed, we need to stop the parent request\n+ # through Pyramid's transaction manager. 5XX errors are already caught by\n+ # pyramid_tm's commit_veto\n+ # https://github.com/Kinto/kinto/issues/624\n+ if e.status_code == 409:\n+ request.tm.abort()\n+\n if e.content_type == 'application/json':\n resp = e\n else:\n", "issue": "IntegrityError if you try to create two records with the same last_modified value in a batch\n```\r\ndemo:\r\n collections:\r\n demo:\r\n records:\r\n abc:\r\n data:\r\n last_modified: 123\r\n efg:\r\n data:\r\n last_modified: 123\r\n```\r\n\r\n```\r\n$ kinto-wizard load -s https://kinto.dev.mozaws.net/v1 -a admin:admin demo.yaml --force -b demo -c demo\r\nkinto_http.exceptions.KintoException: POST /v1/batch - 503 503 - \r\n{'message': 'Service temporary unavailable due to overloading or maintenance, please retry later.',\r\n 'code': 503, 'errno': 201, 'error': 'Service Unavailable'}\r\n```\n", "before_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. 
warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = 
[]\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}], "after_files": [{"content": "import logging\n\nimport colander\nfrom cornice.validators import colander_validator\nfrom pyramid import httpexceptions\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import errors\nfrom kinto.core import Service\nfrom kinto.core.errors import ErrorSchema\nfrom kinto.core.utils import merge_dicts, build_request, build_response\n\n\nsubrequest_logger = logging.getLogger('subrequest.summary')\n\nvalid_http_method = colander.OneOf(('GET', 'HEAD', 'DELETE', 'TRACE',\n 'POST', 'PUT', 'PATCH'))\n\n\ndef string_values(node, cstruct):\n \"\"\"Validate that a ``colander.Mapping`` only has strings in its values.\n\n .. warning::\n\n Should be associated to a ``colander.Mapping`` schema node.\n \"\"\"\n are_strings = [isinstance(v, str) for v in cstruct.values()]\n if not all(are_strings):\n error_msg = '{} contains non string value'.format(cstruct)\n raise colander.Invalid(node, error_msg)\n\n\nclass BatchRequestSchema(colander.MappingSchema):\n method = colander.SchemaNode(colander.String(),\n validator=valid_http_method,\n missing=colander.drop)\n path = colander.SchemaNode(colander.String(),\n validator=colander.Regex('^/'))\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n missing=colander.drop)\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n\nclass BatchPayloadSchema(colander.MappingSchema):\n defaults = BatchRequestSchema(missing=colander.drop).clone()\n requests = colander.SchemaNode(colander.Sequence(),\n BatchRequestSchema())\n\n @staticmethod\n def schema_type():\n return colander.Mapping(unknown='raise')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # On defaults, path is not mandatory.\n self.get('defaults').get('path').missing = colander.drop\n\n def deserialize(self, cstruct=colander.null):\n \"\"\"Preprocess received data to carefully merge defaults.\n \"\"\"\n if cstruct is not colander.null:\n defaults = cstruct.get('defaults')\n requests = cstruct.get('requests')\n if isinstance(defaults, dict) and isinstance(requests, list):\n for request in requests:\n if isinstance(request, dict):\n merge_dicts(request, defaults)\n return super().deserialize(cstruct)\n\n\nclass BatchRequest(colander.MappingSchema):\n body = BatchPayloadSchema()\n\n\nclass BatchResponseSchema(colander.MappingSchema):\n status = colander.SchemaNode(colander.Integer())\n path = colander.SchemaNode(colander.String())\n headers = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n validator=string_values,\n missing=colander.drop)\n body = colander.SchemaNode(colander.Mapping(unknown='preserve'),\n 
missing=colander.drop)\n\n\nclass BatchResponseBodySchema(colander.MappingSchema):\n responses = colander.SequenceSchema(BatchResponseSchema(missing=colander.drop))\n\n\nclass BatchResponse(colander.MappingSchema):\n body = BatchResponseBodySchema()\n\n\nclass ErrorResponseSchema(colander.MappingSchema):\n body = ErrorSchema()\n\n\nbatch_responses = {\n '200': BatchResponse(description='Return a list of operation responses.'),\n '400': ErrorResponseSchema(description='The request was badly formatted.'),\n 'default': ErrorResponseSchema(description='an unknown error occurred.')\n}\n\nbatch = Service(name='batch', path='/batch',\n description='Batch operations')\n\n\[email protected](schema=BatchRequest,\n validators=(colander_validator,),\n permission=NO_PERMISSION_REQUIRED,\n tags=['Batch'], operation_id='batch',\n response_schemas=batch_responses)\ndef post_batch(request):\n requests = request.validated['body']['requests']\n\n request.log_context(batch_size=len(requests))\n\n limit = request.registry.settings['batch_max_requests']\n if limit and len(requests) > int(limit):\n error_msg = 'Number of requests is limited to {}'.format(limit)\n request.errors.add('body', 'requests', error_msg)\n return\n\n if any([batch.path in req['path'] for req in requests]):\n error_msg = 'Recursive call on {} endpoint is forbidden.'.format(batch.path)\n request.errors.add('body', 'requests', error_msg)\n return\n\n responses = []\n\n for subrequest_spec in requests:\n subrequest = build_request(request, subrequest_spec)\n\n log_context = {**request.log_context(),\n 'path': subrequest.path,\n 'method': subrequest.method}\n try:\n # Invoke subrequest without individual transaction.\n resp, subrequest = request.follow_subrequest(subrequest,\n use_tweens=False)\n except httpexceptions.HTTPException as e:\n # Since some request in the batch failed, we need to stop the parent request\n # through Pyramid's transaction manager. 5XX errors are already caught by\n # pyramid_tm's commit_veto\n # https://github.com/Kinto/kinto/issues/624\n if e.status_code == 409:\n request.tm.abort()\n\n if e.content_type == 'application/json':\n resp = e\n else:\n # JSONify raw Pyramid errors.\n resp = errors.http_error(e)\n\n subrequest_logger.info('subrequest.summary', extra=log_context)\n\n dict_resp = build_response(resp, subrequest)\n responses.append(dict_resp)\n\n return {\n 'responses': responses\n }\n", "path": "kinto/core/views/batch.py"}]} | 1,938 | 185 |
gh_patches_debug_33644 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1275 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement basic reporting functionality for MVP (besides Domain Growth report)
### Story
As an Admin, I want to quickly export domain data reports directly from the domains page (/admin/registrar/domain/) so that I can easily access and analyze the domain data.
### Acceptance Criteria
- [ ] Three reports are available to download on the domains page:
- [ ] [Domains by type](https://docs.google.com/spreadsheets/d/1_nMU2obW22U6NlOSC2ARxf3PpsJnSe2wMo5AyLSzXzk/edit?usp=sharing) (sorted by domain name)
- [ ] [current-full.csv](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv) (sorted by domain name, then agency, then domain type)
- [ ] [current-federal.csv](https://github.com/cisagov/dotgov-data/blob/main/current-federal.csv) (sorted by domain name, then agency, then domain type)
- [ ] Each CSV report should contain accurate and up-to-date domain data from the database, sorted in the ways they are in the examples above.
- [ ] Single dropdown with the three report options which the user can select
- [ ] Clicking on each report triggers an immediate download of the relevant CSV report
- [ ] The UI components should be consistent with the existing design language of the admin portal.
### Additional Context
- This feature is a stop-gap measure, meant to provide immediate access to crucial reports while the ideal report interface is being developed. Future work is at #997.
- Security email may be pulled from .gov database rather than thru EPP call to registry.
### Issue Links
🔄 Relates to: #938 #143 #1075
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/utility/csv_export.py`
Content:
```
1 import csv
2 from registrar.models.domain import Domain
3 from registrar.models.domain_information import DomainInformation
4 from registrar.models.public_contact import PublicContact
5
6
7 def export_domains_to_writer(writer, columns, sort_fields, filter_condition):
8 # write columns headers to writer
9 writer.writerow(columns)
10
11 domainInfos = DomainInformation.objects.filter(**filter_condition).order_by(
12 *sort_fields
13 )
14 for domainInfo in domainInfos:
15 security_contacts = domainInfo.domain.contacts.filter(
16 contact_type=PublicContact.ContactTypeChoices.SECURITY
17 )
18
19 # create a dictionary of fields which can be included in output
20 FIELDS = {
21 "Domain name": domainInfo.domain.name,
22 "Domain type": domainInfo.get_organization_type_display()
23 + " - "
24 + domainInfo.get_federal_type_display()
25 if domainInfo.federal_type
26 else domainInfo.get_organization_type_display(),
27 "Agency": domainInfo.federal_agency,
28 "Organization name": domainInfo.organization_name,
29 "City": domainInfo.city,
30 "State": domainInfo.state_territory,
31 "AO": domainInfo.authorizing_official.first_name
32 + " "
33 + domainInfo.authorizing_official.last_name
34 if domainInfo.authorizing_official
35 else " ",
36 "AO email": domainInfo.authorizing_official.email
37 if domainInfo.authorizing_official
38 else " ",
39 "Security Contact Email": security_contacts[0].email
40 if security_contacts
41 else " ",
42 "Status": domainInfo.domain.state,
43 "Expiration Date": domainInfo.domain.expiration_date,
44 }
45 writer.writerow([FIELDS.get(column, "") for column in columns])
46
47
48 def export_data_type_to_csv(csv_file):
49 writer = csv.writer(csv_file)
50 # define columns to include in export
51 columns = [
52 "Domain name",
53 "Domain type",
54 "Agency",
55 "Organization name",
56 "City",
57 "State",
58 "AO",
59 "AO email",
60 "Security Contact Email",
61 "Status",
62 "Expiration Date",
63 ]
64 sort_fields = ["domain__name"]
65 filter_condition = {
66 "domain__state__in": [
67 Domain.State.READY,
68 Domain.State.DNS_NEEDED,
69 Domain.State.ON_HOLD,
70 ],
71 }
72 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
73
74
75 def export_data_full_to_csv(csv_file):
76 writer = csv.writer(csv_file)
77 # define columns to include in export
78 columns = [
79 "Domain name",
80 "Domain type",
81 "Agency",
82 "Organization name",
83 "City",
84 "State",
85 "Security Contact Email",
86 ]
87 sort_fields = ["domain__name", "federal_agency", "organization_type"]
88 filter_condition = {
89 "domain__state__in": [
90 Domain.State.READY,
91 Domain.State.DNS_NEEDED,
92 Domain.State.ON_HOLD,
93 ],
94 }
95 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
96
97
98 def export_data_federal_to_csv(csv_file):
99 writer = csv.writer(csv_file)
100 # define columns to include in export
101 columns = [
102 "Domain name",
103 "Domain type",
104 "Agency",
105 "Organization name",
106 "City",
107 "State",
108 "Security Contact Email",
109 ]
110 sort_fields = ["domain__name", "federal_agency", "organization_type"]
111 filter_condition = {
112 "organization_type__icontains": "federal",
113 "domain__state__in": [
114 Domain.State.READY,
115 Domain.State.DNS_NEEDED,
116 Domain.State.ON_HOLD,
117 ],
118 }
119 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/utility/csv_export.py b/src/registrar/utility/csv_export.py
--- a/src/registrar/utility/csv_export.py
+++ b/src/registrar/utility/csv_export.py
@@ -2,6 +2,8 @@
from registrar.models.domain import Domain
from registrar.models.domain_information import DomainInformation
from registrar.models.public_contact import PublicContact
+from django.db.models import Value
+from django.db.models.functions import Coalesce
def export_domains_to_writer(writer, columns, sort_fields, filter_condition):
@@ -61,7 +63,13 @@
"Status",
"Expiration Date",
]
- sort_fields = ["domain__name"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"domain__state__in": [
Domain.State.READY,
@@ -84,7 +92,13 @@
"State",
"Security Contact Email",
]
- sort_fields = ["domain__name", "federal_agency", "organization_type"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"domain__state__in": [
Domain.State.READY,
@@ -107,7 +121,13 @@
"State",
"Security Contact Email",
]
- sort_fields = ["domain__name", "federal_agency", "organization_type"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"organization_type__icontains": "federal",
"domain__state__in": [
| {"golden_diff": "diff --git a/src/registrar/utility/csv_export.py b/src/registrar/utility/csv_export.py\n--- a/src/registrar/utility/csv_export.py\n+++ b/src/registrar/utility/csv_export.py\n@@ -2,6 +2,8 @@\n from registrar.models.domain import Domain\n from registrar.models.domain_information import DomainInformation\n from registrar.models.public_contact import PublicContact\n+from django.db.models import Value\n+from django.db.models.functions import Coalesce\n \n \n def export_domains_to_writer(writer, columns, sort_fields, filter_condition):\n@@ -61,7 +63,13 @@\n \"Status\",\n \"Expiration Date\",\n ]\n- sort_fields = [\"domain__name\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n@@ -84,7 +92,13 @@\n \"State\",\n \"Security Contact Email\",\n ]\n- sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n@@ -107,7 +121,13 @@\n \"State\",\n \"Security Contact Email\",\n ]\n- sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"organization_type__icontains\": \"federal\",\n \"domain__state__in\": [\n", "issue": "Implement basic reporting functionality for MVP (besides Domain Growth report)\n### Story\r\n\r\nAs an Admin, I want to quickly export domain data reports directly from the domains page (/admin/registrar/domain/) so that I can easily access and analyze the domain data.\r\n\r\n### Acceptance Criteria\r\n\r\n- [ ] Three reports are available to download on the domains page:\r\n - [ ] [Domains by type](https://docs.google.com/spreadsheets/d/1_nMU2obW22U6NlOSC2ARxf3PpsJnSe2wMo5AyLSzXzk/edit?usp=sharing) (sorted by domain name)\r\n - [ ] [current-full.csv](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv) (sorted by domain name, then agency, then domain type)\r\n - [ ] [current-federal.csv](https://github.com/cisagov/dotgov-data/blob/main/current-federal.csv) (sorted by domain name, then agency, then domain type)\r\n- [ ] Each CSV report should contain accurate and up-to-date domain data from the database, sorted in the ways they are in the examples above.\r\n- [ ] Single dropdown with the three report options which the user can select\r\n- [ ] Clicking on each report triggers an immediate download of the relevant CSV report\r\n- [ ] The UI components should be consistent with the existing design language of the admin portal.\r\n\r\n### Additional Context\r\n\r\n- This feature is a stop-gap measure, meant to provide immediate access to crucial reports while the ideal report interface is being developed. 
Future work is at #997.\r\n\r\n- Security email may be pulled from .gov database rather than thru EPP call to registry.\r\n\r\n### Issue Links\r\n\r\n\ud83d\udd04 Relates to: #938 #143 #1075 \n", "before_files": [{"content": "import csv\nfrom registrar.models.domain import Domain\nfrom registrar.models.domain_information import DomainInformation\nfrom registrar.models.public_contact import PublicContact\n\n\ndef export_domains_to_writer(writer, columns, sort_fields, filter_condition):\n # write columns headers to writer\n writer.writerow(columns)\n\n domainInfos = DomainInformation.objects.filter(**filter_condition).order_by(\n *sort_fields\n )\n for domainInfo in domainInfos:\n security_contacts = domainInfo.domain.contacts.filter(\n contact_type=PublicContact.ContactTypeChoices.SECURITY\n )\n\n # create a dictionary of fields which can be included in output\n FIELDS = {\n \"Domain name\": domainInfo.domain.name,\n \"Domain type\": domainInfo.get_organization_type_display()\n + \" - \"\n + domainInfo.get_federal_type_display()\n if domainInfo.federal_type\n else domainInfo.get_organization_type_display(),\n \"Agency\": domainInfo.federal_agency,\n \"Organization name\": domainInfo.organization_name,\n \"City\": domainInfo.city,\n \"State\": domainInfo.state_territory,\n \"AO\": domainInfo.authorizing_official.first_name\n + \" \"\n + domainInfo.authorizing_official.last_name\n if domainInfo.authorizing_official\n else \" \",\n \"AO email\": domainInfo.authorizing_official.email\n if domainInfo.authorizing_official\n else \" \",\n \"Security Contact Email\": security_contacts[0].email\n if security_contacts\n else \" \",\n \"Status\": domainInfo.domain.state,\n \"Expiration Date\": domainInfo.domain.expiration_date,\n }\n writer.writerow([FIELDS.get(column, \"\") for column in columns])\n\n\ndef export_data_type_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"AO\",\n \"AO email\",\n \"Security Contact Email\",\n \"Status\",\n \"Expiration Date\",\n ]\n sort_fields = [\"domain__name\"]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_full_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_federal_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n filter_condition = {\n \"organization_type__icontains\": \"federal\",\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n", "path": 
"src/registrar/utility/csv_export.py"}], "after_files": [{"content": "import csv\nfrom registrar.models.domain import Domain\nfrom registrar.models.domain_information import DomainInformation\nfrom registrar.models.public_contact import PublicContact\nfrom django.db.models import Value\nfrom django.db.models.functions import Coalesce\n\n\ndef export_domains_to_writer(writer, columns, sort_fields, filter_condition):\n # write columns headers to writer\n writer.writerow(columns)\n\n domainInfos = DomainInformation.objects.filter(**filter_condition).order_by(\n *sort_fields\n )\n for domainInfo in domainInfos:\n security_contacts = domainInfo.domain.contacts.filter(\n contact_type=PublicContact.ContactTypeChoices.SECURITY\n )\n\n # create a dictionary of fields which can be included in output\n FIELDS = {\n \"Domain name\": domainInfo.domain.name,\n \"Domain type\": domainInfo.get_organization_type_display()\n + \" - \"\n + domainInfo.get_federal_type_display()\n if domainInfo.federal_type\n else domainInfo.get_organization_type_display(),\n \"Agency\": domainInfo.federal_agency,\n \"Organization name\": domainInfo.organization_name,\n \"City\": domainInfo.city,\n \"State\": domainInfo.state_territory,\n \"AO\": domainInfo.authorizing_official.first_name\n + \" \"\n + domainInfo.authorizing_official.last_name\n if domainInfo.authorizing_official\n else \" \",\n \"AO email\": domainInfo.authorizing_official.email\n if domainInfo.authorizing_official\n else \" \",\n \"Security Contact Email\": security_contacts[0].email\n if security_contacts\n else \" \",\n \"Status\": domainInfo.domain.state,\n \"Expiration Date\": domainInfo.domain.expiration_date,\n }\n writer.writerow([FIELDS.get(column, \"\") for column in columns])\n\n\ndef export_data_type_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"AO\",\n \"AO email\",\n \"Security Contact Email\",\n \"Status\",\n \"Expiration Date\",\n ]\n # Coalesce is used to replace federal_type of None with ZZZZZ\n sort_fields = [\n \"organization_type\",\n Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n \"federal_agency\",\n \"domain__name\",\n ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_full_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n # Coalesce is used to replace federal_type of None with ZZZZZ\n sort_fields = [\n \"organization_type\",\n Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n \"federal_agency\",\n \"domain__name\",\n ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_federal_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n # Coalesce is used to replace federal_type of None with ZZZZZ\n sort_fields = [\n \"organization_type\",\n 
Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n \"federal_agency\",\n \"domain__name\",\n ]\n filter_condition = {\n \"organization_type__icontains\": \"federal\",\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n", "path": "src/registrar/utility/csv_export.py"}]} | 1,682 | 489 |
gh_patches_debug_6734 | rasdani/github-patches | git_diff | boto__botocore-1312 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when trying to read 0 byte from StreamingBody
Referring to the read method of `StreamingBody`:
https://github.com/boto/botocore/blob/c632931a6cc5eab4113c976d430bcb9c059f829f/botocore/response.py#L69-L81
If anyone asks for 0 bytes from a StreamingBody, the conditional on line 76 will pass because chunk is empty (since 0 bytes were asked for) and amount was set to 0 (not None). This leads to the content length verification, which will fail because you've read 0 bytes so far out of the entire content.
Might be an odd use case, but I feel like it is a valid use case.
In fact, I ran into this issue when trying to use the `ijson` package [link](https://pypi.python.org/pypi/ijson).
That library uses `.read(0)` in order to figure out what type of encoding the stream reader should use. Whether that's the best way to do it or not, I'm not entirely sure. But I feel like `.read(0)` should still be supported.
If you guys agree that it should be supported, maybe consider a condition like this:
```
if (not chunk and amt > 0) or amt is None:
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `botocore/response.py`
Content:
```
1 # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2 # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"). You
5 # may not use this file except in compliance with the License. A copy of
6 # the License is located at
7 #
8 # http://aws.amazon.com/apache2.0/
9 #
10 # or in the "license" file accompanying this file. This file is
11 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12 # ANY KIND, either express or implied. See the License for the specific
13 # language governing permissions and limitations under the License.
14
15 import sys
16 import xml.etree.cElementTree
17 import logging
18
19 from botocore import ScalarTypes
20 from botocore.hooks import first_non_none_response
21 from botocore.compat import json, set_socket_timeout, XMLParseError
22 from botocore.exceptions import IncompleteReadError
23 from botocore import parsers
24
25
26 logger = logging.getLogger(__name__)
27
28
29 class StreamingBody(object):
30 """Wrapper class for an http response body.
31
32 This provides a few additional conveniences that do not exist
33 in the urllib3 model:
34
35 * Set the timeout on the socket (i.e read() timeouts)
36 * Auto validation of content length, if the amount of bytes
37 we read does not match the content length, an exception
38 is raised.
39
40 """
41 def __init__(self, raw_stream, content_length):
42 self._raw_stream = raw_stream
43 self._content_length = content_length
44 self._amount_read = 0
45
46 def set_socket_timeout(self, timeout):
47 """Set the timeout seconds on the socket."""
48 # The problem we're trying to solve is to prevent .read() calls from
49 # hanging. This can happen in rare cases. What we'd like to ideally
50 # do is set a timeout on the .read() call so that callers can retry
51 # the request.
52 # Unfortunately, this isn't currently possible in requests.
53 # See: https://github.com/kennethreitz/requests/issues/1803
54 # So what we're going to do is reach into the guts of the stream and
55 # grab the socket object, which we can set the timeout on. We're
56 # putting in a check here so in case this interface goes away, we'll
57 # know.
58 try:
59 # To further complicate things, the way to grab the
60 # underlying socket object from an HTTPResponse is different
61 # in py2 and py3. So this code has been pushed to botocore.compat.
62 set_socket_timeout(self._raw_stream, timeout)
63 except AttributeError:
64 logger.error("Cannot access the socket object of "
65 "a streaming response. It's possible "
66 "the interface has changed.", exc_info=True)
67 raise
68
69 def read(self, amt=None):
70 """Read at most amt bytes from the stream.
71
72 If the amt argument is omitted, read all data.
73 """
74 chunk = self._raw_stream.read(amt)
75 self._amount_read += len(chunk)
76 if not chunk or amt is None:
77 # If the server sends empty contents or
78 # we ask to read all of the contents, then we know
79 # we need to verify the content length.
80 self._verify_content_length()
81 return chunk
82
83 def _verify_content_length(self):
84 # See: https://github.com/kennethreitz/requests/issues/1855
85 # Basically, our http library doesn't do this for us, so we have
86 # to do this ourself.
87 if self._content_length is not None and \
88 self._amount_read != int(self._content_length):
89 raise IncompleteReadError(
90 actual_bytes=self._amount_read,
91 expected_bytes=int(self._content_length))
92
93 def close(self):
94 """Close the underlying http response stream."""
95 self._raw_stream.close()
96
97
98 def get_response(operation_model, http_response):
99 protocol = operation_model.metadata['protocol']
100 response_dict = {
101 'headers': http_response.headers,
102 'status_code': http_response.status_code,
103 }
104 # TODO: Unfortunately, we have to have error logic here.
105 # If it looks like an error, in the streaming response case we
106 # need to actually grab the contents.
107 if response_dict['status_code'] >= 300:
108 response_dict['body'] = http_response.content
109 elif operation_model.has_streaming_output:
110 response_dict['body'] = StreamingBody(
111 http_response.raw, response_dict['headers'].get('content-length'))
112 else:
113 response_dict['body'] = http_response.content
114
115 parser = parsers.create_parser(protocol)
116 return http_response, parser.parse(response_dict,
117 operation_model.output_shape)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/botocore/response.py b/botocore/response.py
--- a/botocore/response.py
+++ b/botocore/response.py
@@ -73,7 +73,7 @@
"""
chunk = self._raw_stream.read(amt)
self._amount_read += len(chunk)
- if not chunk or amt is None:
+ if amt is None or (not chunk and amt > 0):
# If the server sends empty contents or
# we ask to read all of the contents, then we know
# we need to verify the content length.
| {"golden_diff": "diff --git a/botocore/response.py b/botocore/response.py\n--- a/botocore/response.py\n+++ b/botocore/response.py\n@@ -73,7 +73,7 @@\n \"\"\"\n chunk = self._raw_stream.read(amt)\n self._amount_read += len(chunk)\n- if not chunk or amt is None:\n+ if amt is None or (not chunk and amt > 0):\n # If the server sends empty contents or\n # we ask to read all of the contents, then we know\n # we need to verify the content length.\n", "issue": "Error when trying to read 0 byte from StreamingBody\nReferring to the read method of `StreamingBody`:\r\nhttps://github.com/boto/botocore/blob/c632931a6cc5eab4113c976d430bcb9c059f829f/botocore/response.py#L69-L81\r\n\r\nIf anyone asks for 0 bytes from a StreamingBody, the conditional on line 76 will pass because chunk is empty (since 0 bytes were asked for) and amount was set to 0 (not None). This leads to the content length verification, which will fail because you've read 0 bytes so far out of the entire content.\r\n\r\nMight be an odd use case, but I feel like is a valid use case.\r\nIn fact, I ran into this issue when trying to use the `ijson` package [link](https://pypi.python.org/pypi/ijson).\r\nThat library uses `.read(0)` in order to figure out what type of encoding the stream reader should use. Whether that's the best way to do it or not, I'm not entirely sure. But I feel like `.read(0)` should still be supported.\r\n\r\nIf you guys agree that it should be supported, maybe considering a condition like this:\r\n```\r\nif (not chunk and amt > 0) or amt is None:\r\n```\n", "before_files": [{"content": "# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/\n# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport sys\nimport xml.etree.cElementTree\nimport logging\n\nfrom botocore import ScalarTypes\nfrom botocore.hooks import first_non_none_response\nfrom botocore.compat import json, set_socket_timeout, XMLParseError\nfrom botocore.exceptions import IncompleteReadError\nfrom botocore import parsers\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass StreamingBody(object):\n \"\"\"Wrapper class for an http response body.\n\n This provides a few additional conveniences that do not exist\n in the urllib3 model:\n\n * Set the timeout on the socket (i.e read() timeouts)\n * Auto validation of content length, if the amount of bytes\n we read does not match the content length, an exception\n is raised.\n\n \"\"\"\n def __init__(self, raw_stream, content_length):\n self._raw_stream = raw_stream\n self._content_length = content_length\n self._amount_read = 0\n\n def set_socket_timeout(self, timeout):\n \"\"\"Set the timeout seconds on the socket.\"\"\"\n # The problem we're trying to solve is to prevent .read() calls from\n # hanging. This can happen in rare cases. 
What we'd like to ideally\n # do is set a timeout on the .read() call so that callers can retry\n # the request.\n # Unfortunately, this isn't currently possible in requests.\n # See: https://github.com/kennethreitz/requests/issues/1803\n # So what we're going to do is reach into the guts of the stream and\n # grab the socket object, which we can set the timeout on. We're\n # putting in a check here so in case this interface goes away, we'll\n # know.\n try:\n # To further complicate things, the way to grab the\n # underlying socket object from an HTTPResponse is different\n # in py2 and py3. So this code has been pushed to botocore.compat.\n set_socket_timeout(self._raw_stream, timeout)\n except AttributeError:\n logger.error(\"Cannot access the socket object of \"\n \"a streaming response. It's possible \"\n \"the interface has changed.\", exc_info=True)\n raise\n\n def read(self, amt=None):\n \"\"\"Read at most amt bytes from the stream.\n\n If the amt argument is omitted, read all data.\n \"\"\"\n chunk = self._raw_stream.read(amt)\n self._amount_read += len(chunk)\n if not chunk or amt is None:\n # If the server sends empty contents or\n # we ask to read all of the contents, then we know\n # we need to verify the content length.\n self._verify_content_length()\n return chunk\n\n def _verify_content_length(self):\n # See: https://github.com/kennethreitz/requests/issues/1855\n # Basically, our http library doesn't do this for us, so we have\n # to do this ourself.\n if self._content_length is not None and \\\n self._amount_read != int(self._content_length):\n raise IncompleteReadError(\n actual_bytes=self._amount_read,\n expected_bytes=int(self._content_length))\n\n def close(self):\n \"\"\"Close the underlying http response stream.\"\"\"\n self._raw_stream.close()\n\n\ndef get_response(operation_model, http_response):\n protocol = operation_model.metadata['protocol']\n response_dict = {\n 'headers': http_response.headers,\n 'status_code': http_response.status_code,\n }\n # TODO: Unfortunately, we have to have error logic here.\n # If it looks like an error, in the streaming response case we\n # need to actually grab the contents.\n if response_dict['status_code'] >= 300:\n response_dict['body'] = http_response.content\n elif operation_model.has_streaming_output:\n response_dict['body'] = StreamingBody(\n http_response.raw, response_dict['headers'].get('content-length'))\n else:\n response_dict['body'] = http_response.content\n\n parser = parsers.create_parser(protocol)\n return http_response, parser.parse(response_dict,\n operation_model.output_shape)\n", "path": "botocore/response.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/\n# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport sys\nimport xml.etree.cElementTree\nimport logging\n\nfrom botocore import ScalarTypes\nfrom botocore.hooks import first_non_none_response\nfrom botocore.compat import json, set_socket_timeout, XMLParseError\nfrom botocore.exceptions import IncompleteReadError\nfrom botocore import parsers\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass StreamingBody(object):\n \"\"\"Wrapper class for an http response body.\n\n This provides a few additional conveniences that do not exist\n in the urllib3 model:\n\n * Set the timeout on the socket (i.e read() timeouts)\n * Auto validation of content length, if the amount of bytes\n we read does not match the content length, an exception\n is raised.\n\n \"\"\"\n def __init__(self, raw_stream, content_length):\n self._raw_stream = raw_stream\n self._content_length = content_length\n self._amount_read = 0\n\n def set_socket_timeout(self, timeout):\n \"\"\"Set the timeout seconds on the socket.\"\"\"\n # The problem we're trying to solve is to prevent .read() calls from\n # hanging. This can happen in rare cases. What we'd like to ideally\n # do is set a timeout on the .read() call so that callers can retry\n # the request.\n # Unfortunately, this isn't currently possible in requests.\n # See: https://github.com/kennethreitz/requests/issues/1803\n # So what we're going to do is reach into the guts of the stream and\n # grab the socket object, which we can set the timeout on. We're\n # putting in a check here so in case this interface goes away, we'll\n # know.\n try:\n # To further complicate things, the way to grab the\n # underlying socket object from an HTTPResponse is different\n # in py2 and py3. So this code has been pushed to botocore.compat.\n set_socket_timeout(self._raw_stream, timeout)\n except AttributeError:\n logger.error(\"Cannot access the socket object of \"\n \"a streaming response. 
It's possible \"\n \"the interface has changed.\", exc_info=True)\n raise\n\n def read(self, amt=None):\n \"\"\"Read at most amt bytes from the stream.\n\n If the amt argument is omitted, read all data.\n \"\"\"\n chunk = self._raw_stream.read(amt)\n self._amount_read += len(chunk)\n if amt is None or (not chunk and amt > 0):\n # If the server sends empty contents or\n # we ask to read all of the contents, then we know\n # we need to verify the content length.\n self._verify_content_length()\n return chunk\n\n def _verify_content_length(self):\n # See: https://github.com/kennethreitz/requests/issues/1855\n # Basically, our http library doesn't do this for us, so we have\n # to do this ourself.\n if self._content_length is not None and \\\n self._amount_read != int(self._content_length):\n raise IncompleteReadError(\n actual_bytes=self._amount_read,\n expected_bytes=int(self._content_length))\n\n def close(self):\n \"\"\"Close the underlying http response stream.\"\"\"\n self._raw_stream.close()\n\n\ndef get_response(operation_model, http_response):\n protocol = operation_model.metadata['protocol']\n response_dict = {\n 'headers': http_response.headers,\n 'status_code': http_response.status_code,\n }\n # TODO: Unfortunately, we have to have error logic here.\n # If it looks like an error, in the streaming response case we\n # need to actually grab the contents.\n if response_dict['status_code'] >= 300:\n response_dict['body'] = http_response.content\n elif operation_model.has_streaming_output:\n response_dict['body'] = StreamingBody(\n http_response.raw, response_dict['headers'].get('content-length'))\n else:\n response_dict['body'] = http_response.content\n\n parser = parsers.create_parser(protocol)\n return http_response, parser.parse(response_dict,\n operation_model.output_shape)\n", "path": "botocore/response.py"}]} | 1,871 | 131 |
gh_patches_debug_5265 | rasdani/github-patches | git_diff | obspy__obspy-2562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix simple typo: whith -> with
There is a small typo in obspy/io/gcf/core.py.
Should read `with` rather than `whith`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/io/gcf/core.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 GCF bindings to ObsPy core module.
4 """
5 from __future__ import (absolute_import, division, print_function,
6 unicode_literals)
7 from future.builtins import * # NOQA
8
9 from obspy import Stream, Trace, UTCDateTime
10
11 from . import libgcf
12
13
14 def merge_gcf_stream(st):
15 """
16 Merges GCF stream (replacing Stream.merge(-1) for headonly=True)
17
18 :type st: :class:`~obspy.core.stream.Stream`
19 :param st: GCF Stream object whith no data
20 :rtype: :class:`~obspy.core.stream.Stream`
21 :returns: Stream object containing header and data.
22 """
23 traces = []
24 for tr in st:
25 delta = tr.stats.delta
26 starttime = tr.stats.starttime
27 endtime = tr.stats.endtime
28 for trace in traces:
29 if tr.id == trace.id and delta == trace.stats.delta \
30 and not starttime == trace.stats.starttime:
31 if 0 < starttime - trace.stats.endtime <= delta:
32 trace.stats.npts += tr.stats.npts
33 break
34 elif 0 < trace.stats.starttime - endtime <= delta:
35 trace.stats.starttime = UTCDateTime(starttime)
36 trace.stats.npts += tr.stats.npts
37 break
38 else:
39 traces.append(tr)
40 return Stream(traces=traces)
41
42
43 def _is_gcf(filename):
44 """
45 Checks whether a file is GCF or not.
46
47 :type filename: str
48 :param filename: GCF file to be checked.
49 :rtype: bool
50 :return: ``True`` if a GCF file.
51 """
52 try:
53 with open(filename, 'rb') as f:
54 libgcf.is_gcf(f)
55 except Exception:
56 return False
57 return True
58
59
60 def _read_gcf(filename, headonly=False, **kwargs): # @UnusedVariable
61 """
62 Reads a GCF file and returns a Stream object.
63
64 only GCF files containing data records are supported.
65
66 .. warning::
67 This function should NOT be called directly, it registers via the
68 ObsPy :func:`~obspy.core.stream.read` function, call this instead.
69
70 :type filename: str
71 :param filename: GCF file to be read.
72 :type headonly: bool, optional
73 :param headonly: If True read only head of GCF file.
74 :type channel_prefix: str, optional
75 :param channel_prefix: Channel band and instrument codes.
76 Defaults to ``HH``.
77 :rtype: :class:`~obspy.core.stream.Stream`
78 :returns: Stream object containing header and data.
79
80 .. rubric:: Example
81 >>> from obspy import read
82 >>> st = read("/path/to/20160603_1955n.gcf", format="GCF")
83 """
84 traces = []
85 with open(filename, 'rb') as f:
86 while True:
87 try:
88 if headonly:
89 header = libgcf.read_header(f, **kwargs)
90 if header:
91 traces.append(Trace(header=header))
92 else:
93 hd = libgcf.read(f, **kwargs)
94 if hd:
95 traces.append(Trace(header=hd[0], data=hd[1]))
96 except EOFError:
97 break
98 st = Stream(traces=traces)
99 if headonly:
100 st = merge_gcf_stream(st)
101 else:
102 st.merge(-1)
103 return st
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/io/gcf/core.py b/obspy/io/gcf/core.py
--- a/obspy/io/gcf/core.py
+++ b/obspy/io/gcf/core.py
@@ -16,7 +16,7 @@
Merges GCF stream (replacing Stream.merge(-1) for headonly=True)
:type st: :class:`~obspy.core.stream.Stream`
- :param st: GCF Stream object whith no data
+ :param st: GCF Stream object with no data
:rtype: :class:`~obspy.core.stream.Stream`
:returns: Stream object containing header and data.
"""
| {"golden_diff": "diff --git a/obspy/io/gcf/core.py b/obspy/io/gcf/core.py\n--- a/obspy/io/gcf/core.py\n+++ b/obspy/io/gcf/core.py\n@@ -16,7 +16,7 @@\n Merges GCF stream (replacing Stream.merge(-1) for headonly=True)\n \n :type st: :class:`~obspy.core.stream.Stream`\n- :param st: GCF Stream object whith no data\n+ :param st: GCF Stream object with no data\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n \"\"\"\n", "issue": "Fix simple typo: whith -> with\nThere is a small typo in obspy/io/gcf/core.py.\nShould read `with` rather than `whith`.\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nGCF bindings to ObsPy core module.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom obspy import Stream, Trace, UTCDateTime\n\nfrom . import libgcf\n\n\ndef merge_gcf_stream(st):\n \"\"\"\n Merges GCF stream (replacing Stream.merge(-1) for headonly=True)\n\n :type st: :class:`~obspy.core.stream.Stream`\n :param st: GCF Stream object whith no data\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n \"\"\"\n traces = []\n for tr in st:\n delta = tr.stats.delta\n starttime = tr.stats.starttime\n endtime = tr.stats.endtime\n for trace in traces:\n if tr.id == trace.id and delta == trace.stats.delta \\\n and not starttime == trace.stats.starttime:\n if 0 < starttime - trace.stats.endtime <= delta:\n trace.stats.npts += tr.stats.npts\n break\n elif 0 < trace.stats.starttime - endtime <= delta:\n trace.stats.starttime = UTCDateTime(starttime)\n trace.stats.npts += tr.stats.npts\n break\n else:\n traces.append(tr)\n return Stream(traces=traces)\n\n\ndef _is_gcf(filename):\n \"\"\"\n Checks whether a file is GCF or not.\n\n :type filename: str\n :param filename: GCF file to be checked.\n :rtype: bool\n :return: ``True`` if a GCF file.\n \"\"\"\n try:\n with open(filename, 'rb') as f:\n libgcf.is_gcf(f)\n except Exception:\n return False\n return True\n\n\ndef _read_gcf(filename, headonly=False, **kwargs): # @UnusedVariable\n \"\"\"\n Reads a GCF file and returns a Stream object.\n\n only GCF files containing data records are supported.\n\n .. warning::\n This function should NOT be called directly, it registers via the\n ObsPy :func:`~obspy.core.stream.read` function, call this instead.\n\n :type filename: str\n :param filename: GCF file to be read.\n :type headonly: bool, optional\n :param headonly: If True read only head of GCF file.\n :type channel_prefix: str, optional\n :param channel_prefix: Channel band and instrument codes.\n Defaults to ``HH``.\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n\n .. 
rubric:: Example\n >>> from obspy import read\n >>> st = read(\"/path/to/20160603_1955n.gcf\", format=\"GCF\")\n \"\"\"\n traces = []\n with open(filename, 'rb') as f:\n while True:\n try:\n if headonly:\n header = libgcf.read_header(f, **kwargs)\n if header:\n traces.append(Trace(header=header))\n else:\n hd = libgcf.read(f, **kwargs)\n if hd:\n traces.append(Trace(header=hd[0], data=hd[1]))\n except EOFError:\n break\n st = Stream(traces=traces)\n if headonly:\n st = merge_gcf_stream(st)\n else:\n st.merge(-1)\n return st\n", "path": "obspy/io/gcf/core.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nGCF bindings to ObsPy core module.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom obspy import Stream, Trace, UTCDateTime\n\nfrom . import libgcf\n\n\ndef merge_gcf_stream(st):\n \"\"\"\n Merges GCF stream (replacing Stream.merge(-1) for headonly=True)\n\n :type st: :class:`~obspy.core.stream.Stream`\n :param st: GCF Stream object with no data\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n \"\"\"\n traces = []\n for tr in st:\n delta = tr.stats.delta\n starttime = tr.stats.starttime\n endtime = tr.stats.endtime\n for trace in traces:\n if tr.id == trace.id and delta == trace.stats.delta \\\n and not starttime == trace.stats.starttime:\n if 0 < starttime - trace.stats.endtime <= delta:\n trace.stats.npts += tr.stats.npts\n break\n elif 0 < trace.stats.starttime - endtime <= delta:\n trace.stats.starttime = UTCDateTime(starttime)\n trace.stats.npts += tr.stats.npts\n break\n else:\n traces.append(tr)\n return Stream(traces=traces)\n\n\ndef _is_gcf(filename):\n \"\"\"\n Checks whether a file is GCF or not.\n\n :type filename: str\n :param filename: GCF file to be checked.\n :rtype: bool\n :return: ``True`` if a GCF file.\n \"\"\"\n try:\n with open(filename, 'rb') as f:\n libgcf.is_gcf(f)\n except Exception:\n return False\n return True\n\n\ndef _read_gcf(filename, headonly=False, **kwargs): # @UnusedVariable\n \"\"\"\n Reads a GCF file and returns a Stream object.\n\n only GCF files containing data records are supported.\n\n .. warning::\n This function should NOT be called directly, it registers via the\n ObsPy :func:`~obspy.core.stream.read` function, call this instead.\n\n :type filename: str\n :param filename: GCF file to be read.\n :type headonly: bool, optional\n :param headonly: If True read only head of GCF file.\n :type channel_prefix: str, optional\n :param channel_prefix: Channel band and instrument codes.\n Defaults to ``HH``.\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n\n .. rubric:: Example\n >>> from obspy import read\n >>> st = read(\"/path/to/20160603_1955n.gcf\", format=\"GCF\")\n \"\"\"\n traces = []\n with open(filename, 'rb') as f:\n while True:\n try:\n if headonly:\n header = libgcf.read_header(f, **kwargs)\n if header:\n traces.append(Trace(header=header))\n else:\n hd = libgcf.read(f, **kwargs)\n if hd:\n traces.append(Trace(header=hd[0], data=hd[1]))\n except EOFError:\n break\n st = Stream(traces=traces)\n if headonly:\n st = merge_gcf_stream(st)\n else:\n st.merge(-1)\n return st\n", "path": "obspy/io/gcf/core.py"}]} | 1,269 | 146 |
gh_patches_debug_14426 | rasdani/github-patches | git_diff | boto__boto-3045 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
boto.vpc.routetable.Route does not contain route origin
RouteSet responses will contain an origin for each route, but boto.vpc.routetable.Route is not populated with the route origin:
Example response before:
```
In [8]: vars(all_route_tables[1].routes[1])
Out[8]:
{'destination_cidr_block': u'0.0.0.0/0',
'gateway_id': None,
'instance_id': u'i-123',
'interface_id': u'eni-123',
'state': u'active',
'vpc_peering_connection_id': None}
```
After:
```
In [25]: vars(all_route_tables[1].routes[1])
Out[25]:
{'destination_cidr_block': u'0.0.0.0/0',
'gateway_id': None,
'instance_id': u'i-123',
'interface_id': u'eni-123',
'origin': u'CreateRoute',
'state': u'active',
'vpc_peering_connection_id': None}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/vpc/routetable.py`
Content:
```
1 # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a Route Table
24 """
25
26 from boto.ec2.ec2object import TaggedEC2Object
27 from boto.resultset import ResultSet
28
29 class RouteTable(TaggedEC2Object):
30
31 def __init__(self, connection=None):
32 super(RouteTable, self).__init__(connection)
33 self.id = None
34 self.vpc_id = None
35 self.routes = []
36 self.associations = []
37
38 def __repr__(self):
39 return 'RouteTable:%s' % self.id
40
41 def startElement(self, name, attrs, connection):
42 result = super(RouteTable, self).startElement(name, attrs, connection)
43
44 if result is not None:
45 # Parent found an interested element, just return it
46 return result
47
48 if name == 'routeSet':
49 self.routes = ResultSet([('item', Route)])
50 return self.routes
51 elif name == 'associationSet':
52 self.associations = ResultSet([('item', RouteAssociation)])
53 return self.associations
54 else:
55 return None
56
57 def endElement(self, name, value, connection):
58 if name == 'routeTableId':
59 self.id = value
60 elif name == 'vpcId':
61 self.vpc_id = value
62 else:
63 setattr(self, name, value)
64
65 class Route(object):
66 def __init__(self, connection=None):
67 self.destination_cidr_block = None
68 self.gateway_id = None
69 self.instance_id = None
70 self.interface_id = None
71 self.vpc_peering_connection_id = None
72 self.state = None
73
74 def __repr__(self):
75 return 'Route:%s' % self.destination_cidr_block
76
77 def startElement(self, name, attrs, connection):
78 return None
79
80 def endElement(self, name, value, connection):
81 if name == 'destinationCidrBlock':
82 self.destination_cidr_block = value
83 elif name == 'gatewayId':
84 self.gateway_id = value
85 elif name == 'instanceId':
86 self.instance_id = value
87 elif name == 'networkInterfaceId':
88 self.interface_id = value
89 elif name == 'vpcPeeringConnectionId':
90 self.vpc_peering_connection_id = value
91 elif name == 'state':
92 self.state = value
93
94 class RouteAssociation(object):
95 def __init__(self, connection=None):
96 self.id = None
97 self.route_table_id = None
98 self.subnet_id = None
99 self.main = False
100
101 def __repr__(self):
102 return 'RouteAssociation:%s' % self.id
103
104 def startElement(self, name, attrs, connection):
105 return None
106
107 def endElement(self, name, value, connection):
108 if name == 'routeTableAssociationId':
109 self.id = value
110 elif name == 'routeTableId':
111 self.route_table_id = value
112 elif name == 'subnetId':
113 self.subnet_id = value
114 elif name == 'main':
115 self.main = value == 'true'
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py
--- a/boto/vpc/routetable.py
+++ b/boto/vpc/routetable.py
@@ -70,6 +70,7 @@
self.interface_id = None
self.vpc_peering_connection_id = None
self.state = None
+ self.origin = None
def __repr__(self):
return 'Route:%s' % self.destination_cidr_block
@@ -90,6 +91,8 @@
self.vpc_peering_connection_id = value
elif name == 'state':
self.state = value
+ elif name == 'origin':
+ self.origin = value
class RouteAssociation(object):
def __init__(self, connection=None):
| {"golden_diff": "diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py\n--- a/boto/vpc/routetable.py\n+++ b/boto/vpc/routetable.py\n@@ -70,6 +70,7 @@\n self.interface_id = None\n self.vpc_peering_connection_id = None\n self.state = None\n+ self.origin = None\n \n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n@@ -90,6 +91,8 @@\n self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n+ elif name == 'origin':\n+ self.origin = value\n \n class RouteAssociation(object):\n def __init__(self, connection=None):\n", "issue": "boto.vpc.routetable.Route does not contain route origin\nRouteSet responses will contain an origin for each route, but boto.vpc.routetable.Route is not populated with the route origin:\n\nExample response before:\n\n```\nIn [8]: vars(all_route_tables[1].routes[1])\nOut[8]:\n{'destination_cidr_block': u'0.0.0.0/0',\n 'gateway_id': None,\n 'instance_id': u'i-123',\n 'interface_id': u'eni-123',\n 'state': u'active',\n 'vpc_peering_connection_id': None}\n```\n\nAfter:\n\n```\nIn [25]: vars(all_route_tables[1].routes[1])\nOut[25]:\n{'destination_cidr_block': u'0.0.0.0/0',\n 'gateway_id': None,\n 'instance_id': u'i-123',\n 'interface_id': u'eni-123',\n 'origin': u'CreateRoute',\n 'state': u'active',\n 'vpc_peering_connection_id': None}\n```\n\n", "before_files": [{"content": "# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a Route Table\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\nfrom boto.resultset import ResultSet\n\nclass RouteTable(TaggedEC2Object):\n\n def __init__(self, connection=None):\n super(RouteTable, self).__init__(connection)\n self.id = None\n self.vpc_id = None\n self.routes = []\n self.associations = []\n\n def __repr__(self):\n return 'RouteTable:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n result = super(RouteTable, self).startElement(name, attrs, connection)\n\n if result is not None:\n # Parent found an interested element, just return it\n return result\n\n if name == 'routeSet':\n self.routes = ResultSet([('item', Route)])\n return self.routes\n elif name == 'associationSet':\n self.associations = ResultSet([('item', RouteAssociation)])\n return self.associations\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableId':\n self.id = value\n elif name == 'vpcId':\n self.vpc_id = value\n else:\n setattr(self, name, value)\n\nclass Route(object):\n def __init__(self, connection=None):\n self.destination_cidr_block = None\n self.gateway_id = None\n self.instance_id = None\n self.interface_id = None\n self.vpc_peering_connection_id = None\n self.state = None\n\n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'destinationCidrBlock':\n self.destination_cidr_block = value\n elif name == 'gatewayId':\n self.gateway_id = value\n elif name == 'instanceId':\n self.instance_id = value\n elif name == 'networkInterfaceId':\n self.interface_id = value\n elif name == 'vpcPeeringConnectionId':\n self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n\nclass RouteAssociation(object):\n def __init__(self, connection=None):\n self.id = None\n self.route_table_id = None\n self.subnet_id = None\n self.main = False\n\n def __repr__(self):\n return 'RouteAssociation:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableAssociationId':\n self.id = value\n elif name == 'routeTableId':\n self.route_table_id = value\n elif name == 'subnetId':\n self.subnet_id = value\n elif name == 'main':\n self.main = value == 'true'\n", "path": "boto/vpc/routetable.py"}], "after_files": [{"content": "# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# 
ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a Route Table\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\nfrom boto.resultset import ResultSet\n\nclass RouteTable(TaggedEC2Object):\n\n def __init__(self, connection=None):\n super(RouteTable, self).__init__(connection)\n self.id = None\n self.vpc_id = None\n self.routes = []\n self.associations = []\n\n def __repr__(self):\n return 'RouteTable:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n result = super(RouteTable, self).startElement(name, attrs, connection)\n\n if result is not None:\n # Parent found an interested element, just return it\n return result\n\n if name == 'routeSet':\n self.routes = ResultSet([('item', Route)])\n return self.routes\n elif name == 'associationSet':\n self.associations = ResultSet([('item', RouteAssociation)])\n return self.associations\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableId':\n self.id = value\n elif name == 'vpcId':\n self.vpc_id = value\n else:\n setattr(self, name, value)\n\nclass Route(object):\n def __init__(self, connection=None):\n self.destination_cidr_block = None\n self.gateway_id = None\n self.instance_id = None\n self.interface_id = None\n self.vpc_peering_connection_id = None\n self.state = None\n self.origin = None\n\n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'destinationCidrBlock':\n self.destination_cidr_block = value\n elif name == 'gatewayId':\n self.gateway_id = value\n elif name == 'instanceId':\n self.instance_id = value\n elif name == 'networkInterfaceId':\n self.interface_id = value\n elif name == 'vpcPeeringConnectionId':\n self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n elif name == 'origin':\n self.origin = value\n\nclass RouteAssociation(object):\n def __init__(self, connection=None):\n self.id = None\n self.route_table_id = None\n self.subnet_id = None\n self.main = False\n\n def __repr__(self):\n return 'RouteAssociation:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableAssociationId':\n self.id = value\n elif name == 'routeTableId':\n self.route_table_id = value\n elif name == 'subnetId':\n self.subnet_id = value\n elif name == 'main':\n self.main = value == 'true'\n", "path": "boto/vpc/routetable.py"}]} | 1,637 | 174 |
gh_patches_debug_24565 | rasdani/github-patches | git_diff | borgbackup__borg-4393 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update bundled zstd code
we have 1.3.4 bundled, current see there: https://github.com/facebook/zstd/releases
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup_zstd.py`
Content:
```
1 # Support code for building a C extension with zstd files
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
4 # 2017-present, Thomas Waldmann (mods to make it more generic)
5 # All rights reserved.
6 #
7 # This software may be modified and distributed under the terms
8 # of the BSD license. See the LICENSE file for details.
9
10 import os
11
12 # zstd files, structure as seen in zstd project repository:
13
14 zstd_sources = [
15 'lib/common/entropy_common.c',
16 'lib/common/error_private.c',
17 'lib/common/fse_decompress.c',
18 'lib/common/pool.c',
19 'lib/common/threading.c',
20 'lib/common/xxhash.c',
21 'lib/common/zstd_common.c',
22 'lib/compress/fse_compress.c',
23 'lib/compress/huf_compress.c',
24 'lib/compress/zstd_compress.c',
25 'lib/compress/zstd_double_fast.c',
26 'lib/compress/zstd_fast.c',
27 'lib/compress/zstd_lazy.c',
28 'lib/compress/zstd_ldm.c',
29 'lib/compress/zstd_opt.c',
30 'lib/compress/zstdmt_compress.c',
31 'lib/decompress/huf_decompress.c',
32 'lib/decompress/zstd_decompress.c',
33 'lib/dictBuilder/cover.c',
34 'lib/dictBuilder/divsufsort.c',
35 'lib/dictBuilder/zdict.c',
36 ]
37
38 zstd_sources_legacy = [
39 'lib/deprecated/zbuff_common.c',
40 'lib/deprecated/zbuff_compress.c',
41 'lib/deprecated/zbuff_decompress.c',
42 'lib/legacy/zstd_v01.c',
43 'lib/legacy/zstd_v02.c',
44 'lib/legacy/zstd_v03.c',
45 'lib/legacy/zstd_v04.c',
46 'lib/legacy/zstd_v05.c',
47 'lib/legacy/zstd_v06.c',
48 'lib/legacy/zstd_v07.c',
49 ]
50
51 zstd_includes = [
52 'lib',
53 'lib/common',
54 'lib/compress',
55 'lib/decompress',
56 'lib/dictBuilder',
57 ]
58
59 zstd_includes_legacy = [
60 'lib/deprecated',
61 'lib/legacy',
62 ]
63
64
65 def zstd_system_prefix(prefixes):
66 for prefix in prefixes:
67 filename = os.path.join(prefix, 'include', 'zstd.h')
68 if os.path.exists(filename):
69 with open(filename, 'rb') as fd:
70 if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0
71 return prefix
72
73
74 def zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs):
75 """amend kwargs with zstd suff for a distutils.extension.Extension initialization.
76
77 bundled_path: relative (to this file) path to the bundled library source code files
78 system_prefix: where the system-installed library can be found
79 system: True: use the system-installed shared library, False: use the bundled library code
80 multithreaded: True: define ZSTD_MULTITHREAD
81 legacy: include legacy API support
82 kwargs: distutils.extension.Extension kwargs that should be amended
83 returns: amended kwargs
84 """
85 def multi_join(paths, *path_segments):
86 """apply os.path.join on a list of paths"""
87 return [os.path.join(*(path_segments + (path, ))) for path in paths]
88
89 use_system = system and system_prefix is not None
90
91 sources = kwargs.get('sources', [])
92 if not use_system:
93 sources += multi_join(zstd_sources, bundled_path)
94 if legacy:
95 sources += multi_join(zstd_sources_legacy, bundled_path)
96
97 include_dirs = kwargs.get('include_dirs', [])
98 if use_system:
99 include_dirs += multi_join(['include'], system_prefix)
100 else:
101 include_dirs += multi_join(zstd_includes, bundled_path)
102 if legacy:
103 include_dirs += multi_join(zstd_includes_legacy, bundled_path)
104
105 library_dirs = kwargs.get('library_dirs', [])
106 if use_system:
107 library_dirs += multi_join(['lib'], system_prefix)
108
109 libraries = kwargs.get('libraries', [])
110 if use_system:
111 libraries += ['zstd', ]
112
113 extra_compile_args = kwargs.get('extra_compile_args', [])
114 if multithreaded:
115 extra_compile_args += ['-DZSTD_MULTITHREAD', ]
116 if not use_system:
117 extra_compile_args += ['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ]
118 # '-fvisibility=hidden' does not work, doesn't find PyInit_compress then
119 if legacy:
120 extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ]
121
122 ret = dict(**kwargs)
123 ret.update(dict(sources=sources, extra_compile_args=extra_compile_args,
124 include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries))
125 return ret
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup_zstd.py b/setup_zstd.py
--- a/setup_zstd.py
+++ b/setup_zstd.py
@@ -12,6 +12,7 @@
# zstd files, structure as seen in zstd project repository:
zstd_sources = [
+ 'lib/common/debug.c',
'lib/common/entropy_common.c',
'lib/common/error_private.c',
'lib/common/fse_decompress.c',
@@ -20,6 +21,7 @@
'lib/common/xxhash.c',
'lib/common/zstd_common.c',
'lib/compress/fse_compress.c',
+ 'lib/compress/hist.c',
'lib/compress/huf_compress.c',
'lib/compress/zstd_compress.c',
'lib/compress/zstd_double_fast.c',
@@ -29,9 +31,12 @@
'lib/compress/zstd_opt.c',
'lib/compress/zstdmt_compress.c',
'lib/decompress/huf_decompress.c',
+ 'lib/decompress/zstd_ddict.c',
'lib/decompress/zstd_decompress.c',
+ 'lib/decompress/zstd_decompress_block.c',
'lib/dictBuilder/cover.c',
'lib/dictBuilder/divsufsort.c',
+ 'lib/dictBuilder/fastcover.c',
'lib/dictBuilder/zdict.c',
]
| {"golden_diff": "diff --git a/setup_zstd.py b/setup_zstd.py\n--- a/setup_zstd.py\n+++ b/setup_zstd.py\n@@ -12,6 +12,7 @@\n # zstd files, structure as seen in zstd project repository:\n \n zstd_sources = [\n+ 'lib/common/debug.c',\n 'lib/common/entropy_common.c',\n 'lib/common/error_private.c',\n 'lib/common/fse_decompress.c',\n@@ -20,6 +21,7 @@\n 'lib/common/xxhash.c',\n 'lib/common/zstd_common.c',\n 'lib/compress/fse_compress.c',\n+ 'lib/compress/hist.c',\n 'lib/compress/huf_compress.c',\n 'lib/compress/zstd_compress.c',\n 'lib/compress/zstd_double_fast.c',\n@@ -29,9 +31,12 @@\n 'lib/compress/zstd_opt.c',\n 'lib/compress/zstdmt_compress.c',\n 'lib/decompress/huf_decompress.c',\n+ 'lib/decompress/zstd_ddict.c',\n 'lib/decompress/zstd_decompress.c',\n+ 'lib/decompress/zstd_decompress_block.c',\n 'lib/dictBuilder/cover.c',\n 'lib/dictBuilder/divsufsort.c',\n+ 'lib/dictBuilder/fastcover.c',\n 'lib/dictBuilder/zdict.c',\n ]\n", "issue": "update bundled zstd code\nwe have 1.3.4 bundled, current see there: https://github.com/facebook/zstd/releases\n", "before_files": [{"content": "# Support code for building a C extension with zstd files\n#\n# Copyright (c) 2016-present, Gregory Szorc\n# 2017-present, Thomas Waldmann (mods to make it more generic)\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the BSD license. See the LICENSE file for details.\n\nimport os\n\n# zstd files, structure as seen in zstd project repository:\n\nzstd_sources = [\n 'lib/common/entropy_common.c',\n 'lib/common/error_private.c',\n 'lib/common/fse_decompress.c',\n 'lib/common/pool.c',\n 'lib/common/threading.c',\n 'lib/common/xxhash.c',\n 'lib/common/zstd_common.c',\n 'lib/compress/fse_compress.c',\n 'lib/compress/huf_compress.c',\n 'lib/compress/zstd_compress.c',\n 'lib/compress/zstd_double_fast.c',\n 'lib/compress/zstd_fast.c',\n 'lib/compress/zstd_lazy.c',\n 'lib/compress/zstd_ldm.c',\n 'lib/compress/zstd_opt.c',\n 'lib/compress/zstdmt_compress.c',\n 'lib/decompress/huf_decompress.c',\n 'lib/decompress/zstd_decompress.c',\n 'lib/dictBuilder/cover.c',\n 'lib/dictBuilder/divsufsort.c',\n 'lib/dictBuilder/zdict.c',\n]\n\nzstd_sources_legacy = [\n 'lib/deprecated/zbuff_common.c',\n 'lib/deprecated/zbuff_compress.c',\n 'lib/deprecated/zbuff_decompress.c',\n 'lib/legacy/zstd_v01.c',\n 'lib/legacy/zstd_v02.c',\n 'lib/legacy/zstd_v03.c',\n 'lib/legacy/zstd_v04.c',\n 'lib/legacy/zstd_v05.c',\n 'lib/legacy/zstd_v06.c',\n 'lib/legacy/zstd_v07.c',\n]\n\nzstd_includes = [\n 'lib',\n 'lib/common',\n 'lib/compress',\n 'lib/decompress',\n 'lib/dictBuilder',\n]\n\nzstd_includes_legacy = [\n 'lib/deprecated',\n 'lib/legacy',\n]\n\n\ndef zstd_system_prefix(prefixes):\n for prefix in prefixes:\n filename = os.path.join(prefix, 'include', 'zstd.h')\n if os.path.exists(filename):\n with open(filename, 'rb') as fd:\n if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0\n return prefix\n\n\ndef zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs):\n \"\"\"amend kwargs with zstd suff for a distutils.extension.Extension initialization.\n\n bundled_path: relative (to this file) path to the bundled library source code files\n system_prefix: where the system-installed library can be found\n system: True: use the system-installed shared library, False: use the bundled library code\n multithreaded: True: define ZSTD_MULTITHREAD\n legacy: include legacy API support\n kwargs: distutils.extension.Extension kwargs that should be 
amended\n returns: amended kwargs\n \"\"\"\n def multi_join(paths, *path_segments):\n \"\"\"apply os.path.join on a list of paths\"\"\"\n return [os.path.join(*(path_segments + (path, ))) for path in paths]\n\n use_system = system and system_prefix is not None\n\n sources = kwargs.get('sources', [])\n if not use_system:\n sources += multi_join(zstd_sources, bundled_path)\n if legacy:\n sources += multi_join(zstd_sources_legacy, bundled_path)\n\n include_dirs = kwargs.get('include_dirs', [])\n if use_system:\n include_dirs += multi_join(['include'], system_prefix)\n else:\n include_dirs += multi_join(zstd_includes, bundled_path)\n if legacy:\n include_dirs += multi_join(zstd_includes_legacy, bundled_path)\n\n library_dirs = kwargs.get('library_dirs', [])\n if use_system:\n library_dirs += multi_join(['lib'], system_prefix)\n\n libraries = kwargs.get('libraries', [])\n if use_system:\n libraries += ['zstd', ]\n\n extra_compile_args = kwargs.get('extra_compile_args', [])\n if multithreaded:\n extra_compile_args += ['-DZSTD_MULTITHREAD', ]\n if not use_system:\n extra_compile_args += ['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ]\n # '-fvisibility=hidden' does not work, doesn't find PyInit_compress then\n if legacy:\n extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ]\n\n ret = dict(**kwargs)\n ret.update(dict(sources=sources, extra_compile_args=extra_compile_args,\n include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries))\n return ret\n", "path": "setup_zstd.py"}], "after_files": [{"content": "# Support code for building a C extension with zstd files\n#\n# Copyright (c) 2016-present, Gregory Szorc\n# 2017-present, Thomas Waldmann (mods to make it more generic)\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the BSD license. 
See the LICENSE file for details.\n\nimport os\n\n# zstd files, structure as seen in zstd project repository:\n\nzstd_sources = [\n 'lib/common/debug.c',\n 'lib/common/entropy_common.c',\n 'lib/common/error_private.c',\n 'lib/common/fse_decompress.c',\n 'lib/common/pool.c',\n 'lib/common/threading.c',\n 'lib/common/xxhash.c',\n 'lib/common/zstd_common.c',\n 'lib/compress/fse_compress.c',\n 'lib/compress/hist.c',\n 'lib/compress/huf_compress.c',\n 'lib/compress/zstd_compress.c',\n 'lib/compress/zstd_double_fast.c',\n 'lib/compress/zstd_fast.c',\n 'lib/compress/zstd_lazy.c',\n 'lib/compress/zstd_ldm.c',\n 'lib/compress/zstd_opt.c',\n 'lib/compress/zstdmt_compress.c',\n 'lib/decompress/huf_decompress.c',\n 'lib/decompress/zstd_ddict.c',\n 'lib/decompress/zstd_decompress.c',\n 'lib/decompress/zstd_decompress_block.c',\n 'lib/dictBuilder/cover.c',\n 'lib/dictBuilder/divsufsort.c',\n 'lib/dictBuilder/fastcover.c',\n 'lib/dictBuilder/zdict.c',\n]\n\nzstd_sources_legacy = [\n 'lib/deprecated/zbuff_common.c',\n 'lib/deprecated/zbuff_compress.c',\n 'lib/deprecated/zbuff_decompress.c',\n 'lib/legacy/zstd_v01.c',\n 'lib/legacy/zstd_v02.c',\n 'lib/legacy/zstd_v03.c',\n 'lib/legacy/zstd_v04.c',\n 'lib/legacy/zstd_v05.c',\n 'lib/legacy/zstd_v06.c',\n 'lib/legacy/zstd_v07.c',\n]\n\nzstd_includes = [\n 'lib',\n 'lib/common',\n 'lib/compress',\n 'lib/decompress',\n 'lib/dictBuilder',\n]\n\nzstd_includes_legacy = [\n 'lib/deprecated',\n 'lib/legacy',\n]\n\n\ndef zstd_system_prefix(prefixes):\n for prefix in prefixes:\n filename = os.path.join(prefix, 'include', 'zstd.h')\n if os.path.exists(filename):\n with open(filename, 'rb') as fd:\n if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0\n return prefix\n\n\ndef zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs):\n \"\"\"amend kwargs with zstd suff for a distutils.extension.Extension initialization.\n\n bundled_path: relative (to this file) path to the bundled library source code files\n system_prefix: where the system-installed library can be found\n system: True: use the system-installed shared library, False: use the bundled library code\n multithreaded: True: define ZSTD_MULTITHREAD\n legacy: include legacy API support\n kwargs: distutils.extension.Extension kwargs that should be amended\n returns: amended kwargs\n \"\"\"\n def multi_join(paths, *path_segments):\n \"\"\"apply os.path.join on a list of paths\"\"\"\n return [os.path.join(*(path_segments + (path, ))) for path in paths]\n\n use_system = system and system_prefix is not None\n\n sources = kwargs.get('sources', [])\n if not use_system:\n sources += multi_join(zstd_sources, bundled_path)\n if legacy:\n sources += multi_join(zstd_sources_legacy, bundled_path)\n\n include_dirs = kwargs.get('include_dirs', [])\n if use_system:\n include_dirs += multi_join(['include'], system_prefix)\n else:\n include_dirs += multi_join(zstd_includes, bundled_path)\n if legacy:\n include_dirs += multi_join(zstd_includes_legacy, bundled_path)\n\n library_dirs = kwargs.get('library_dirs', [])\n if use_system:\n library_dirs += multi_join(['lib'], system_prefix)\n\n libraries = kwargs.get('libraries', [])\n if use_system:\n libraries += ['zstd', ]\n\n extra_compile_args = kwargs.get('extra_compile_args', [])\n if multithreaded:\n extra_compile_args += ['-DZSTD_MULTITHREAD', ]\n if not use_system:\n extra_compile_args += ['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ]\n # '-fvisibility=hidden' does not 
work, doesn't find PyInit_compress then\n if legacy:\n extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ]\n\n ret = dict(**kwargs)\n ret.update(dict(sources=sources, extra_compile_args=extra_compile_args,\n include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries))\n return ret\n", "path": "setup_zstd.py"}]} | 1,660 | 301 |
gh_patches_debug_2260 | rasdani/github-patches | git_diff | googleapis__python-bigquery-859 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase default timeout of retry objects to 10 minutes
Per internal issue 195337762, the general timeout for jobs.insert API is 4 minutes. We should increase our default deadline to 10 minutes to allow for at least 1 retry if the first request fails.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/retry.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from google.api_core import exceptions
16 from google.api_core import retry
17 from google.auth import exceptions as auth_exceptions
18 import requests.exceptions
19
20
21 _RETRYABLE_REASONS = frozenset(
22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
23 )
24
25 _UNSTRUCTURED_RETRYABLE_TYPES = (
26 ConnectionError,
27 exceptions.TooManyRequests,
28 exceptions.InternalServerError,
29 exceptions.BadGateway,
30 requests.exceptions.ChunkedEncodingError,
31 requests.exceptions.ConnectionError,
32 auth_exceptions.TransportError,
33 )
34
35
36 def _should_retry(exc):
37 """Predicate for determining when to retry.
38
39 We retry if and only if the 'reason' is 'backendError'
40 or 'rateLimitExceeded'.
41 """
42 if not hasattr(exc, "errors") or len(exc.errors) == 0:
43 # Check for unstructured error returns, e.g. from GFE
44 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
45
46 reason = exc.errors[0]["reason"]
47 return reason in _RETRYABLE_REASONS
48
49
50 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
51 """The default retry object.
52
53 Any method with a ``retry`` parameter will be retried automatically,
54 with reasonable defaults. To disable retry, pass ``retry=None``.
55 To modify the default retry behavior, call a ``with_XXX`` method
56 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
57 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
58 """
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py
--- a/google/cloud/bigquery/retry.py
+++ b/google/cloud/bigquery/retry.py
@@ -47,7 +47,7 @@
return reason in _RETRYABLE_REASONS
-DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
+DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)
"""The default retry object.
Any method with a ``retry`` parameter will be retried automatically,
| {"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -47,7 +47,7 @@\n return reason in _RETRYABLE_REASONS\n \n \n-DEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n+DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n \"\"\"The default retry object.\n \n Any method with a ``retry`` parameter will be retried automatically,\n", "issue": "Increase default timeout of retry objects to 10 minutes\nPer internal issue 195337762, the general timeout for jobs.insert API is 4 minutes. We should increase our default deadline to 10 minutes to allow for at least 1 retry if the first request fails.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. 
For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]} | 893 | 118 |
gh_patches_debug_24323 | rasdani/github-patches | git_diff | cltk__cltk-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lexicon process for Latin fails on regex special characters
LatinLexiconProcess fails when regex special characters, e.g. single open parenthesis (i.e. ```(```) are included in tokenized input. Occurred while running MacOS 11.4; Python 3.9.5; CLTK 1.0.15; regex 2021.4.4 (but should fail in any case when this input is passed to the regex module). The solution is to escape the input ```lemma``` before running ```regex.match``` at https://github.com/cltk/cltk/blob/5dbfcf6fccade146d322cae036b35533aec59286/src/cltk/lexicon/lat.py#L70
I have written the patch and will make a PR soon.
Example and traceback:
```
from cltk import NLP
text = "Omnes igitur partes mundi (tangam autem maximas) calore fultae sustinentur." # Cic. Nat. D. 2.25
cltk_nlp = NLP(language="lat")
cltk_doc = cltk_nlp.analyze(text=test)
```
```
Traceback (most recent call last):
File "test.py", line 4, in <module>
cltk_doc = cltk_nlp.analyze(text=text)
File "[PATH]/lib/python3.9/site-packages/cltk/nlp.py", line 142, in analyze
doc = a_process.run(doc)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/processes.py", line 45, in run
word.definition = lookup_algo.lookup(word.lemma)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in lookup
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in <listcomp>
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 253, in match
pat = _compile(pattern, flags, ignore_unused, kwargs, True)
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 532, in _compile
raise error(caught_exception.msg, caught_exception.pattern,
regex._regex_core.error: missing ) at position 9
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cltk/lexicon/lat.py`
Content:
```
1 """Code for querying Latin language dictionaries/lexicons."""
2
3 import regex
4 import yaml
5
6 from cltk.core.exceptions import CLTKException
7 from cltk.data.fetch import FetchCorpus
8 from cltk.utils.file_operations import make_cltk_path
9 from cltk.utils.utils import query_yes_no
10
11 __author__ = ["Clément Besnier <[email protected]>"]
12
13
14 class LatinLewisLexicon:
15 """Access a digital form of Charlton T. Lewis's *An Elementary Latin Dictionary* (1890)."""
16
17 def __init__(self, interactive: bool = True):
18 self.interactive = interactive
19 self.lewis_yaml_fp = make_cltk_path(
20 "lat", "lexicon", "cltk_lat_lewis_elementary_lexicon", "lewis.yaml"
21 )
22 try:
23 self.entries = self._load_entries()
24 except FileNotFoundError:
25 if self.interactive:
26 dl_msg = f"This part of the CLTK depends upon Lewis's *An Elementary Latin Dictionary* (1890)."
27 print(dl_msg)
28 dl_question = "Do you want to download this?"
29 do_download = query_yes_no(question=dl_question)
30 else:
31 do_download = True
32 if do_download:
33 fetch_corpus = FetchCorpus(language="lat")
34 fetch_corpus.import_corpus(
35 corpus_name="cltk_lat_lewis_elementary_lexicon"
36 )
37 else:
38 raise CLTKException(
39 f"File '{self.lewis_yaml_fp}' is not found. It is required for this class."
40 )
41 self.entries = self._load_entries()
42
43 def lookup(self, lemma: str) -> str:
44 """Perform match of a lemma against headwords. If more than one match,
45 then return the concatenated entries. For example:
46
47 >>> lll = LatinLewisLexicon()
48 >>> lll.lookup("clemens")[:50]
49 'clēmēns entis (abl. -tī; rarely -te, L.), adj. wit'
50 >>> lll.lookup("omnia")
51 ''
52 >>> lll.lookup(".")
53 ''
54 >>> lll.lookup("123")
55 ''
56 >>> lll.lookup("175.")
57 ''
58 """
59 if not self.entries:
60 raise CLTKException(
61 "No lexicon entries found in the .yaml file. This should never happen."
62 )
63
64 if regex.match(r"^[0-9\.\?,\:;\!\<\>\-]*$", lemma) is not None:
65 return ""
66
67 lemma = lemma.lower()
68
69 keys = self.entries.keys()
70 matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
71 n_matches = len(matches)
72 if n_matches > 1:
73 return "\n".join([self.entries[key] for key in matches])
74 elif n_matches == 1:
75 return self.entries[matches[0]]
76 else:
77 return ""
78
79 def _load_entries(self):
80 """Read the yaml file of the lexion."""
81 with open(self.lewis_yaml_fp) as file_open:
82 entries = yaml.load(file_open, Loader=yaml.Loader)
83 return entries
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cltk/lexicon/lat.py b/src/cltk/lexicon/lat.py
--- a/src/cltk/lexicon/lat.py
+++ b/src/cltk/lexicon/lat.py
@@ -47,6 +47,8 @@
>>> lll = LatinLewisLexicon()
>>> lll.lookup("clemens")[:50]
'clēmēns entis (abl. -tī; rarely -te, L.), adj. wit'
+ >>> all(word in lll.lookup("levis") for word in ["levis","lēvis"]) # Test for concatenated entries
+ True
>>> lll.lookup("omnia")
''
>>> lll.lookup(".")
@@ -55,6 +57,8 @@
''
>>> lll.lookup("175.")
''
+ >>> lll.lookup("(") # Test for regex special character
+ ''
"""
if not self.entries:
raise CLTKException(
@@ -64,7 +68,7 @@
if regex.match(r"^[0-9\.\?,\:;\!\<\>\-]*$", lemma) is not None:
return ""
- lemma = lemma.lower()
+ lemma = regex.escape(lemma.lower())
keys = self.entries.keys()
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
| {"golden_diff": "diff --git a/src/cltk/lexicon/lat.py b/src/cltk/lexicon/lat.py\n--- a/src/cltk/lexicon/lat.py\n+++ b/src/cltk/lexicon/lat.py\n@@ -47,6 +47,8 @@\n >>> lll = LatinLewisLexicon()\n >>> lll.lookup(\"clemens\")[:50]\n 'cl\u0113m\u0113ns entis (abl. -t\u012b; rarely -te, L.), adj. wit'\n+ >>> all(word in lll.lookup(\"levis\") for word in [\"levis\",\"l\u0113vis\"]) # Test for concatenated entries\n+ True\n >>> lll.lookup(\"omnia\")\n ''\n >>> lll.lookup(\".\")\n@@ -55,6 +57,8 @@\n ''\n >>> lll.lookup(\"175.\")\n ''\n+ >>> lll.lookup(\"(\") # Test for regex special character\n+ ''\n \"\"\"\n if not self.entries:\n raise CLTKException(\n@@ -64,7 +68,7 @@\n if regex.match(r\"^[0-9\\.\\?,\\:;\\!\\<\\>\\-]*$\", lemma) is not None:\n return \"\"\n \n- lemma = lemma.lower()\n+ lemma = regex.escape(lemma.lower())\n \n keys = self.entries.keys()\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\n", "issue": "Lexicon process for Latin fails on regex special characters\nLatinLexiconProcess fails when regex special characters, e.g. single open parenthesis (i.e. ```(```) are included in tokenized input. Occurred while running MacOS 11.4; Python 3.9.5; CLTK 1.0.15; regex 2021.4.4 (but should fail in any case when this input is passed to the regex module). The solution is to escape the input ```lemma``` before running ```regex.match``` at https://github.com/cltk/cltk/blob/5dbfcf6fccade146d322cae036b35533aec59286/src/cltk/lexicon/lat.py#L70\r\n\r\nI have written the patch and will make a PR soon.\r\n\r\nExample and traceback:\r\n\r\n```\r\nfrom cltk import NLP\r\ntext = \"Omnes igitur partes mundi (tangam autem maximas) calore fultae sustinentur.\" # Cic. Nat. D. 2.25\r\ncltk_nlp = NLP(language=\"lat\")\r\ncltk_doc = cltk_nlp.analyze(text=test)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 4, in <module>\r\n cltk_doc = cltk_nlp.analyze(text=text)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/nlp.py\", line 142, in analyze\r\n doc = a_process.run(doc)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/processes.py\", line 45, in run\r\n word.definition = lookup_algo.lookup(word.lemma)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py\", line 70, in lookup\r\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py\", line 70, in <listcomp>\r\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\r\n File \"[PATH]/lib/python3.9/site-packages/regex/regex.py\", line 253, in match\r\n pat = _compile(pattern, flags, ignore_unused, kwargs, True)\r\n File \"[PATH]/lib/python3.9/site-packages/regex/regex.py\", line 532, in _compile\r\n raise error(caught_exception.msg, caught_exception.pattern,\r\nregex._regex_core.error: missing ) at position 9\r\n```\n", "before_files": [{"content": "\"\"\"Code for querying Latin language dictionaries/lexicons.\"\"\"\n\nimport regex\nimport yaml\n\nfrom cltk.core.exceptions import CLTKException\nfrom cltk.data.fetch import FetchCorpus\nfrom cltk.utils.file_operations import make_cltk_path\nfrom cltk.utils.utils import query_yes_no\n\n__author__ = [\"Cl\u00e9ment Besnier <[email protected]>\"]\n\n\nclass LatinLewisLexicon:\n \"\"\"Access a digital form of Charlton T. 
Lewis's *An Elementary Latin Dictionary* (1890).\"\"\"\n\n def __init__(self, interactive: bool = True):\n self.interactive = interactive\n self.lewis_yaml_fp = make_cltk_path(\n \"lat\", \"lexicon\", \"cltk_lat_lewis_elementary_lexicon\", \"lewis.yaml\"\n )\n try:\n self.entries = self._load_entries()\n except FileNotFoundError:\n if self.interactive:\n dl_msg = f\"This part of the CLTK depends upon Lewis's *An Elementary Latin Dictionary* (1890).\"\n print(dl_msg)\n dl_question = \"Do you want to download this?\"\n do_download = query_yes_no(question=dl_question)\n else:\n do_download = True\n if do_download:\n fetch_corpus = FetchCorpus(language=\"lat\")\n fetch_corpus.import_corpus(\n corpus_name=\"cltk_lat_lewis_elementary_lexicon\"\n )\n else:\n raise CLTKException(\n f\"File '{self.lewis_yaml_fp}' is not found. It is required for this class.\"\n )\n self.entries = self._load_entries()\n\n def lookup(self, lemma: str) -> str:\n \"\"\"Perform match of a lemma against headwords. If more than one match,\n then return the concatenated entries. For example:\n\n >>> lll = LatinLewisLexicon()\n >>> lll.lookup(\"clemens\")[:50]\n 'cl\u0113m\u0113ns entis (abl. -t\u012b; rarely -te, L.), adj. wit'\n >>> lll.lookup(\"omnia\")\n ''\n >>> lll.lookup(\".\")\n ''\n >>> lll.lookup(\"123\")\n ''\n >>> lll.lookup(\"175.\")\n ''\n \"\"\"\n if not self.entries:\n raise CLTKException(\n \"No lexicon entries found in the .yaml file. This should never happen.\"\n )\n\n if regex.match(r\"^[0-9\\.\\?,\\:;\\!\\<\\>\\-]*$\", lemma) is not None:\n return \"\"\n\n lemma = lemma.lower()\n\n keys = self.entries.keys()\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\n n_matches = len(matches)\n if n_matches > 1:\n return \"\\n\".join([self.entries[key] for key in matches])\n elif n_matches == 1:\n return self.entries[matches[0]]\n else:\n return \"\"\n\n def _load_entries(self):\n \"\"\"Read the yaml file of the lexion.\"\"\"\n with open(self.lewis_yaml_fp) as file_open:\n entries = yaml.load(file_open, Loader=yaml.Loader)\n return entries\n", "path": "src/cltk/lexicon/lat.py"}], "after_files": [{"content": "\"\"\"Code for querying Latin language dictionaries/lexicons.\"\"\"\n\nimport regex\nimport yaml\n\nfrom cltk.core.exceptions import CLTKException\nfrom cltk.data.fetch import FetchCorpus\nfrom cltk.utils.file_operations import make_cltk_path\nfrom cltk.utils.utils import query_yes_no\n\n__author__ = [\"Cl\u00e9ment Besnier <[email protected]>\"]\n\n\nclass LatinLewisLexicon:\n \"\"\"Access a digital form of Charlton T. Lewis's *An Elementary Latin Dictionary* (1890).\"\"\"\n\n def __init__(self, interactive: bool = True):\n self.interactive = interactive\n self.lewis_yaml_fp = make_cltk_path(\n \"lat\", \"lexicon\", \"cltk_lat_lewis_elementary_lexicon\", \"lewis.yaml\"\n )\n try:\n self.entries = self._load_entries()\n except FileNotFoundError:\n if self.interactive:\n dl_msg = f\"This part of the CLTK depends upon Lewis's *An Elementary Latin Dictionary* (1890).\"\n print(dl_msg)\n dl_question = \"Do you want to download this?\"\n do_download = query_yes_no(question=dl_question)\n else:\n do_download = True\n if do_download:\n fetch_corpus = FetchCorpus(language=\"lat\")\n fetch_corpus.import_corpus(\n corpus_name=\"cltk_lat_lewis_elementary_lexicon\"\n )\n else:\n raise CLTKException(\n f\"File '{self.lewis_yaml_fp}' is not found. 
It is required for this class.\"\n )\n self.entries = self._load_entries()\n\n def lookup(self, lemma: str) -> str:\n \"\"\"Perform match of a lemma against headwords. If more than one match,\n then return the concatenated entries. For example:\n\n >>> lll = LatinLewisLexicon()\n >>> lll.lookup(\"clemens\")[:50]\n 'cl\u0113m\u0113ns entis (abl. -t\u012b; rarely -te, L.), adj. wit'\n >>> all(word in lll.lookup(\"levis\") for word in [\"levis\",\"l\u0113vis\"]) # Test for concatenated entries\n True\n >>> lll.lookup(\"omnia\")\n ''\n >>> lll.lookup(\".\")\n ''\n >>> lll.lookup(\"123\")\n ''\n >>> lll.lookup(\"175.\")\n ''\n >>> lll.lookup(\"(\") # Test for regex special character\n ''\n \"\"\"\n if not self.entries:\n raise CLTKException(\n \"No lexicon entries found in the .yaml file. This should never happen.\"\n )\n\n if regex.match(r\"^[0-9\\.\\?,\\:;\\!\\<\\>\\-]*$\", lemma) is not None:\n return \"\"\n\n lemma = regex.escape(lemma.lower())\n\n keys = self.entries.keys()\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\n n_matches = len(matches)\n if n_matches > 1:\n return \"\\n\".join([self.entries[key] for key in matches])\n elif n_matches == 1:\n return self.entries[matches[0]]\n else:\n return \"\"\n\n def _load_entries(self):\n \"\"\"Read the yaml file of the lexion.\"\"\"\n with open(self.lewis_yaml_fp) as file_open:\n entries = yaml.load(file_open, Loader=yaml.Loader)\n return entries\n", "path": "src/cltk/lexicon/lat.py"}]} | 1,693 | 314 |
gh_patches_debug_1348 | rasdani/github-patches | git_diff | translate__pootle-5024 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception in terminology management view
When visiting https://mozilla.locamotion.org/eu/firefox/terminology/ the following exception is thrown:
`'SortedRelatedManager' object does not support indexing`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_terminology/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.core.urlresolvers import reverse
10 from django.shortcuts import render
11
12 from pootle.core.decorators import get_path_obj, permission_required
13 from pootle_app.views.admin import util
14 from pootle_store.models import Store, Unit
15
16 from .forms import term_unit_form_factory
17
18
19 def get_terminology_filename(translation_project):
20 try:
21 # See if a terminology store already exists
22 return translation_project.stores.live().filter(
23 name__startswith='pootle-terminology.',
24 ).values_list('name', flat=True)[0]
25 except IndexError:
26 pass
27
28 return (
29 'pootle-terminology.%s'
30 % translation_project.project.filetypes[0].extension)
31
32
33 def manage_store(request, ctx, language, term_store):
34 TermUnitForm = term_unit_form_factory(term_store)
35 template_name = 'translation_projects/terminology/manage.html'
36 return util.edit(request, template_name, Unit, ctx,
37 None, None, queryset=term_store.units, can_delete=True,
38 form=TermUnitForm)
39
40
41 @get_path_obj
42 @permission_required('administrate')
43 def manage(request, translation_project):
44 ctx = {
45 'page': 'admin-terminology',
46
47 'browse_url': reverse('pootle-tp-browse', kwargs={
48 'language_code': translation_project.language.code,
49 'project_code': translation_project.project.code,
50 }),
51 'translate_url': reverse('pootle-tp-translate', kwargs={
52 'language_code': translation_project.language.code,
53 'project_code': translation_project.project.code,
54 }),
55
56 'translation_project': translation_project,
57 'language': translation_project.language,
58 'project': translation_project.project,
59 'source_language': translation_project.project.source_language,
60 'directory': translation_project.directory,
61 }
62
63 if translation_project.project.is_terminology:
64 # Which file should we edit?
65 stores = list(Store.objects.live().filter(
66 translation_project=translation_project,
67 ))
68 if len(stores) == 1:
69 # There is only one, and we're not going to offer file-level
70 # activities, so let's just edit the one that is there.
71 return manage_store(request, ctx, ctx['language'], stores[0])
72 elif len(stores) > 1:
73 for store in stores:
74 path_length = len(translation_project.pootle_path)
75 store.nice_name = store.pootle_path[path_length:]
76
77 ctx['stores'] = stores
78 return render(request,
79 "translation_projects/terminology/stores.html", ctx)
80
81 try:
82 terminology_filename = get_terminology_filename(translation_project)
83 term_store = Store.objects.get(
84 pootle_path=translation_project.pootle_path + terminology_filename,
85 )
86 return manage_store(request, ctx, ctx['language'], term_store)
87 except Store.DoesNotExist:
88 return render(request, "translation_projects/terminology/manage.html",
89 ctx)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_terminology/views.py b/pootle/apps/pootle_terminology/views.py
--- a/pootle/apps/pootle_terminology/views.py
+++ b/pootle/apps/pootle_terminology/views.py
@@ -27,7 +27,7 @@
return (
'pootle-terminology.%s'
- % translation_project.project.filetypes[0].extension)
+ % translation_project.project.filetypes.first().extension)
def manage_store(request, ctx, language, term_store):
| {"golden_diff": "diff --git a/pootle/apps/pootle_terminology/views.py b/pootle/apps/pootle_terminology/views.py\n--- a/pootle/apps/pootle_terminology/views.py\n+++ b/pootle/apps/pootle_terminology/views.py\n@@ -27,7 +27,7 @@\n \n return (\n 'pootle-terminology.%s'\n- % translation_project.project.filetypes[0].extension)\n+ % translation_project.project.filetypes.first().extension)\n \n \n def manage_store(request, ctx, language, term_store):\n", "issue": "Exception in terminology management view\nWhen visiting https://mozilla.locamotion.org/eu/firefox/terminology/ the following exception is thrown:\n\n`'SortedRelatedManager' object does not support indexing`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\n\nfrom pootle.core.decorators import get_path_obj, permission_required\nfrom pootle_app.views.admin import util\nfrom pootle_store.models import Store, Unit\n\nfrom .forms import term_unit_form_factory\n\n\ndef get_terminology_filename(translation_project):\n try:\n # See if a terminology store already exists\n return translation_project.stores.live().filter(\n name__startswith='pootle-terminology.',\n ).values_list('name', flat=True)[0]\n except IndexError:\n pass\n\n return (\n 'pootle-terminology.%s'\n % translation_project.project.filetypes[0].extension)\n\n\ndef manage_store(request, ctx, language, term_store):\n TermUnitForm = term_unit_form_factory(term_store)\n template_name = 'translation_projects/terminology/manage.html'\n return util.edit(request, template_name, Unit, ctx,\n None, None, queryset=term_store.units, can_delete=True,\n form=TermUnitForm)\n\n\n@get_path_obj\n@permission_required('administrate')\ndef manage(request, translation_project):\n ctx = {\n 'page': 'admin-terminology',\n\n 'browse_url': reverse('pootle-tp-browse', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n 'translate_url': reverse('pootle-tp-translate', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n\n 'translation_project': translation_project,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'source_language': translation_project.project.source_language,\n 'directory': translation_project.directory,\n }\n\n if translation_project.project.is_terminology:\n # Which file should we edit?\n stores = list(Store.objects.live().filter(\n translation_project=translation_project,\n ))\n if len(stores) == 1:\n # There is only one, and we're not going to offer file-level\n # activities, so let's just edit the one that is there.\n return manage_store(request, ctx, ctx['language'], stores[0])\n elif len(stores) > 1:\n for store in stores:\n path_length = len(translation_project.pootle_path)\n store.nice_name = store.pootle_path[path_length:]\n\n ctx['stores'] = stores\n return render(request,\n \"translation_projects/terminology/stores.html\", ctx)\n\n try:\n terminology_filename = get_terminology_filename(translation_project)\n term_store = Store.objects.get(\n pootle_path=translation_project.pootle_path + terminology_filename,\n )\n return manage_store(request, ctx, 
ctx['language'], term_store)\n except Store.DoesNotExist:\n return render(request, \"translation_projects/terminology/manage.html\",\n ctx)\n", "path": "pootle/apps/pootle_terminology/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\n\nfrom pootle.core.decorators import get_path_obj, permission_required\nfrom pootle_app.views.admin import util\nfrom pootle_store.models import Store, Unit\n\nfrom .forms import term_unit_form_factory\n\n\ndef get_terminology_filename(translation_project):\n try:\n # See if a terminology store already exists\n return translation_project.stores.live().filter(\n name__startswith='pootle-terminology.',\n ).values_list('name', flat=True)[0]\n except IndexError:\n pass\n\n return (\n 'pootle-terminology.%s'\n % translation_project.project.filetypes.first().extension)\n\n\ndef manage_store(request, ctx, language, term_store):\n TermUnitForm = term_unit_form_factory(term_store)\n template_name = 'translation_projects/terminology/manage.html'\n return util.edit(request, template_name, Unit, ctx,\n None, None, queryset=term_store.units, can_delete=True,\n form=TermUnitForm)\n\n\n@get_path_obj\n@permission_required('administrate')\ndef manage(request, translation_project):\n ctx = {\n 'page': 'admin-terminology',\n\n 'browse_url': reverse('pootle-tp-browse', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n 'translate_url': reverse('pootle-tp-translate', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n\n 'translation_project': translation_project,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'source_language': translation_project.project.source_language,\n 'directory': translation_project.directory,\n }\n\n if translation_project.project.is_terminology:\n # Which file should we edit?\n stores = list(Store.objects.live().filter(\n translation_project=translation_project,\n ))\n if len(stores) == 1:\n # There is only one, and we're not going to offer file-level\n # activities, so let's just edit the one that is there.\n return manage_store(request, ctx, ctx['language'], stores[0])\n elif len(stores) > 1:\n for store in stores:\n path_length = len(translation_project.pootle_path)\n store.nice_name = store.pootle_path[path_length:]\n\n ctx['stores'] = stores\n return render(request,\n \"translation_projects/terminology/stores.html\", ctx)\n\n try:\n terminology_filename = get_terminology_filename(translation_project)\n term_store = Store.objects.get(\n pootle_path=translation_project.pootle_path + terminology_filename,\n )\n return manage_store(request, ctx, ctx['language'], term_store)\n except Store.DoesNotExist:\n return render(request, \"translation_projects/terminology/manage.html\",\n ctx)\n", "path": "pootle/apps/pootle_terminology/views.py"}]} | 1,183 | 125 |
gh_patches_debug_23775 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1102 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error while checking Dockerfile USER set with env variable
**Describe the bug**
The **checkov** CLI returns an error when analyzing a Dockerfile whose USER is set from an ENV variable. 
**To Reproduce**
Steps to reproduce the behavior:
1. Get this snippet:
```Dockerfile
FROM python:alpine
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
ENV USERNAME=app
RUN addgroup -S ${USERNAME} && adduser -s /sbin/nologin -S ${USERNAME} -G ${USERNAME} && chown -R ${USERNAME} /app
USER ${USERNAME}
COPY --chown=${USERNAME} script.py .
CMD python3 script.py
```
2. Run cli command 'checkov -f Dockerfile'
3. See error
**Expected behavior**
No error.
**Screenshots**
<img width="750" alt="" src="https://user-images.githubusercontent.com/44492274/115271564-c380b080-a13d-11eb-9c4d-cb086e3bd9fd.png">
**Desktop (please complete the following information):**
- OS: macOS Big Sur 11.2.3
- Checkov Version 2.0.55
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/RootUser.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
3
4
5 class RootUser(BaseDockerfileCheck):
6 def __init__(self):
7 name = "Ensure the last USER is not root"
8 id = "CKV_DOCKER_8"
9 supported_instructions = ["USER"]
10 categories = [CheckCategories.APPLICATION_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
12
13 def scan_entity_conf(self, conf):
14 contents = conf.get("USER")
15
16 if contents:
17 last_user = contents[-1]
18 if last_user["value"] == "root":
19 return CheckResult.FAILED, last_user
20
21 return CheckResult.PASSED, last_user
22
23 return CheckResult.UNKNOWN, None
24
25
26 check = RootUser()
27
```
Path: `checkov/dockerfile/checks/MaintainerExists.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
3
4
5 class MaintainerExists(BaseDockerfileCheck):
6 def __init__(self):
7 name = "Ensure that LABEL maintainer is used instead of MAINTAINER (deprecated)"
8 id = "CKV_DOCKER_6"
9 supported_instructions = ["MAINTAINER"]
10 categories = [CheckCategories.CONVENTION]
11 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
12
13 def scan_entity_conf(self, conf):
14 for instruction, content in conf.items():
15 if instruction == "MAINTAINER":
16 return CheckResult.FAILED, content[0]
17 return CheckResult.PASSED, None
18
19
20 check = MaintainerExists()
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/checks/MaintainerExists.py b/checkov/dockerfile/checks/MaintainerExists.py
--- a/checkov/dockerfile/checks/MaintainerExists.py
+++ b/checkov/dockerfile/checks/MaintainerExists.py
@@ -11,10 +11,7 @@
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
def scan_entity_conf(self, conf):
- for instruction, content in conf.items():
- if instruction == "MAINTAINER":
- return CheckResult.FAILED, content[0]
- return CheckResult.PASSED, None
+ return CheckResult.FAILED, conf[0]
check = MaintainerExists()
diff --git a/checkov/dockerfile/checks/RootUser.py b/checkov/dockerfile/checks/RootUser.py
--- a/checkov/dockerfile/checks/RootUser.py
+++ b/checkov/dockerfile/checks/RootUser.py
@@ -11,16 +11,11 @@
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
def scan_entity_conf(self, conf):
- contents = conf.get("USER")
+ last_user = conf[-1]
+ if last_user["value"] == "root":
+ return CheckResult.FAILED, last_user
- if contents:
- last_user = contents[-1]
- if last_user["value"] == "root":
- return CheckResult.FAILED, last_user
-
- return CheckResult.PASSED, last_user
-
- return CheckResult.UNKNOWN, None
+ return CheckResult.PASSED, last_user
check = RootUser()
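
For a quick read on the patched behavior, here is an illustrative snippet (not part of the dataset row); the parsed-instruction keys other than `value` are assumptions about checkov's Dockerfile parser output:

```python
from checkov.common.models.enums import CheckResult
from checkov.dockerfile.checks.RootUser import check

# After the patch, scan_entity_conf receives the list of parsed USER
# instructions directly; only the "value" key is read by the check.
conf = [{"instruction": "USER", "value": "${USERNAME}", "startline": 8, "endline": 8}]

result, _ = check.scan_entity_conf(conf)
assert result == CheckResult.PASSED  # "${USERNAME}" is not the literal "root"
```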
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/MaintainerExists.py b/checkov/dockerfile/checks/MaintainerExists.py\n--- a/checkov/dockerfile/checks/MaintainerExists.py\n+++ b/checkov/dockerfile/checks/MaintainerExists.py\n@@ -11,10 +11,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n def scan_entity_conf(self, conf):\n- for instruction, content in conf.items():\n- if instruction == \"MAINTAINER\":\n- return CheckResult.FAILED, content[0]\n- return CheckResult.PASSED, None\n+ return CheckResult.FAILED, conf[0]\n \n \n check = MaintainerExists()\ndiff --git a/checkov/dockerfile/checks/RootUser.py b/checkov/dockerfile/checks/RootUser.py\n--- a/checkov/dockerfile/checks/RootUser.py\n+++ b/checkov/dockerfile/checks/RootUser.py\n@@ -11,16 +11,11 @@\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n def scan_entity_conf(self, conf):\n- contents = conf.get(\"USER\")\n+ last_user = conf[-1]\n+ if last_user[\"value\"] == \"root\":\n+ return CheckResult.FAILED, last_user\n \n- if contents:\n- last_user = contents[-1]\n- if last_user[\"value\"] == \"root\":\n- return CheckResult.FAILED, last_user\n-\n- return CheckResult.PASSED, last_user\n-\n- return CheckResult.UNKNOWN, None\n+ return CheckResult.PASSED, last_user\n \n \n check = RootUser()\n", "issue": "Error while checking Dockerfile USER set with env variable\n**Describe the bug**\r\nThe **checkov** CLI return an error when analyzing a Dockerfile with USER set according to an ENV variable. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Get this snippet :\r\n```Dockerfile\r\nFROM python:alpine\r\n\r\nWORKDIR /app\r\nCOPY requirements.txt .\r\nRUN pip install -r requirements.txt\r\n\r\nENV USERNAME=app\r\nRUN addgroup -S ${USERNAME} && adduser -s /sbin/nologin -S ${USERNAME} -G ${USERNAME} && chown -R ${USERNAME} /app\r\nUSER ${USERNAME}\r\n\r\nCOPY --chown=${USERNAME} script.py .\r\n\r\nCMD python3 script.py\r\n```\r\n2. Run cli command 'checkov -f Dockerfile'\r\n3. 
See error\r\n\r\n**Expected behavior**\r\nNo error.\r\n\r\n**Screenshots**\r\n<img width=\"750\" alt=\"\" src=\"https://user-images.githubusercontent.com/44492274/115271564-c380b080-a13d-11eb-9c4d-cb086e3bd9fd.png\">\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.2.3\r\n - Checkov Version 2.0.55\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass RootUser(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure the last USER is not root\"\n id = \"CKV_DOCKER_8\"\n supported_instructions = [\"USER\"]\n categories = [CheckCategories.APPLICATION_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n contents = conf.get(\"USER\")\n\n if contents:\n last_user = contents[-1]\n if last_user[\"value\"] == \"root\":\n return CheckResult.FAILED, last_user\n\n return CheckResult.PASSED, last_user\n\n return CheckResult.UNKNOWN, None\n\n\ncheck = RootUser()\n", "path": "checkov/dockerfile/checks/RootUser.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass MaintainerExists(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure that LABEL maintainer is used instead of MAINTAINER (deprecated)\"\n id = \"CKV_DOCKER_6\"\n supported_instructions = [\"MAINTAINER\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n for instruction, content in conf.items():\n if instruction == \"MAINTAINER\":\n return CheckResult.FAILED, content[0]\n return CheckResult.PASSED, None\n\n\ncheck = MaintainerExists()\n", "path": "checkov/dockerfile/checks/MaintainerExists.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass RootUser(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure the last USER is not root\"\n id = \"CKV_DOCKER_8\"\n supported_instructions = [\"USER\"]\n categories = [CheckCategories.APPLICATION_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n last_user = conf[-1]\n if last_user[\"value\"] == \"root\":\n return CheckResult.FAILED, last_user\n\n return CheckResult.PASSED, last_user\n\n\ncheck = RootUser()\n", "path": "checkov/dockerfile/checks/RootUser.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass MaintainerExists(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure that LABEL maintainer is used instead of MAINTAINER (deprecated)\"\n id = \"CKV_DOCKER_6\"\n supported_instructions = [\"MAINTAINER\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n return CheckResult.FAILED, conf[0]\n\n\ncheck = MaintainerExists()\n", "path": "checkov/dockerfile/checks/MaintainerExists.py"}]} | 1,019 | 382 |
gh_patches_debug_17449 | rasdani/github-patches | git_diff | keras-team__keras-nlp-876 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deberta tokenizer.detokenize() errors out with mask token
When working on the Deberta masked language model, we had to do some special treatment for the mask token in the tokenizer.
We left one outstanding bug on the main PR, which is that detokenize will error out with a mask token. See:
https://github.com/keras-team/keras-nlp/pull/732#issuecomment-1449746110
Here's a colab:
https://colab.research.google.com/gist/mattdangerw/5164a7cad80e9f5fcbb9a495264f80e1/deberta-detokenize-error.ipynb
We should either strip or properly render the mask token during detokenize so the call does not error out.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py`
Content:
```
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """DeBERTa tokenizer."""
16
17 import copy
18
19 from keras_nlp.api_export import keras_nlp_export
20 from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
21 from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
22 from keras_nlp.utils.python_utils import classproperty
23
24
25 @keras_nlp_export("keras_nlp.models.DebertaV3Tokenizer")
26 class DebertaV3Tokenizer(SentencePieceTokenizer):
27 """DeBERTa tokenizer layer based on SentencePiece.
28
29 This tokenizer class will tokenize raw strings into integer sequences and
30 is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the
31 underlying tokenizer, it will check for all special tokens needed by
32 DeBERTa models and provides a `from_preset()` method to automatically
33 download a matching vocabulary for a DeBERTa preset.
34
35 This tokenizer does not provide truncation or padding of inputs. It can be
36 combined with a `keras_nlp.models.DebertaV3Preprocessor` layer for input
37 packing.
38
39 If input is a batch of strings (rank > 0), the layer will output a
40 `tf.RaggedTensor` where the last dimension of the output is ragged.
41
42 If input is a scalar string (rank == 0), the layer will output a dense
43 `tf.Tensor` with static shape `[None]`.
44
45 Note: The mask token (`"[MASK]"`) is handled differently in this tokenizer.
46 If the token is not present in the provided SentencePiece vocabulary, the
47 token will be appended to the vocabulary. For example, if the vocabulary
48 size is 100, the mask token will be assigned the ID 100.
49
50 Args:
51 proto: Either a `string` path to a SentencePiece proto file, or a
52 `bytes` object with a serialized SentencePiece proto. See the
53 [SentencePiece repository](https://github.com/google/sentencepiece)
54 for more details on the format.
55
56 Examples:
57
58 ```python
59 tokenizer = keras_nlp.models.DebertaV3Tokenizer(proto="model.spm")
60
61 # Batched inputs.
62 tokenizer(["the quick brown fox", "the earth is round"])
63
64 # Unbatched inputs.
65 tokenizer("the quick brown fox")
66
67 # Detokenization.
68 tokenizer.detokenize(tf.constant([[1, 4, 9, 5, 7, 2]]))
69 ```
70 """
71
72 def __init__(self, proto, **kwargs):
73 super().__init__(proto=proto, **kwargs)
74
75 # Check for necessary special tokens.
76 cls_token = "[CLS]"
77 sep_token = "[SEP]"
78 pad_token = "[PAD]"
79 mask_token = "[MASK]"
80
81 # We do not throw an error if `mask_token` is not present in the
82 # vocabulary.
83 for token in [cls_token, pad_token, sep_token]:
84 if token not in super().get_vocabulary():
85 raise ValueError(
86 f"Cannot find token `'{token}'` in the provided "
87 f"`vocabulary`. Please provide `'{token}'` in your "
88 "`vocabulary` or use a pretrained `vocabulary` name."
89 )
90
91 self.cls_token_id = self.token_to_id(cls_token)
92 self.sep_token_id = self.token_to_id(sep_token)
93 self.pad_token_id = self.token_to_id(pad_token)
94 # If the mask token is not in the vocabulary, add it to the end of the
95 # vocabulary.
96 if mask_token in super().get_vocabulary():
97 self.mask_token_id = super().token_to_id(mask_token)
98 else:
99 self.mask_token_id = super().vocabulary_size()
100
101 def vocabulary_size(self):
102 sentence_piece_size = super().vocabulary_size()
103 if sentence_piece_size == self.mask_token_id:
104 return sentence_piece_size + 1
105 return sentence_piece_size
106
107 def get_vocabulary(self):
108 sentence_piece_vocabulary = super().get_vocabulary()
109 if self.mask_token_id < super().vocabulary_size():
110 return sentence_piece_vocabulary
111 return sentence_piece_vocabulary + ["[MASK]"]
112
113 def id_to_token(self, id):
114 if id == self.mask_token_id:
115 return "[MASK]"
116 return super().id_to_token(id)
117
118 def token_to_id(self, token):
119 if token == "[MASK]":
120 return self.mask_token_id
121 return super().token_to_id(token)
122
123 @classproperty
124 def presets(cls):
125 return copy.deepcopy(backbone_presets)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py
--- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py
@@ -16,6 +16,8 @@
import copy
+import tensorflow as tf
+
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
@@ -120,6 +122,10 @@
return self.mask_token_id
return super().token_to_id(token)
+ def detokenize(self, ids):
+ ids = tf.ragged.boolean_mask(ids, tf.not_equal(ids, self.mask_token_id))
+ return super().detokenize(ids)
+
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
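
The patch hinges on `tf.ragged.boolean_mask`; a small illustration of that call (the mask-token id below is a made-up placeholder, not DeBERTa's real id):

```python
import tensorflow as tf

mask_token_id = 128000  # placeholder value for illustration only
ids = tf.constant([[1, 4, mask_token_id, 5, 2]])

# Every occurrence of the mask id is dropped before detokenization; the
# result is ragged because rows may lose different numbers of tokens.
kept = tf.ragged.boolean_mask(ids, tf.not_equal(ids, mask_token_id))
print(kept)  # <tf.RaggedTensor [[1, 4, 5, 2]]>
```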
| {"golden_diff": "diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py\n--- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py\n+++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py\n@@ -16,6 +16,8 @@\n \n import copy\n \n+import tensorflow as tf\n+\n from keras_nlp.api_export import keras_nlp_export\n from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\n from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer\n@@ -120,6 +122,10 @@\n return self.mask_token_id\n return super().token_to_id(token)\n \n+ def detokenize(self, ids):\n+ ids = tf.ragged.boolean_mask(ids, tf.not_equal(ids, self.mask_token_id))\n+ return super().detokenize(ids)\n+\n @classproperty\n def presets(cls):\n return copy.deepcopy(backbone_presets)\n", "issue": "Deberta tokenizer.detokenize() errors out with mask token\nWhen working on the Deberta masked language model, we had to do some special treatment for the mask token in the tokenizer.\r\n\r\nWe left one outstanding bug on the main PR, which is that detokenize will error out with a mask token. See:\r\nhttps://github.com/keras-team/keras-nlp/pull/732#issuecomment-1449746110\r\n\r\nHere's a colab:\r\nhttps://colab.research.google.com/gist/mattdangerw/5164a7cad80e9f5fcbb9a495264f80e1/deberta-detokenize-error.ipynb\r\n\r\nWe should either strip or properly render the mask token during detokenize so the call does not error out.\n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"DeBERTa tokenizer.\"\"\"\n\nimport copy\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\nfrom keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer\nfrom keras_nlp.utils.python_utils import classproperty\n\n\n@keras_nlp_export(\"keras_nlp.models.DebertaV3Tokenizer\")\nclass DebertaV3Tokenizer(SentencePieceTokenizer):\n \"\"\"DeBERTa tokenizer layer based on SentencePiece.\n\n This tokenizer class will tokenize raw strings into integer sequences and\n is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the\n underlying tokenizer, it will check for all special tokens needed by\n DeBERTa models and provides a `from_preset()` method to automatically\n download a matching vocabulary for a DeBERTa preset.\n\n This tokenizer does not provide truncation or padding of inputs. 
It can be\n combined with a `keras_nlp.models.DebertaV3Preprocessor` layer for input\n packing.\n\n If input is a batch of strings (rank > 0), the layer will output a\n `tf.RaggedTensor` where the last dimension of the output is ragged.\n\n If input is a scalar string (rank == 0), the layer will output a dense\n `tf.Tensor` with static shape `[None]`.\n\n Note: The mask token (`\"[MASK]\"`) is handled differently in this tokenizer.\n If the token is not present in the provided SentencePiece vocabulary, the\n token will be appended to the vocabulary. For example, if the vocabulary\n size is 100, the mask token will be assigned the ID 100.\n\n Args:\n proto: Either a `string` path to a SentencePiece proto file, or a\n `bytes` object with a serialized SentencePiece proto. See the\n [SentencePiece repository](https://github.com/google/sentencepiece)\n for more details on the format.\n\n Examples:\n\n ```python\n tokenizer = keras_nlp.models.DebertaV3Tokenizer(proto=\"model.spm\")\n\n # Batched inputs.\n tokenizer([\"the quick brown fox\", \"the earth is round\"])\n\n # Unbatched inputs.\n tokenizer(\"the quick brown fox\")\n\n # Detokenization.\n tokenizer.detokenize(tf.constant([[1, 4, 9, 5, 7, 2]]))\n ```\n \"\"\"\n\n def __init__(self, proto, **kwargs):\n super().__init__(proto=proto, **kwargs)\n\n # Check for necessary special tokens.\n cls_token = \"[CLS]\"\n sep_token = \"[SEP]\"\n pad_token = \"[PAD]\"\n mask_token = \"[MASK]\"\n\n # We do not throw an error if `mask_token` is not present in the\n # vocabulary.\n for token in [cls_token, pad_token, sep_token]:\n if token not in super().get_vocabulary():\n raise ValueError(\n f\"Cannot find token `'{token}'` in the provided \"\n f\"`vocabulary`. Please provide `'{token}'` in your \"\n \"`vocabulary` or use a pretrained `vocabulary` name.\"\n )\n\n self.cls_token_id = self.token_to_id(cls_token)\n self.sep_token_id = self.token_to_id(sep_token)\n self.pad_token_id = self.token_to_id(pad_token)\n # If the mask token is not in the vocabulary, add it to the end of the\n # vocabulary.\n if mask_token in super().get_vocabulary():\n self.mask_token_id = super().token_to_id(mask_token)\n else:\n self.mask_token_id = super().vocabulary_size()\n\n def vocabulary_size(self):\n sentence_piece_size = super().vocabulary_size()\n if sentence_piece_size == self.mask_token_id:\n return sentence_piece_size + 1\n return sentence_piece_size\n\n def get_vocabulary(self):\n sentence_piece_vocabulary = super().get_vocabulary()\n if self.mask_token_id < super().vocabulary_size():\n return sentence_piece_vocabulary\n return sentence_piece_vocabulary + [\"[MASK]\"]\n\n def id_to_token(self, id):\n if id == self.mask_token_id:\n return \"[MASK]\"\n return super().id_to_token(id)\n\n def token_to_id(self, token):\n if token == \"[MASK]\":\n return self.mask_token_id\n return super().token_to_id(token)\n\n @classproperty\n def presets(cls):\n return copy.deepcopy(backbone_presets)\n", "path": "keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py"}], "after_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See 
the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"DeBERTa tokenizer.\"\"\"\n\nimport copy\n\nimport tensorflow as tf\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\nfrom keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer\nfrom keras_nlp.utils.python_utils import classproperty\n\n\n@keras_nlp_export(\"keras_nlp.models.DebertaV3Tokenizer\")\nclass DebertaV3Tokenizer(SentencePieceTokenizer):\n \"\"\"DeBERTa tokenizer layer based on SentencePiece.\n\n This tokenizer class will tokenize raw strings into integer sequences and\n is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the\n underlying tokenizer, it will check for all special tokens needed by\n DeBERTa models and provides a `from_preset()` method to automatically\n download a matching vocabulary for a DeBERTa preset.\n\n This tokenizer does not provide truncation or padding of inputs. It can be\n combined with a `keras_nlp.models.DebertaV3Preprocessor` layer for input\n packing.\n\n If input is a batch of strings (rank > 0), the layer will output a\n `tf.RaggedTensor` where the last dimension of the output is ragged.\n\n If input is a scalar string (rank == 0), the layer will output a dense\n `tf.Tensor` with static shape `[None]`.\n\n Note: The mask token (`\"[MASK]\"`) is handled differently in this tokenizer.\n If the token is not present in the provided SentencePiece vocabulary, the\n token will be appended to the vocabulary. For example, if the vocabulary\n size is 100, the mask token will be assigned the ID 100.\n\n Args:\n proto: Either a `string` path to a SentencePiece proto file, or a\n `bytes` object with a serialized SentencePiece proto. See the\n [SentencePiece repository](https://github.com/google/sentencepiece)\n for more details on the format.\n\n Examples:\n\n ```python\n tokenizer = keras_nlp.models.DebertaV3Tokenizer(proto=\"model.spm\")\n\n # Batched inputs.\n tokenizer([\"the quick brown fox\", \"the earth is round\"])\n\n # Unbatched inputs.\n tokenizer(\"the quick brown fox\")\n\n # Detokenization.\n tokenizer.detokenize(tf.constant([[1, 4, 9, 5, 7, 2]]))\n ```\n \"\"\"\n\n def __init__(self, proto, **kwargs):\n super().__init__(proto=proto, **kwargs)\n\n # Check for necessary special tokens.\n cls_token = \"[CLS]\"\n sep_token = \"[SEP]\"\n pad_token = \"[PAD]\"\n mask_token = \"[MASK]\"\n\n # We do not throw an error if `mask_token` is not present in the\n # vocabulary.\n for token in [cls_token, pad_token, sep_token]:\n if token not in super().get_vocabulary():\n raise ValueError(\n f\"Cannot find token `'{token}'` in the provided \"\n f\"`vocabulary`. 
Please provide `'{token}'` in your \"\n \"`vocabulary` or use a pretrained `vocabulary` name.\"\n )\n\n self.cls_token_id = self.token_to_id(cls_token)\n self.sep_token_id = self.token_to_id(sep_token)\n self.pad_token_id = self.token_to_id(pad_token)\n # If the mask token is not in the vocabulary, add it to the end of the\n # vocabulary.\n if mask_token in super().get_vocabulary():\n self.mask_token_id = super().token_to_id(mask_token)\n else:\n self.mask_token_id = super().vocabulary_size()\n\n def vocabulary_size(self):\n sentence_piece_size = super().vocabulary_size()\n if sentence_piece_size == self.mask_token_id:\n return sentence_piece_size + 1\n return sentence_piece_size\n\n def get_vocabulary(self):\n sentence_piece_vocabulary = super().get_vocabulary()\n if self.mask_token_id < super().vocabulary_size():\n return sentence_piece_vocabulary\n return sentence_piece_vocabulary + [\"[MASK]\"]\n\n def id_to_token(self, id):\n if id == self.mask_token_id:\n return \"[MASK]\"\n return super().id_to_token(id)\n\n def token_to_id(self, token):\n if token == \"[MASK]\":\n return self.mask_token_id\n return super().token_to_id(token)\n\n def detokenize(self, ids):\n ids = tf.ragged.boolean_mask(ids, tf.not_equal(ids, self.mask_token_id))\n return super().detokenize(ids)\n\n @classproperty\n def presets(cls):\n return copy.deepcopy(backbone_presets)\n", "path": "keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py"}]} | 1,853 | 254 |
gh_patches_debug_39546 | rasdani/github-patches | git_diff | canonical__snapcraft-4353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remote-build: add control logic for envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`
### What needs to get done
This adds control logic to determine whether to execute the new or legacy remote-build code.
There are four possibilities with `SNAPCRAFT_REMOTE_BUILD_STRATEGY`:
- `disable-fallback` - use new remote-build code
- `force-fallback` - use legacy remote-build code
- unset - continue on to next control logic step
- unknown - raise an error
### Why it needs to get done
remote-build needs to be migrated because it does not leverage the new craft libraries, has issues with building core22 snaps, and has issues related to how the local project is bundled.
--- END ISSUE ---
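For orientation, a minimal self-contained sketch of the four-way control flow the issue describes (return values are labels chosen here for illustration, not Snapcraft's API):

```python
import os

def choose_remote_build_path() -> str:
    """Sketch of the strategy decision; labels are illustrative only."""
    strategy = os.getenv("SNAPCRAFT_REMOTE_BUILD_STRATEGY")
    if strategy == "disable-fallback":
        return "new-remote-build"          # use the new remote-build code
    if strategy == "force-fallback":
        return "legacy-remote-build"       # use the legacy remote-build code
    if strategy is None:
        return "next-control-logic-step"   # fall through to the next decision
    raise ValueError(f"Unknown SNAPCRAFT_REMOTE_BUILD_STRATEGY value: {strategy!r}")
```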
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snapcraft/commands/remote.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright 2022-2023 Canonical Ltd.
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 """Snapcraft remote build command."""
18
19 import argparse
20 import os
21 import textwrap
22
23 from craft_cli import BaseCommand, emit
24 from craft_cli.helptexts import HIDDEN
25 from overrides import overrides
26
27 from snapcraft.errors import MaintenanceBase, SnapcraftError
28 from snapcraft.legacy_cli import run_legacy
29 from snapcraft.parts import yaml_utils
30 from snapcraft.utils import confirm_with_user
31 from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError
32
33 _CONFIRMATION_PROMPT = (
34 "All data sent to remote builders will be publicly available. "
35 "Are you sure you want to continue?"
36 )
37
38
39 class RemoteBuildCommand(BaseCommand):
40 """Command passthrough for the remote-build command."""
41
42 name = "remote-build"
43 help_msg = "Dispatch a snap for remote build"
44 overview = textwrap.dedent(
45 """
46 Command remote-build sends the current project to be built
47 remotely. After the build is complete, packages for each
48 architecture are retrieved and will be available in the
49 local filesystem.
50
51 If not specified in the snapcraft.yaml file, the list of
52 architectures to build can be set using the --build-on option.
53 If both are specified, an error will occur.
54
55 Interrupted remote builds can be resumed using the --recover
56 option, followed by the build number informed when the remote
57 build was originally dispatched. The current state of the
58 remote build for each architecture can be checked using the
59 --status option."""
60 )
61
62 @overrides
63 def fill_parser(self, parser: argparse.ArgumentParser) -> None:
64 parser.add_argument(
65 "--recover", action="store_true", help="recover an interrupted build"
66 )
67 parser.add_argument(
68 "--status", action="store_true", help="display remote build status"
69 )
70 parser_target = parser.add_mutually_exclusive_group()
71 parser_target.add_argument(
72 "--build-on",
73 metavar="arch",
74 nargs="+",
75 help=HIDDEN,
76 )
77 parser_target.add_argument(
78 "--build-for",
79 metavar="arch",
80 nargs="+",
81 help="architecture to build for",
82 )
83 parser.add_argument(
84 "--build-id", metavar="build-id", help="specific build id to retrieve"
85 )
86 parser.add_argument(
87 "--launchpad-accept-public-upload",
88 action="store_true",
89 help="acknowledge that uploaded code will be publicly available.",
90 )
91
92 def _get_effective_base(self) -> str:
93 """Get a valid effective base from the project's snapcraft.yaml.
94
95 :returns: The project's effective base.
96
97 :raises SnapcraftError: If the base is unknown or missing or if the
98 snapcraft.yaml cannot be loaded.
99 :raises MaintenanceBase: If the base is not supported
100 """
101 snapcraft_yaml = yaml_utils.get_snap_project().project_file
102
103 with open(snapcraft_yaml, encoding="utf-8") as file:
104 base = yaml_utils.get_base(file)
105
106 if base is None:
107 raise SnapcraftError(
108 f"Could not determine base from {str(snapcraft_yaml)!r}."
109 )
110
111 emit.debug(f"Got base {base!r} from {str(snapcraft_yaml)!r}.")
112
113 if base in yaml_utils.ESM_BASES:
114 raise MaintenanceBase(base)
115
116 if base not in yaml_utils.BASES:
117 raise SnapcraftError(f"Unknown base {base!r} in {str(snapcraft_yaml)!r}.")
118
119 return base
120
121 def _run_remote_build(self, base: str) -> None:
122 # bases newer than core22 must use the new remote-build
123 if base in yaml_utils.CURRENT_BASES - {"core22"}:
124 emit.debug(
125 "Using fallback remote-build because new remote-build is not available."
126 )
127 # TODO: use new remote-build code (#4323)
128 run_legacy()
129 return
130
131 emit.debug("Running fallback remote-build.")
132 run_legacy()
133
134 @overrides
135 def run(self, parsed_args) -> None:
136 if os.getenv("SUDO_USER") and os.geteuid() == 0:
137 emit.message(
138 "Running with 'sudo' may cause permission errors and is discouraged."
139 )
140
141 emit.message(
142 "snapcraft remote-build is experimental and is subject to change "
143 "- use with caution."
144 )
145
146 if parsed_args.build_on:
147 emit.message("Use --build-for instead of --build-on")
148 parsed_args.build_for = parsed_args.build_on
149
150 if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(
151 _CONFIRMATION_PROMPT
152 ):
153 raise AcceptPublicUploadError()
154
155 base = self._get_effective_base()
156 self._run_remote_build(base)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snapcraft/commands/remote.py b/snapcraft/commands/remote.py
--- a/snapcraft/commands/remote.py
+++ b/snapcraft/commands/remote.py
@@ -19,6 +19,8 @@
import argparse
import os
import textwrap
+from enum import Enum
+from typing import Optional
from craft_cli import BaseCommand, emit
from craft_cli.helptexts import HIDDEN
@@ -27,7 +29,7 @@
from snapcraft.errors import MaintenanceBase, SnapcraftError
from snapcraft.legacy_cli import run_legacy
from snapcraft.parts import yaml_utils
-from snapcraft.utils import confirm_with_user
+from snapcraft.utils import confirm_with_user, humanize_list
from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError
_CONFIRMATION_PROMPT = (
@@ -36,6 +38,16 @@
)
+_STRATEGY_ENVVAR = "SNAPCRAFT_REMOTE_BUILD_STRATEGY"
+
+
+class _Strategies(Enum):
+ """Possible values of the build strategy."""
+
+ DISABLE_FALLBACK = "disable-fallback"
+ FORCE_FALLBACK = "force-fallback"
+
+
class RemoteBuildCommand(BaseCommand):
"""Command passthrough for the remote-build command."""
@@ -89,6 +101,29 @@
help="acknowledge that uploaded code will be publicly available.",
)
+ def _get_build_strategy(self) -> Optional[_Strategies]:
+ """Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.
+
+ :returns: The strategy or None.
+
+ :raises SnapcraftError: If the variable is set to an invalid value.
+ """
+ strategy = os.getenv(_STRATEGY_ENVVAR)
+
+ if not strategy:
+ return None
+
+ try:
+ return _Strategies(strategy)
+ except ValueError as err:
+ valid_strategies = humanize_list(
+ (strategy.value for strategy in _Strategies), "and"
+ )
+ raise SnapcraftError(
+ f"Unknown value {strategy!r} in environment variable "
+ f"{_STRATEGY_ENVVAR!r}. Valid values are {valid_strategies}."
+ ) from err
+
def _get_effective_base(self) -> str:
"""Get a valid effective base from the project's snapcraft.yaml.
@@ -128,6 +163,25 @@
run_legacy()
return
+ strategy = self._get_build_strategy()
+
+ if strategy == _Strategies.DISABLE_FALLBACK:
+ emit.debug(
+ f"Environment variable {_STRATEGY_ENVVAR!r} is "
+ f"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback "
+ "remote-build because new remote-build is not available."
+ )
+ run_legacy()
+ return
+
+ if strategy == _Strategies.FORCE_FALLBACK:
+ emit.debug(
+ "Running fallback remote-build because environment variable "
+ f"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}."
+ )
+ run_legacy()
+ return
+
emit.debug("Running fallback remote-build.")
run_legacy()
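
The patch validates the variable by constructing the enum from its value; a standalone restatement of that pattern (illustrative only, without Snapcraft's own error type):

```python
from enum import Enum
from typing import Optional

class Strategies(Enum):  # mirrors the patch's _Strategies for illustration
    DISABLE_FALLBACK = "disable-fallback"
    FORCE_FALLBACK = "force-fallback"

def parse_strategy(raw: Optional[str]) -> Optional[Strategies]:
    if not raw:
        return None
    try:
        return Strategies(raw)  # Enum lookup by value; unknown values raise ValueError
    except ValueError as err:
        valid = ", ".join(s.value for s in Strategies)
        raise ValueError(f"Unknown value {raw!r}; valid values are {valid}") from err

print(parse_strategy("force-fallback"))  # Strategies.FORCE_FALLBACK
print(parse_strategy(None))              # None
```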
| {"golden_diff": "diff --git a/snapcraft/commands/remote.py b/snapcraft/commands/remote.py\n--- a/snapcraft/commands/remote.py\n+++ b/snapcraft/commands/remote.py\n@@ -19,6 +19,8 @@\n import argparse\n import os\n import textwrap\n+from enum import Enum\n+from typing import Optional\n \n from craft_cli import BaseCommand, emit\n from craft_cli.helptexts import HIDDEN\n@@ -27,7 +29,7 @@\n from snapcraft.errors import MaintenanceBase, SnapcraftError\n from snapcraft.legacy_cli import run_legacy\n from snapcraft.parts import yaml_utils\n-from snapcraft.utils import confirm_with_user\n+from snapcraft.utils import confirm_with_user, humanize_list\n from snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n \n _CONFIRMATION_PROMPT = (\n@@ -36,6 +38,16 @@\n )\n \n \n+_STRATEGY_ENVVAR = \"SNAPCRAFT_REMOTE_BUILD_STRATEGY\"\n+\n+\n+class _Strategies(Enum):\n+ \"\"\"Possible values of the build strategy.\"\"\"\n+\n+ DISABLE_FALLBACK = \"disable-fallback\"\n+ FORCE_FALLBACK = \"force-fallback\"\n+\n+\n class RemoteBuildCommand(BaseCommand):\n \"\"\"Command passthrough for the remote-build command.\"\"\"\n \n@@ -89,6 +101,29 @@\n help=\"acknowledge that uploaded code will be publicly available.\",\n )\n \n+ def _get_build_strategy(self) -> Optional[_Strategies]:\n+ \"\"\"Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.\n+\n+ :returns: The strategy or None.\n+\n+ :raises SnapcraftError: If the variable is set to an invalid value.\n+ \"\"\"\n+ strategy = os.getenv(_STRATEGY_ENVVAR)\n+\n+ if not strategy:\n+ return None\n+\n+ try:\n+ return _Strategies(strategy)\n+ except ValueError as err:\n+ valid_strategies = humanize_list(\n+ (strategy.value for strategy in _Strategies), \"and\"\n+ )\n+ raise SnapcraftError(\n+ f\"Unknown value {strategy!r} in environment variable \"\n+ f\"{_STRATEGY_ENVVAR!r}. 
Valid values are {valid_strategies}.\"\n+ ) from err\n+\n def _get_effective_base(self) -> str:\n \"\"\"Get a valid effective base from the project's snapcraft.yaml.\n \n@@ -128,6 +163,25 @@\n run_legacy()\n return\n \n+ strategy = self._get_build_strategy()\n+\n+ if strategy == _Strategies.DISABLE_FALLBACK:\n+ emit.debug(\n+ f\"Environment variable {_STRATEGY_ENVVAR!r} is \"\n+ f\"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback \"\n+ \"remote-build because new remote-build is not available.\"\n+ )\n+ run_legacy()\n+ return\n+\n+ if strategy == _Strategies.FORCE_FALLBACK:\n+ emit.debug(\n+ \"Running fallback remote-build because environment variable \"\n+ f\"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}.\"\n+ )\n+ run_legacy()\n+ return\n+\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n", "issue": "remote-build: add control logic for envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`\n### What needs to get done\n\nThis adds control logic to determine whether to execute the new or legacy remote-build code.\r\n\r\nThere are four possibilities with `SNAPCRAFT_REMOTE_BUILD_STRATEGY`:\r\n\r\n- `disable-fallback` - use new remote-build code\r\n- `force-fallback` - use legacy remote-build code\r\n- unset - continue on to next control logic step\r\n- unknown - raise an error\r\n\r\n\r\n\n\n### Why it needs to get done\n\nremote-build needs to be migrated because it does not leverage the new craft libraries, has issues with building core22 snaps, and has issues related to how the local project is bundled.\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft remote build command.\"\"\"\n\nimport argparse\nimport os\nimport textwrap\n\nfrom craft_cli import BaseCommand, emit\nfrom craft_cli.helptexts import HIDDEN\nfrom overrides import overrides\n\nfrom snapcraft.errors import MaintenanceBase, SnapcraftError\nfrom snapcraft.legacy_cli import run_legacy\nfrom snapcraft.parts import yaml_utils\nfrom snapcraft.utils import confirm_with_user\nfrom snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n\n_CONFIRMATION_PROMPT = (\n \"All data sent to remote builders will be publicly available. \"\n \"Are you sure you want to continue?\"\n)\n\n\nclass RemoteBuildCommand(BaseCommand):\n \"\"\"Command passthrough for the remote-build command.\"\"\"\n\n name = \"remote-build\"\n help_msg = \"Dispatch a snap for remote build\"\n overview = textwrap.dedent(\n \"\"\"\n Command remote-build sends the current project to be built\n remotely. 
After the build is complete, packages for each\n architecture are retrieved and will be available in the\n local filesystem.\n\n If not specified in the snapcraft.yaml file, the list of\n architectures to build can be set using the --build-on option.\n If both are specified, an error will occur.\n\n Interrupted remote builds can be resumed using the --recover\n option, followed by the build number informed when the remote\n build was originally dispatched. The current state of the\n remote build for each architecture can be checked using the\n --status option.\"\"\"\n )\n\n @overrides\n def fill_parser(self, parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--recover\", action=\"store_true\", help=\"recover an interrupted build\"\n )\n parser.add_argument(\n \"--status\", action=\"store_true\", help=\"display remote build status\"\n )\n parser_target = parser.add_mutually_exclusive_group()\n parser_target.add_argument(\n \"--build-on\",\n metavar=\"arch\",\n nargs=\"+\",\n help=HIDDEN,\n )\n parser_target.add_argument(\n \"--build-for\",\n metavar=\"arch\",\n nargs=\"+\",\n help=\"architecture to build for\",\n )\n parser.add_argument(\n \"--build-id\", metavar=\"build-id\", help=\"specific build id to retrieve\"\n )\n parser.add_argument(\n \"--launchpad-accept-public-upload\",\n action=\"store_true\",\n help=\"acknowledge that uploaded code will be publicly available.\",\n )\n\n def _get_effective_base(self) -> str:\n \"\"\"Get a valid effective base from the project's snapcraft.yaml.\n\n :returns: The project's effective base.\n\n :raises SnapcraftError: If the base is unknown or missing or if the\n snapcraft.yaml cannot be loaded.\n :raises MaintenanceBase: If the base is not supported\n \"\"\"\n snapcraft_yaml = yaml_utils.get_snap_project().project_file\n\n with open(snapcraft_yaml, encoding=\"utf-8\") as file:\n base = yaml_utils.get_base(file)\n\n if base is None:\n raise SnapcraftError(\n f\"Could not determine base from {str(snapcraft_yaml)!r}.\"\n )\n\n emit.debug(f\"Got base {base!r} from {str(snapcraft_yaml)!r}.\")\n\n if base in yaml_utils.ESM_BASES:\n raise MaintenanceBase(base)\n\n if base not in yaml_utils.BASES:\n raise SnapcraftError(f\"Unknown base {base!r} in {str(snapcraft_yaml)!r}.\")\n\n return base\n\n def _run_remote_build(self, base: str) -> None:\n # bases newer than core22 must use the new remote-build\n if base in yaml_utils.CURRENT_BASES - {\"core22\"}:\n emit.debug(\n \"Using fallback remote-build because new remote-build is not available.\"\n )\n # TODO: use new remote-build code (#4323)\n run_legacy()\n return\n\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n\n @overrides\n def run(self, parsed_args) -> None:\n if os.getenv(\"SUDO_USER\") and os.geteuid() == 0:\n emit.message(\n \"Running with 'sudo' may cause permission errors and is discouraged.\"\n )\n\n emit.message(\n \"snapcraft remote-build is experimental and is subject to change \"\n \"- use with caution.\"\n )\n\n if parsed_args.build_on:\n emit.message(\"Use --build-for instead of --build-on\")\n parsed_args.build_for = parsed_args.build_on\n\n if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(\n _CONFIRMATION_PROMPT\n ):\n raise AcceptPublicUploadError()\n\n base = self._get_effective_base()\n self._run_remote_build(base)\n", "path": "snapcraft/commands/remote.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you 
can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft remote build command.\"\"\"\n\nimport argparse\nimport os\nimport textwrap\nfrom enum import Enum\nfrom typing import Optional\n\nfrom craft_cli import BaseCommand, emit\nfrom craft_cli.helptexts import HIDDEN\nfrom overrides import overrides\n\nfrom snapcraft.errors import MaintenanceBase, SnapcraftError\nfrom snapcraft.legacy_cli import run_legacy\nfrom snapcraft.parts import yaml_utils\nfrom snapcraft.utils import confirm_with_user, humanize_list\nfrom snapcraft_legacy.internal.remote_build.errors import AcceptPublicUploadError\n\n_CONFIRMATION_PROMPT = (\n \"All data sent to remote builders will be publicly available. \"\n \"Are you sure you want to continue?\"\n)\n\n\n_STRATEGY_ENVVAR = \"SNAPCRAFT_REMOTE_BUILD_STRATEGY\"\n\n\nclass _Strategies(Enum):\n \"\"\"Possible values of the build strategy.\"\"\"\n\n DISABLE_FALLBACK = \"disable-fallback\"\n FORCE_FALLBACK = \"force-fallback\"\n\n\nclass RemoteBuildCommand(BaseCommand):\n \"\"\"Command passthrough for the remote-build command.\"\"\"\n\n name = \"remote-build\"\n help_msg = \"Dispatch a snap for remote build\"\n overview = textwrap.dedent(\n \"\"\"\n Command remote-build sends the current project to be built\n remotely. After the build is complete, packages for each\n architecture are retrieved and will be available in the\n local filesystem.\n\n If not specified in the snapcraft.yaml file, the list of\n architectures to build can be set using the --build-on option.\n If both are specified, an error will occur.\n\n Interrupted remote builds can be resumed using the --recover\n option, followed by the build number informed when the remote\n build was originally dispatched. 
The current state of the\n remote build for each architecture can be checked using the\n --status option.\"\"\"\n )\n\n @overrides\n def fill_parser(self, parser: argparse.ArgumentParser) -> None:\n parser.add_argument(\n \"--recover\", action=\"store_true\", help=\"recover an interrupted build\"\n )\n parser.add_argument(\n \"--status\", action=\"store_true\", help=\"display remote build status\"\n )\n parser_target = parser.add_mutually_exclusive_group()\n parser_target.add_argument(\n \"--build-on\",\n metavar=\"arch\",\n nargs=\"+\",\n help=HIDDEN,\n )\n parser_target.add_argument(\n \"--build-for\",\n metavar=\"arch\",\n nargs=\"+\",\n help=\"architecture to build for\",\n )\n parser.add_argument(\n \"--build-id\", metavar=\"build-id\", help=\"specific build id to retrieve\"\n )\n parser.add_argument(\n \"--launchpad-accept-public-upload\",\n action=\"store_true\",\n help=\"acknowledge that uploaded code will be publicly available.\",\n )\n\n def _get_build_strategy(self) -> Optional[_Strategies]:\n \"\"\"Get the build strategy from the envvar `SNAPCRAFT_REMOTE_BUILD_STRATEGY`.\n\n :returns: The strategy or None.\n\n :raises SnapcraftError: If the variable is set to an invalid value.\n \"\"\"\n strategy = os.getenv(_STRATEGY_ENVVAR)\n\n if not strategy:\n return None\n\n try:\n return _Strategies(strategy)\n except ValueError as err:\n valid_strategies = humanize_list(\n (strategy.value for strategy in _Strategies), \"and\"\n )\n raise SnapcraftError(\n f\"Unknown value {strategy!r} in environment variable \"\n f\"{_STRATEGY_ENVVAR!r}. Valid values are {valid_strategies}.\"\n ) from err\n\n def _get_effective_base(self) -> str:\n \"\"\"Get a valid effective base from the project's snapcraft.yaml.\n\n :returns: The project's effective base.\n\n :raises SnapcraftError: If the base is unknown or missing or if the\n snapcraft.yaml cannot be loaded.\n :raises MaintenanceBase: If the base is not supported\n \"\"\"\n snapcraft_yaml = yaml_utils.get_snap_project().project_file\n\n with open(snapcraft_yaml, encoding=\"utf-8\") as file:\n base = yaml_utils.get_base(file)\n\n if base is None:\n raise SnapcraftError(\n f\"Could not determine base from {str(snapcraft_yaml)!r}.\"\n )\n\n emit.debug(f\"Got base {base!r} from {str(snapcraft_yaml)!r}.\")\n\n if base in yaml_utils.ESM_BASES:\n raise MaintenanceBase(base)\n\n if base not in yaml_utils.BASES:\n raise SnapcraftError(f\"Unknown base {base!r} in {str(snapcraft_yaml)!r}.\")\n\n return base\n\n def _run_remote_build(self, base: str) -> None:\n # bases newer than core22 must use the new remote-build\n if base in yaml_utils.CURRENT_BASES - {\"core22\"}:\n emit.debug(\n \"Using fallback remote-build because new remote-build is not available.\"\n )\n # TODO: use new remote-build code (#4323)\n run_legacy()\n return\n\n strategy = self._get_build_strategy()\n\n if strategy == _Strategies.DISABLE_FALLBACK:\n emit.debug(\n f\"Environment variable {_STRATEGY_ENVVAR!r} is \"\n f\"{_Strategies.DISABLE_FALLBACK.value!r} but running fallback \"\n \"remote-build because new remote-build is not available.\"\n )\n run_legacy()\n return\n\n if strategy == _Strategies.FORCE_FALLBACK:\n emit.debug(\n \"Running fallback remote-build because environment variable \"\n f\"{_STRATEGY_ENVVAR!r} is {_Strategies.FORCE_FALLBACK.value!r}.\"\n )\n run_legacy()\n return\n\n emit.debug(\"Running fallback remote-build.\")\n run_legacy()\n\n @overrides\n def run(self, parsed_args) -> None:\n if os.getenv(\"SUDO_USER\") and os.geteuid() == 0:\n emit.message(\n \"Running with 
'sudo' may cause permission errors and is discouraged.\"\n )\n\n emit.message(\n \"snapcraft remote-build is experimental and is subject to change \"\n \"- use with caution.\"\n )\n\n if parsed_args.build_on:\n emit.message(\"Use --build-for instead of --build-on\")\n parsed_args.build_for = parsed_args.build_on\n\n if not parsed_args.launchpad_accept_public_upload and not confirm_with_user(\n _CONFIRMATION_PROMPT\n ):\n raise AcceptPublicUploadError()\n\n base = self._get_effective_base()\n self._run_remote_build(base)\n", "path": "snapcraft/commands/remote.py"}]} | 2,020 | 723 |
gh_patches_debug_6411 | rasdani/github-patches | git_diff | SeldonIO__MLServer-625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
starting mlserver using `mlserver start .` is not consistent with `mlserver start $PWD`
When I started mlserver using `mlserver start .` in the directory tree
```
└── iris1
└── 1
├── model.joblib
└── model-settings.json
```
and settings `{"name":"iris1","implementation":"mlserver_sklearn.SKLearnModel","parameters":{"version":"1"}}`
results in an error:
```
mlserver.errors.InvalidModelURI: Invalid URI specified for model iris1 (iris1/1/iris1/1)
```
However, using `mlserver start $PWD` is successful.
--- END ISSUE ---
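One way to see how a relative root could yield the doubled path (a hedged reading of the symptom, not something stated in the issue; the real root cause may sit elsewhere in MLServer):

```python
import os

root = "."                                              # mlserver start .
settings_path = os.path.join(root, "iris1", "1", "model-settings.json")
model_uri = os.path.dirname(settings_path)              # './iris1/1' -- still relative

# If a later step joins the relative URI back onto the folder holding
# model-settings.json, the duplicated path from the error appears:
rejoined = os.path.join(os.path.dirname(settings_path), model_uri)
print(os.path.normpath(rejoined))                       # iris1/1/iris1/1

# With `mlserver start $PWD` the URI is absolute, and os.path.join discards
# the first argument when the second one is absolute, so nothing doubles up.
print(os.path.join("/models/iris1/1", "/models/iris1/1"))  # /models/iris1/1
```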
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/repository.py`
Content:
```
1 import os
2 import glob
3
4 from typing import List
5
6 from .settings import ModelParameters, ModelSettings
7 from .errors import ModelNotFound
8 from .logging import logger
9
10 DEFAULT_MODEL_SETTINGS_FILENAME = "model-settings.json"
11
12
13 class ModelRepository:
14 """
15 Model repository, responsible of the discovery of models which can be
16 loaded onto the model registry.
17 """
18
19 def __init__(self, root: str = None):
20 self._root = root
21
22 async def list(self) -> List[ModelSettings]:
23 all_model_settings = []
24
25 # TODO: Use an async alternative for filesys ops
26 if self._root:
27 pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
28 matches = glob.glob(pattern, recursive=True)
29
30 for model_settings_path in matches:
31 model_settings = self._load_model_settings(model_settings_path)
32 all_model_settings.append(model_settings)
33
34 # If there were no matches, try to load model from environment
35 if not all_model_settings:
36 # return default
37 model_settings = ModelSettings()
38 model_settings.parameters = ModelParameters()
39 all_model_settings.append(model_settings)
40
41 return all_model_settings
42
43 def _load_model_settings(self, model_settings_path: str) -> ModelSettings:
44 model_settings = ModelSettings.parse_file(model_settings_path)
45 model_settings._source = model_settings_path
46
47 # If name not present, default to folder name
48 model_settings_folder = os.path.dirname(model_settings_path)
49 folder_name = os.path.basename(model_settings_folder)
50 if model_settings.name:
51 if not self._folder_matches(folder_name, model_settings):
52 # Raise warning if name is different than folder's name
53 logger.warning(
54 f"Model name '{model_settings.name}' is different than "
55 f"model's folder name '{folder_name}'."
56 )
57 else:
58 model_settings.name = folder_name
59
60 if not model_settings.parameters:
61 model_settings.parameters = ModelParameters()
62
63 if not model_settings.parameters.uri:
64 # If not specified, default to its own folder
65 default_model_uri = os.path.dirname(model_settings_path)
66 model_settings.parameters.uri = default_model_uri
67
68 return model_settings
69
70 def _folder_matches(self, folder_name: str, model_settings: ModelSettings) -> bool:
71 if model_settings.name == folder_name:
72 return True
73
74 # To be compatible with Triton, check whether the folder name matches
75 # with the model's version
76 if model_settings.parameters and model_settings.parameters.version:
77 model_version = model_settings.parameters.version
78 if model_version == folder_name:
79 return True
80
81 return False
82
83 async def find(self, name: str) -> List[ModelSettings]:
84 all_settings = await self.list()
85 selected = []
86 for model_settings in all_settings:
87 # TODO: Implement other version policies (e.g. "Last N")
88 if model_settings.name == name:
89 selected.append(model_settings)
90
91 if len(selected) == 0:
92 raise ModelNotFound(name)
93
94 return selected
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/repository.py b/mlserver/repository.py
--- a/mlserver/repository.py
+++ b/mlserver/repository.py
@@ -24,7 +24,8 @@
# TODO: Use an async alternative for filesys ops
if self._root:
- pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
+ abs_root = os.path.abspath(self._root)
+ pattern = os.path.join(abs_root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
matches = glob.glob(pattern, recursive=True)
for model_settings_path in matches:
| {"golden_diff": "diff --git a/mlserver/repository.py b/mlserver/repository.py\n--- a/mlserver/repository.py\n+++ b/mlserver/repository.py\n@@ -24,7 +24,8 @@\n \n # TODO: Use an async alternative for filesys ops\n if self._root:\n- pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n+ abs_root = os.path.abspath(self._root)\n+ pattern = os.path.join(abs_root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n \n for model_settings_path in matches:\n", "issue": "starting mlserver using `mlserver start .` is not consistent with `mlserver start $PWD`\nWhen I started mlserver using `mlserver start .` in directory tree \r\n```\r\n\u2514\u2500\u2500 iris1\r\n \u2514\u2500\u2500 1\r\n \u251c\u2500\u2500 model.joblib\r\n \u2514\u2500\u2500 model-settings.json\r\n```\r\nand settings `{\"name\":\"iris1\",\"implementation\":\"mlserver_sklearn.SKLearnModel\",\"parameters\":{\"version\":\"1\"}}`\r\n\r\nresults in an error:\r\n```\r\nmlserver.errors.InvalidModelURI: Invalid URI specified for model iris1 (iris1/1/iris1/1)\r\n```\r\n\r\nHowever using\r\n`mlserver start $PWD` is successful.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\nfrom .logging import logger\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n model_settings._source = model_settings_path\n\n # If name not present, default to folder name\n model_settings_folder = os.path.dirname(model_settings_path)\n folder_name = os.path.basename(model_settings_folder)\n if model_settings.name:\n if not self._folder_matches(folder_name, model_settings):\n # Raise warning if name is different than folder's name\n logger.warning(\n f\"Model name '{model_settings.name}' is different than \"\n f\"model's folder name '{folder_name}'.\"\n )\n else:\n model_settings.name = folder_name\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n def _folder_matches(self, folder_name: str, model_settings: ModelSettings) -> bool:\n if model_settings.name == folder_name:\n return True\n\n # To be compatible with Triton, check whether the folder name matches\n # with the model's 
version\n if model_settings.parameters and model_settings.parameters.version:\n model_version = model_settings.parameters.version\n if model_version == folder_name:\n return True\n\n return False\n\n async def find(self, name: str) -> List[ModelSettings]:\n all_settings = await self.list()\n selected = []\n for model_settings in all_settings:\n # TODO: Implement other version policies (e.g. \"Last N\")\n if model_settings.name == name:\n selected.append(model_settings)\n\n if len(selected) == 0:\n raise ModelNotFound(name)\n\n return selected\n", "path": "mlserver/repository.py"}], "after_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\nfrom .logging import logger\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n abs_root = os.path.abspath(self._root)\n pattern = os.path.join(abs_root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n model_settings._source = model_settings_path\n\n # If name not present, default to folder name\n model_settings_folder = os.path.dirname(model_settings_path)\n folder_name = os.path.basename(model_settings_folder)\n if model_settings.name:\n if not self._folder_matches(folder_name, model_settings):\n # Raise warning if name is different than folder's name\n logger.warning(\n f\"Model name '{model_settings.name}' is different than \"\n f\"model's folder name '{folder_name}'.\"\n )\n else:\n model_settings.name = folder_name\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n def _folder_matches(self, folder_name: str, model_settings: ModelSettings) -> bool:\n if model_settings.name == folder_name:\n return True\n\n # To be compatible with Triton, check whether the folder name matches\n # with the model's version\n if model_settings.parameters and model_settings.parameters.version:\n model_version = model_settings.parameters.version\n if model_version == folder_name:\n return True\n\n return False\n\n async def find(self, name: str) -> List[ModelSettings]:\n all_settings = await self.list()\n selected = []\n for model_settings in all_settings:\n # TODO: Implement other version policies (e.g. 
\"Last N\")\n if model_settings.name == name:\n selected.append(model_settings)\n\n if len(selected) == 0:\n raise ModelNotFound(name)\n\n return selected\n", "path": "mlserver/repository.py"}]} | 1,236 | 126 |
gh_patches_debug_2870 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug for replay feature from pwd
Running the following command inside of a template repo:
`$ cookiecutter -o tmp .`
Will cause `replay.dump` to files like this:
`~/.cookiecutter_replay/..json`
Identified by @eliasdorneles
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/main.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import logging
16 import os
17 import re
18
19 from .config import get_user_config, USER_CONFIG_PATH
20 from .exceptions import InvalidModeException, RepositoryNotFound
21 from .prompt import prompt_for_config
22 from .generate import generate_context, generate_files
23 from .vcs import clone
24 from .replay import dump, load
25
26 logger = logging.getLogger(__name__)
27
28 builtin_abbreviations = {
29 'gh': 'https://github.com/{0}.git',
30 'bb': 'https://bitbucket.org/{0}',
31 }
32
33 REPO_REGEX = re.compile(r"""
34 (?x)
35 ((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.
36 | # or
37 (\w+@[\w\.]+) # something like user@...
38 )
39 """)
40
41
42 def is_repo_url(value):
43 """Return True if value is a repository URL."""
44 return bool(REPO_REGEX.match(value))
45
46
47 def expand_abbreviations(template, config_dict):
48 """
49 Expand abbreviations in a template name.
50
51 :param template: The project template name.
52 :param config_dict: The user config, which will contain abbreviation
53 definitions.
54 """
55
56 abbreviations = builtin_abbreviations.copy()
57 abbreviations.update(config_dict.get('abbreviations', {}))
58
59 if template in abbreviations:
60 return abbreviations[template]
61
62 # Split on colon. If there is no colon, rest will be empty
63 # and prefix will be the whole template
64 prefix, sep, rest = template.partition(':')
65 if prefix in abbreviations:
66 return abbreviations[prefix].format(rest)
67
68 return template
69
70
71 def cookiecutter(
72 template, checkout=None, no_input=False, extra_context=None,
73 replay=False, overwrite_if_exists=False, output_dir='.',
74 config_file=USER_CONFIG_PATH):
75 """
76 API equivalent to using Cookiecutter at the command line.
77
78 :param template: A directory containing a project template directory,
79 or a URL to a git repository.
80 :param checkout: The branch, tag or commit ID to checkout after clone.
81 :param no_input: Prompt the user at command line for manual configuration?
82 :param extra_context: A dictionary of context that overrides default
83 and user configuration.
84 :param: overwrite_if_exists: Overwrite the contents of output directory
85 if it exists
86 :param output_dir: Where to output the generated project dir into.
87 :param config_file: User configuration file path.
88 """
89 if replay and ((no_input is not False) or (extra_context is not None)):
90 err_msg = (
91 "You can not use both replay and no_input or extra_context "
92 "at the same time."
93 )
94 raise InvalidModeException(err_msg)
95
96 # Get user config from ~/.cookiecutterrc or equivalent
97 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
98 config_dict = get_user_config(config_file=config_file)
99
100 template = expand_abbreviations(template, config_dict)
101
102 if is_repo_url(template):
103 repo_dir = clone(
104 repo_url=template,
105 checkout=checkout,
106 clone_to_dir=config_dict['cookiecutters_dir'],
107 no_input=no_input
108 )
109 else:
110 # If it's a local repo, no need to clone or copy to your
111 # cookiecutters_dir
112 repo_dir = template
113
114 if not os.path.isdir(repo_dir):
115 raise RepositoryNotFound(
116 'The repository {0} could not be located.'.format(template)
117 )
118
119 template_name = os.path.basename(template)
120
121 if replay:
122 context = load(config_dict['replay_dir'], template_name)
123 else:
124 context_file = os.path.join(repo_dir, 'cookiecutter.json')
125 logging.debug('context_file is {0}'.format(context_file))
126
127 context = generate_context(
128 context_file=context_file,
129 default_context=config_dict['default_context'],
130 extra_context=extra_context,
131 )
132
133 # prompt the user to manually configure at the command line.
134 # except when 'no-input' flag is set
135 context['cookiecutter'] = prompt_for_config(context, no_input)
136
137 dump(config_dict['replay_dir'], template_name, context)
138
139 # Create project from local context and project template.
140 return generate_files(
141 repo_dir=repo_dir,
142 context=context,
143 overwrite_if_exists=overwrite_if_exists,
144 output_dir=output_dir
145 )
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -116,7 +116,7 @@
'The repository {0} could not be located.'.format(template)
)
- template_name = os.path.basename(template)
+ template_name = os.path.basename(os.path.abspath(template))
if replay:
context = load(config_dict['replay_dir'], template_name)
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -116,7 +116,7 @@\n 'The repository {0} could not be located.'.format(template)\n )\n \n- template_name = os.path.basename(template)\n+ template_name = os.path.basename(os.path.abspath(template))\n \n if replay:\n context = load(config_dict['replay_dir'], template_name)\n", "issue": "Bug for replay feature from pwd\nRunning the following command inside of a template repo:\n\n`$ cookiecutter -o tmp .`\n\nWill cause `replay.dump` to files like this:\n\n`~/.cookiecutter_replay/..json`\n\nIdentified by @eliasdorneles \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 
'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(os.path.abspath(template))\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", 
"path": "cookiecutter/main.py"}]} | 1,681 | 110 |
gh_patches_debug_13322 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove save button on disabled forms
Even if objects are archived and the corresponding forms are disabled, the save buttons are still visible, leading to errors when submitting.
Remove the buttons for:
- [ ] Pages
- [ ] Events
- [x] POIs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cms/views/pages/page_view.py`
Content:
```
1 """
2
3 Returns:
4 [type]: [description]
5 """
6 import logging
7
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required
10 from django.contrib.auth.mixins import PermissionRequiredMixin
11 from django.core.exceptions import PermissionDenied
12 from django.shortcuts import render, redirect
13 from django.utils.decorators import method_decorator
14 from django.utils.translation import ugettext as _
15 from django.views.generic import TemplateView
16
17 from ...constants import status
18 from ...decorators import region_permission_required
19 from ...forms.pages import PageForm, PageTranslationForm
20 from ...models import Page, PageTranslation, Region, Language
21
22 logger = logging.getLogger(__name__)
23
24
25 @method_decorator(login_required, name='dispatch')
26 @method_decorator(region_permission_required, name='dispatch')
27 class PageView(PermissionRequiredMixin, TemplateView):
28 permission_required = 'cms.view_pages'
29 raise_exception = True
30
31 template_name = 'pages/page_form.html'
32 base_context = {
33 'current_menu_item': 'pages',
34 'PUBLIC': status.PUBLIC
35 }
36
37 def get(self, request, *args, **kwargs):
38
39 region = Region.objects.get(slug=kwargs.get('region_slug'))
40
41 language = Language.objects.get(code=kwargs.get('language_code'))
42
43 # get page and translation objects if they exist
44 page = Page.objects.filter(id=kwargs.get('page_id')).first()
45 page_translation = PageTranslation.objects.filter(
46 page=page,
47 language=language,
48 ).first()
49
50 # Make form disabled if user has no permission to edit the page
51 disabled = not request.user.has_perm('cms.edit_page', page)
52 if disabled:
53 messages.warning(request, _("You don't have the permission to edit this page."))
54
55 page_form = PageForm(
56 instance=page,
57 region=region,
58 language=language,
59 disabled=disabled
60 )
61 page_translation_form = PageTranslationForm(
62 instance=page_translation,
63 disabled=disabled
64 )
65
66 return render(request, self.template_name, {
67 **self.base_context,
68 'page_form': page_form,
69 'page_translation_form': page_translation_form,
70 'page': page,
71 'language': language,
72 # Languages for tab view
73 'languages': region.languages if page else [language],
74 })
75
76 # pylint: disable=too-many-branches,unused-argument
77 def post(self, request, *args, **kwargs):
78
79 region = Region.objects.get(slug=kwargs.get('region_slug'))
80 language = Language.objects.get(code=kwargs.get('language_code'))
81
82 page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()
83 page_translation_instance = PageTranslation.objects.filter(
84 page=page_instance,
85 language=language,
86 ).first()
87
88 if not request.user.has_perm('cms.edit_page', page_instance):
89 raise PermissionDenied
90
91 page_form = PageForm(
92 request.POST,
93 instance=page_instance,
94 region=region,
95 language=language,
96 )
97 page_translation_form = PageTranslationForm(
98 request.POST,
99 instance=page_translation_instance,
100 region=region,
101 language=language,
102 )
103
104 if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:
105 if not request.user.has_perm('cms.publish_page', page_instance):
106 raise PermissionDenied
107
108 # TODO: error handling
109 if not page_form.is_valid() or not page_translation_form.is_valid():
110 messages.error(request, _('Errors have occurred.'))
111 return render(request, self.template_name, {
112 **self.base_context,
113 'page_form': page_form,
114 'page_translation_form': page_translation_form,
115 'page': page_instance,
116 'language': language,
117 # Languages for tab view
118 'languages': region.languages if page_instance else [language],
119 })
120
121 if not page_form.has_changed() and not page_translation_form.has_changed():
122 messages.info(request, _('No changes detected.'))
123 return render(request, self.template_name, {
124 **self.base_context,
125 'page_form': page_form,
126 'page_translation_form': page_translation_form,
127 'page': page_instance,
128 'language': language,
129 # Languages for tab view
130 'languages': region.languages if page_instance else [language],
131 })
132
133 page = page_form.save()
134 page_translation = page_translation_form.save(
135 page=page,
136 user=request.user,
137 )
138
139 published = page_translation.status == status.PUBLIC
140 if not page_instance:
141 if published:
142 messages.success(request, _('Page was successfully created and published.'))
143 else:
144 messages.success(request, _('Page was successfully created.'))
145 elif not page_translation_instance:
146 if published:
147 messages.success(request, _('Translation was successfully created and published.'))
148 else:
149 messages.success(request, _('Translation was successfully created.'))
150 else:
151 if published:
152 messages.success(request, _('Translation was successfully published.'))
153 else:
154 messages.success(request, _('Translation was successfully saved.'))
155
156 return redirect('edit_page', **{
157 'page_id': page.id,
158 'region_slug': region.slug,
159 'language_code': language.code,
160 })
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/cms/views/pages/page_view.py b/backend/cms/views/pages/page_view.py
--- a/backend/cms/views/pages/page_view.py
+++ b/backend/cms/views/pages/page_view.py
@@ -48,9 +48,14 @@
).first()
# Make form disabled if user has no permission to edit the page
- disabled = not request.user.has_perm('cms.edit_page', page)
- if disabled:
+ if not request.user.has_perm('cms.edit_page', page):
+ disabled = True
messages.warning(request, _("You don't have the permission to edit this page."))
+ elif page and page.archived:
+ disabled = True
+ messages.warning(request, _("You cannot edit this page because it is archived."))
+ else:
+ disabled = False
page_form = PageForm(
instance=page,
| {"golden_diff": "diff --git a/backend/cms/views/pages/page_view.py b/backend/cms/views/pages/page_view.py\n--- a/backend/cms/views/pages/page_view.py\n+++ b/backend/cms/views/pages/page_view.py\n@@ -48,9 +48,14 @@\n ).first()\n \n # Make form disabled if user has no permission to edit the page\n- disabled = not request.user.has_perm('cms.edit_page', page)\n- if disabled:\n+ if not request.user.has_perm('cms.edit_page', page):\n+ disabled = True\n messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n+ elif page and page.archived:\n+ disabled = True\n+ messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n+ else:\n+ disabled = False\n \n page_form = PageForm(\n instance=page,\n", "issue": "Remove save button on disabled forms\nEven if objects are archived and the corresponding forms are disabled, the save buttons are still visible, leading to errors when submitting.\r\nRemove the buttons for:\r\n- [ ] Pages\r\n- [ ] Events\r\n- [x] POIs\n", "before_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pages import PageForm, PageTranslationForm\nfrom ...models import Page, PageTranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass PageView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.view_pages'\n raise_exception = True\n\n template_name = 'pages/page_form.html'\n base_context = {\n 'current_menu_item': 'pages',\n 'PUBLIC': status.PUBLIC\n }\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get page and translation objects if they exist\n page = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation = PageTranslation.objects.filter(\n page=page,\n language=language,\n ).first()\n\n # Make form disabled if user has no permission to edit the page\n disabled = not request.user.has_perm('cms.edit_page', page)\n if disabled:\n messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n\n page_form = PageForm(\n instance=page,\n region=region,\n language=language,\n disabled=disabled\n )\n page_translation_form = PageTranslationForm(\n instance=page_translation,\n disabled=disabled\n )\n\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page else [language],\n })\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n page_instance = 
Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation_instance = PageTranslation.objects.filter(\n page=page_instance,\n language=language,\n ).first()\n\n if not request.user.has_perm('cms.edit_page', page_instance):\n raise PermissionDenied\n\n page_form = PageForm(\n request.POST,\n instance=page_instance,\n region=region,\n language=language,\n )\n page_translation_form = PageTranslationForm(\n request.POST,\n instance=page_translation_instance,\n region=region,\n language=language,\n )\n\n if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:\n if not request.user.has_perm('cms.publish_page', page_instance):\n raise PermissionDenied\n\n # TODO: error handling\n if not page_form.is_valid() or not page_translation_form.is_valid():\n messages.error(request, _('Errors have occurred.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n if not page_form.has_changed() and not page_translation_form.has_changed():\n messages.info(request, _('No changes detected.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n page = page_form.save()\n page_translation = page_translation_form.save(\n page=page,\n user=request.user,\n )\n\n published = page_translation.status == status.PUBLIC\n if not page_instance:\n if published:\n messages.success(request, _('Page was successfully created and published.'))\n else:\n messages.success(request, _('Page was successfully created.'))\n elif not page_translation_instance:\n if published:\n messages.success(request, _('Translation was successfully created and published.'))\n else:\n messages.success(request, _('Translation was successfully created.'))\n else:\n if published:\n messages.success(request, _('Translation was successfully published.'))\n else:\n messages.success(request, _('Translation was successfully saved.'))\n\n return redirect('edit_page', **{\n 'page_id': page.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n", "path": "backend/cms/views/pages/page_view.py"}], "after_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pages import PageForm, PageTranslationForm\nfrom ...models import Page, PageTranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass PageView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.view_pages'\n raise_exception = True\n\n template_name = 
'pages/page_form.html'\n base_context = {\n 'current_menu_item': 'pages',\n 'PUBLIC': status.PUBLIC\n }\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get page and translation objects if they exist\n page = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation = PageTranslation.objects.filter(\n page=page,\n language=language,\n ).first()\n\n # Make form disabled if user has no permission to edit the page\n if not request.user.has_perm('cms.edit_page', page):\n disabled = True\n messages.warning(request, _(\"You don't have the permission to edit this page.\"))\n elif page and page.archived:\n disabled = True\n messages.warning(request, _(\"You cannot edit this page because it is archived.\"))\n else:\n disabled = False\n\n page_form = PageForm(\n instance=page,\n region=region,\n language=language,\n disabled=disabled\n )\n page_translation_form = PageTranslationForm(\n instance=page_translation,\n disabled=disabled\n )\n\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page else [language],\n })\n\n # pylint: disable=too-many-branches,unused-argument\n def post(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n page_instance = Page.objects.filter(id=kwargs.get('page_id')).first()\n page_translation_instance = PageTranslation.objects.filter(\n page=page_instance,\n language=language,\n ).first()\n\n if not request.user.has_perm('cms.edit_page', page_instance):\n raise PermissionDenied\n\n page_form = PageForm(\n request.POST,\n instance=page_instance,\n region=region,\n language=language,\n )\n page_translation_form = PageTranslationForm(\n request.POST,\n instance=page_translation_instance,\n region=region,\n language=language,\n )\n\n if page_translation_form.data.get('public') and 'public' in page_translation_form.changed_data:\n if not request.user.has_perm('cms.publish_page', page_instance):\n raise PermissionDenied\n\n # TODO: error handling\n if not page_form.is_valid() or not page_translation_form.is_valid():\n messages.error(request, _('Errors have occurred.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n if not page_form.has_changed() and not page_translation_form.has_changed():\n messages.info(request, _('No changes detected.'))\n return render(request, self.template_name, {\n **self.base_context,\n 'page_form': page_form,\n 'page_translation_form': page_translation_form,\n 'page': page_instance,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if page_instance else [language],\n })\n\n page = page_form.save()\n page_translation = page_translation_form.save(\n page=page,\n user=request.user,\n )\n\n published = page_translation.status == status.PUBLIC\n if not page_instance:\n if published:\n messages.success(request, _('Page was successfully created and published.'))\n else:\n messages.success(request, _('Page was successfully 
created.'))\n elif not page_translation_instance:\n if published:\n messages.success(request, _('Translation was successfully created and published.'))\n else:\n messages.success(request, _('Translation was successfully created.'))\n else:\n if published:\n messages.success(request, _('Translation was successfully published.'))\n else:\n messages.success(request, _('Translation was successfully saved.'))\n\n return redirect('edit_page', **{\n 'page_id': page.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n", "path": "backend/cms/views/pages/page_view.py"}]} | 1,788 | 188 |
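For the integreat-cms row above, the patch disables the page form when the page is archived in addition to the existing permission check, which is what lets the template drop the save button. A small standalone sketch of that check, using stand-in objects so it runs outside Django; the permission string and attribute names mirror `page_view.py`, and the stand-ins are purely illustrative.

```python
from types import SimpleNamespace

def form_disabled(user, page) -> bool:
    """Sketch of the check the patch adds: the form (and therefore its save
    button) is disabled when the user may not edit the page or the page is
    archived."""
    if not user.has_perm("cms.edit_page", page):
        return True
    return page is not None and page.archived

# Tiny stand-ins just to exercise the helper without a real Django user/page.
editor = SimpleNamespace(has_perm=lambda perm, obj: True)
archived_page = SimpleNamespace(archived=True)
print(form_disabled(editor, archived_page))   # True -> hide the save button
```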
gh_patches_debug_59100 | rasdani/github-patches | git_diff | kserve__kserve-2343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KServe 0.9 release tracking
/kind feature
**Describe the solution you'd like**
KServe 0.9 release tracking:
RC release Date: 6/13/2022
Release Date: 6/27/2022
## KServe Model Serving:
- [X] Storage spec for unifying single model serving and model mesh
- https://github.com/kserve/kserve/pull/1899 @Tomcli
- [x] Transformer ModelMesh support
- https://github.com/kserve/kserve/pull/2136 @chinhuang007
- [x] Model Status API for unifying single model serving and model mesh
- https://github.com/kserve/kserve/pull/2084 @pvaneck
- https://github.com/kserve/kserve/pull/2088 @Suresh-Nakkeran
- [x] Inferece Graph v1alpha1 API and impmentation
- https://github.com/kserve/kserve/pull/1910 @yuzisun @Iamlovingit
- [X] KServe control plane HA
- https://github.com/kserve/kserve/pull/2160 @Suresh-Nakkeran
- [X] Enable inference protocol version auto selection for servingruntime
- https://github.com/kserve/kserve/pull/2118 @Suresh-Nakkeran
- [x] Webhdfs storage uri support
- https://github.com/kserve/kserve/pull/2077 @markwinter
- [x] Azure file share support for storage initializer
- https://github.com/kserve/kserve/pull/1985 @laozc
- [x] KServe Autoscaling spec API
- https://github.com/kserve/kserve/pull/2082 @andyi2it
- [X] KServe ingress class and domain template support for raw deployment mode
- https://github.com/kserve/kserve/pull/2054 @pradithya
- https://github.com/kserve/kserve/pull/2049 @pradithya
## ModelMesh:
- [X] OpenVINO model server support
- https://github.com/kserve/modelmesh-runtime-adapter/pull/18 @tjohnson31415
- [x] Import ServingRuntime and InferenceService types from KServe
- https://github.com/kserve/modelmesh-serving/pull/146 @tjohnson31415
- https://github.com/kserve/modelmesh-serving/pull/140 @pvaneck
- [x] Azure storage support for ModelMesh
- https://github.com/kserve/modelmesh-runtime-adapter/pull/23 @pvaneck
## Models UI:
- [x] Models Web App KServe 0.8 release support
- https://github.com/kserve/models-web-app/pull/35 @DavidSpek
## Website:
- [x] Website doc update
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kserve/setup.py`
Content:
```
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-xdist',
20 'pytest-cov',
21 'pytest-asyncio',
22 'pytest-tornasync',
23 'mypy'
24 ]
25
26 with open('requirements.txt') as f:
27 REQUIRES = f.readlines()
28
29 setuptools.setup(
30 name='kserve',
31 version='0.9.0rc0',
32 author="The KServe Authors",
33 author_email='[email protected], [email protected], [email protected]',
34 license="Apache License Version 2.0",
35 url="https://github.com/kserve/kserve/tree/master/python/kserve",
36 description="KServe Python SDK",
37 long_description="Python SDK for KServe Server and Client.",
38 python_requires='>=3.7',
39 packages=[
40 'kserve',
41 'kserve.api',
42 'kserve.constants',
43 'kserve.models',
44 'kserve.handlers',
45 'kserve.utils',
46 ],
47 package_data={'': ['requirements.txt']},
48 include_package_data=True,
49 zip_safe=False,
50 classifiers=[
51 'Intended Audience :: Developers',
52 'Intended Audience :: Education',
53 'Intended Audience :: Science/Research',
54 'Programming Language :: Python :: 3',
55 'Programming Language :: Python :: 3.7',
56 'Programming Language :: Python :: 3.8',
57 'Programming Language :: Python :: 3.9',
58 "License :: OSI Approved :: Apache Software License",
59 "Operating System :: OS Independent",
60 'Topic :: Scientific/Engineering',
61 'Topic :: Scientific/Engineering :: Artificial Intelligence',
62 'Topic :: Software Development',
63 'Topic :: Software Development :: Libraries',
64 'Topic :: Software Development :: Libraries :: Python Modules',
65 ],
66 install_requires=REQUIRES,
67 tests_require=TESTS_REQUIRES,
68 extras_require={'test': TESTS_REQUIRES}
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -28,7 +28,7 @@
setuptools.setup(
name='kserve',
- version='0.9.0rc0',
+ version='0.9.0',
author="The KServe Authors",
author_email='[email protected], [email protected], [email protected]',
license="Apache License Version 2.0",
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -28,7 +28,7 @@\n \n setuptools.setup(\n name='kserve',\n- version='0.9.0rc0',\n+ version='0.9.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n", "issue": "KServe 0.9 release tracking\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKServe 0.9 release tracking:\r\nRC release Date: 6/13/2022\r\nRelease Date: 6/27/2022\r\n\r\n## KServe Model Serving:\r\n- [X] Storage spec for unifying single model serving and model mesh\r\n - https://github.com/kserve/kserve/pull/1899 @Tomcli \r\n- [x] Transformer ModelMesh support\r\n - https://github.com/kserve/kserve/pull/2136 @chinhuang007 \r\n- [x] Model Status API for unifying single model serving and model mesh\r\n - https://github.com/kserve/kserve/pull/2084 @pvaneck \r\n - https://github.com/kserve/kserve/pull/2088 @Suresh-Nakkeran \r\n- [x] Inferece Graph v1alpha1 API and impmentation\r\n - https://github.com/kserve/kserve/pull/1910 @yuzisun @Iamlovingit \r\n- [X] KServe control plane HA\r\n - https://github.com/kserve/kserve/pull/2160 @Suresh-Nakkeran \r\n- [X] Enable inference protocol version auto selection for servingruntime \r\n - https://github.com/kserve/kserve/pull/2118 @Suresh-Nakkeran \r\n- [x] Webhdfs storage uri support\r\n - https://github.com/kserve/kserve/pull/2077 @markwinter \r\n- [x] Azure file share support for storage initializer \r\n - https://github.com/kserve/kserve/pull/1985 @laozc \r\n- [x] KServe Autoscaling spec API\r\n - https://github.com/kserve/kserve/pull/2082 @andyi2it \r\n- [X] KServe ingress class and domain template support for raw deployment mode\r\n - https://github.com/kserve/kserve/pull/2054 @pradithya \r\n - https://github.com/kserve/kserve/pull/2049 @pradithya \r\n\r\n## ModelMesh:\r\n- [X] OpenVINO model server support\r\n - https://github.com/kserve/modelmesh-runtime-adapter/pull/18 @tjohnson31415\r\n- [x] Import ServingRuntime and InferenceService types from KServe \r\n - https://github.com/kserve/modelmesh-serving/pull/146 @tjohnson31415 \r\n - https://github.com/kserve/modelmesh-serving/pull/140 @pvaneck \r\n- [x] Azure storage support for ModelMesh\r\n - https://github.com/kserve/modelmesh-runtime-adapter/pull/23 @pvaneck \r\n\r\n## Models UI:\r\n- [x] Models Web App KServe 0.8 release support \r\n - https://github.com/kserve/models-web-app/pull/35 @DavidSpek \r\n\r\n \r\n## Website: \r\n- [x] Website doc update\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = 
f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.9.0rc0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.9.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n 
install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,639 | 124 |
gh_patches_debug_28851 | rasdani/github-patches | git_diff | webkom__lego-2560 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RestrictedMail notification
> Restricted mail is used when sending mails to multiple users at once by selecting users/events/meetings, and then send the email to <[email protected]> together with the token.
The `restricted mail sent` should be sent to the proper email, not the `user.email` field. The address `user.email_address` should be used instead.
If the `from_address` is not the same as the `user.email_address`, both should receive the mail.
https://github.com/webkom/lego/blob/ccab14fbee223f16842ace6ca2ba0c2f3ac3ac86/lego/apps/restricted/notifications.py#L9
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lego/apps/restricted/serializers.py`
Content:
```
1 from lego.apps.events.fields import PublicEventListField
2 from lego.apps.meetings.fields import MeetingListField
3 from lego.apps.restricted.models import RestrictedMail
4 from lego.apps.users.fields import AbakusGroupListField, PublicUserListField
5 from lego.utils.serializers import BasisModelSerializer
6
7
8 class RestrictedMailListSerializer(BasisModelSerializer):
9 class Meta:
10 model = RestrictedMail
11 fields = ("id", "from_address", "hide_sender", "used", "created_at", "weekly")
12 read_only_fields = ("used", "created_at")
13
14 def save(self, **kwargs):
15 kwargs["token"] = RestrictedMail.create_token()
16 return super().save(**kwargs)
17
18
19 class RestrictedMailSerializer(RestrictedMailListSerializer):
20 class Meta(RestrictedMailListSerializer.Meta):
21 fields = RestrictedMailListSerializer.Meta.fields + (
22 "users",
23 "groups",
24 "events",
25 "meetings",
26 "raw_addresses",
27 "weekly",
28 "hide_sender",
29 )
30
31
32 class RestrictedMailDetailSerializer(RestrictedMailSerializer):
33 users = PublicUserListField({"read_only": True})
34 groups = AbakusGroupListField({"read_only": True})
35 events = PublicEventListField({"read_only": True})
36 meetings = MeetingListField({"read_only": True})
37
38 class Meta(RestrictedMailSerializer.Meta):
39 fields = RestrictedMailSerializer.Meta.fields + ("token_query_param",)
40
```
Path: `lego/apps/restricted/notifications.py`
Content:
```
1 from lego.apps.notifications.constants import RESTRICTED_MAIL_SENT
2 from lego.apps.notifications.notification import Notification
3
4
5 class RestrictedMailSentNotification(Notification):
6
7 name = RESTRICTED_MAIL_SENT
8
9 def generate_mail(self):
10 return self._delay_mail(
11 to_email=self.user.email,
12 context={"first_name": self.user.first_name},
13 subject="Begrenset epost sendt ut",
14 plain_template="restricted/email/process_success.txt",
15 html_template="restricted/email/process_success.html",
16 )
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lego/apps/restricted/notifications.py b/lego/apps/restricted/notifications.py
--- a/lego/apps/restricted/notifications.py
+++ b/lego/apps/restricted/notifications.py
@@ -8,7 +8,7 @@
def generate_mail(self):
return self._delay_mail(
- to_email=self.user.email,
+ to_email=self.user.email.address,
context={"first_name": self.user.first_name},
subject="Begrenset epost sendt ut",
plain_template="restricted/email/process_success.txt",
diff --git a/lego/apps/restricted/serializers.py b/lego/apps/restricted/serializers.py
--- a/lego/apps/restricted/serializers.py
+++ b/lego/apps/restricted/serializers.py
@@ -1,3 +1,7 @@
+from functools import reduce
+
+from rest_framework import exceptions
+
from lego.apps.events.fields import PublicEventListField
from lego.apps.meetings.fields import MeetingListField
from lego.apps.restricted.models import RestrictedMail
@@ -28,6 +32,18 @@
"hide_sender",
)
+ def create(self, validated_data):
+ groups = validated_data["groups"]
+ events = validated_data["events"]
+ MaxPermittedAmout = 500
+ num = reduce((lambda a, b: a + b.number_of_users), groups, 0)
+ num += reduce((lambda a, b: a + b.registration_count), events, 0)
+ if num > MaxPermittedAmout:
+ raise exceptions.ValidationError(
+ f"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}"
+ )
+ return super().create(validated_data)
+
class RestrictedMailDetailSerializer(RestrictedMailSerializer):
users = PublicUserListField({"read_only": True})
| {"golden_diff": "diff --git a/lego/apps/restricted/notifications.py b/lego/apps/restricted/notifications.py\n--- a/lego/apps/restricted/notifications.py\n+++ b/lego/apps/restricted/notifications.py\n@@ -8,7 +8,7 @@\n \n def generate_mail(self):\n return self._delay_mail(\n- to_email=self.user.email,\n+ to_email=self.user.email.address,\n context={\"first_name\": self.user.first_name},\n subject=\"Begrenset epost sendt ut\",\n plain_template=\"restricted/email/process_success.txt\",\ndiff --git a/lego/apps/restricted/serializers.py b/lego/apps/restricted/serializers.py\n--- a/lego/apps/restricted/serializers.py\n+++ b/lego/apps/restricted/serializers.py\n@@ -1,3 +1,7 @@\n+from functools import reduce\n+\n+from rest_framework import exceptions\n+\n from lego.apps.events.fields import PublicEventListField\n from lego.apps.meetings.fields import MeetingListField\n from lego.apps.restricted.models import RestrictedMail\n@@ -28,6 +32,18 @@\n \"hide_sender\",\n )\n \n+ def create(self, validated_data):\n+ groups = validated_data[\"groups\"]\n+ events = validated_data[\"events\"]\n+ MaxPermittedAmout = 500\n+ num = reduce((lambda a, b: a + b.number_of_users), groups, 0)\n+ num += reduce((lambda a, b: a + b.registration_count), events, 0)\n+ if num > MaxPermittedAmout:\n+ raise exceptions.ValidationError(\n+ f\"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}\"\n+ )\n+ return super().create(validated_data)\n+\n \n class RestrictedMailDetailSerializer(RestrictedMailSerializer):\n users = PublicUserListField({\"read_only\": True})\n", "issue": "RestrictedMail notification\n> Restricted mail is used when sending mails to multiple users at once by selecting users/events/meetings, and then send the email to <[email protected]> together with the token.\r\n\r\nThe `restricted mail sent` should be sent to the proper email, not the `user.email` field. 
The address `user.email_address` should be used instead.\r\n\r\nIf the `from_address` is not the same as the `user.email_address`, both should receive the mail.\r\n\r\nhttps://github.com/webkom/lego/blob/ccab14fbee223f16842ace6ca2ba0c2f3ac3ac86/lego/apps/restricted/notifications.py#L9\n", "before_files": [{"content": "from lego.apps.events.fields import PublicEventListField\nfrom lego.apps.meetings.fields import MeetingListField\nfrom lego.apps.restricted.models import RestrictedMail\nfrom lego.apps.users.fields import AbakusGroupListField, PublicUserListField\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass RestrictedMailListSerializer(BasisModelSerializer):\n class Meta:\n model = RestrictedMail\n fields = (\"id\", \"from_address\", \"hide_sender\", \"used\", \"created_at\", \"weekly\")\n read_only_fields = (\"used\", \"created_at\")\n\n def save(self, **kwargs):\n kwargs[\"token\"] = RestrictedMail.create_token()\n return super().save(**kwargs)\n\n\nclass RestrictedMailSerializer(RestrictedMailListSerializer):\n class Meta(RestrictedMailListSerializer.Meta):\n fields = RestrictedMailListSerializer.Meta.fields + (\n \"users\",\n \"groups\",\n \"events\",\n \"meetings\",\n \"raw_addresses\",\n \"weekly\",\n \"hide_sender\",\n )\n\n\nclass RestrictedMailDetailSerializer(RestrictedMailSerializer):\n users = PublicUserListField({\"read_only\": True})\n groups = AbakusGroupListField({\"read_only\": True})\n events = PublicEventListField({\"read_only\": True})\n meetings = MeetingListField({\"read_only\": True})\n\n class Meta(RestrictedMailSerializer.Meta):\n fields = RestrictedMailSerializer.Meta.fields + (\"token_query_param\",)\n", "path": "lego/apps/restricted/serializers.py"}, {"content": "from lego.apps.notifications.constants import RESTRICTED_MAIL_SENT\nfrom lego.apps.notifications.notification import Notification\n\n\nclass RestrictedMailSentNotification(Notification):\n\n name = RESTRICTED_MAIL_SENT\n\n def generate_mail(self):\n return self._delay_mail(\n to_email=self.user.email,\n context={\"first_name\": self.user.first_name},\n subject=\"Begrenset epost sendt ut\",\n plain_template=\"restricted/email/process_success.txt\",\n html_template=\"restricted/email/process_success.html\",\n )\n", "path": "lego/apps/restricted/notifications.py"}], "after_files": [{"content": "from functools import reduce\n\nfrom rest_framework import exceptions\n\nfrom lego.apps.events.fields import PublicEventListField\nfrom lego.apps.meetings.fields import MeetingListField\nfrom lego.apps.restricted.models import RestrictedMail\nfrom lego.apps.users.fields import AbakusGroupListField, PublicUserListField\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass RestrictedMailListSerializer(BasisModelSerializer):\n class Meta:\n model = RestrictedMail\n fields = (\"id\", \"from_address\", \"hide_sender\", \"used\", \"created_at\", \"weekly\")\n read_only_fields = (\"used\", \"created_at\")\n\n def save(self, **kwargs):\n kwargs[\"token\"] = RestrictedMail.create_token()\n return super().save(**kwargs)\n\n\nclass RestrictedMailSerializer(RestrictedMailListSerializer):\n class Meta(RestrictedMailListSerializer.Meta):\n fields = RestrictedMailListSerializer.Meta.fields + (\n \"users\",\n \"groups\",\n \"events\",\n \"meetings\",\n \"raw_addresses\",\n \"weekly\",\n \"hide_sender\",\n )\n\n def create(self, validated_data):\n groups = validated_data[\"groups\"]\n events = validated_data[\"events\"]\n MaxPermittedAmout = 500\n num = reduce((lambda a, b: a + b.number_of_users), 
groups, 0)\n num += reduce((lambda a, b: a + b.registration_count), events, 0)\n if num > MaxPermittedAmout:\n raise exceptions.ValidationError(\n f\"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}\"\n )\n return super().create(validated_data)\n\n\nclass RestrictedMailDetailSerializer(RestrictedMailSerializer):\n users = PublicUserListField({\"read_only\": True})\n groups = AbakusGroupListField({\"read_only\": True})\n events = PublicEventListField({\"read_only\": True})\n meetings = MeetingListField({\"read_only\": True})\n\n class Meta(RestrictedMailSerializer.Meta):\n fields = RestrictedMailSerializer.Meta.fields + (\"token_query_param\",)\n", "path": "lego/apps/restricted/serializers.py"}, {"content": "from lego.apps.notifications.constants import RESTRICTED_MAIL_SENT\nfrom lego.apps.notifications.notification import Notification\n\n\nclass RestrictedMailSentNotification(Notification):\n\n name = RESTRICTED_MAIL_SENT\n\n def generate_mail(self):\n return self._delay_mail(\n to_email=self.user.email.address,\n context={\"first_name\": self.user.first_name},\n subject=\"Begrenset epost sendt ut\",\n plain_template=\"restricted/email/process_success.txt\",\n html_template=\"restricted/email/process_success.html\",\n )\n", "path": "lego/apps/restricted/notifications.py"}]} | 952 | 422 |
gh_patches_debug_3626 | rasdani/github-patches | git_diff | ivy-llc__ivy-25492 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multinomial
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/random.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.5.1 and below": ("float32", "float64")},
12 "paddle",
13 )
14 @to_ivy_arrays_and_back
15 def normal(mean=0.0, std=1.0, shape=None, name=None):
16 return ivy.random_normal(mean=mean, std=std, shape=shape)
17
18
19 @with_supported_dtypes(
20 {"2.5.1 and below": ("float32", "float64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def poisson(x, name=None):
25 return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)
26
27
28 @with_supported_device_and_dtypes(
29 {
30 "2.5.1 and above": {
31 "cpu": (
32 "bfloat16",
33 "float32",
34 "float64",
35 ),
36 "gpu": (
37 "bfloat16",
38 "float16",
39 "float32",
40 "float64",
41 ),
42 },
43 "2.4.2 and below": {
44 "cpu": (
45 "float32",
46 "float64",
47 ),
48 "gpu": (
49 "float16",
50 "float32",
51 "float64",
52 ),
53 },
54 },
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def rand(shape, dtype=None, name=None):
59 return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)
60
61
62 @to_ivy_arrays_and_back
63 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
64 return ivy.randint(low, high, shape=shape, dtype=dtype)
65
66
67 @with_unsupported_dtypes(
68 {"2.5.1 and below": ("int16", "float16", "bfloat16", "uint8")},
69 "paddle",
70 )
71 @to_ivy_arrays_and_back
72 def randint_like(x, low=0, high=None, dtype=None, name=None):
73 if high is None:
74 high = low
75 low = 0
76 if high <= 0:
77 raise ivy.exceptions.IvyError(
78 "If high is None, low must be greater than 0, but received low = 0."
79 )
80 return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)
81
82
83 def randn(shape, dtype=None, name=None):
84 if dtype not in ["float32", "float64"]:
85 raise ivy.exceptions.IvyError(
86 "Unsupported dtype for randn, only float32 and float64 are supported, "
87 )
88 return ivy.random_normal(shape=shape, dtype=dtype, seed=None)
89
90
91 @with_supported_dtypes(
92 {"2.5.1 and below": ("float32", "float64")},
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def standard_normal(shape, dtype=None, name=None):
97 return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)
98
99
100 @with_supported_dtypes(
101 {"2.5.1 and below": ("float32", "float64")},
102 "paddle",
103 )
104 @to_ivy_arrays_and_back
105 def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
106 return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/random.py b/ivy/functional/frontends/paddle/random.py
--- a/ivy/functional/frontends/paddle/random.py
+++ b/ivy/functional/frontends/paddle/random.py
@@ -7,6 +7,16 @@
)
+@with_supported_dtypes(
+ {"2.5.1 and below": ("float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def multinomial(x, num_samples=1, replacement=False, name=None):
+ n = num_samples + 1
+ return ivy.multinomial(n, num_samples, probs=x, replace=replacement)
+
+
@with_supported_dtypes(
{"2.5.1 and below": ("float32", "float64")},
"paddle",
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/random.py b/ivy/functional/frontends/paddle/random.py\n--- a/ivy/functional/frontends/paddle/random.py\n+++ b/ivy/functional/frontends/paddle/random.py\n@@ -7,6 +7,16 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\"2.5.1 and below\": (\"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def multinomial(x, num_samples=1, replacement=False, name=None):\n+ n = num_samples + 1\n+ return ivy.multinomial(n, num_samples, probs=x, replace=replacement)\n+\n+\n @with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n", "issue": "multinomial\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean=0.0, std=1.0, shape=None, name=None):\n return ivy.random_normal(mean=mean, std=std, shape=shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef poisson(x, name=None):\n return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rand(shape, dtype=None, name=None):\n return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(low=0, high=None, shape=[1], dtype=None, name=None):\n return ivy.randint(low, high, shape=shape, dtype=dtype)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"float16\", \"bfloat16\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef randint_like(x, low=0, high=None, dtype=None, name=None):\n if high is None:\n high = low\n low = 0\n if high <= 0:\n raise ivy.exceptions.IvyError(\n \"If high is None, low must be greater than 0, but received low = 0.\"\n )\n return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)\n\n\ndef randn(shape, dtype=None, name=None):\n if dtype not in [\"float32\", \"float64\"]:\n raise ivy.exceptions.IvyError(\n \"Unsupported dtype for randn, only float32 and float64 are supported, \"\n )\n return ivy.random_normal(shape=shape, dtype=dtype, seed=None)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef standard_normal(shape, dtype=None, name=None):\n return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/paddle/random.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.func_wrapper import 
with_supported_device_and_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(x, num_samples=1, replacement=False, name=None):\n n = num_samples + 1\n return ivy.multinomial(n, num_samples, probs=x, replace=replacement)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean=0.0, std=1.0, shape=None, name=None):\n return ivy.random_normal(mean=mean, std=std, shape=shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef poisson(x, name=None):\n return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rand(shape, dtype=None, name=None):\n return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(low=0, high=None, shape=[1], dtype=None, name=None):\n return ivy.randint(low, high, shape=shape, dtype=dtype)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"float16\", \"bfloat16\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef randint_like(x, low=0, high=None, dtype=None, name=None):\n if high is None:\n high = low\n low = 0\n if high <= 0:\n raise ivy.exceptions.IvyError(\n \"If high is None, low must be greater than 0, but received low = 0.\"\n )\n return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)\n\n\ndef randn(shape, dtype=None, name=None):\n if dtype not in [\"float32\", \"float64\"]:\n raise ivy.exceptions.IvyError(\n \"Unsupported dtype for randn, only float32 and float64 are supported, \"\n )\n return ivy.random_normal(shape=shape, dtype=dtype, seed=None)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef standard_normal(shape, dtype=None, name=None):\n return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/paddle/random.py"}]} | 1,324 | 190 |
gh_patches_debug_25453 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-113 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`python3.7` raises `flake8` warning
It is a bug in `flake8`:
- https://github.com/PyCQA/pycodestyle/issues/728
We currently allow `python3.7` build to fail.
```
=============================== warnings summary ===============================
tests/test_visitors/test_wrong_class/test_base_class.py::FLAKE8
/home/travis/virtualenv/python3.7.0/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
-- Docs: https://docs.pytest.org/en/latest/warnings.html
=================== 1514 passed, 1 warnings in 27.96 seconds ===================
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/compat.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 This module contains ugly hacks and fixes for version compat issues.
5
6 Do not be over-exited to add anything here.
7 """
8
9 import ast
10
11
12 def maybe_set_parent(tree: ast.AST) -> ast.AST:
13 """Sets parents for all nodes that do not have this prop."""
14 for statement in ast.walk(tree):
15 for child in ast.iter_child_nodes(statement):
16 if not hasattr(child, 'parent'): # noqa: Z113
17 setattr(child, 'parent', statement)
18
19 return tree
20
```
Path: `wemake_python_styleguide/checker.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from ast import Module
4 from typing import Generator
5
6 from flake8.options.manager import OptionManager
7
8 from wemake_python_styleguide import constants
9 from wemake_python_styleguide.compat import maybe_set_parent
10 from wemake_python_styleguide.options.config import Configuration
11 from wemake_python_styleguide.types import (
12 CheckerSequence,
13 CheckResult,
14 ConfigurationOptions,
15 )
16 from wemake_python_styleguide.version import version
17 from wemake_python_styleguide.visitors.ast.complexity.counts import (
18 MethodMembersVisitor,
19 ModuleMembersVisitor,
20 )
21 from wemake_python_styleguide.visitors.ast.complexity.function import (
22 FunctionComplexityVisitor,
23 )
24 from wemake_python_styleguide.visitors.ast.complexity.nested import (
25 NestedComplexityVisitor,
26 )
27 from wemake_python_styleguide.visitors.ast.complexity.offset import (
28 OffsetVisitor,
29 )
30 from wemake_python_styleguide.visitors.ast.wrong_class import WrongClassVisitor
31 from wemake_python_styleguide.visitors.ast.wrong_contents import (
32 WrongContentsVisitor,
33 )
34 from wemake_python_styleguide.visitors.ast.wrong_function_call import (
35 WrongFunctionCallVisitor,
36 )
37 from wemake_python_styleguide.visitors.ast.wrong_import import (
38 WrongImportVisitor,
39 )
40 from wemake_python_styleguide.visitors.ast.wrong_keyword import (
41 WrongKeywordVisitor,
42 WrongRaiseVisitor,
43 )
44 from wemake_python_styleguide.visitors.ast.wrong_name import (
45 WrongModuleMetadataVisitor,
46 WrongNameVisitor,
47 )
48 from wemake_python_styleguide.visitors.ast.wrong_string import (
49 WrongStringVisitor,
50 )
51 from wemake_python_styleguide.visitors.filenames.wrong_module_name import (
52 WrongModuleNameVisitor,
53 )
54
55 #: Visitors that should be working by default:
56 ENABLED_VISITORS: CheckerSequence = [
57 # Styling and correctness:
58 WrongRaiseVisitor,
59 WrongFunctionCallVisitor,
60 WrongImportVisitor,
61 WrongKeywordVisitor,
62 WrongNameVisitor,
63 WrongModuleMetadataVisitor,
64 WrongClassVisitor,
65 WrongStringVisitor,
66 WrongContentsVisitor,
67
68 # Complexity:
69 FunctionComplexityVisitor,
70 NestedComplexityVisitor,
71 OffsetVisitor,
72 ModuleMembersVisitor,
73 MethodMembersVisitor,
74
75 # Modules:
76 WrongModuleNameVisitor,
77 ]
78
79
80 class Checker(object):
81 """
82 Main checker class.
83
84 Runs all checks that are bundled with this package.
85 If you want to add new checks they should be added to ``ENABLED_VISITORS``.
86 """
87
88 name = 'wemake-python-styleguide'
89 version = version
90
91 config = Configuration()
92 options: ConfigurationOptions
93
94 def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:
95 """Creates new checker instance."""
96 self.tree = maybe_set_parent(tree)
97 self.filename = filename
98
99 @classmethod
100 def add_options(cls, parser: OptionManager) -> None:
101 """Calls Configuration instance method for registering options."""
102 cls.config.register_options(parser)
103
104 @classmethod
105 def parse_options(cls, options: ConfigurationOptions) -> None:
106 """Parses registered options for providing to the visitor."""
107 cls.options = options
108
109 def run(self) -> Generator[CheckResult, None, None]:
110 """
111 Runs the checker.
112
113 This method is used by `flake8` API.
114 After all configuration is parsed and passed.
115 """
116 for visitor_class in ENABLED_VISITORS:
117 visitor = visitor_class(
118 self.options,
119 tree=self.tree,
120 filename=self.filename,
121 )
122 visitor.run()
123
124 for error in visitor.errors:
125 yield (*error.node_items(), type(self))
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py
--- a/wemake_python_styleguide/checker.py
+++ b/wemake_python_styleguide/checker.py
@@ -6,7 +6,6 @@
from flake8.options.manager import OptionManager
from wemake_python_styleguide import constants
-from wemake_python_styleguide.compat import maybe_set_parent
from wemake_python_styleguide.options.config import Configuration
from wemake_python_styleguide.types import (
CheckerSequence,
@@ -93,7 +92,7 @@
def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:
"""Creates new checker instance."""
- self.tree = maybe_set_parent(tree)
+ self.tree = tree
self.filename = filename
@classmethod
diff --git a/wemake_python_styleguide/compat.py b/wemake_python_styleguide/compat.py
deleted file mode 100644
--- a/wemake_python_styleguide/compat.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-This module contains ugly hacks and fixes for version compat issues.
-
-Do not be over-exited to add anything here.
-"""
-
-import ast
-
-
-def maybe_set_parent(tree: ast.AST) -> ast.AST:
- """Sets parents for all nodes that do not have this prop."""
- for statement in ast.walk(tree):
- for child in ast.iter_child_nodes(statement):
- if not hasattr(child, 'parent'): # noqa: Z113
- setattr(child, 'parent', statement)
-
- return tree
| {"golden_diff": "diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py\n--- a/wemake_python_styleguide/checker.py\n+++ b/wemake_python_styleguide/checker.py\n@@ -6,7 +6,6 @@\n from flake8.options.manager import OptionManager\n \n from wemake_python_styleguide import constants\n-from wemake_python_styleguide.compat import maybe_set_parent\n from wemake_python_styleguide.options.config import Configuration\n from wemake_python_styleguide.types import (\n CheckerSequence,\n@@ -93,7 +92,7 @@\n \n def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:\n \"\"\"Creates new checker instance.\"\"\"\n- self.tree = maybe_set_parent(tree)\n+ self.tree = tree\n self.filename = filename\n \n @classmethod\ndiff --git a/wemake_python_styleguide/compat.py b/wemake_python_styleguide/compat.py\ndeleted file mode 100644\n--- a/wemake_python_styleguide/compat.py\n+++ /dev/null\n@@ -1,19 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-\"\"\"\n-This module contains ugly hacks and fixes for version compat issues.\n-\n-Do not be over-exited to add anything here.\n-\"\"\"\n-\n-import ast\n-\n-\n-def maybe_set_parent(tree: ast.AST) -> ast.AST:\n- \"\"\"Sets parents for all nodes that do not have this prop.\"\"\"\n- for statement in ast.walk(tree):\n- for child in ast.iter_child_nodes(statement):\n- if not hasattr(child, 'parent'): # noqa: Z113\n- setattr(child, 'parent', statement)\n-\n- return tree\n", "issue": "`python3.7` raises `flake8` warning\nIt is a bug in `flake8`:\r\n- https://github.com/PyCQA/pycodestyle/issues/728\r\n\r\nWe currently allow `python3.7` build to fail.\r\n\r\n```\r\n=============================== warnings summary ===============================\r\ntests/test_visitors/test_wrong_class/test_base_class.py::FLAKE8\r\n /home/travis/virtualenv/python3.7.0/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1\r\n EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')\r\n-- Docs: https://docs.pytest.org/en/latest/warnings.html\r\n=================== 1514 passed, 1 warnings in 27.96 seconds ===================\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains ugly hacks and fixes for version compat issues.\n\nDo not be over-exited to add anything here.\n\"\"\"\n\nimport ast\n\n\ndef maybe_set_parent(tree: ast.AST) -> ast.AST:\n \"\"\"Sets parents for all nodes that do not have this prop.\"\"\"\n for statement in ast.walk(tree):\n for child in ast.iter_child_nodes(statement):\n if not hasattr(child, 'parent'): # noqa: Z113\n setattr(child, 'parent', statement)\n\n return tree\n", "path": "wemake_python_styleguide/compat.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator\n\nfrom flake8.options.manager import OptionManager\n\nfrom wemake_python_styleguide import constants\nfrom wemake_python_styleguide.compat import maybe_set_parent\nfrom wemake_python_styleguide.options.config import Configuration\nfrom wemake_python_styleguide.types import (\n CheckerSequence,\n CheckResult,\n ConfigurationOptions,\n)\nfrom wemake_python_styleguide.version import version\nfrom wemake_python_styleguide.visitors.ast.complexity.counts import (\n MethodMembersVisitor,\n ModuleMembersVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.function import (\n FunctionComplexityVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.nested import (\n NestedComplexityVisitor,\n)\nfrom 
wemake_python_styleguide.visitors.ast.complexity.offset import (\n OffsetVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_class import WrongClassVisitor\nfrom wemake_python_styleguide.visitors.ast.wrong_contents import (\n WrongContentsVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_import import (\n WrongImportVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_string import (\n WrongStringVisitor,\n)\nfrom wemake_python_styleguide.visitors.filenames.wrong_module_name import (\n WrongModuleNameVisitor,\n)\n\n#: Visitors that should be working by default:\nENABLED_VISITORS: CheckerSequence = [\n # Styling and correctness:\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n WrongClassVisitor,\n WrongStringVisitor,\n WrongContentsVisitor,\n\n # Complexity:\n FunctionComplexityVisitor,\n NestedComplexityVisitor,\n OffsetVisitor,\n ModuleMembersVisitor,\n MethodMembersVisitor,\n\n # Modules:\n WrongModuleNameVisitor,\n]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all checks that are bundled with this package.\n If you want to add new checks they should be added to ``ENABLED_VISITORS``.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = version\n\n config = Configuration()\n options: ConfigurationOptions\n\n def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = maybe_set_parent(tree)\n self.filename = filename\n\n @classmethod\n def add_options(cls, parser: OptionManager) -> None:\n \"\"\"Calls Configuration instance method for registering options.\"\"\"\n cls.config.register_options(parser)\n\n @classmethod\n def parse_options(cls, options: ConfigurationOptions) -> None:\n \"\"\"Parses registered options for providing to the visitor.\"\"\"\n cls.options = options\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n After all configuration is parsed and passed.\n \"\"\"\n for visitor_class in ENABLED_VISITORS:\n visitor = visitor_class(\n self.options,\n tree=self.tree,\n filename=self.filename,\n )\n visitor.run()\n\n for error in visitor.errors:\n yield (*error.node_items(), type(self))\n", "path": "wemake_python_styleguide/checker.py"}], "after_files": [{"content": null, "path": "wemake_python_styleguide/compat.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator\n\nfrom flake8.options.manager import OptionManager\n\nfrom wemake_python_styleguide import constants\nfrom wemake_python_styleguide.options.config import Configuration\nfrom wemake_python_styleguide.types import (\n CheckerSequence,\n CheckResult,\n ConfigurationOptions,\n)\nfrom wemake_python_styleguide.version import version\nfrom wemake_python_styleguide.visitors.ast.complexity.counts import (\n MethodMembersVisitor,\n ModuleMembersVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.function import (\n FunctionComplexityVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.nested import (\n 
NestedComplexityVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.offset import (\n OffsetVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_class import WrongClassVisitor\nfrom wemake_python_styleguide.visitors.ast.wrong_contents import (\n WrongContentsVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_import import (\n WrongImportVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_string import (\n WrongStringVisitor,\n)\nfrom wemake_python_styleguide.visitors.filenames.wrong_module_name import (\n WrongModuleNameVisitor,\n)\n\n#: Visitors that should be working by default:\nENABLED_VISITORS: CheckerSequence = [\n # Styling and correctness:\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n WrongClassVisitor,\n WrongStringVisitor,\n WrongContentsVisitor,\n\n # Complexity:\n FunctionComplexityVisitor,\n NestedComplexityVisitor,\n OffsetVisitor,\n ModuleMembersVisitor,\n MethodMembersVisitor,\n\n # Modules:\n WrongModuleNameVisitor,\n]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all checks that are bundled with this package.\n If you want to add new checks they should be added to ``ENABLED_VISITORS``.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = version\n\n config = Configuration()\n options: ConfigurationOptions\n\n def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = tree\n self.filename = filename\n\n @classmethod\n def add_options(cls, parser: OptionManager) -> None:\n \"\"\"Calls Configuration instance method for registering options.\"\"\"\n cls.config.register_options(parser)\n\n @classmethod\n def parse_options(cls, options: ConfigurationOptions) -> None:\n \"\"\"Parses registered options for providing to the visitor.\"\"\"\n cls.options = options\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n After all configuration is parsed and passed.\n \"\"\"\n for visitor_class in ENABLED_VISITORS:\n visitor = visitor_class(\n self.options,\n tree=self.tree,\n filename=self.filename,\n )\n visitor.run()\n\n for error in visitor.errors:\n yield (*error.node_items(), type(self))\n", "path": "wemake_python_styleguide/checker.py"}]} | 1,678 | 381 |
gh_patches_debug_28846 | rasdani/github-patches | git_diff | mozilla__pontoon-2416 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove aurora redirects
I just looked at our root urls.py, and saw a bunch of aurora-related redirects.
It's been ... a decade or so, let's get rid of them.
CC @flodolo
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/urls.py`
Content:
```
1 from django.urls import include, path, register_converter
2 from django.urls.converters import StringConverter
3 from django.contrib import admin
4 from django.contrib.auth import logout
5 from django.views.generic import RedirectView, TemplateView
6
7 from pontoon.teams.views import team
8
9
10 class LocaleConverter(StringConverter):
11 regex = r"[A-Za-z0-9\-\@\.]+"
12
13
14 register_converter(LocaleConverter, "locale")
15
16 pontoon_js_view = TemplateView.as_view(
17 template_name="js/pontoon.js", content_type="text/javascript"
18 )
19
20 permission_denied_view = TemplateView.as_view(template_name="403.html")
21 page_not_found_view = TemplateView.as_view(template_name="404.html")
22 server_error_view = TemplateView.as_view(template_name="500.html")
23
24 urlpatterns = [
25 # Redirect legacy Aurora projects
26 path(
27 "projects/firefox-aurora/<path:url>",
28 RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
29 ),
30 path(
31 "projects/firefox-for-android-aurora/<path:url>",
32 RedirectView.as_view(
33 url="/projects/firefox-for-android/%(url)s", permanent=True
34 ),
35 ),
36 path(
37 "projects/thunderbird-aurora/<path:url>",
38 RedirectView.as_view(url="/projects/thunderbird/%(url)s", permanent=True),
39 ),
40 path(
41 "projects/lightning-aurora/<path:url>",
42 RedirectView.as_view(url="/projects/lightning/%(url)s", permanent=True),
43 ),
44 path(
45 "projects/seamonkey-aurora/<path:url>",
46 RedirectView.as_view(url="/projects/seamonkey/%(url)s", permanent=True),
47 ),
48 path(
49 "<locale:locale>/firefox-aurora/<path:url>",
50 RedirectView.as_view(url="/%(locale)s/firefox/%(url)s", permanent=True),
51 ),
52 path(
53 "<locale:locale>/firefox-for-android-aurora/<path:url>",
54 RedirectView.as_view(
55 url="/%(locale)s/firefox-for-android/%(url)s", permanent=True
56 ),
57 ),
58 path(
59 "<locale:locale>/thunderbird-aurora/<path:url>",
60 RedirectView.as_view(url="/%(locale)s/thunderbird/%(url)s", permanent=True),
61 ),
62 path(
63 "<locale:locale>/lightning-aurora/<path:url>",
64 RedirectView.as_view(url="/%(locale)s/lightning/%(url)s", permanent=True),
65 ),
66 path(
67 "<locale:locale>/seamonkey-aurora/<path:url>",
68 RedirectView.as_view(url="/%(locale)s/seamonkey/%(url)s", permanent=True),
69 ),
70 # Accounts
71 path("accounts/", include("pontoon.allauth_urls")),
72 # Admin
73 path("admin/", include("pontoon.administration.urls")),
74 # Django admin: Disable the login form
75 path("a/login/", permission_denied_view),
76 # Django admin
77 path("a/", admin.site.urls),
78 # Logout
79 path("signout/", logout, {"next_page": "/"}, name="signout"),
80 # Error pages
81 path("403/", permission_denied_view),
82 path("404/", page_not_found_view),
83 path("500/", server_error_view),
84 # Robots.txt
85 path(
86 "robots.txt",
87 TemplateView.as_view(template_name="robots.txt", content_type="text/plain"),
88 ),
89 # contribute.json
90 path(
91 "contribute.json",
92 TemplateView.as_view(
93 template_name="contribute.json", content_type="text/plain"
94 ),
95 ),
96 # Favicon
97 path(
98 "favicon.ico",
99 RedirectView.as_view(url="/static/img/favicon.ico", permanent=True),
100 ),
101 # Include script
102 path("pontoon.js", pontoon_js_view),
103 path("static/js/pontoon.js", pontoon_js_view),
104 # Include URL configurations from installed apps
105 path("terminology/", include("pontoon.terminology.urls")),
106 path("translations/", include("pontoon.translations.urls")),
107 path("", include("pontoon.teams.urls")),
108 path("", include("pontoon.tour.urls")),
109 path("", include("pontoon.tags.urls")),
110 path("", include("pontoon.sync.urls")),
111 path("", include("pontoon.projects.urls")),
112 path("", include("pontoon.machinery.urls")),
113 path("", include("pontoon.contributors.urls")),
114 path("", include("pontoon.localizations.urls")),
115 path("", include("pontoon.base.urls")),
116 path("", include("pontoon.translate.urls")),
117 path("", include("pontoon.batch.urls")),
118 path("", include("pontoon.api.urls")),
119 path("", include("pontoon.homepage.urls")),
120 path("", include("pontoon.in_context.urls")),
121 path("", include("pontoon.uxactionlog.urls")),
122 # Team page: Must be at the end
123 path("<locale:locale>/", team, name="pontoon.teams.team"),
124 ]
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/urls.py b/pontoon/urls.py
--- a/pontoon/urls.py
+++ b/pontoon/urls.py
@@ -22,51 +22,6 @@
server_error_view = TemplateView.as_view(template_name="500.html")
urlpatterns = [
- # Redirect legacy Aurora projects
- path(
- "projects/firefox-aurora/<path:url>",
- RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
- ),
- path(
- "projects/firefox-for-android-aurora/<path:url>",
- RedirectView.as_view(
- url="/projects/firefox-for-android/%(url)s", permanent=True
- ),
- ),
- path(
- "projects/thunderbird-aurora/<path:url>",
- RedirectView.as_view(url="/projects/thunderbird/%(url)s", permanent=True),
- ),
- path(
- "projects/lightning-aurora/<path:url>",
- RedirectView.as_view(url="/projects/lightning/%(url)s", permanent=True),
- ),
- path(
- "projects/seamonkey-aurora/<path:url>",
- RedirectView.as_view(url="/projects/seamonkey/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/firefox-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/firefox/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/firefox-for-android-aurora/<path:url>",
- RedirectView.as_view(
- url="/%(locale)s/firefox-for-android/%(url)s", permanent=True
- ),
- ),
- path(
- "<locale:locale>/thunderbird-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/thunderbird/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/lightning-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/lightning/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/seamonkey-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/seamonkey/%(url)s", permanent=True),
- ),
# Accounts
path("accounts/", include("pontoon.allauth_urls")),
# Admin
| {"golden_diff": "diff --git a/pontoon/urls.py b/pontoon/urls.py\n--- a/pontoon/urls.py\n+++ b/pontoon/urls.py\n@@ -22,51 +22,6 @@\n server_error_view = TemplateView.as_view(template_name=\"500.html\")\n \n urlpatterns = [\n- # Redirect legacy Aurora projects\n- path(\n- \"projects/firefox-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/firefox/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/firefox-for-android-aurora/<path:url>\",\n- RedirectView.as_view(\n- url=\"/projects/firefox-for-android/%(url)s\", permanent=True\n- ),\n- ),\n- path(\n- \"projects/thunderbird-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/thunderbird/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/lightning-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/lightning/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/seamonkey-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/seamonkey/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/firefox-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/firefox/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/firefox-for-android-aurora/<path:url>\",\n- RedirectView.as_view(\n- url=\"/%(locale)s/firefox-for-android/%(url)s\", permanent=True\n- ),\n- ),\n- path(\n- \"<locale:locale>/thunderbird-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/thunderbird/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/lightning-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/lightning/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/seamonkey-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/seamonkey/%(url)s\", permanent=True),\n- ),\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n", "issue": "Remove aurora redirects\nI just looked at our root urls.py, and saw a bunch of aurora-related redirects.\r\n\r\nIt's been ... 
a decade or so, let's get rid of them.\r\n\r\nCC @flodolo \n", "before_files": [{"content": "from django.urls import include, path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.contrib import admin\nfrom django.contrib.auth import logout\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom pontoon.teams.views import team\n\n\nclass LocaleConverter(StringConverter):\n regex = r\"[A-Za-z0-9\\-\\@\\.]+\"\n\n\nregister_converter(LocaleConverter, \"locale\")\n\npontoon_js_view = TemplateView.as_view(\n template_name=\"js/pontoon.js\", content_type=\"text/javascript\"\n)\n\npermission_denied_view = TemplateView.as_view(template_name=\"403.html\")\npage_not_found_view = TemplateView.as_view(template_name=\"404.html\")\nserver_error_view = TemplateView.as_view(template_name=\"500.html\")\n\nurlpatterns = [\n # Redirect legacy Aurora projects\n path(\n \"projects/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"projects/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/projects/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"projects/thunderbird-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"projects/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"projects/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/seamonkey/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/%(locale)s/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"<locale:locale>/thunderbird-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/seamonkey/%(url)s\", permanent=True),\n ),\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n path(\"admin/\", include(\"pontoon.administration.urls\")),\n # Django admin: Disable the login form\n path(\"a/login/\", permission_denied_view),\n # Django admin\n path(\"a/\", admin.site.urls),\n # Logout\n path(\"signout/\", logout, {\"next_page\": \"/\"}, name=\"signout\"),\n # Error pages\n path(\"403/\", permission_denied_view),\n path(\"404/\", page_not_found_view),\n path(\"500/\", server_error_view),\n # Robots.txt\n path(\n \"robots.txt\",\n TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),\n ),\n # contribute.json\n path(\n \"contribute.json\",\n TemplateView.as_view(\n template_name=\"contribute.json\", content_type=\"text/plain\"\n ),\n ),\n # Favicon\n path(\n \"favicon.ico\",\n RedirectView.as_view(url=\"/static/img/favicon.ico\", permanent=True),\n ),\n # Include script\n path(\"pontoon.js\", pontoon_js_view),\n path(\"static/js/pontoon.js\", pontoon_js_view),\n # Include URL configurations from installed apps\n path(\"terminology/\", include(\"pontoon.terminology.urls\")),\n path(\"translations/\", include(\"pontoon.translations.urls\")),\n path(\"\", 
include(\"pontoon.teams.urls\")),\n path(\"\", include(\"pontoon.tour.urls\")),\n path(\"\", include(\"pontoon.tags.urls\")),\n path(\"\", include(\"pontoon.sync.urls\")),\n path(\"\", include(\"pontoon.projects.urls\")),\n path(\"\", include(\"pontoon.machinery.urls\")),\n path(\"\", include(\"pontoon.contributors.urls\")),\n path(\"\", include(\"pontoon.localizations.urls\")),\n path(\"\", include(\"pontoon.base.urls\")),\n path(\"\", include(\"pontoon.translate.urls\")),\n path(\"\", include(\"pontoon.batch.urls\")),\n path(\"\", include(\"pontoon.api.urls\")),\n path(\"\", include(\"pontoon.homepage.urls\")),\n path(\"\", include(\"pontoon.in_context.urls\")),\n path(\"\", include(\"pontoon.uxactionlog.urls\")),\n # Team page: Must be at the end\n path(\"<locale:locale>/\", team, name=\"pontoon.teams.team\"),\n]\n", "path": "pontoon/urls.py"}], "after_files": [{"content": "from django.urls import include, path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.contrib import admin\nfrom django.contrib.auth import logout\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom pontoon.teams.views import team\n\n\nclass LocaleConverter(StringConverter):\n regex = r\"[A-Za-z0-9\\-\\@\\.]+\"\n\n\nregister_converter(LocaleConverter, \"locale\")\n\npontoon_js_view = TemplateView.as_view(\n template_name=\"js/pontoon.js\", content_type=\"text/javascript\"\n)\n\npermission_denied_view = TemplateView.as_view(template_name=\"403.html\")\npage_not_found_view = TemplateView.as_view(template_name=\"404.html\")\nserver_error_view = TemplateView.as_view(template_name=\"500.html\")\n\nurlpatterns = [\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n path(\"admin/\", include(\"pontoon.administration.urls\")),\n # Django admin: Disable the login form\n path(\"a/login/\", permission_denied_view),\n # Django admin\n path(\"a/\", admin.site.urls),\n # Logout\n path(\"signout/\", logout, {\"next_page\": \"/\"}, name=\"signout\"),\n # Error pages\n path(\"403/\", permission_denied_view),\n path(\"404/\", page_not_found_view),\n path(\"500/\", server_error_view),\n # Robots.txt\n path(\n \"robots.txt\",\n TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),\n ),\n # contribute.json\n path(\n \"contribute.json\",\n TemplateView.as_view(\n template_name=\"contribute.json\", content_type=\"text/plain\"\n ),\n ),\n # Favicon\n path(\n \"favicon.ico\",\n RedirectView.as_view(url=\"/static/img/favicon.ico\", permanent=True),\n ),\n # Include script\n path(\"pontoon.js\", pontoon_js_view),\n path(\"static/js/pontoon.js\", pontoon_js_view),\n # Include URL configurations from installed apps\n path(\"terminology/\", include(\"pontoon.terminology.urls\")),\n path(\"translations/\", include(\"pontoon.translations.urls\")),\n path(\"\", include(\"pontoon.teams.urls\")),\n path(\"\", include(\"pontoon.tour.urls\")),\n path(\"\", include(\"pontoon.tags.urls\")),\n path(\"\", include(\"pontoon.sync.urls\")),\n path(\"\", include(\"pontoon.projects.urls\")),\n path(\"\", include(\"pontoon.machinery.urls\")),\n path(\"\", include(\"pontoon.contributors.urls\")),\n path(\"\", include(\"pontoon.localizations.urls\")),\n path(\"\", include(\"pontoon.base.urls\")),\n path(\"\", include(\"pontoon.translate.urls\")),\n path(\"\", include(\"pontoon.batch.urls\")),\n path(\"\", include(\"pontoon.api.urls\")),\n path(\"\", include(\"pontoon.homepage.urls\")),\n path(\"\", include(\"pontoon.in_context.urls\")),\n path(\"\", 
include(\"pontoon.uxactionlog.urls\")),\n # Team page: Must be at the end\n path(\"<locale:locale>/\", team, name=\"pontoon.teams.team\"),\n]\n", "path": "pontoon/urls.py"}]} | 1,661 | 540 |
gh_patches_debug_1707 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing AWS RDS CA in CKV_AWS_211
**Describe the issue**
In check CKV_AWS_211, checkov currently only checks for one possible CA on AWS RDS instances, namely `rds-ca-2019` (see [associated code](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py#L24)) whereas RDS supports several (see [AWS docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificateAuthorities)). The check should accept those CAs: `rds-ca-rsa2048-g1`, `rds-ca-rsa4096-g1` and `rds-ca-ecc384-g1`.
**Examples**
Terraform code on which the check should pass:
```terraform
resource "aws_db_instance" "pass3" {
allocated_storage = 20
storage_type = "gp2"
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
db_name = "mydb"
username = "foo"
password = "foobarbaz"
iam_database_authentication_enabled = true
storage_encrypted = true
ca_cert_identifier = "rds-ca-rsa2048-g1"
}
```
When I run checkov on this Terraform example, the check fails, whereas it should pass:
```
Check: CKV_AWS_211: "Ensure RDS uses a modern CaCert"
FAILED for resource: aws_db_instance.pass3
File: /main.tf:43-55
Guide: https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/aws-policies/aws-general-policies/ensure-aws-rds-uses-a-modern-cacert.html
43 | resource "aws_db_instance" "pass3" {
44 | allocated_storage = 20
45 | storage_type = "gp2"
46 | engine = "mysql"
47 | engine_version = "5.7"
48 | instance_class = "db.t2.micro"
49 | db_name = "mydb"
50 | username = "foo"
51 | password = "foobarbaz"
52 | iam_database_authentication_enabled = true
53 | storage_encrypted = true
54 | ca_cert_identifier = "rds-ca-rsa2048-g1"
55 | }
```
**Version (please complete the following information):**
- Checkov Version 2.0.930
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from typing import Any, List
4
5
6 class RDSCACertIsRecent(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure RDS uses a modern CaCert"
9 id = "CKV_AWS_211"
10 supported_resources = ["aws_db_instance"]
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(
13 name=name,
14 id=id,
15 categories=categories,
16 supported_resources=supported_resources,
17 missing_block_result=CheckResult.PASSED
18 )
19
20 def get_inspected_key(self) -> str:
21 return "ca_cert_identifier"
22
23 def get_expected_values(self) -> List[Any]:
24 return ["rds-ca-2019"]
25
26
27 check = RDSCACertIsRecent()
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
--- a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
+++ b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
@@ -21,7 +21,7 @@
return "ca_cert_identifier"
def get_expected_values(self) -> List[Any]:
- return ["rds-ca-2019"]
+ return ["rds-ca-rsa2048-g1", "rds-ca-rsa4096-g1", "rds-ca-ecc384-g1"]
check = RDSCACertIsRecent()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n--- a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n+++ b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n@@ -21,7 +21,7 @@\n return \"ca_cert_identifier\"\n \n def get_expected_values(self) -> List[Any]:\n- return [\"rds-ca-2019\"]\n+ return [\"rds-ca-rsa2048-g1\", \"rds-ca-rsa4096-g1\", \"rds-ca-ecc384-g1\"]\n \n \n check = RDSCACertIsRecent()\n", "issue": "Missing AWS RDS CA in CKV_AWS_211\n**Describe the issue**\r\nIn check CKV_AWS_211, checkov currently only checks for one possible CA on AWS RDS instances, namely `rds-ca-2019` (see [associated code](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py#L24)) whereas RDS supports several (see [AWS docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificateAuthorities)). The check should accept those CAs: `rds-ca-rsa2048-g1`, `rds-ca-rsa4096-g1` and `rds-ca-ecc384-g1`.\r\n\r\n**Examples**\r\nTerraform code on which the check should pass:\r\n```terraform\r\nresource \"aws_db_instance\" \"pass3\" {\r\n allocated_storage = 20\r\n storage_type = \"gp2\"\r\n engine = \"mysql\"\r\n engine_version = \"5.7\"\r\n instance_class = \"db.t2.micro\"\r\n db_name = \"mydb\"\r\n username = \"foo\"\r\n password = \"foobarbaz\"\r\n iam_database_authentication_enabled = true\r\n storage_encrypted = true\r\n ca_cert_identifier = \"rds-ca-rsa2048-g1\"\r\n}\r\n```\r\nWhen I run checkov on this Terraform example, I get an error whereas the test should pass:\r\n\r\n```\r\nCheck: CKV_AWS_211: \"Ensure RDS uses a modern CaCert\"\r\n\tFAILED for resource: aws_db_instance.pass3\r\n\tFile: /main.tf:43-55\r\n\tGuide: https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/aws-policies/aws-general-policies/ensure-aws-rds-uses-a-modern-cacert.html\r\n\r\n\t\t43 | resource \"aws_db_instance\" \"pass3\" {\r\n\t\t44 | allocated_storage = 20\r\n\t\t45 | storage_type = \"gp2\"\r\n\t\t46 | engine = \"mysql\"\r\n\t\t47 | engine_version = \"5.7\"\r\n\t\t48 | instance_class = \"db.t2.micro\"\r\n\t\t49 | db_name = \"mydb\"\r\n\t\t50 | username = \"foo\"\r\n\t\t51 | password = \"foobarbaz\"\r\n\t\t52 | iam_database_authentication_enabled = true\r\n\t\t53 | storage_encrypted = true\r\n\t\t54 | ca_cert_identifier = \"rds-ca-rsa2048-g1\"\r\n\t\t55 | }\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.930\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom typing import Any, List\n\n\nclass RDSCACertIsRecent(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure RDS uses a modern CaCert\"\n id = \"CKV_AWS_211\"\n supported_resources = [\"aws_db_instance\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n missing_block_result=CheckResult.PASSED\n )\n\n def get_inspected_key(self) -> str:\n return \"ca_cert_identifier\"\n\n def get_expected_values(self) -> List[Any]:\n return [\"rds-ca-2019\"]\n\n\ncheck = RDSCACertIsRecent()\n", "path": "checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py"}], 
"after_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom typing import Any, List\n\n\nclass RDSCACertIsRecent(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure RDS uses a modern CaCert\"\n id = \"CKV_AWS_211\"\n supported_resources = [\"aws_db_instance\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n missing_block_result=CheckResult.PASSED\n )\n\n def get_inspected_key(self) -> str:\n return \"ca_cert_identifier\"\n\n def get_expected_values(self) -> List[Any]:\n return [\"rds-ca-rsa2048-g1\", \"rds-ca-rsa4096-g1\", \"rds-ca-ecc384-g1\"]\n\n\ncheck = RDSCACertIsRecent()\n", "path": "checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py"}]} | 1,166 | 171 |
gh_patches_debug_42294 | rasdani/github-patches | git_diff | lightly-ai__lightly-491 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lightly-Crop: memory leak
When using lightly-crop, some users experience a memory leak.
- [ ] Try to reproduce it.
- [ ] Fix it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightly/utils/cropping/crop_image_by_bounding_boxes.py`
Content:
```
1 import os.path
2 import warnings
3 from pathlib import Path
4 from typing import List
5
6 from PIL import Image
7 from tqdm import tqdm
8
9 from lightly.active_learning.utils import BoundingBox
10 from lightly.data import LightlyDataset
11
12
13 def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:
14 image = Image.open(image_filepath)
15 cropped_images = []
16 for bbox in bounding_boxes:
17 w, h = image.size
18 crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
19 crop_box = tuple(int(i) for i in crop_box)
20 cropped_image = image.crop(crop_box)
21 cropped_images.append(cropped_image)
22 return cropped_images
23
24
25 def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,
26 output_dir: str,
27 bounding_boxes_list_list: List[List[BoundingBox]],
28 class_indices_list_list: List[List[int]],
29 class_names: List[str] = None
30 ) -> List[List[str]]:
31 """Crops all images in a dataset by the bounding boxes and saves them in the output dir
32
33 Args:
34 dataset:
35 The dataset with the images to be cropped. Must contain M images.
36 output_dir:
37 The output directory to saved the cropped images to.
38 bounding_boxes_list_list:
39 The bounding boxes of the detections for each image. Must have M sublists, one for each image.
40 Each sublist contains the bounding boxes for each detection, thus N_m elements.
41 class_indices_list_list:
42 The object class ids of the detections for each image. Must have M sublists, one for each image.
43 Each sublist contains the bounding boxes for each detection, thus N_m elements.
44 class_names:
45 The names of the classes, used to map the class id to the class name.
46
47
48 Returns:
49 The filepaths to all saved cropped images. Has M sublists, one for each image.
50 Each sublist contains the filepath of the crop each detection, thus N_m elements.
51
52 """
53 filenames_images = dataset.get_filenames()
54 if len(filenames_images) != len(bounding_boxes_list_list) or len(filenames_images) != len(class_indices_list_list):
55 raise ValueError("There must be one bounding box and class index list for each image in the datasets,"
56 "but the lengths dont align.")
57
58 cropped_image_filepath_list_list: List[List[Image]] = []
59
60
61 print(f"Cropping objects out of {len(filenames_images)} images...")
62 for filename_image, class_indices, bounding_boxes in \
63 tqdm(zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)):
64
65 if not len(class_indices) == len(bounding_boxes):
66 warnings.warn(UserWarning(f"Length of class indices ({len(class_indices)} does not equal length of bounding boxes"
67 f"({len(bounding_boxes)}. This is an error in the input arguments. "
68 f"Skipping this image {filename_image}."))
69 continue
70
71 filepath_image = dataset.get_filepath_from_filename(filename_image)
72 filepath_image_base, image_extension = os.path.splitext(filepath_image)
73
74 filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')
75 Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)
76
77 cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)
78 cropped_images_filepaths = []
79 for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):
80 if class_names:
81 class_name = class_names[class_index]
82 else:
83 class_name = f"class{class_index}"
84 cropped_image_last_filename = f'{index}_{class_name}{image_extension}'
85 cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)
86 cropped_image.save(cropped_image_filepath)
87
88 cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)
89 cropped_images_filepaths.append(cropped_image_filename)
90
91 cropped_image_filepath_list_list.append(cropped_images_filepaths)
92
93 return cropped_image_filepath_list_list
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightly/utils/cropping/crop_image_by_bounding_boxes.py b/lightly/utils/cropping/crop_image_by_bounding_boxes.py
--- a/lightly/utils/cropping/crop_image_by_bounding_boxes.py
+++ b/lightly/utils/cropping/crop_image_by_bounding_boxes.py
@@ -10,18 +10,6 @@
from lightly.data import LightlyDataset
-def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:
- image = Image.open(image_filepath)
- cropped_images = []
- for bbox in bounding_boxes:
- w, h = image.size
- crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
- crop_box = tuple(int(i) for i in crop_box)
- cropped_image = image.crop(crop_box)
- cropped_images.append(cropped_image)
- return cropped_images
-
-
def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,
output_dir: str,
bounding_boxes_list_list: List[List[BoundingBox]],
@@ -55,7 +43,7 @@
raise ValueError("There must be one bounding box and class index list for each image in the datasets,"
"but the lengths dont align.")
- cropped_image_filepath_list_list: List[List[Image]] = []
+ cropped_image_filepath_list_list: List[List[str]] = []
print(f"Cropping objects out of {len(filenames_images)} images...")
@@ -71,21 +59,38 @@
filepath_image = dataset.get_filepath_from_filename(filename_image)
filepath_image_base, image_extension = os.path.splitext(filepath_image)
- filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')
+ filepath_out_dir = os.path.join(output_dir, filename_image)\
+ .replace(image_extension, '')
Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)
- cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)
+ image = Image.open(filepath_image)
+
cropped_images_filepaths = []
- for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):
+ # For every image, crop out multiple cropped images, one for each
+ # bounding box
+ for index, (class_index, bbox) in \
+ enumerate((zip(class_indices, bounding_boxes))):
+
+ # determine the filename and filepath of the cropped image
if class_names:
class_name = class_names[class_index]
else:
class_name = f"class{class_index}"
cropped_image_last_filename = f'{index}_{class_name}{image_extension}'
cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)
+
+ # crop out the image and save it
+ w, h = image.size
+ crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
+ crop_box = tuple(int(i) for i in crop_box)
+ cropped_image = image.crop(crop_box)
cropped_image.save(cropped_image_filepath)
- cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)
+ # add the filename of the cropped image to the corresponding list
+ cropped_image_filename: str = os.path.join(
+ filename_image.replace(image_extension, ''),
+ cropped_image_last_filename
+ )
cropped_images_filepaths.append(cropped_image_filename)
cropped_image_filepath_list_list.append(cropped_images_filepaths)
| {"golden_diff": "diff --git a/lightly/utils/cropping/crop_image_by_bounding_boxes.py b/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n--- a/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n+++ b/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n@@ -10,18 +10,6 @@\n from lightly.data import LightlyDataset\n \n \n-def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:\n- image = Image.open(image_filepath)\n- cropped_images = []\n- for bbox in bounding_boxes:\n- w, h = image.size\n- crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n- crop_box = tuple(int(i) for i in crop_box)\n- cropped_image = image.crop(crop_box)\n- cropped_images.append(cropped_image)\n- return cropped_images\n-\n-\n def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,\n output_dir: str,\n bounding_boxes_list_list: List[List[BoundingBox]],\n@@ -55,7 +43,7 @@\n raise ValueError(\"There must be one bounding box and class index list for each image in the datasets,\"\n \"but the lengths dont align.\")\n \n- cropped_image_filepath_list_list: List[List[Image]] = []\n+ cropped_image_filepath_list_list: List[List[str]] = []\n \n \n print(f\"Cropping objects out of {len(filenames_images)} images...\")\n@@ -71,21 +59,38 @@\n filepath_image = dataset.get_filepath_from_filename(filename_image)\n filepath_image_base, image_extension = os.path.splitext(filepath_image)\n \n- filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')\n+ filepath_out_dir = os.path.join(output_dir, filename_image)\\\n+ .replace(image_extension, '')\n Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)\n \n- cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)\n+ image = Image.open(filepath_image)\n+ \n cropped_images_filepaths = []\n- for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):\n+ # For every image, crop out multiple cropped images, one for each\n+ # bounding box\n+ for index, (class_index, bbox) in \\\n+ enumerate((zip(class_indices, bounding_boxes))):\n+\n+ # determine the filename and filepath of the cropped image\n if class_names:\n class_name = class_names[class_index]\n else:\n class_name = f\"class{class_index}\"\n cropped_image_last_filename = f'{index}_{class_name}{image_extension}'\n cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)\n+\n+ # crop out the image and save it\n+ w, h = image.size\n+ crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n+ crop_box = tuple(int(i) for i in crop_box)\n+ cropped_image = image.crop(crop_box)\n cropped_image.save(cropped_image_filepath)\n \n- cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)\n+ # add the filename of the cropped image to the corresponding list\n+ cropped_image_filename: str = os.path.join(\n+ filename_image.replace(image_extension, ''),\n+ cropped_image_last_filename\n+ )\n cropped_images_filepaths.append(cropped_image_filename)\n \n cropped_image_filepath_list_list.append(cropped_images_filepaths)\n", "issue": "Lightly-Crop: memory leak\nWhen using lightly-crop some users experience a memory leak.\r\n\r\n- [ ] Try to reproduce it.\r\n- [ ] Fix it\n", "before_files": [{"content": "import os.path\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom lightly.active_learning.utils import BoundingBox\nfrom 
lightly.data import LightlyDataset\n\n\ndef crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:\n image = Image.open(image_filepath)\n cropped_images = []\n for bbox in bounding_boxes:\n w, h = image.size\n crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n crop_box = tuple(int(i) for i in crop_box)\n cropped_image = image.crop(crop_box)\n cropped_images.append(cropped_image)\n return cropped_images\n\n\ndef crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,\n output_dir: str,\n bounding_boxes_list_list: List[List[BoundingBox]],\n class_indices_list_list: List[List[int]],\n class_names: List[str] = None\n ) -> List[List[str]]:\n \"\"\"Crops all images in a dataset by the bounding boxes and saves them in the output dir\n\n Args:\n dataset:\n The dataset with the images to be cropped. Must contain M images.\n output_dir:\n The output directory to saved the cropped images to.\n bounding_boxes_list_list:\n The bounding boxes of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_indices_list_list:\n The object class ids of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_names:\n The names of the classes, used to map the class id to the class name.\n\n\n Returns:\n The filepaths to all saved cropped images. Has M sublists, one for each image.\n Each sublist contains the filepath of the crop each detection, thus N_m elements.\n\n \"\"\"\n filenames_images = dataset.get_filenames()\n if len(filenames_images) != len(bounding_boxes_list_list) or len(filenames_images) != len(class_indices_list_list):\n raise ValueError(\"There must be one bounding box and class index list for each image in the datasets,\"\n \"but the lengths dont align.\")\n\n cropped_image_filepath_list_list: List[List[Image]] = []\n\n\n print(f\"Cropping objects out of {len(filenames_images)} images...\")\n for filename_image, class_indices, bounding_boxes in \\\n tqdm(zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)):\n\n if not len(class_indices) == len(bounding_boxes):\n warnings.warn(UserWarning(f\"Length of class indices ({len(class_indices)} does not equal length of bounding boxes\"\n f\"({len(bounding_boxes)}. This is an error in the input arguments. 
\"\n f\"Skipping this image {filename_image}.\"))\n continue\n\n filepath_image = dataset.get_filepath_from_filename(filename_image)\n filepath_image_base, image_extension = os.path.splitext(filepath_image)\n\n filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')\n Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)\n\n cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)\n cropped_images_filepaths = []\n for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):\n if class_names:\n class_name = class_names[class_index]\n else:\n class_name = f\"class{class_index}\"\n cropped_image_last_filename = f'{index}_{class_name}{image_extension}'\n cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)\n cropped_image.save(cropped_image_filepath)\n\n cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)\n cropped_images_filepaths.append(cropped_image_filename)\n\n cropped_image_filepath_list_list.append(cropped_images_filepaths)\n\n return cropped_image_filepath_list_list\n", "path": "lightly/utils/cropping/crop_image_by_bounding_boxes.py"}], "after_files": [{"content": "import os.path\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom lightly.active_learning.utils import BoundingBox\nfrom lightly.data import LightlyDataset\n\n\ndef crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,\n output_dir: str,\n bounding_boxes_list_list: List[List[BoundingBox]],\n class_indices_list_list: List[List[int]],\n class_names: List[str] = None\n ) -> List[List[str]]:\n \"\"\"Crops all images in a dataset by the bounding boxes and saves them in the output dir\n\n Args:\n dataset:\n The dataset with the images to be cropped. Must contain M images.\n output_dir:\n The output directory to saved the cropped images to.\n bounding_boxes_list_list:\n The bounding boxes of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_indices_list_list:\n The object class ids of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_names:\n The names of the classes, used to map the class id to the class name.\n\n\n Returns:\n The filepaths to all saved cropped images. Has M sublists, one for each image.\n Each sublist contains the filepath of the crop each detection, thus N_m elements.\n\n \"\"\"\n filenames_images = dataset.get_filenames()\n if len(filenames_images) != len(bounding_boxes_list_list) or len(filenames_images) != len(class_indices_list_list):\n raise ValueError(\"There must be one bounding box and class index list for each image in the datasets,\"\n \"but the lengths dont align.\")\n\n cropped_image_filepath_list_list: List[List[str]] = []\n\n\n print(f\"Cropping objects out of {len(filenames_images)} images...\")\n for filename_image, class_indices, bounding_boxes in \\\n tqdm(zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)):\n\n if not len(class_indices) == len(bounding_boxes):\n warnings.warn(UserWarning(f\"Length of class indices ({len(class_indices)} does not equal length of bounding boxes\"\n f\"({len(bounding_boxes)}. This is an error in the input arguments. 
\"\n f\"Skipping this image {filename_image}.\"))\n continue\n\n filepath_image = dataset.get_filepath_from_filename(filename_image)\n filepath_image_base, image_extension = os.path.splitext(filepath_image)\n\n filepath_out_dir = os.path.join(output_dir, filename_image)\\\n .replace(image_extension, '')\n Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)\n\n image = Image.open(filepath_image)\n \n cropped_images_filepaths = []\n # For every image, crop out multiple cropped images, one for each\n # bounding box\n for index, (class_index, bbox) in \\\n enumerate((zip(class_indices, bounding_boxes))):\n\n # determine the filename and filepath of the cropped image\n if class_names:\n class_name = class_names[class_index]\n else:\n class_name = f\"class{class_index}\"\n cropped_image_last_filename = f'{index}_{class_name}{image_extension}'\n cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)\n\n # crop out the image and save it\n w, h = image.size\n crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n crop_box = tuple(int(i) for i in crop_box)\n cropped_image = image.crop(crop_box)\n cropped_image.save(cropped_image_filepath)\n\n # add the filename of the cropped image to the corresponding list\n cropped_image_filename: str = os.path.join(\n filename_image.replace(image_extension, ''),\n cropped_image_last_filename\n )\n cropped_images_filepaths.append(cropped_image_filename)\n\n cropped_image_filepath_list_list.append(cropped_images_filepaths)\n\n return cropped_image_filepath_list_list\n", "path": "lightly/utils/cropping/crop_image_by_bounding_boxes.py"}]} | 1,350 | 793 |
gh_patches_debug_23013 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2015 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Project update endpoint gives internal server error
See `http://rsr.akvo.org/rest/v1/project_update/`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project_update.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from akvo.rsr.models import ProjectUpdate
9
10 from ..serializers import ProjectUpdateSerializer, ProjectUpdateExtraSerializer
11 from ..viewsets import PublicProjectViewSet
12
13 from rest_framework.decorators import api_view, permission_classes
14 from rest_framework.permissions import IsAuthenticated
15 from rest_framework.response import Response
16
17
18 class ProjectUpdateViewSet(PublicProjectViewSet):
19
20 """."""
21 queryset = ProjectUpdate.objects.select_related('project',
22 'user').prefetch_related('locations')
23 serializer_class = ProjectUpdateSerializer
24 filter_fields = {
25 'project': ['exact', ],
26 'indicator_period': ['exact', ],
27 'user': ['exact', ],
28 'uuid': ['exact', 'icontains', ],
29 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],
30 }
31
32 paginate_by_param = 'limit'
33 max_paginate_by = 1000
34
35 def get_queryset(self):
36 """
37 Allow simple filtering on selected fields.
38 We don't use the default filter_fields, because Up filters on
39 datetime for last_modified_at, and they only support a date, not datetime.
40 """
41 created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)
42 if created_at__gt is not None:
43 self.queryset = self.queryset.filter(created_at__gt=created_at__gt)
44 created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)
45 if created_at__lt is not None:
46 self.queryset = self.queryset.filter(created_at__lt=created_at__lt)
47 last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)
48 if last_modified_at__gt is not None:
49 self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)
50 last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)
51 if last_modified_at__lt is not None:
52 self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)
53 # Get updates per organisation
54 project__partners = self.request.QUERY_PARAMS.get('project__partners', None)
55 if project__partners:
56 self.queryset = self.queryset.filter(project__partners=project__partners)
57 user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)
58 if user__organisations:
59 self.queryset = self.queryset.filter(user__organisations=user__organisations)
60 return super(ProjectUpdateViewSet, self).get_queryset()
61
62
63 class ProjectUpdateExtraViewSet(PublicProjectViewSet):
64
65 """Project update extra resource."""
66
67 max_paginate_by = 30
68 paginate_by = 10
69
70 queryset = ProjectUpdate.objects.select_related(
71 'primary_location',
72 'primary_location__location_target',
73 'primary_location__location_target__project',
74 'primary_location__location_target__user',
75 'primary_location__location_target__primary_location',
76 'primary_location__location_target__country',
77 'project',
78 'user',
79 'user__organisation',
80 'user__organisation__primary_location',
81 'user__organisation__primary_location__country',
82 'user__organisation__primary_location__location_target',
83 'user__organisation__primary_location__location_target__internal_org_ids',
84
85 ).prefetch_related(
86 'user__organisations',
87 'user__organisations__primary_location',
88 'user__organisations__primary_location__country',
89 'user__organisations__primary_location__location_target')
90 serializer_class = ProjectUpdateExtraSerializer
91 filter_fields = {
92 'project': ['exact', ],
93 'indicator_period': ['exact', ],
94 'user': ['exact', ],
95 'uuid': ['exact', 'icontains', ],
96 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],
97 # These filters only accept a date, not a datetime
98 # 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
99 # 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
100 }
101
102 def get_queryset(self):
103 """
104 Allow simple filtering on selected fields.
105 We don't use the default filter_fields, because Up filters on
106 datetime for last_modified_at, and they only support a date, not datetime.
107 """
108 created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)
109 if created_at__gt is not None:
110 self.queryset = self.queryset.filter(created_at__gt=created_at__gt)
111 created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)
112 if created_at__lt is not None:
113 self.queryset = self.queryset.filter(created_at__lt=created_at__lt)
114 last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)
115 if last_modified_at__gt is not None:
116 self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)
117 last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)
118 if last_modified_at__lt is not None:
119 self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)
120 # Get updates per organisation
121 project__partners = self.request.QUERY_PARAMS.get('project__partners', None)
122 if project__partners:
123 self.queryset = self.queryset.filter(project__partners=project__partners)
124 user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)
125 if user__organisations:
126 self.queryset = self.queryset.filter(user__organisations=user__organisations)
127 return super(ProjectUpdateExtraViewSet, self).get_queryset()
128
129
130 @api_view(['POST'])
131 @permission_classes((IsAuthenticated, ))
132 def upload_indicator_update_photo(request, pk=None):
133 update = ProjectUpdate.objects.get(pk=pk)
134 user = request.user
135
136 # TODO: permissions
137
138 files = request.FILES
139
140 if 'photo' in files.keys():
141 update.photo = files['photo']
142 update.save(update_fields=['photo'])
143
144 return Response(ProjectUpdateExtraSerializer(update).data)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/project_update.py b/akvo/rest/views/project_update.py
--- a/akvo/rest/views/project_update.py
+++ b/akvo/rest/views/project_update.py
@@ -23,10 +23,8 @@
serializer_class = ProjectUpdateSerializer
filter_fields = {
'project': ['exact', ],
- 'indicator_period': ['exact', ],
'user': ['exact', ],
'uuid': ['exact', 'icontains', ],
- 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],
}
paginate_by_param = 'limit'
@@ -90,10 +88,8 @@
serializer_class = ProjectUpdateExtraSerializer
filter_fields = {
'project': ['exact', ],
- 'indicator_period': ['exact', ],
'user': ['exact', ],
'uuid': ['exact', 'icontains', ],
- 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],
# These filters only accept a date, not a datetime
# 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
# 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],
| {"golden_diff": "diff --git a/akvo/rest/views/project_update.py b/akvo/rest/views/project_update.py\n--- a/akvo/rest/views/project_update.py\n+++ b/akvo/rest/views/project_update.py\n@@ -23,10 +23,8 @@\n serializer_class = ProjectUpdateSerializer\n filter_fields = {\n 'project': ['exact', ],\n- 'indicator_period': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n- 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n }\n \n paginate_by_param = 'limit'\n@@ -90,10 +88,8 @@\n serializer_class = ProjectUpdateExtraSerializer\n filter_fields = {\n 'project': ['exact', ],\n- 'indicator_period': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n- 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n # These filters only accept a date, not a datetime\n # 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n # 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n", "issue": "Project update endpoint gives internal server error\nSee `http://rsr.akvo.org/rest/v1/project_update/`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectUpdate\n\nfrom ..serializers import ProjectUpdateSerializer, ProjectUpdateExtraSerializer\nfrom ..viewsets import PublicProjectViewSet\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nclass ProjectUpdateViewSet(PublicProjectViewSet):\n\n \"\"\".\"\"\"\n queryset = ProjectUpdate.objects.select_related('project',\n 'user').prefetch_related('locations')\n serializer_class = ProjectUpdateSerializer\n filter_fields = {\n 'project': ['exact', ],\n 'indicator_period': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n }\n\n paginate_by_param = 'limit'\n max_paginate_by = 1000\n\n def get_queryset(self):\n \"\"\"\n Allow simple filtering on selected fields.\n We don't use the default filter_fields, because Up filters on\n datetime for last_modified_at, and they only support a date, not datetime.\n \"\"\"\n created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)\n if created_at__gt is not None:\n self.queryset = self.queryset.filter(created_at__gt=created_at__gt)\n created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)\n if created_at__lt is not None:\n self.queryset = self.queryset.filter(created_at__lt=created_at__lt)\n last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)\n if last_modified_at__gt is not None:\n self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)\n last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)\n if last_modified_at__lt is not None:\n self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)\n # Get updates per organisation\n project__partners = self.request.QUERY_PARAMS.get('project__partners', None)\n if project__partners:\n self.queryset = self.queryset.filter(project__partners=project__partners)\n user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)\n if user__organisations:\n self.queryset = 
self.queryset.filter(user__organisations=user__organisations)\n return super(ProjectUpdateViewSet, self).get_queryset()\n\n\nclass ProjectUpdateExtraViewSet(PublicProjectViewSet):\n\n \"\"\"Project update extra resource.\"\"\"\n\n max_paginate_by = 30\n paginate_by = 10\n\n queryset = ProjectUpdate.objects.select_related(\n 'primary_location',\n 'primary_location__location_target',\n 'primary_location__location_target__project',\n 'primary_location__location_target__user',\n 'primary_location__location_target__primary_location',\n 'primary_location__location_target__country',\n 'project',\n 'user',\n 'user__organisation',\n 'user__organisation__primary_location',\n 'user__organisation__primary_location__country',\n 'user__organisation__primary_location__location_target',\n 'user__organisation__primary_location__location_target__internal_org_ids',\n\n ).prefetch_related(\n 'user__organisations',\n 'user__organisations__primary_location',\n 'user__organisations__primary_location__country',\n 'user__organisations__primary_location__location_target')\n serializer_class = ProjectUpdateExtraSerializer\n filter_fields = {\n 'project': ['exact', ],\n 'indicator_period': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n 'period_update': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n # These filters only accept a date, not a datetime\n # 'created_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n # 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n }\n\n def get_queryset(self):\n \"\"\"\n Allow simple filtering on selected fields.\n We don't use the default filter_fields, because Up filters on\n datetime for last_modified_at, and they only support a date, not datetime.\n \"\"\"\n created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)\n if created_at__gt is not None:\n self.queryset = self.queryset.filter(created_at__gt=created_at__gt)\n created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)\n if created_at__lt is not None:\n self.queryset = self.queryset.filter(created_at__lt=created_at__lt)\n last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)\n if last_modified_at__gt is not None:\n self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)\n last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)\n if last_modified_at__lt is not None:\n self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)\n # Get updates per organisation\n project__partners = self.request.QUERY_PARAMS.get('project__partners', None)\n if project__partners:\n self.queryset = self.queryset.filter(project__partners=project__partners)\n user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)\n if user__organisations:\n self.queryset = self.queryset.filter(user__organisations=user__organisations)\n return super(ProjectUpdateExtraViewSet, self).get_queryset()\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef upload_indicator_update_photo(request, pk=None):\n update = ProjectUpdate.objects.get(pk=pk)\n user = request.user\n\n # TODO: permissions\n\n files = request.FILES\n\n if 'photo' in files.keys():\n update.photo = files['photo']\n update.save(update_fields=['photo'])\n\n return Response(ProjectUpdateExtraSerializer(update).data)\n", "path": "akvo/rest/views/project_update.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the 
license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rsr.models import ProjectUpdate\n\nfrom ..serializers import ProjectUpdateSerializer, ProjectUpdateExtraSerializer\nfrom ..viewsets import PublicProjectViewSet\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\n\nclass ProjectUpdateViewSet(PublicProjectViewSet):\n\n \"\"\".\"\"\"\n queryset = ProjectUpdate.objects.select_related('project',\n 'user').prefetch_related('locations')\n serializer_class = ProjectUpdateSerializer\n filter_fields = {\n 'project': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n }\n\n paginate_by_param = 'limit'\n max_paginate_by = 1000\n\n def get_queryset(self):\n \"\"\"\n Allow simple filtering on selected fields.\n We don't use the default filter_fields, because Up filters on\n datetime for last_modified_at, and they only support a date, not datetime.\n \"\"\"\n created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)\n if created_at__gt is not None:\n self.queryset = self.queryset.filter(created_at__gt=created_at__gt)\n created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)\n if created_at__lt is not None:\n self.queryset = self.queryset.filter(created_at__lt=created_at__lt)\n last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)\n if last_modified_at__gt is not None:\n self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)\n last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)\n if last_modified_at__lt is not None:\n self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)\n # Get updates per organisation\n project__partners = self.request.QUERY_PARAMS.get('project__partners', None)\n if project__partners:\n self.queryset = self.queryset.filter(project__partners=project__partners)\n user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)\n if user__organisations:\n self.queryset = self.queryset.filter(user__organisations=user__organisations)\n return super(ProjectUpdateViewSet, self).get_queryset()\n\n\nclass ProjectUpdateExtraViewSet(PublicProjectViewSet):\n\n \"\"\"Project update extra resource.\"\"\"\n\n max_paginate_by = 30\n paginate_by = 10\n\n queryset = ProjectUpdate.objects.select_related(\n 'primary_location',\n 'primary_location__location_target',\n 'primary_location__location_target__project',\n 'primary_location__location_target__user',\n 'primary_location__location_target__primary_location',\n 'primary_location__location_target__country',\n 'project',\n 'user',\n 'user__organisation',\n 'user__organisation__primary_location',\n 'user__organisation__primary_location__country',\n 'user__organisation__primary_location__location_target',\n 'user__organisation__primary_location__location_target__internal_org_ids',\n\n ).prefetch_related(\n 'user__organisations',\n 'user__organisations__primary_location',\n 'user__organisations__primary_location__country',\n 'user__organisations__primary_location__location_target')\n serializer_class = ProjectUpdateExtraSerializer\n filter_fields = {\n 'project': ['exact', ],\n 'user': ['exact', ],\n 'uuid': ['exact', 'icontains', ],\n # These filters only accept a date, not a datetime\n # 'created_at': ['exact', 'gt', 'gte', 
'lt', 'lte', ],\n # 'last_modified_at': ['exact', 'gt', 'gte', 'lt', 'lte', ],\n }\n\n def get_queryset(self):\n \"\"\"\n Allow simple filtering on selected fields.\n We don't use the default filter_fields, because Up filters on\n datetime for last_modified_at, and they only support a date, not datetime.\n \"\"\"\n created_at__gt = self.request.QUERY_PARAMS.get('created_at__gt', None)\n if created_at__gt is not None:\n self.queryset = self.queryset.filter(created_at__gt=created_at__gt)\n created_at__lt = self.request.QUERY_PARAMS.get('created_at__lt', None)\n if created_at__lt is not None:\n self.queryset = self.queryset.filter(created_at__lt=created_at__lt)\n last_modified_at__gt = self.request.QUERY_PARAMS.get('last_modified_at__gt', None)\n if last_modified_at__gt is not None:\n self.queryset = self.queryset.filter(last_modified_at__gt=last_modified_at__gt)\n last_modified_at__lt = self.request.QUERY_PARAMS.get('last_modified_at__lt', None)\n if last_modified_at__lt is not None:\n self.queryset = self.queryset.filter(last_modified_at__lt=last_modified_at__lt)\n # Get updates per organisation\n project__partners = self.request.QUERY_PARAMS.get('project__partners', None)\n if project__partners:\n self.queryset = self.queryset.filter(project__partners=project__partners)\n user__organisations = self.request.QUERY_PARAMS.get('user__organisations', None)\n if user__organisations:\n self.queryset = self.queryset.filter(user__organisations=user__organisations)\n return super(ProjectUpdateExtraViewSet, self).get_queryset()\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef upload_indicator_update_photo(request, pk=None):\n update = ProjectUpdate.objects.get(pk=pk)\n user = request.user\n\n # TODO: permissions\n\n files = request.FILES\n\n if 'photo' in files.keys():\n update.photo = files['photo']\n update.save(update_fields=['photo'])\n\n return Response(ProjectUpdateExtraSerializer(update).data)\n", "path": "akvo/rest/views/project_update.py"}]} | 2,035 | 284 |
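A hedged reading of the patch above: `indicator_period` and `period_update` are presumably not valid filterable fields on `ProjectUpdate`, so building the filterset raised the internal server error; the fix keeps only fields that filter cleanly. The corrected mapping, as kept by the patched file in both view sets:

```python
# filter_fields retained by the patch; the two offending entries are dropped.
filter_fields = {
    'project': ['exact'],
    'user': ['exact'],
    'uuid': ['exact', 'icontains'],
}
```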
gh_patches_debug_3391 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mitmdump does not apply filter to saved data
##### Steps to reproduce the problem:
1. I captured some traffic, and ran the following to filter it:
```
$ mitmdump -r traffic.mitm -w out.mitm '~u main.css'
Proxy server listening at http://[::]:8080
172.16.122.1:51049: GET https://www.sjoerdlangkemper.nl/css/main.css
<< 304 Not Modified 0b
$
```
It displays only the matched URL, but it saves all traffic. When done, out.mitm contains the same requests and responses as traffic.mitm; that is, `mitmproxy -r out.mitm` shows a lot of requests, whereas I would expect only the request for main.css.
##### Any other comments? What have you tried so far?
I tried this with release 2.0.2, and there it worked as expected. This issue seems to be similar to #1089.
##### System information
```
$ mitmdump --version
Mitmproxy version: 3.0.0 (2.0.0dev0965-0x168c72a)
Python version: 3.5.2
Platform: Linux-4.4.0-98-generic-x86_64-with-Ubuntu-16.04-xenial
SSL version: OpenSSL 1.1.0f 25 May 2017
Linux distro: Ubuntu 16.04 xenial
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/save.py`
Content:
```
1 import os.path
2 import typing
3
4 from mitmproxy import exceptions
5 from mitmproxy import flowfilter
6 from mitmproxy import io
7 from mitmproxy import ctx
8 from mitmproxy import flow
9
10
11 class Save:
12 def __init__(self):
13 self.stream = None
14 self.filt = None
15 self.active_flows = set() # type: Set[flow.Flow]
16
17 def open_file(self, path):
18 if path.startswith("+"):
19 path = path[1:]
20 mode = "ab"
21 else:
22 mode = "wb"
23 path = os.path.expanduser(path)
24 return open(path, mode)
25
26 def start_stream_to_path(self, path, flt):
27 try:
28 f = self.open_file(path)
29 except IOError as v:
30 raise exceptions.OptionsError(str(v))
31 self.stream = io.FilteredFlowWriter(f, flt)
32 self.active_flows = set()
33
34 def configure(self, updated):
35 # We're already streaming - stop the previous stream and restart
36 if "save_stream_filter" in updated:
37 if ctx.options.save_stream_filter:
38 self.filt = flowfilter.parse(ctx.options.save_stream_filter)
39 if not self.filt:
40 raise exceptions.OptionsError(
41 "Invalid filter specification: %s" % ctx.options.save_stream_filter
42 )
43 else:
44 self.filt = None
45 if "save_stream_file" in updated:
46 if self.stream:
47 self.done()
48 if ctx.options.save_stream_file:
49 self.start_stream_to_path(ctx.options.save_stream_file, self.filt)
50
51 def save(self, flows: typing.Sequence[flow.Flow], path: str) -> None:
52 """
53 Save flows to a file. If the path starts with a +, flows are
54 appended to the file, otherwise it is over-written.
55 """
56 try:
57 f = self.open_file(path)
58 except IOError as v:
59 raise exceptions.CommandError(v) from v
60 stream = io.FlowWriter(f)
61 for i in flows:
62 stream.add(i)
63 f.close()
64 ctx.log.alert("Saved %s flows." % len(flows))
65
66 def load(self, l):
67 l.add_command("save.file", self.save)
68
69 def tcp_start(self, flow):
70 if self.stream:
71 self.active_flows.add(flow)
72
73 def tcp_end(self, flow):
74 if self.stream:
75 self.stream.add(flow)
76 self.active_flows.discard(flow)
77
78 def response(self, flow):
79 if self.stream:
80 self.stream.add(flow)
81 self.active_flows.discard(flow)
82
83 def request(self, flow):
84 if self.stream:
85 self.active_flows.add(flow)
86
87 def done(self):
88 if self.stream:
89 for f in self.active_flows:
90 self.stream.add(f)
91 self.active_flows = set([])
92 self.stream.fo.close()
93 self.stream = None
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/save.py b/mitmproxy/addons/save.py
--- a/mitmproxy/addons/save.py
+++ b/mitmproxy/addons/save.py
@@ -42,7 +42,7 @@
)
else:
self.filt = None
- if "save_stream_file" in updated:
+ if "save_stream_file" in updated or "save_stream_filter" in updated:
if self.stream:
self.done()
if ctx.options.save_stream_file:
| {"golden_diff": "diff --git a/mitmproxy/addons/save.py b/mitmproxy/addons/save.py\n--- a/mitmproxy/addons/save.py\n+++ b/mitmproxy/addons/save.py\n@@ -42,7 +42,7 @@\n )\n else:\n self.filt = None\n- if \"save_stream_file\" in updated:\n+ if \"save_stream_file\" in updated or \"save_stream_filter\" in updated:\n if self.stream:\n self.done()\n if ctx.options.save_stream_file:\n", "issue": "mitmdump does not apply filter to saved data\n##### Steps to reproduce the problem:\r\n\r\n1. I captured some traffic, and ran the following to filter it:\r\n\r\n```\r\n$ mitmdump -r traffic.mitm -w out.mitm '~u main.css'\r\nProxy server listening at http://[::]:8080\r\n172.16.122.1:51049: GET https://www.sjoerdlangkemper.nl/css/main.css\r\n << 304 Not Modified 0b\r\n$\r\n```\r\n\r\nIt displays only the matched URL, but it saves all traffic. When done, out.mitm contains the same requests and responses as traffic.mitm. I.e. `mitmproxy -r out.mitm` shows a lot of requests, where I would expect only the request for main.css.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI tried this with release 2.0.2, and there it worked as expected. This issue seems to be similar to #1089.\r\n\r\n##### System information\r\n\r\n```\r\n$ mitmdump --version\r\nMitmproxy version: 3.0.0 (2.0.0dev0965-0x168c72a) \r\nPython version: 3.5.2\r\nPlatform: Linux-4.4.0-98-generic-x86_64-with-Ubuntu-16.04-xenial\r\nSSL version: OpenSSL 1.1.0f 25 May 2017\r\nLinux distro: Ubuntu 16.04 xenial\r\n```\r\n\n", "before_files": [{"content": "import os.path\nimport typing\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\nfrom mitmproxy import ctx\nfrom mitmproxy import flow\n\n\nclass Save:\n def __init__(self):\n self.stream = None\n self.filt = None\n self.active_flows = set() # type: Set[flow.Flow]\n\n def open_file(self, path):\n if path.startswith(\"+\"):\n path = path[1:]\n mode = \"ab\"\n else:\n mode = \"wb\"\n path = os.path.expanduser(path)\n return open(path, mode)\n\n def start_stream_to_path(self, path, flt):\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.OptionsError(str(v))\n self.stream = io.FilteredFlowWriter(f, flt)\n self.active_flows = set()\n\n def configure(self, updated):\n # We're already streaming - stop the previous stream and restart\n if \"save_stream_filter\" in updated:\n if ctx.options.save_stream_filter:\n self.filt = flowfilter.parse(ctx.options.save_stream_filter)\n if not self.filt:\n raise exceptions.OptionsError(\n \"Invalid filter specification: %s\" % ctx.options.save_stream_filter\n )\n else:\n self.filt = None\n if \"save_stream_file\" in updated:\n if self.stream:\n self.done()\n if ctx.options.save_stream_file:\n self.start_stream_to_path(ctx.options.save_stream_file, self.filt)\n\n def save(self, flows: typing.Sequence[flow.Flow], path: str) -> None:\n \"\"\"\n Save flows to a file. 
If the path starts with a +, flows are\n appended to the file, otherwise it is over-written.\n \"\"\"\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.CommandError(v) from v\n stream = io.FlowWriter(f)\n for i in flows:\n stream.add(i)\n f.close()\n ctx.log.alert(\"Saved %s flows.\" % len(flows))\n\n def load(self, l):\n l.add_command(\"save.file\", self.save)\n\n def tcp_start(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def tcp_end(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def response(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def request(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def done(self):\n if self.stream:\n for f in self.active_flows:\n self.stream.add(f)\n self.active_flows = set([])\n self.stream.fo.close()\n self.stream = None\n", "path": "mitmproxy/addons/save.py"}], "after_files": [{"content": "import os.path\nimport typing\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\nfrom mitmproxy import ctx\nfrom mitmproxy import flow\n\n\nclass Save:\n def __init__(self):\n self.stream = None\n self.filt = None\n self.active_flows = set() # type: Set[flow.Flow]\n\n def open_file(self, path):\n if path.startswith(\"+\"):\n path = path[1:]\n mode = \"ab\"\n else:\n mode = \"wb\"\n path = os.path.expanduser(path)\n return open(path, mode)\n\n def start_stream_to_path(self, path, flt):\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.OptionsError(str(v))\n self.stream = io.FilteredFlowWriter(f, flt)\n self.active_flows = set()\n\n def configure(self, updated):\n # We're already streaming - stop the previous stream and restart\n if \"save_stream_filter\" in updated:\n if ctx.options.save_stream_filter:\n self.filt = flowfilter.parse(ctx.options.save_stream_filter)\n if not self.filt:\n raise exceptions.OptionsError(\n \"Invalid filter specification: %s\" % ctx.options.save_stream_filter\n )\n else:\n self.filt = None\n if \"save_stream_file\" in updated or \"save_stream_filter\" in updated:\n if self.stream:\n self.done()\n if ctx.options.save_stream_file:\n self.start_stream_to_path(ctx.options.save_stream_file, self.filt)\n\n def save(self, flows: typing.Sequence[flow.Flow], path: str) -> None:\n \"\"\"\n Save flows to a file. If the path starts with a +, flows are\n appended to the file, otherwise it is over-written.\n \"\"\"\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.CommandError(v) from v\n stream = io.FlowWriter(f)\n for i in flows:\n stream.add(i)\n f.close()\n ctx.log.alert(\"Saved %s flows.\" % len(flows))\n\n def load(self, l):\n l.add_command(\"save.file\", self.save)\n\n def tcp_start(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def tcp_end(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def response(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def request(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def done(self):\n if self.stream:\n for f in self.active_flows:\n self.stream.add(f)\n self.active_flows = set([])\n self.stream.fo.close()\n self.stream = None\n", "path": "mitmproxy/addons/save.py"}]} | 1,417 | 111 |
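The one-line change above means the stream is (re)started whenever either the output file or the filter option is updated, so a filter given on the command line is attached before any flow is written. A condensed, hedged sketch of the resulting control flow (it reuses the module's existing imports and omits the invalid-filter error handling of the real method; not a drop-in replacement):

```python
# Condensed sketch of Save.configure after the patch (illustrative only).
def configure(self, updated):
    if "save_stream_filter" in updated:
        # parse or clear the filter, as in the real method
        self.filt = flowfilter.parse(ctx.options.save_stream_filter) if ctx.options.save_stream_filter else None
    if "save_stream_file" in updated or "save_stream_filter" in updated:
        if self.stream:
            self.done()
        if ctx.options.save_stream_file:
            self.start_stream_to_path(ctx.options.save_stream_file, self.filt)
```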
gh_patches_debug_29098 | rasdani/github-patches | git_diff | mesonbuild__meson-2815 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
windows.compile_resources() can't be used with custom targets
```meson
rc_target = custom_target('Windows resource file',
command : [preprocess_command, rcdefs, '@INPUT@', '@OUTPUT@'],
build_always : true,
input : 'taisei.rc.in',
output : 'taisei.rc',
)
version_deps += winmod.compile_resources(rc_target)
```
```
Meson encountered an error in file src/meson.build, line 59, column 4:
Windows resource arguments must be strings or files not <CustomTargetHolder Windows resource file@cus: ['/data/git/taisei/scripts/configure-file.py', '--rootdir', '/data/git/taisei', '--fallback-version', 'v1.1.0-9999', '-DMESON_BUILD_TYPE=release', '-DICONS_DIR=/data/git/taisei/misc/icons', '-DBUILDTYPE_DEFINE=#define RELEASE_BUILD', '@INPUT@', '@OUTPUT@']>
```
This bug makes it impossible to reliably regenerate the `.rc` source on every rebuild.
Add something like depend_files to windows.compile_resources()
A resource script can include various other files (bitmap, cursor, font, HTML, icon, message table, binary data, manifest), so it would be nice if it were possible to declare that the resource script depends on these.
--- END ISSUE ---
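One direction this points at, as a sketch only (the helper name and structure are assumptions, not code from the module shown below): instead of funnelling every input through a single Generator, each source could be wrapped in its own custom target, which lets plain strings, files and custom targets all be accepted and gives each generated step a place to carry its own dependencies.

```python
# Sketch only: wrap each resource source in its own compile step so that
# strings, Files and CustomTargets are all accepted. build, mesonlib and
# MesonException are the Meson-internal names imported in the file below.
def compile_one(src, rescomp, res_args, suffix, state):
    if isinstance(src, (str, mesonlib.File)):
        name = 'file {!r}'.format(str(src))
    elif isinstance(src, build.CustomTarget):
        name = 'target {!r}'.format(src.get_id())
    else:
        raise MesonException('strings, files or custom targets expected')
    res_kwargs = {'output': '@BASENAME@.' + suffix,
                  'input': [src],
                  'command': [rescomp] + res_args}
    return build.CustomTarget('Windows resource for ' + name,
                              state.subdir, state.subproject, res_kwargs)
```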
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/modules/windows.py`
Content:
```
1 # Copyright 2015 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from .. import mlog
18 from .. import mesonlib, dependencies, build
19 from ..mesonlib import MesonException, extract_as_list
20 from . import get_include_args
21 from . import ModuleReturnValue
22 from . import ExtensionModule
23 from ..interpreterbase import permittedKwargs
24
25 class WindowsModule(ExtensionModule):
26
27 def detect_compiler(self, compilers):
28 for l in ('c', 'cpp'):
29 if l in compilers:
30 return compilers[l]
31 raise MesonException('Resource compilation requires a C or C++ compiler.')
32
33 @permittedKwargs({'args', 'include_directories'})
34 def compile_resources(self, state, args, kwargs):
35 comp = self.detect_compiler(state.compilers)
36
37 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
38 inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)
39 for incd in inc_dirs:
40 if not isinstance(incd.held_object, (str, build.IncludeDirs)):
41 raise MesonException('Resource include dirs should be include_directories().')
42 extra_args += get_include_args(inc_dirs)
43
44 if comp.id == 'msvc':
45 rescomp = dependencies.ExternalProgram('rc', silent=True)
46 res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']
47 suffix = 'res'
48 else:
49 m = 'Argument {!r} has a space which may not work with windres due to ' \
50 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'
51 for arg in extra_args:
52 if ' ' in arg:
53 mlog.warning(m.format(arg))
54 rescomp_name = None
55 # FIXME: Does not handle `native: true` executables, see
56 # https://github.com/mesonbuild/meson/issues/1531
57 if state.environment.is_cross_build():
58 # If cross compiling see if windres has been specified in the
59 # cross file before trying to find it another way.
60 rescomp_name = state.environment.cross_info.config['binaries'].get('windres')
61 if rescomp_name is None:
62 # Pick-up env var WINDRES if set. This is often used for
63 # specifying an arch-specific windres.
64 rescomp_name = os.environ.get('WINDRES', 'windres')
65 rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)
66 res_args = extra_args + ['@INPUT@', '@OUTPUT@']
67 suffix = 'o'
68 if not rescomp.found():
69 raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))
70 res_kwargs = {'output': '@BASENAME@.' + suffix,
71 'arguments': res_args}
72 res_gen = build.Generator([rescomp], res_kwargs)
73 res_output = res_gen.process_files('Windows resource', args, state)
74 return ModuleReturnValue(res_output, [res_output])
75
76 def initialize():
77 return WindowsModule()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py
--- a/mesonbuild/modules/windows.py
+++ b/mesonbuild/modules/windows.py
@@ -67,11 +67,42 @@
suffix = 'o'
if not rescomp.found():
raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))
- res_kwargs = {'output': '@BASENAME@.' + suffix,
- 'arguments': res_args}
- res_gen = build.Generator([rescomp], res_kwargs)
- res_output = res_gen.process_files('Windows resource', args, state)
- return ModuleReturnValue(res_output, [res_output])
+
+ res_targets = []
+
+ def add_target(src):
+ if isinstance(src, list):
+ for subsrc in src:
+ add_target(subsrc)
+ return
+
+ if hasattr(src, 'held_object'):
+ src = src.held_object
+
+ res_kwargs = {
+ 'output': '@BASENAME@.' + suffix,
+ 'input': [src],
+ 'command': [rescomp] + res_args,
+ }
+
+ if isinstance(src, (str, mesonlib.File)):
+ name = 'file {!r}'.format(str(src))
+ elif isinstance(src, build.CustomTarget):
+ if len(src.get_outputs()) > 1:
+ raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
+
+ name = 'target {!r}'.format(src.get_id())
+ else:
+ raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))
+
+ # Path separators are not allowed in target names
+ name = name.replace('/', '_').replace('\\', '_')
+
+ res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))
+
+ add_target(args)
+
+ return ModuleReturnValue(res_targets, [res_targets])
def initialize():
return WindowsModule()
| {"golden_diff": "diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py\n--- a/mesonbuild/modules/windows.py\n+++ b/mesonbuild/modules/windows.py\n@@ -67,11 +67,42 @@\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))\n- res_kwargs = {'output': '@BASENAME@.' + suffix,\n- 'arguments': res_args}\n- res_gen = build.Generator([rescomp], res_kwargs)\n- res_output = res_gen.process_files('Windows resource', args, state)\n- return ModuleReturnValue(res_output, [res_output])\n+\n+ res_targets = []\n+\n+ def add_target(src):\n+ if isinstance(src, list):\n+ for subsrc in src:\n+ add_target(subsrc)\n+ return\n+\n+ if hasattr(src, 'held_object'):\n+ src = src.held_object\n+\n+ res_kwargs = {\n+ 'output': '@BASENAME@.' + suffix,\n+ 'input': [src],\n+ 'command': [rescomp] + res_args,\n+ }\n+\n+ if isinstance(src, (str, mesonlib.File)):\n+ name = 'file {!r}'.format(str(src))\n+ elif isinstance(src, build.CustomTarget):\n+ if len(src.get_outputs()) > 1:\n+ raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n+\n+ name = 'target {!r}'.format(src.get_id())\n+ else:\n+ raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n+\n+ # Path separators are not allowed in target names\n+ name = name.replace('/', '_').replace('\\\\', '_')\n+\n+ res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))\n+\n+ add_target(args)\n+\n+ return ModuleReturnValue(res_targets, [res_targets])\n \n def initialize():\n return WindowsModule()\n", "issue": "windows.compile_resources() can't be used with custom targets\n```meson\r\n rc_target = custom_target('Windows resource file',\r\n command : [preprocess_command, rcdefs, '@INPUT@', '@OUTPUT@'],\r\n build_always : true,\r\n input : 'taisei.rc.in',\r\n output : 'taisei.rc',\r\n )\r\n\r\n version_deps += winmod.compile_resources(rc_target)\r\n```\r\n\r\n```\r\nMeson encountered an error in file src/meson.build, line 59, column 4:\r\nWindows resource arguments must be strings or files not <CustomTargetHolder Windows resource file@cus: ['/data/git/taisei/scripts/configure-file.py', '--rootdir', '/data/git/taisei', '--fallback-version', 'v1.1.0-9999', '-DMESON_BUILD_TYPE=release', '-DICONS_DIR=/data/git/taisei/misc/icons', '-DBUILDTYPE_DEFINE=#define RELEASE_BUILD', '@INPUT@', '@OUTPUT@']>\r\n```\r\n\r\nThis bug makes it impossible to reliably regenerate the `.rc` source on every rebuild.\nAdd something like depend_files to windows.compile_resources()\nResource script can include various other files (bitmap, cursor, font, html, icon, message table, binary data, manifest), it would be nice if it were possible to declare the resource script depends on these.\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\nimport os\n\nfrom .. import mlog\nfrom .. import mesonlib, dependencies, build\nfrom ..mesonlib import MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . import ExtensionModule\nfrom ..interpreterbase import permittedKwargs\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n @permittedKwargs({'args', 'include_directories'})\n def compile_resources(self, state, args, kwargs):\n comp = self.detect_compiler(state.compilers)\n\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n if comp.id == 'msvc':\n rescomp = dependencies.ExternalProgram('rc', silent=True)\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n suffix = 'res'\n else:\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n rescomp_name = None\n # FIXME: Does not handle `native: true` executables, see\n # https://github.com/mesonbuild/meson/issues/1531\n if state.environment.is_cross_build():\n # If cross compiling see if windres has been specified in the\n # cross file before trying to find it another way.\n rescomp_name = state.environment.cross_info.config['binaries'].get('windres')\n if rescomp_name is None:\n # Pick-up env var WINDRES if set. This is often used for\n # specifying an arch-specific windres.\n rescomp_name = os.environ.get('WINDRES', 'windres')\n rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))\n res_kwargs = {'output': '@BASENAME@.' + suffix,\n 'arguments': res_args}\n res_gen = build.Generator([rescomp], res_kwargs)\n res_output = res_gen.process_files('Windows resource', args, state)\n return ModuleReturnValue(res_output, [res_output])\n\ndef initialize():\n return WindowsModule()\n", "path": "mesonbuild/modules/windows.py"}], "after_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .. import mlog\nfrom .. import mesonlib, dependencies, build\nfrom ..mesonlib import MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreterbase import permittedKwargs\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n @permittedKwargs({'args', 'include_directories'})\n def compile_resources(self, state, args, kwargs):\n comp = self.detect_compiler(state.compilers)\n\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n if comp.id == 'msvc':\n rescomp = dependencies.ExternalProgram('rc', silent=True)\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n suffix = 'res'\n else:\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n rescomp_name = None\n # FIXME: Does not handle `native: true` executables, see\n # https://github.com/mesonbuild/meson/issues/1531\n if state.environment.is_cross_build():\n # If cross compiling see if windres has been specified in the\n # cross file before trying to find it another way.\n rescomp_name = state.environment.cross_info.config['binaries'].get('windres')\n if rescomp_name is None:\n # Pick-up env var WINDRES if set. This is often used for\n # specifying an arch-specific windres.\n rescomp_name = os.environ.get('WINDRES', 'windres')\n rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))\n\n res_targets = []\n\n def add_target(src):\n if isinstance(src, list):\n for subsrc in src:\n add_target(subsrc)\n return\n\n if hasattr(src, 'held_object'):\n src = src.held_object\n\n res_kwargs = {\n 'output': '@BASENAME@.' + suffix,\n 'input': [src],\n 'command': [rescomp] + res_args,\n }\n\n if isinstance(src, (str, mesonlib.File)):\n name = 'file {!r}'.format(str(src))\n elif isinstance(src, build.CustomTarget):\n if len(src.get_outputs()) > 1:\n raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n\n name = 'target {!r}'.format(src.get_id())\n else:\n raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n\n # Path separators are not allowed in target names\n name = name.replace('/', '_').replace('\\\\', '_')\n\n res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))\n\n add_target(args)\n\n return ModuleReturnValue(res_targets, [res_targets])\n\ndef initialize():\n return WindowsModule()\n", "path": "mesonbuild/modules/windows.py"}]} | 1,482 | 469 |
gh_patches_debug_57622 | rasdani/github-patches | git_diff | AnalogJ__lexicon-164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Namecheap support not optional
Unlike route53 or softlayer and unlike what setup.py suggests, the namecheap provider is not optional in 2.1.17.
--- END ISSUE ---
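For comparison, the optional providers usually guard their import so the package stays importable without the extra installed. A minimal sketch of that pattern (the fallback value is an assumption, not the project's code):

```python
# Sketch of the usual optional-dependency pattern: fail only when the
# provider is actually used, not at import time.
try:
    import namecheap  # optional dependency
except ImportError:  # the extra isn't installed
    namecheap = None
```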
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/namecheap.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import logging
5
6 import namecheap
7
8 from .base import Provider as BaseProvider
9
10 logger = logging.getLogger(__name__)
11
12
13 def ProviderParser(subparser):
14 subparser.add_argument(
15 '--auth-token',
16 help='specify api token used to authenticate'
17 )
18 subparser.add_argument(
19 '--auth-username',
20 help='specify email address used to authenticate'
21 )
22 # FIXME What is the client IP used for?
23 subparser.add_argument(
24 '--auth-client-ip',
25 help='Client IP address to send to Namecheap API calls',
26 default='127.0.0.1'
27 )
28 subparser.add_argument(
29 '--auth-sandbox',
30 help='Whether to use the sandbox server',
31 action='store_true'
32 )
33
34 class Provider(BaseProvider):
35
36 def __init__(self, options, engine_overrides=None):
37 super(Provider, self).__init__(options, engine_overrides)
38 self.options = options
39 self.client = namecheap.Api(
40 ApiUser=options.get('auth_username',''),
41 ApiKey=options.get('auth_token',''),
42 UserName=options.get('auth_username',''),
43 ClientIP=options.get('auth_client_ip',''),
44 sandbox=options.get('auth_sandbox', False),
45 debug=False
46 )
47 self.domain = self.options['domain']
48 self.domain_id = None
49
50 def authenticate(self):
51 try:
52 domain_names = [x['Name'] for x in self.client.domains_getList()]
53 except namecheap.ApiError:
54 raise Exception('Authentication failed')
55 if self.domain not in domain_names:
56 raise Exception('The domain {} is not controlled by this Namecheap '
57 'account'.format(self.domain))
58 # FIXME What is this for?
59 self.domain_id = self.domain
60
61 # Create record. If record already exists with the same content, do nothing
62 def create_record(self, type, name, content):
63 record = {
64 # required
65 'Type': type,
66 'Name': self._relative_name(name),
67 'Address': content
68 }
69 # logger.debug('create_record: %s', 'id' in payload)
70 # return 'id' in payload
71 self.client.domains_dns_addHost(self.domain, record)
72 return True
73
74 # List all records. Return an empty list if no records found.
75 # type, name and content are used to filter records.
76 # If possible filter during the query, otherwise filter after response is
77 # received.
78 def list_records(self, type=None, name=None, content=None, id=None):
79
80
81 records = []
82 raw_records = self.client.domains_dns_getHosts(self.domain)
83 for record in raw_records:
84 records.append(self._convert_to_lexicon(record))
85
86 if id:
87 records = [record for record in records if record['id'] == id]
88 if type:
89 records = [record for record in records if record['type'] == type]
90 if name:
91 if name.endswith('.'):
92 name = name[:-1]
93 records = [record for record in records if name in record['name'] ]
94 if content:
95 records = [record for record in records if record['content'].lower() == content.lower()]
96
97 logger.debug('list_records: %s', records)
98 return records
99
100 # Create or update a record.
101 def update_record(self, identifier, type=None, name=None, content=None):
102 # Delete record if it exists
103 self.delete_record(identifier, type, name, content)
104 return self.create_record(type, name, content)
105
106 # Delete an existing record.
107 # If record does not exist, do nothing.
108 def delete_record(self, identifier=None, type=None, name=None, content=None):
109
110 record = self.list_records(type=type, name=name, content=content, id=identifier)
111 if record:
112 self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))
113 return True
114 else:
115 return False
116
117 def _convert_to_namecheap(self, record):
118 """ converts from lexicon format record to namecheap format record,
119 suitable to sending through the api to namecheap"""
120
121 name = record['name']
122 if name.endswith('.'):
123 name = name[:-1]
124
125 short_name = name[:name.find(self.domain)-1]
126 processed_record = {
127 'Type': record['type'],
128 'Name': short_name,
129 'TTL': record['ttl'],
130 'Address': record['content'],
131 'HostId': record['id']
132 }
133
134 return processed_record
135
136 def _convert_to_lexicon(self, record):
137 """ converts from namecheap raw record format to lexicon format record
138 """
139
140 name = record['Name']
141 if self.domain not in name:
142 name = "{}.{}".format(name,self.domain)
143
144 processed_record = {
145 'type': record['Type'],
146 'name': '{0}.{1}'.format(record['Name'], self.domain),
147 'ttl': record['TTL'],
148 'content': record['Address'],
149 'id': record['HostId']
150 }
151
152 return processed_record
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/namecheap.py b/lexicon/providers/namecheap.py
--- a/lexicon/providers/namecheap.py
+++ b/lexicon/providers/namecheap.py
@@ -3,10 +3,14 @@
import logging
-import namecheap
from .base import Provider as BaseProvider
+try:
+ import namecheap #optional dep
+except ImportError:
+ pass
+
logger = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/lexicon/providers/namecheap.py b/lexicon/providers/namecheap.py\n--- a/lexicon/providers/namecheap.py\n+++ b/lexicon/providers/namecheap.py\n@@ -3,10 +3,14 @@\n \n import logging\n \n-import namecheap\n \n from .base import Provider as BaseProvider\n \n+try:\n+ import namecheap #optional dep\n+except ImportError:\n+ pass\n+\n logger = logging.getLogger(__name__)\n", "issue": "Namecheap support not optional\nUnlike route53 or softlayer and unlike what setup.py suggests, the namecheap provider is not optional in 2.1.17.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\n\nimport namecheap\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\n '--auth-token',\n help='specify api token used to authenticate'\n )\n subparser.add_argument(\n '--auth-username',\n help='specify email address used to authenticate'\n )\n # FIXME What is the client IP used for?\n subparser.add_argument(\n '--auth-client-ip',\n help='Client IP address to send to Namecheap API calls',\n default='127.0.0.1'\n )\n subparser.add_argument(\n '--auth-sandbox',\n help='Whether to use the sandbox server',\n action='store_true'\n )\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.options = options\n self.client = namecheap.Api(\n ApiUser=options.get('auth_username',''),\n ApiKey=options.get('auth_token',''),\n UserName=options.get('auth_username',''),\n ClientIP=options.get('auth_client_ip',''),\n sandbox=options.get('auth_sandbox', False),\n debug=False\n )\n self.domain = self.options['domain']\n self.domain_id = None\n\n def authenticate(self):\n try:\n domain_names = [x['Name'] for x in self.client.domains_getList()]\n except namecheap.ApiError:\n raise Exception('Authentication failed')\n if self.domain not in domain_names:\n raise Exception('The domain {} is not controlled by this Namecheap '\n 'account'.format(self.domain))\n # FIXME What is this for?\n self.domain_id = self.domain\n\n # Create record. If record already exists with the same content, do nothing\n def create_record(self, type, name, content):\n record = {\n # required\n 'Type': type,\n 'Name': self._relative_name(name),\n 'Address': content\n }\n # logger.debug('create_record: %s', 'id' in payload)\n # return 'id' in payload\n self.client.domains_dns_addHost(self.domain, record)\n return True\n\n # List all records. 
Return an empty list if no records found.\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is\n # received.\n def list_records(self, type=None, name=None, content=None, id=None):\n\n\n records = []\n raw_records = self.client.domains_dns_getHosts(self.domain)\n for record in raw_records:\n records.append(self._convert_to_lexicon(record))\n\n if id:\n records = [record for record in records if record['id'] == id]\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n if name.endswith('.'):\n name = name[:-1]\n records = [record for record in records if name in record['name'] ]\n if content:\n records = [record for record in records if record['content'].lower() == content.lower()]\n\n logger.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n # Delete record if it exists\n self.delete_record(identifier, type, name, content)\n return self.create_record(type, name, content)\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n\n record = self.list_records(type=type, name=name, content=content, id=identifier)\n if record:\n self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))\n return True\n else:\n return False\n\n def _convert_to_namecheap(self, record):\n \"\"\" converts from lexicon format record to namecheap format record,\n suitable to sending through the api to namecheap\"\"\"\n\n name = record['name']\n if name.endswith('.'):\n name = name[:-1]\n\n short_name = name[:name.find(self.domain)-1]\n processed_record = {\n 'Type': record['type'],\n 'Name': short_name,\n 'TTL': record['ttl'],\n 'Address': record['content'],\n 'HostId': record['id']\n }\n\n return processed_record\n\n def _convert_to_lexicon(self, record):\n \"\"\" converts from namecheap raw record format to lexicon format record\n \"\"\"\n\n name = record['Name']\n if self.domain not in name:\n name = \"{}.{}\".format(name,self.domain)\n\n processed_record = {\n 'type': record['Type'],\n 'name': '{0}.{1}'.format(record['Name'], self.domain),\n 'ttl': record['TTL'],\n 'content': record['Address'],\n 'id': record['HostId']\n }\n\n return processed_record\n", "path": "lexicon/providers/namecheap.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\n\n\nfrom .base import Provider as BaseProvider\n\ntry:\n import namecheap #optional dep\nexcept ImportError:\n pass\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\n '--auth-token',\n help='specify api token used to authenticate'\n )\n subparser.add_argument(\n '--auth-username',\n help='specify email address used to authenticate'\n )\n # FIXME What is the client IP used for?\n subparser.add_argument(\n '--auth-client-ip',\n help='Client IP address to send to Namecheap API calls',\n default='127.0.0.1'\n )\n subparser.add_argument(\n '--auth-sandbox',\n help='Whether to use the sandbox server',\n action='store_true'\n )\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.options = options\n self.client = namecheap.Api(\n ApiUser=options.get('auth_username',''),\n 
ApiKey=options.get('auth_token',''),\n UserName=options.get('auth_username',''),\n ClientIP=options.get('auth_client_ip',''),\n sandbox=options.get('auth_sandbox', False),\n debug=False\n )\n self.domain = self.options['domain']\n self.domain_id = None\n\n def authenticate(self):\n try:\n domain_names = [x['Name'] for x in self.client.domains_getList()]\n except namecheap.ApiError:\n raise Exception('Authentication failed')\n if self.domain not in domain_names:\n raise Exception('The domain {} is not controlled by this Namecheap '\n 'account'.format(self.domain))\n # FIXME What is this for?\n self.domain_id = self.domain\n\n # Create record. If record already exists with the same content, do nothing\n def create_record(self, type, name, content):\n record = {\n # required\n 'Type': type,\n 'Name': self._relative_name(name),\n 'Address': content\n }\n # logger.debug('create_record: %s', 'id' in payload)\n # return 'id' in payload\n self.client.domains_dns_addHost(self.domain, record)\n return True\n\n # List all records. Return an empty list if no records found.\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is\n # received.\n def list_records(self, type=None, name=None, content=None, id=None):\n\n\n records = []\n raw_records = self.client.domains_dns_getHosts(self.domain)\n for record in raw_records:\n records.append(self._convert_to_lexicon(record))\n\n if id:\n records = [record for record in records if record['id'] == id]\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n if name.endswith('.'):\n name = name[:-1]\n records = [record for record in records if name in record['name'] ]\n if content:\n records = [record for record in records if record['content'].lower() == content.lower()]\n\n logger.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n # Delete record if it exists\n self.delete_record(identifier, type, name, content)\n return self.create_record(type, name, content)\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n\n record = self.list_records(type=type, name=name, content=content, id=identifier)\n if record:\n self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))\n return True\n else:\n return False\n\n def _convert_to_namecheap(self, record):\n \"\"\" converts from lexicon format record to namecheap format record,\n suitable to sending through the api to namecheap\"\"\"\n\n name = record['name']\n if name.endswith('.'):\n name = name[:-1]\n\n short_name = name[:name.find(self.domain)-1]\n processed_record = {\n 'Type': record['type'],\n 'Name': short_name,\n 'TTL': record['ttl'],\n 'Address': record['content'],\n 'HostId': record['id']\n }\n\n return processed_record\n\n def _convert_to_lexicon(self, record):\n \"\"\" converts from namecheap raw record format to lexicon format record\n \"\"\"\n\n name = record['Name']\n if self.domain not in name:\n name = \"{}.{}\".format(name,self.domain)\n\n processed_record = {\n 'type': record['Type'],\n 'name': '{0}.{1}'.format(record['Name'], self.domain),\n 'ttl': record['TTL'],\n 'content': record['Address'],\n 'id': record['HostId']\n }\n\n return processed_record\n", "path": "lexicon/providers/namecheap.py"}]} | 1,769 | 97 |
gh_patches_debug_60787 | rasdani/github-patches | git_diff | liqd__a4-product-1090 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_newsletters during normal register is broken
If the checkbox is ticked during registration, the user still ends up with get_newsletters = False. When the value is changed later in the account settings, however, it is saved correctly.
--- END ISSUE ---
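A sketch of the kind of change this suggests, with field names as on the registration form shown below and everything else assumed: the signup hook has to copy the submitted value onto the user before it is saved.

```python
# Sketch: persist the newsletter opt-in chosen on the registration form.
def signup(self, request, user):
    user.get_newsletters = self.cleaned_data["get_newsletters"]
    user.signup(
        self.cleaned_data["username"],
        self.cleaned_data["email"],
    )
```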
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/users/forms.py`
Content:
```
1 from allauth.socialaccount.adapter import get_adapter
2 from allauth.utils import email_address_exists
3 from django import forms
4 from django.contrib.auth import forms as auth_forms
5 from django.contrib.auth import get_user_model
6 from django.utils.translation import ugettext_lazy as _
7
8 User = get_user_model()
9
10
11 class TermsSignupForm(auth_forms.UserCreationForm):
12 terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={
13 'required': _('Please accept the terms of use.')
14 })
15
16 def signup(self, request, user):
17 user.signup(
18 self.cleaned_data['username'],
19 self.cleaned_data['email'],
20 )
21
22 class Meta:
23 model = User
24 fields = ('email', 'username', 'password1', 'password2',
25 'terms_of_use', 'get_newsletters')
26
27 # Tried to add form as described in allauth documentation:
28 # https://django-allauth.readthedocs.io/en/latest/forms.html#socialaccount-forms
29 # ran into the following error:
30 # https://stackoverflow.com/questions/57254251/custom-form-with-socialaccount-in-django-allauth
31 # added this solution, maybe not the best
32
33
34 class SignupForm(forms.Form):
35 terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={
36 'required': _('Please accept the terms of use.')
37 })
38 get_newsletters = forms.BooleanField(
39 label=_('Send me newsletters'), required=False)
40 email = forms.EmailField(widget=forms.HiddenInput())
41 username = forms.CharField(widget=forms.HiddenInput())
42
43 def __init__(self, *args, **kwargs):
44 self.sociallogin = kwargs.pop('sociallogin')
45 initial = get_adapter().get_signup_form_initial_data(
46 self.sociallogin)
47 kwargs.update({
48 'initial': initial})
49 super().__init__(*args, **kwargs)
50
51 def save(self, request):
52 adapter = get_adapter(request)
53 user = adapter.save_user(request, self.sociallogin, form=self)
54 user.get_newsletters = self.cleaned_data['get_newsletters']
55 user.save()
56 user.signup(
57 user.username,
58 user.email
59 )
60 return user
61
62 def clean(self):
63 email = self.cleaned_data['email']
64 if email_address_exists(email):
65 raise forms.ValidationError(
66 get_adapter().error_messages['email_taken']
67 % self.sociallogin.account.get_provider().name)
68 return super().clean()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/users/forms.py b/apps/users/forms.py
--- a/apps/users/forms.py
+++ b/apps/users/forms.py
@@ -14,6 +14,7 @@
})
def signup(self, request, user):
+ user.get_newsletters = self.cleaned_data["get_newsletters"]
user.signup(
self.cleaned_data['username'],
self.cleaned_data['email'],
| {"golden_diff": "diff --git a/apps/users/forms.py b/apps/users/forms.py\n--- a/apps/users/forms.py\n+++ b/apps/users/forms.py\n@@ -14,6 +14,7 @@\n })\n \n def signup(self, request, user):\n+ user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.signup(\n self.cleaned_data['username'],\n self.cleaned_data['email'],\n", "issue": "get_newsletters during normal register is broken\nIf checked, the user still has get_newsletters = False. But when changed in the account settings, it's changed.\n", "before_files": [{"content": "from allauth.socialaccount.adapter import get_adapter\nfrom allauth.utils import email_address_exists\nfrom django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nUser = get_user_model()\n\n\nclass TermsSignupForm(auth_forms.UserCreationForm):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n\n def signup(self, request, user):\n user.signup(\n self.cleaned_data['username'],\n self.cleaned_data['email'],\n )\n\n class Meta:\n model = User\n fields = ('email', 'username', 'password1', 'password2',\n 'terms_of_use', 'get_newsletters')\n\n# Tried to add form as described in allauth documentation:\n# https://django-allauth.readthedocs.io/en/latest/forms.html#socialaccount-forms\n# ran into the following error:\n# https://stackoverflow.com/questions/57254251/custom-form-with-socialaccount-in-django-allauth\n# added this solution, maybe not the best\n\n\nclass SignupForm(forms.Form):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n get_newsletters = forms.BooleanField(\n label=_('Send me newsletters'), required=False)\n email = forms.EmailField(widget=forms.HiddenInput())\n username = forms.CharField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n self.sociallogin = kwargs.pop('sociallogin')\n initial = get_adapter().get_signup_form_initial_data(\n self.sociallogin)\n kwargs.update({\n 'initial': initial})\n super().__init__(*args, **kwargs)\n\n def save(self, request):\n adapter = get_adapter(request)\n user = adapter.save_user(request, self.sociallogin, form=self)\n user.get_newsletters = self.cleaned_data['get_newsletters']\n user.save()\n user.signup(\n user.username,\n user.email\n )\n return user\n\n def clean(self):\n email = self.cleaned_data['email']\n if email_address_exists(email):\n raise forms.ValidationError(\n get_adapter().error_messages['email_taken']\n % self.sociallogin.account.get_provider().name)\n return super().clean()\n", "path": "apps/users/forms.py"}], "after_files": [{"content": "from allauth.socialaccount.adapter import get_adapter\nfrom allauth.utils import email_address_exists\nfrom django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nUser = get_user_model()\n\n\nclass TermsSignupForm(auth_forms.UserCreationForm):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n\n def signup(self, request, user):\n user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.signup(\n self.cleaned_data['username'],\n self.cleaned_data['email'],\n )\n\n class Meta:\n model = User\n fields = ('email', 'username', 
'password1', 'password2',\n 'terms_of_use', 'get_newsletters')\n\n# Tried to add form as described in allauth documentation:\n# https://django-allauth.readthedocs.io/en/latest/forms.html#socialaccount-forms\n# ran into the following error:\n# https://stackoverflow.com/questions/57254251/custom-form-with-socialaccount-in-django-allauth\n# added this solution, maybe not the best\n\n\nclass SignupForm(forms.Form):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n get_newsletters = forms.BooleanField(\n label=_('Send me newsletters'), required=False)\n email = forms.EmailField(widget=forms.HiddenInput())\n username = forms.CharField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n self.sociallogin = kwargs.pop('sociallogin')\n initial = get_adapter().get_signup_form_initial_data(\n self.sociallogin)\n kwargs.update({\n 'initial': initial})\n super().__init__(*args, **kwargs)\n\n def save(self, request):\n adapter = get_adapter(request)\n user = adapter.save_user(request, self.sociallogin, form=self)\n user.get_newsletters = self.cleaned_data['get_newsletters']\n user.save()\n user.signup(\n user.username,\n user.email\n )\n return user\n\n def clean(self):\n email = self.cleaned_data['email']\n if email_address_exists(email):\n raise forms.ValidationError(\n get_adapter().error_messages['email_taken']\n % self.sociallogin.account.get_provider().name)\n return super().clean()\n", "path": "apps/users/forms.py"}]} | 926 | 85 |
gh_patches_debug_6976 | rasdani/github-patches | git_diff | svthalia__concrexit-1369 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Searching in photos api is broken
### Describe the bug
Searching in photos api is broken
### How to reproduce
Steps to reproduce the behaviour:
1. Go to https://thalia.nu/api/v1/photos/albums/?search=Test
### Expected behaviour
This should not crash.
--- END ISSUE ---
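A sketch of where this usually goes wrong, assuming the translated title_nl field has been dropped from the Album model: DRF's SearchFilter resolves every name in search_fields against the model, so one stale entry makes any ?search= request raise.

```python
# Sketch: search_fields may only reference fields that still exist on Album
# ("title_nl" is assumed to have been removed from the model).
filter_backends = (filters.SearchFilter,)
search_fields = ("title_en", "date", "slug")
```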
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/api/viewsets.py`
Content:
```
1 from rest_framework import permissions, filters
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.mixins import CreateModelMixin, UpdateModelMixin
4 from rest_framework.viewsets import ModelViewSet, GenericViewSet
5
6 from photos import services
7 from photos.api import serializers
8 from photos.models import Album, Photo
9
10
11 class AlbumsViewSet(ModelViewSet):
12 """ViewSet class for an Album object."""
13
14 permission_classes = (permissions.IsAuthenticated,)
15 queryset = Album.objects.all()
16 filter_backends = (filters.SearchFilter,)
17 search_fields = ("title_en", "title_nl", "date", "slug")
18
19 def get_queryset(self):
20 """Return albums that are annotated to be accessible by the request user."""
21 return services.get_annotated_accessible_albums(
22 self.request, Album.objects.all()
23 )
24
25 def create(self, request, *args, **kwargs):
26 """Create album if the request user is allowed to."""
27 if self.request.user.has_perm("photos.create_album"):
28 return super().create(request, *args, **kwargs)
29 raise PermissionDenied
30
31 def update(self, request, *args, **kwargs):
32 """Create album if the request user is allowed to."""
33 if self.request.user.has_perm("photos.change_album"):
34 return super().update(request, *args, **kwargs)
35 raise PermissionDenied
36
37 def get_serializer_class(self):
38 """Return AlbumListSerializer if the current action is list else return AlbumSerializer."""
39 if self.action == "list":
40 return serializers.AlbumListSerializer
41 return serializers.AlbumSerializer
42
43
44 class PhotosViewSet(GenericViewSet, CreateModelMixin, UpdateModelMixin):
45 """ViewSet class for a Photo object."""
46
47 queryset = Photo.objects.all()
48 permission_classes = (permissions.IsAuthenticated,)
49 serializer_class = serializers.PhotoCreateSerializer
50
51 def create(self, request, *args, **kwargs):
52 """Create photo if the request user is allowed to."""
53 if self.request.user.has_perm("photos.create_photo"):
54 return super().create(request, *args, **kwargs)
55 raise PermissionDenied
56
57 def update(self, request, *args, **kwargs):
58 """Update photo if the request user is allowed to."""
59 if self.request.user.has_perm("photos.change_photo"):
60 return super().update(request, *args, **kwargs)
61 raise PermissionDenied
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/photos/api/viewsets.py b/website/photos/api/viewsets.py
--- a/website/photos/api/viewsets.py
+++ b/website/photos/api/viewsets.py
@@ -14,7 +14,7 @@
permission_classes = (permissions.IsAuthenticated,)
queryset = Album.objects.all()
filter_backends = (filters.SearchFilter,)
- search_fields = ("title_en", "title_nl", "date", "slug")
+ search_fields = ("title_en", "date", "slug")
def get_queryset(self):
"""Return albums that are annotated to be accessible by the request user."""
| {"golden_diff": "diff --git a/website/photos/api/viewsets.py b/website/photos/api/viewsets.py\n--- a/website/photos/api/viewsets.py\n+++ b/website/photos/api/viewsets.py\n@@ -14,7 +14,7 @@\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Album.objects.all()\n filter_backends = (filters.SearchFilter,)\n- search_fields = (\"title_en\", \"title_nl\", \"date\", \"slug\")\n+ search_fields = (\"title_en\", \"date\", \"slug\")\n \n def get_queryset(self):\n \"\"\"Return albums that are annotated to be accessible by the request user.\"\"\"\n", "issue": "Searching in photos api is broken\n### Describe the bug\r\nSearching in photos api is broken\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to https://thalia.nu/api/v1/photos/albums/?search=Test\r\n\r\n### Expected behaviour\r\nThis should not crash.\r\n\r\n\n", "before_files": [{"content": "from rest_framework import permissions, filters\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin\nfrom rest_framework.viewsets import ModelViewSet, GenericViewSet\n\nfrom photos import services\nfrom photos.api import serializers\nfrom photos.models import Album, Photo\n\n\nclass AlbumsViewSet(ModelViewSet):\n \"\"\"ViewSet class for an Album object.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Album.objects.all()\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title_en\", \"title_nl\", \"date\", \"slug\")\n\n def get_queryset(self):\n \"\"\"Return albums that are annotated to be accessible by the request user.\"\"\"\n return services.get_annotated_accessible_albums(\n self.request, Album.objects.all()\n )\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_album\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_album\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n\n def get_serializer_class(self):\n \"\"\"Return AlbumListSerializer if the current action is list else return AlbumSerializer.\"\"\"\n if self.action == \"list\":\n return serializers.AlbumListSerializer\n return serializers.AlbumSerializer\n\n\nclass PhotosViewSet(GenericViewSet, CreateModelMixin, UpdateModelMixin):\n \"\"\"ViewSet class for a Photo object.\"\"\"\n\n queryset = Photo.objects.all()\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.PhotoCreateSerializer\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_photo\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Update photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_photo\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/photos/api/viewsets.py"}], "after_files": [{"content": "from rest_framework import permissions, filters\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin\nfrom rest_framework.viewsets import ModelViewSet, GenericViewSet\n\nfrom photos 
import services\nfrom photos.api import serializers\nfrom photos.models import Album, Photo\n\n\nclass AlbumsViewSet(ModelViewSet):\n \"\"\"ViewSet class for an Album object.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Album.objects.all()\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title_en\", \"date\", \"slug\")\n\n def get_queryset(self):\n \"\"\"Return albums that are annotated to be accessible by the request user.\"\"\"\n return services.get_annotated_accessible_albums(\n self.request, Album.objects.all()\n )\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_album\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_album\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n\n def get_serializer_class(self):\n \"\"\"Return AlbumListSerializer if the current action is list else return AlbumSerializer.\"\"\"\n if self.action == \"list\":\n return serializers.AlbumListSerializer\n return serializers.AlbumSerializer\n\n\nclass PhotosViewSet(GenericViewSet, CreateModelMixin, UpdateModelMixin):\n \"\"\"ViewSet class for a Photo object.\"\"\"\n\n queryset = Photo.objects.all()\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.PhotoCreateSerializer\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_photo\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Update photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_photo\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/photos/api/viewsets.py"}]} | 928 | 134 |
gh_patches_debug_64869 | rasdani/github-patches | git_diff | kedro-org__kedro-2345 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release Kedro `0.18.5`
### Description
Release Kedro `0.18.5`, which will contain lots of new features for configuration. The release depends on the following tickets being finished:
- [x] BLOCKER: https://github.com/kedro-org/kedro/issues/2255
- [x] #1909 (Docs)
- [x] #2148
- [x] #2170
- [x] #2225
Initially we wanted to include the below issues as well, but the implementation turned out to be trickier than expected, so we'll take more time to investigate a solution and won't let it block the release.
- [x] #2146
- [x] #2212
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/__init__.py`
Content:
```
1 """Kedro is a framework that makes it easy to build robust and scalable
2 data pipelines by providing uniform project templates, data abstraction,
3 configuration and pipeline assembly.
4 """
5
6 __version__ = "0.18.4"
7
8
9 import logging
10
11 logging.getLogger(__name__).addHandler(logging.NullHandler())
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kedro/__init__.py b/kedro/__init__.py
--- a/kedro/__init__.py
+++ b/kedro/__init__.py
@@ -3,7 +3,7 @@
configuration and pipeline assembly.
"""
-__version__ = "0.18.4"
+__version__ = "0.18.5"
import logging
| {"golden_diff": "diff --git a/kedro/__init__.py b/kedro/__init__.py\n--- a/kedro/__init__.py\n+++ b/kedro/__init__.py\n@@ -3,7 +3,7 @@\n configuration and pipeline assembly.\n \"\"\"\n \n-__version__ = \"0.18.4\"\n+__version__ = \"0.18.5\"\n \n \n import logging\n", "issue": "Release Kedro `0.18.5`\n### Description\r\n\r\nRelease Kedro `0.18.5` which will contain lots of new features for configuration. The release depends on the following tickets to be finished:\r\n\r\n- [x] BLOCKER: https://github.com/kedro-org/kedro/issues/2255\r\n- [x] #1909 (Docs)\r\n- [x] #2148 \r\n- [x] #2170\r\n- [x] #2225 \r\n\r\nInitially we wanted to include the below issues as well, but the implementation turned out to be trickier than expected, so we'll take more time to investigate a solution and won't let it block the release.\r\n- [x] #2146 \r\n- [x] #2212 \r\n\n", "before_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.4\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}], "after_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.5\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}]} | 518 | 87 |
gh_patches_debug_24683 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Case mismatch for fragment identifiers between menus and page heading anchor
### Describe the issue
The menu item "Groups / Ops and Management" sends you off to https://datatracker.ietf.org/wg/#ops but "#ops" is not recognised on the page because the heading anchor is "#OPS" and so that menu item takes you to the top of the page not the Ops heading.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
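One possible direction, shown only as a sketch (whether the menu or the page anchors should change case is an assumption here): make the fragment emitted by the menu template tag match the case of the heading anchors on the /wg/ page.

```python
# Sketch (direction assumed): emit the menu fragment in the same case as the
# anchors generated on the /wg/ page, e.g. by upper-casing the area acronym.
if p.type_id == "area":
    p.menu_url = "/wg/#" + p.acronym.upper()
```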
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/doc/templatetags/wg_menu.py`
Content:
```
1 # Copyright The IETF Trust 2009-2022, All Rights Reserved
2
3 # Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
4 # All rights reserved. Contact: Pasi Eronen <[email protected]>
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions
8 # are met:
9 #
10 # * Redistributions of source code must retain the above copyright
11 # notice, this list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above
14 # copyright notice, this list of conditions and the following
15 # disclaimer in the documentation and/or other materials provided
16 # with the distribution.
17 #
18 # * Neither the name of the Nokia Corporation and/or its
19 # subsidiary(-ies) nor the names of its contributors may be used
20 # to endorse or promote products derived from this software
21 # without specific prior written permission.
22 #
23 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
35 from django import template
36 from django.template.loader import render_to_string
37 from django.db import models
38
39 from ietf.group.models import Group
40
41 register = template.Library()
42
43 parent_short_names = {
44 "ops": "Ops & Management",
45 "rai": "RAI",
46 "iab": "IAB",
47 "art": "Apps & Realtime",
48 "ietfadminllc": "IETF LLC",
49 }
50
51 parents = Group.objects.filter(
52 models.Q(type="area")
53 | models.Q(type="irtf", acronym="irtf")
54 | models.Q(acronym="iab")
55 | models.Q(acronym="ietfadminllc")
56 | models.Q(acronym="rfceditor"),
57 state="active",
58 ).order_by("type__order", "type_id", "acronym")
59
60
61 @register.simple_tag
62 def wg_menu(flavor):
63 global parents
64
65 for p in parents:
66 p.short_name = parent_short_names.get(p.acronym) or p.name
67 if p.short_name.endswith(" Area"):
68 p.short_name = p.short_name[: -len(" Area")]
69
70 if p.type_id == "area":
71 p.menu_url = "/wg/#" + p.acronym
72 elif p.acronym == "irtf":
73 p.menu_url = "/rg/"
74 elif p.acronym == "iab":
75 p.menu_url = "/program/"
76 elif p.acronym == "ietfadminllc":
77 p.menu_url = "/adm/"
78 elif p.acronym == "rfceditor":
79 p.menu_url = "/rfcedtyp/"
80
81 return render_to_string(
82 "base/menu_wg.html", {"parents": parents, "flavor": flavor}
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/doc/templatetags/wg_menu.py b/ietf/doc/templatetags/wg_menu.py
--- a/ietf/doc/templatetags/wg_menu.py
+++ b/ietf/doc/templatetags/wg_menu.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2009-2022, All Rights Reserved
+# Copyright The IETF Trust 2009-2023, All Rights Reserved
# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <[email protected]>
@@ -32,6 +32,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import debug # pyflakes: ignore
+
from django import template
from django.template.loader import render_to_string
from django.db import models
@@ -68,7 +70,7 @@
p.short_name = p.short_name[: -len(" Area")]
if p.type_id == "area":
- p.menu_url = "/wg/#" + p.acronym
+ p.menu_url = "/wg/#" + p.acronym.upper()
elif p.acronym == "irtf":
p.menu_url = "/rg/"
elif p.acronym == "iab":
| {"golden_diff": "diff --git a/ietf/doc/templatetags/wg_menu.py b/ietf/doc/templatetags/wg_menu.py\n--- a/ietf/doc/templatetags/wg_menu.py\n+++ b/ietf/doc/templatetags/wg_menu.py\n@@ -1,4 +1,4 @@\n-# Copyright The IETF Trust 2009-2022, All Rights Reserved\n+# Copyright The IETF Trust 2009-2023, All Rights Reserved\n \n # Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n # All rights reserved. Contact: Pasi Eronen <[email protected]>\n@@ -32,6 +32,8 @@\n # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n \n+import debug # pyflakes: ignore\n+\n from django import template\n from django.template.loader import render_to_string\n from django.db import models\n@@ -68,7 +70,7 @@\n p.short_name = p.short_name[: -len(\" Area\")]\n \n if p.type_id == \"area\":\n- p.menu_url = \"/wg/#\" + p.acronym\n+ p.menu_url = \"/wg/#\" + p.acronym.upper()\n elif p.acronym == \"irtf\":\n p.menu_url = \"/rg/\"\n elif p.acronym == \"iab\":\n", "issue": "Case mismatch for fragment identifiers between menus and page heading anchor\n### Describe the issue\n\nThe menu item \"Groups / Ops and Management\" sends you off to https://datatracker.ietf.org/wg/#ops but \"#ops\" is not recognised on the page because the heading anchor is \"#OPS\" and so that menu item takes you to the top of the page not the Ops heading.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2009-2022, All Rights Reserved\n\n# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n# All rights reserved. Contact: Pasi Eronen <[email protected]>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the Nokia Corporation and/or its\n# subsidiary(-ies) nor the names of its contributors may be used\n# to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom django import template\nfrom django.template.loader import render_to_string\nfrom django.db import models\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\nparent_short_names = {\n \"ops\": \"Ops & Management\",\n \"rai\": \"RAI\",\n \"iab\": \"IAB\",\n \"art\": \"Apps & Realtime\",\n \"ietfadminllc\": \"IETF LLC\",\n}\n\nparents = Group.objects.filter(\n models.Q(type=\"area\")\n | models.Q(type=\"irtf\", acronym=\"irtf\")\n | models.Q(acronym=\"iab\")\n | models.Q(acronym=\"ietfadminllc\")\n | models.Q(acronym=\"rfceditor\"),\n state=\"active\",\n).order_by(\"type__order\", \"type_id\", \"acronym\")\n\n\[email protected]_tag\ndef wg_menu(flavor):\n global parents\n\n for p in parents:\n p.short_name = parent_short_names.get(p.acronym) or p.name\n if p.short_name.endswith(\" Area\"):\n p.short_name = p.short_name[: -len(\" Area\")]\n\n if p.type_id == \"area\":\n p.menu_url = \"/wg/#\" + p.acronym\n elif p.acronym == \"irtf\":\n p.menu_url = \"/rg/\"\n elif p.acronym == \"iab\":\n p.menu_url = \"/program/\"\n elif p.acronym == \"ietfadminllc\":\n p.menu_url = \"/adm/\"\n elif p.acronym == \"rfceditor\":\n p.menu_url = \"/rfcedtyp/\"\n\n return render_to_string(\n \"base/menu_wg.html\", {\"parents\": parents, \"flavor\": flavor}\n )\n", "path": "ietf/doc/templatetags/wg_menu.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2009-2023, All Rights Reserved\n\n# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n# All rights reserved. Contact: Pasi Eronen <[email protected]>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the Nokia Corporation and/or its\n# subsidiary(-ies) nor the names of its contributors may be used\n# to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport debug # pyflakes: ignore\n\nfrom django import template\nfrom django.template.loader import render_to_string\nfrom django.db import models\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\nparent_short_names = {\n \"ops\": \"Ops & Management\",\n \"rai\": \"RAI\",\n \"iab\": \"IAB\",\n \"art\": \"Apps & Realtime\",\n \"ietfadminllc\": \"IETF LLC\",\n}\n\nparents = Group.objects.filter(\n models.Q(type=\"area\")\n | models.Q(type=\"irtf\", acronym=\"irtf\")\n | models.Q(acronym=\"iab\")\n | models.Q(acronym=\"ietfadminllc\")\n | models.Q(acronym=\"rfceditor\"),\n state=\"active\",\n).order_by(\"type__order\", \"type_id\", \"acronym\")\n\n\[email protected]_tag\ndef wg_menu(flavor):\n global parents\n\n for p in parents:\n p.short_name = parent_short_names.get(p.acronym) or p.name\n if p.short_name.endswith(\" Area\"):\n p.short_name = p.short_name[: -len(\" Area\")]\n\n if p.type_id == \"area\":\n p.menu_url = \"/wg/#\" + p.acronym.upper()\n elif p.acronym == \"irtf\":\n p.menu_url = \"/rg/\"\n elif p.acronym == \"iab\":\n p.menu_url = \"/program/\"\n elif p.acronym == \"ietfadminllc\":\n p.menu_url = \"/adm/\"\n elif p.acronym == \"rfceditor\":\n p.menu_url = \"/rfcedtyp/\"\n\n return render_to_string(\n \"base/menu_wg.html\", {\"parents\": parents, \"flavor\": flavor}\n )\n", "path": "ietf/doc/templatetags/wg_menu.py"}]} | 1,293 | 331 |
gh_patches_debug_33213 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-738 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consolidate serialization helpers to be static and shared
In the teams_helper there are 2 serialization helper methods. Currently they both create a big dict of all the Model objects that exist in Teams and BF. We should make the optimization to make the big dict once, and update the 2 helpers to use the new dict.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from inspect import getmembers
5 from typing import Type
6 from enum import Enum
7
8 from msrest.serialization import Model, Deserializer, Serializer
9
10 import botbuilder.schema as schema
11 import botbuilder.schema.teams as teams_schema
12
13 # Optimization: The dependencies dictionary could be cached here,
14 # and shared between the two methods.
15
16
17 def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:
18 dependencies = [
19 schema_cls
20 for key, schema_cls in getmembers(schema)
21 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
22 ]
23 dependencies += [
24 schema_cls
25 for key, schema_cls in getmembers(teams_schema)
26 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
27 ]
28 dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
29 deserializer = Deserializer(dependencies_dict)
30 return deserializer(msrest_cls.__name__, dict_to_deserialize)
31
32
33 def serializer_helper(object_to_serialize: Model) -> dict:
34 if object_to_serialize is None:
35 return None
36
37 dependencies = [
38 schema_cls
39 for key, schema_cls in getmembers(schema)
40 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
41 ]
42 dependencies += [
43 schema_cls
44 for key, schema_cls in getmembers(teams_schema)
45 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
46 ]
47 dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
48 serializer = Serializer(dependencies_dict)
49 # pylint: disable=protected-access
50 return serializer._serialize(object_to_serialize)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
--- a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
+++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
@@ -10,23 +10,21 @@
import botbuilder.schema as schema
import botbuilder.schema.teams as teams_schema
-# Optimization: The dependencies dictionary could be cached here,
-# and shared between the two methods.
+DEPENDICIES = [
+ schema_cls
+ for key, schema_cls in getmembers(schema)
+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
+]
+DEPENDICIES += [
+ schema_cls
+ for key, schema_cls in getmembers(teams_schema)
+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
+]
+DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}
def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:
- dependencies = [
- schema_cls
- for key, schema_cls in getmembers(schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies += [
- schema_cls
- for key, schema_cls in getmembers(teams_schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
- deserializer = Deserializer(dependencies_dict)
+ deserializer = Deserializer(DEPENDICIES_DICT)
return deserializer(msrest_cls.__name__, dict_to_deserialize)
@@ -34,17 +32,6 @@
if object_to_serialize is None:
return None
- dependencies = [
- schema_cls
- for key, schema_cls in getmembers(schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies += [
- schema_cls
- for key, schema_cls in getmembers(teams_schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
- serializer = Serializer(dependencies_dict)
+ serializer = Serializer(DEPENDICIES_DICT)
# pylint: disable=protected-access
return serializer._serialize(object_to_serialize)
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n--- a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n+++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n@@ -10,23 +10,21 @@\n import botbuilder.schema as schema\n import botbuilder.schema.teams as teams_schema\n \n-# Optimization: The dependencies dictionary could be cached here,\n-# and shared between the two methods.\n+DEPENDICIES = [\n+ schema_cls\n+ for key, schema_cls in getmembers(schema)\n+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n+]\n+DEPENDICIES += [\n+ schema_cls\n+ for key, schema_cls in getmembers(teams_schema)\n+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n+]\n+DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}\n \n \n def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n- dependencies = [\n- schema_cls\n- for key, schema_cls in getmembers(schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies += [\n- schema_cls\n- for key, schema_cls in getmembers(teams_schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n- deserializer = Deserializer(dependencies_dict)\n+ deserializer = Deserializer(DEPENDICIES_DICT)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n \n \n@@ -34,17 +32,6 @@\n if object_to_serialize is None:\n return None\n \n- dependencies = [\n- schema_cls\n- for key, schema_cls in getmembers(schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies += [\n- schema_cls\n- for key, schema_cls in getmembers(teams_schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n- serializer = Serializer(dependencies_dict)\n+ serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "issue": "Consolidate serialization helpers to be static and shared\nIn the teams_helper there are 2 serialization helper methods. Currently they both create a big dict of all the Model objects that exist in Teams and BF. We should make the optimization to make the big dict once, and update the 2 helpers to use the new dict.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom inspect import getmembers\nfrom typing import Type\nfrom enum import Enum\n\nfrom msrest.serialization import Model, Deserializer, Serializer\n\nimport botbuilder.schema as schema\nimport botbuilder.schema.teams as teams_schema\n\n# Optimization: The dependencies dictionary could be cached here,\n# and shared between the two methods.\n\n\ndef deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n dependencies = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n deserializer = Deserializer(dependencies_dict)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n\n\ndef serializer_helper(object_to_serialize: Model) -> dict:\n if object_to_serialize is None:\n return None\n\n dependencies = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n serializer = Serializer(dependencies_dict)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "path": "libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom inspect import getmembers\nfrom typing import Type\nfrom enum import Enum\n\nfrom msrest.serialization import Model, Deserializer, Serializer\n\nimport botbuilder.schema as schema\nimport botbuilder.schema.teams as teams_schema\n\nDEPENDICIES = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n]\nDEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}\n\n\ndef deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n deserializer = Deserializer(DEPENDICIES_DICT)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n\n\ndef serializer_helper(object_to_serialize: Model) -> dict:\n if object_to_serialize is None:\n return None\n\n serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "path": "libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py"}]} | 811 | 582 |
gh_patches_debug_18661 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-4713 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to disable user config with Jupyter Server
The hub administrator is supposed to be able to prevent per-user notebook configuration scripts from running by setting
```
c.Spawner.disable_user_config = True
```
In the `jupyterhub_config.py` config. This sets the environment variable `JUPYTERHUB_DISABLE_USER_CONFIG=1` for the spawned notebook server. However this seems to be being ignored?
<details>
<summary>Using this Dockerfile</summary>
```
FROM jupyterhub/jupyterhub:2
RUN python3 -m pip install --no-cache jupyterlab
RUN \
adduser -q --gecos "" --disabled-password user1 && \
echo user1:user1 | chpasswd
ADD jupyterhub_config.py .
RUN mkdir -p /home/user1/.jupyter
ADD jupyter_notebook_config.py /home/user1/.jupyter/.
RUN chown -R user1:user1 /home/user1/.jupyter
CMD ["jupyterhub"]
```
</details>
<details><summary>
with this `jupyterhub_config.py` and example notebook config for `user1`:
</summary>
```
c.Spawner.disable_user_config = True
```
```
import os
print("HELLO FROM THE NOTEBOOK CONFIG")
print(os.getenv("JUPYTERHUB_DISABLE_USER_CONFIG"))
c.ServerApp.shutdown_no_activity_timeout = 600
c.MappingKernelManager.cull_idle_timeout = 600
c.TerminalManager.cull_inactive_timeout = 600
```
</details>
I see the "HELLO" message and the value 1 printed when the notebook starts up, and the timeout message indicating that my config setting is in effect:
```
[I 2022-02-22 22:35:23.167 SingleUserLabApp serverapp:2161] Will shut down after 600 seconds with no kernels or terminals.
```
Am I misunderstanding exactly what config files are excluded? I see there's a test for this, but I wonder whether it is actually verifying that the config is being ignored?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser/_disable_user_config.py`
Content:
```
1 """
2 Disable user-controlled config for single-user servers
3
4 Applies patches to prevent loading configuration from the user's home directory.
5
6 Only used when launching a single-user server with disable_user_config=True.
7
8 This is where we still have some monkeypatches,
9 because we want to prevent loading configuration from user directories,
10 and `jupyter_core` functions don't allow that.
11
12 Due to extensions, we aren't able to apply patches in one place on the ServerApp,
13 we have to insert the patches at the lowest-level
14 on function objects themselves,
15 to ensure we modify calls to e.g. `jupyter_core.jupyter_path`
16 that may have been imported already!
17
18 We should perhaps ask for the necessary hooks to modify this in jupyter_core,
19 rather than keeing these monkey patches around.
20 """
21
22 import os
23
24 from jupyter_core import paths
25
26
27 def _exclude_home(path_list):
28 """Filter out any entries in a path list that are in my home directory.
29
30 Used to disable per-user configuration.
31 """
32 home = os.path.expanduser('~/')
33 for p in path_list:
34 if not p.startswith(home):
35 yield p
36
37
38 # record patches
39 _original_jupyter_paths = None
40 _jupyter_paths_without_home = None
41
42
43 def _disable_user_config(serverapp):
44 """
45 disable user-controlled sources of configuration
46 by excluding directories in their home from paths.
47
48 This _does not_ disable frontend config,
49 such as UI settings persistence.
50
51 1. Python config file paths
52 2. Search paths for extensions, etc.
53 3. import path
54 """
55 original_jupyter_path = paths.jupyter_path()
56 jupyter_path_without_home = list(_exclude_home(original_jupyter_path))
57
58 # config_file_paths is a property without a setter
59 # can't override on the instance
60 default_config_file_paths = serverapp.config_file_paths
61 config_file_paths = list(_exclude_home(default_config_file_paths))
62 serverapp.__class__.config_file_paths = property(
63 lambda self: config_file_paths,
64 )
65 # verify patch applied
66 assert serverapp.config_file_paths == config_file_paths
67
68 # patch jupyter_path to exclude $HOME
69 global _original_jupyter_paths, _jupyter_paths_without_home, _original_jupyter_config_dir
70 _original_jupyter_paths = paths.jupyter_path()
71 _jupyter_paths_without_home = list(_exclude_home(_original_jupyter_paths))
72
73 def get_jupyter_path_without_home(*subdirs):
74 # reimport because of our `__code__` patch
75 # affects what is resolved as the parent namespace
76 from jupyterhub.singleuser._disable_user_config import (
77 _jupyter_paths_without_home,
78 )
79
80 paths = list(_jupyter_paths_without_home)
81 if subdirs:
82 paths = [os.path.join(p, *subdirs) for p in paths]
83 return paths
84
85 # patch `jupyter_path.__code__` to ensure all callers are patched,
86 # even if they've already imported
87 # this affects e.g. nbclassic.nbextension_paths
88 paths.jupyter_path.__code__ = get_jupyter_path_without_home.__code__
89
90 # same thing for config_dir,
91 # which applies to some things like ExtensionApp config paths
92 # and nbclassic.static_custom_path
93
94 # allows explicit override if $JUPYTER_CONFIG_DIR is set
95 # or config dir is otherwise not in $HOME
96
97 if not os.getenv("JUPYTER_CONFIG_DIR") and not list(
98 _exclude_home([paths.jupyter_config_dir()])
99 ):
100 # patch specifically Application.config_dir
101 # this affects ServerApp and ExtensionApp,
102 # but does not affect JupyterLab's user-settings, etc.
103 # patching the traitlet directly affects all instances,
104 # already-created or future
105 from jupyter_core.application import JupyterApp
106
107 def get_env_config_dir(obj, cls=None):
108 return paths.ENV_CONFIG_PATH[0]
109
110 JupyterApp.config_dir.get = get_env_config_dir
111
112 # record disabled state on app object
113 serverapp.disable_user_config = True
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/singleuser/_disable_user_config.py b/jupyterhub/singleuser/_disable_user_config.py
--- a/jupyterhub/singleuser/_disable_user_config.py
+++ b/jupyterhub/singleuser/_disable_user_config.py
@@ -20,19 +20,35 @@
"""
import os
+from pathlib import Path
from jupyter_core import paths
+def _is_relative_to(path, prefix):
+ """
+ Backport Path.is_relative_to for Python < 3.9
+
+ added in Python 3.9
+ """
+ if hasattr(path, "is_relative_to"):
+ # Python >= 3.9
+ return path.is_relative_to(prefix)
+ else:
+ return path == prefix or prefix in path.parents
+
+
def _exclude_home(path_list):
"""Filter out any entries in a path list that are in my home directory.
Used to disable per-user configuration.
"""
- home = os.path.expanduser('~/')
- for p in path_list:
- if not p.startswith(home):
- yield p
+ # resolve paths before comparison
+ # so we do the right thing when $HOME is a symlink
+ home = Path.home().resolve()
+ for path in path_list:
+ if not _is_relative_to(Path(path).resolve(), home):
+ yield path
# record patches
| {"golden_diff": "diff --git a/jupyterhub/singleuser/_disable_user_config.py b/jupyterhub/singleuser/_disable_user_config.py\n--- a/jupyterhub/singleuser/_disable_user_config.py\n+++ b/jupyterhub/singleuser/_disable_user_config.py\n@@ -20,19 +20,35 @@\n \"\"\"\n \n import os\n+from pathlib import Path\n \n from jupyter_core import paths\n \n \n+def _is_relative_to(path, prefix):\n+ \"\"\"\n+ Backport Path.is_relative_to for Python < 3.9\n+\n+ added in Python 3.9\n+ \"\"\"\n+ if hasattr(path, \"is_relative_to\"):\n+ # Python >= 3.9\n+ return path.is_relative_to(prefix)\n+ else:\n+ return path == prefix or prefix in path.parents\n+\n+\n def _exclude_home(path_list):\n \"\"\"Filter out any entries in a path list that are in my home directory.\n \n Used to disable per-user configuration.\n \"\"\"\n- home = os.path.expanduser('~/')\n- for p in path_list:\n- if not p.startswith(home):\n- yield p\n+ # resolve paths before comparison\n+ # so we do the right thing when $HOME is a symlink\n+ home = Path.home().resolve()\n+ for path in path_list:\n+ if not _is_relative_to(Path(path).resolve(), home):\n+ yield path\n \n \n # record patches\n", "issue": "Unable to disable user config with Jupyter Server\nThe hub administrator is supposed to be able to prevent per-user notebook configuration scripts from running by setting\r\n\r\n```\r\nc.Spawner.disable_user_config = True\r\n```\r\n\r\nIn the `jupyterhub_config.py` config. This sets the environment variable `JUPYTERHUB_DISABLE_USER_CONFIG=1` for the spawned notebook server. However this seems to be being ignored?\r\n\r\n<details>\r\n<summary>Using this Dockerfile</summary>\r\n\r\n```\r\nFROM jupyterhub/jupyterhub:2\r\n\r\nRUN python3 -m pip install --no-cache jupyterlab\r\n\r\nRUN \\\r\n adduser -q --gecos \"\" --disabled-password user1 && \\\r\n echo user1:user1 | chpasswd\r\n\r\nADD jupyterhub_config.py .\r\n\r\nRUN mkdir -p /home/user1/.jupyter\r\nADD jupyter_notebook_config.py /home/user1/.jupyter/.\r\nRUN chown -R user1:user1 /home/user1/.jupyter\r\n\r\nCMD [\"jupyterhub\"]\r\n```\r\n\r\n</details>\r\n\r\n<details><summary>\r\nwith this `jupyterhub_config.py` and example notebook config for `user1`:\r\n</summary>\r\n\r\n```\r\nc.Spawner.disable_user_config = True\r\n```\r\n\r\n```\r\nimport os\r\n\r\nprint(\"HELLO FROM THE NOTEBOOK CONFIG\")\r\nprint(os.getenv(\"JUPYTERHUB_DISABLE_USER_CONFIG\"))\r\n\r\nc.ServerApp.shutdown_no_activity_timeout = 600\r\nc.MappingKernelManager.cull_idle_timeout = 600\r\nc.TerminalManager.cull_inactive_timeout = 600\r\n```\r\n\r\n</details>\r\n\r\nI see the \"HELLO\" message and the value 1 printed when the notebook starts up, and the timeout message indicating that my config setting is in effect:\r\n\r\n```\r\n[I 2022-02-22 22:35:23.167 SingleUserLabApp serverapp:2161] Will shut down after 600 seconds with no kernels or terminals.\r\n```\r\n\r\nAm I misunderstanding exactly what config files are excluded? 
I see there's a test for this but I wonder is it actually verifying that the config is being ignored?\n", "before_files": [{"content": "\"\"\"\nDisable user-controlled config for single-user servers\n\nApplies patches to prevent loading configuration from the user's home directory.\n\nOnly used when launching a single-user server with disable_user_config=True.\n\nThis is where we still have some monkeypatches,\nbecause we want to prevent loading configuration from user directories,\nand `jupyter_core` functions don't allow that.\n\nDue to extensions, we aren't able to apply patches in one place on the ServerApp,\nwe have to insert the patches at the lowest-level\non function objects themselves,\nto ensure we modify calls to e.g. `jupyter_core.jupyter_path`\nthat may have been imported already!\n\nWe should perhaps ask for the necessary hooks to modify this in jupyter_core,\nrather than keeing these monkey patches around.\n\"\"\"\n\nimport os\n\nfrom jupyter_core import paths\n\n\ndef _exclude_home(path_list):\n \"\"\"Filter out any entries in a path list that are in my home directory.\n\n Used to disable per-user configuration.\n \"\"\"\n home = os.path.expanduser('~/')\n for p in path_list:\n if not p.startswith(home):\n yield p\n\n\n# record patches\n_original_jupyter_paths = None\n_jupyter_paths_without_home = None\n\n\ndef _disable_user_config(serverapp):\n \"\"\"\n disable user-controlled sources of configuration\n by excluding directories in their home from paths.\n\n This _does not_ disable frontend config,\n such as UI settings persistence.\n\n 1. Python config file paths\n 2. Search paths for extensions, etc.\n 3. import path\n \"\"\"\n original_jupyter_path = paths.jupyter_path()\n jupyter_path_without_home = list(_exclude_home(original_jupyter_path))\n\n # config_file_paths is a property without a setter\n # can't override on the instance\n default_config_file_paths = serverapp.config_file_paths\n config_file_paths = list(_exclude_home(default_config_file_paths))\n serverapp.__class__.config_file_paths = property(\n lambda self: config_file_paths,\n )\n # verify patch applied\n assert serverapp.config_file_paths == config_file_paths\n\n # patch jupyter_path to exclude $HOME\n global _original_jupyter_paths, _jupyter_paths_without_home, _original_jupyter_config_dir\n _original_jupyter_paths = paths.jupyter_path()\n _jupyter_paths_without_home = list(_exclude_home(_original_jupyter_paths))\n\n def get_jupyter_path_without_home(*subdirs):\n # reimport because of our `__code__` patch\n # affects what is resolved as the parent namespace\n from jupyterhub.singleuser._disable_user_config import (\n _jupyter_paths_without_home,\n )\n\n paths = list(_jupyter_paths_without_home)\n if subdirs:\n paths = [os.path.join(p, *subdirs) for p in paths]\n return paths\n\n # patch `jupyter_path.__code__` to ensure all callers are patched,\n # even if they've already imported\n # this affects e.g. 
nbclassic.nbextension_paths\n paths.jupyter_path.__code__ = get_jupyter_path_without_home.__code__\n\n # same thing for config_dir,\n # which applies to some things like ExtensionApp config paths\n # and nbclassic.static_custom_path\n\n # allows explicit override if $JUPYTER_CONFIG_DIR is set\n # or config dir is otherwise not in $HOME\n\n if not os.getenv(\"JUPYTER_CONFIG_DIR\") and not list(\n _exclude_home([paths.jupyter_config_dir()])\n ):\n # patch specifically Application.config_dir\n # this affects ServerApp and ExtensionApp,\n # but does not affect JupyterLab's user-settings, etc.\n # patching the traitlet directly affects all instances,\n # already-created or future\n from jupyter_core.application import JupyterApp\n\n def get_env_config_dir(obj, cls=None):\n return paths.ENV_CONFIG_PATH[0]\n\n JupyterApp.config_dir.get = get_env_config_dir\n\n # record disabled state on app object\n serverapp.disable_user_config = True\n", "path": "jupyterhub/singleuser/_disable_user_config.py"}], "after_files": [{"content": "\"\"\"\nDisable user-controlled config for single-user servers\n\nApplies patches to prevent loading configuration from the user's home directory.\n\nOnly used when launching a single-user server with disable_user_config=True.\n\nThis is where we still have some monkeypatches,\nbecause we want to prevent loading configuration from user directories,\nand `jupyter_core` functions don't allow that.\n\nDue to extensions, we aren't able to apply patches in one place on the ServerApp,\nwe have to insert the patches at the lowest-level\non function objects themselves,\nto ensure we modify calls to e.g. `jupyter_core.jupyter_path`\nthat may have been imported already!\n\nWe should perhaps ask for the necessary hooks to modify this in jupyter_core,\nrather than keeing these monkey patches around.\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom jupyter_core import paths\n\n\ndef _is_relative_to(path, prefix):\n \"\"\"\n Backport Path.is_relative_to for Python < 3.9\n\n added in Python 3.9\n \"\"\"\n if hasattr(path, \"is_relative_to\"):\n # Python >= 3.9\n return path.is_relative_to(prefix)\n else:\n return path == prefix or prefix in path.parents\n\n\ndef _exclude_home(path_list):\n \"\"\"Filter out any entries in a path list that are in my home directory.\n\n Used to disable per-user configuration.\n \"\"\"\n # resolve paths before comparison\n # so we do the right thing when $HOME is a symlink\n home = Path.home().resolve()\n for path in path_list:\n if not _is_relative_to(Path(path).resolve(), home):\n yield path\n\n\n# record patches\n_original_jupyter_paths = None\n_jupyter_paths_without_home = None\n\n\ndef _disable_user_config(serverapp):\n \"\"\"\n disable user-controlled sources of configuration\n by excluding directories in their home from paths.\n\n This _does not_ disable frontend config,\n such as UI settings persistence.\n\n 1. Python config file paths\n 2. Search paths for extensions, etc.\n 3. 
import path\n \"\"\"\n original_jupyter_path = paths.jupyter_path()\n jupyter_path_without_home = list(_exclude_home(original_jupyter_path))\n\n # config_file_paths is a property without a setter\n # can't override on the instance\n default_config_file_paths = serverapp.config_file_paths\n config_file_paths = list(_exclude_home(default_config_file_paths))\n serverapp.__class__.config_file_paths = property(\n lambda self: config_file_paths,\n )\n # verify patch applied\n assert serverapp.config_file_paths == config_file_paths\n\n # patch jupyter_path to exclude $HOME\n global _original_jupyter_paths, _jupyter_paths_without_home, _original_jupyter_config_dir\n _original_jupyter_paths = paths.jupyter_path()\n _jupyter_paths_without_home = list(_exclude_home(_original_jupyter_paths))\n\n def get_jupyter_path_without_home(*subdirs):\n # reimport because of our `__code__` patch\n # affects what is resolved as the parent namespace\n from jupyterhub.singleuser._disable_user_config import (\n _jupyter_paths_without_home,\n )\n\n paths = list(_jupyter_paths_without_home)\n if subdirs:\n paths = [os.path.join(p, *subdirs) for p in paths]\n return paths\n\n # patch `jupyter_path.__code__` to ensure all callers are patched,\n # even if they've already imported\n # this affects e.g. nbclassic.nbextension_paths\n paths.jupyter_path.__code__ = get_jupyter_path_without_home.__code__\n\n # same thing for config_dir,\n # which applies to some things like ExtensionApp config paths\n # and nbclassic.static_custom_path\n\n # allows explicit override if $JUPYTER_CONFIG_DIR is set\n # or config dir is otherwise not in $HOME\n\n if not os.getenv(\"JUPYTER_CONFIG_DIR\") and not list(\n _exclude_home([paths.jupyter_config_dir()])\n ):\n # patch specifically Application.config_dir\n # this affects ServerApp and ExtensionApp,\n # but does not affect JupyterLab's user-settings, etc.\n # patching the traitlet directly affects all instances,\n # already-created or future\n from jupyter_core.application import JupyterApp\n\n def get_env_config_dir(obj, cls=None):\n return paths.ENV_CONFIG_PATH[0]\n\n JupyterApp.config_dir.get = get_env_config_dir\n\n # record disabled state on app object\n serverapp.disable_user_config = True\n", "path": "jupyterhub/singleuser/_disable_user_config.py"}]} | 1,843 | 309 |
gh_patches_debug_43131 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-5940 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MN parser down
## Description
Mongolia parser is down, but the bot hasn't opened an issue. I am opening this one to alert the maintainers.
It seems this time the parser is down because of a change in the json that was being parsed:
> raise ParserException(
parsers.lib.exceptions.ParserException: MN.py Parser: Fetched keys from source dict_keys(['date', 'syssum', 'tpp', 'sumnar', 'sums', 'energyimport', 't']) do not match expected keys dict_values(['date', 'syssum', 'sumnar', 'sums', 'energyimport', 't']).
A new key called tpp (thermal power plants?) has been added. The value of this new key doesn't match the previously calculated unknown production (so tpp plus other keys don't add up to consumption). What should be done to fix this? It seems an unknown source is being added.
By the way, a bit off-topic, but I have noticed that the Mongolia parser outputs global exchange data. We currently get the exchange with Russia from its parser, so we could calculate the exchange with China by subtracting the other exchange. Is this possible?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/MN.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from datetime import datetime
4 from logging import Logger, getLogger
5 from typing import Any
6 from zoneinfo import ZoneInfo
7
8 from requests import Response, Session
9
10 from electricitymap.contrib.config import ZoneKey
11 from electricitymap.contrib.lib.models.event_lists import (
12 ProductionBreakdownList,
13 TotalConsumptionList,
14 )
15 from electricitymap.contrib.lib.models.events import ProductionMix
16 from parsers.lib.exceptions import ParserException
17
18 NDC_GENERATION = "https://disnews.energy.mn/test/convert.php"
19 TZ = ZoneInfo("Asia/Ulaanbaatar") # UTC+8
20
21 # Query fields to web API fields
22 JSON_QUERY_TO_SRC = {
23 "time": "date",
24 "consumptionMW": "syssum",
25 "solarMW": "sumnar",
26 "windMW": "sums",
27 "importMW": "energyimport", # positive = import
28 "temperatureC": "t", # current temperature
29 }
30
31
32 def parse_json(web_json: dict) -> dict[str, Any]:
33 """
34 Parse the fetched JSON data to our query format according to JSON_QUERY_TO_SRC.
35 Example of expected JSON format present at URL:
36 {"date":"2023-06-27 18:00:00","syssum":"869.37","sumnar":42.34,"sums":119.79,"energyimport":"49.58","t":"17"}
37 """
38
39 # Validate first if keys in fetched dict match expected keys
40 if set(JSON_QUERY_TO_SRC.values()) != set(web_json.keys()):
41 raise ParserException(
42 parser="MN.py",
43 message=f"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.",
44 )
45
46 if None in web_json.values():
47 raise ParserException(
48 parser="MN.py",
49 message=f"Fetched values contain null. Fetched data: {web_json}.",
50 )
51
52 # Then we can safely parse them
53 query_data = dict()
54 for query_key, src_key in JSON_QUERY_TO_SRC.items():
55 if query_key == "time":
56 # convert to datetime
57 query_data[query_key] = datetime.fromisoformat(web_json[src_key]).replace(
58 tzinfo=TZ
59 )
60 else:
61 # or convert to float, might also be string
62 query_data[query_key] = float(web_json[src_key])
63
64 return query_data
65
66
67 def query(session: Session) -> dict[str, Any]:
68 """
69 Query the JSON endpoint and parse it.
70 """
71
72 target_response: Response = session.get(NDC_GENERATION)
73
74 if not target_response.ok:
75 raise ParserException(
76 parser="MN.py",
77 message=f"Data request did not succeed: {target_response.status_code}",
78 )
79
80 # Read as JSON
81 response_json = target_response.json()
82 query_result = parse_json(response_json)
83
84 return query_result
85
86
87 def fetch_production(
88 zone_key: ZoneKey,
89 session: Session = Session(),
90 target_datetime: datetime | None = None,
91 logger: Logger = getLogger(__name__),
92 ):
93 if target_datetime:
94 raise NotImplementedError("This parser is not yet able to parse past dates.")
95
96 query_data = query(session)
97
98 # Calculated 'unknown' production from available data (consumption, import, solar, wind).
99 # 'unknown' consists of 92.8% coal, 5.8% oil and 1.4% hydro as per 2020; sources: IEA and IRENA statistics.
100 query_data["unknownMW"] = round(
101 query_data["consumptionMW"]
102 - query_data["importMW"]
103 - query_data["solarMW"]
104 - query_data["windMW"],
105 13,
106 )
107
108 prod_mix = ProductionMix(
109 solar=query_data["solarMW"],
110 wind=query_data["windMW"],
111 unknown=query_data["unknownMW"],
112 )
113
114 prod_breakdown_list = ProductionBreakdownList(logger)
115 prod_breakdown_list.append(
116 datetime=query_data["time"],
117 zoneKey=zone_key,
118 source="https://ndc.energy.mn/",
119 production=prod_mix,
120 )
121
122 return prod_breakdown_list.to_list()
123
124
125 def fetch_consumption(
126 zone_key: ZoneKey,
127 session: Session = Session(),
128 target_datetime: datetime | None = None,
129 logger: Logger = getLogger(__name__),
130 ):
131 if target_datetime:
132 raise NotImplementedError("This parser is not yet able to parse past dates.")
133
134 query_data = query(session)
135
136 consumption_list = TotalConsumptionList(logger)
137 consumption_list.append(
138 datetime=query_data["time"],
139 zoneKey=zone_key,
140 consumption=query_data["consumptionMW"],
141 source="https://ndc.energy.mn/",
142 )
143
144 return consumption_list.to_list()
145
146
147 if __name__ == "__main__":
148 print("fetch_production() ->")
149 print(fetch_production(ZoneKey("MN")))
150 print("fetch_consumption() ->")
151 print(fetch_consumption(ZoneKey("MN")))
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/MN.py b/parsers/MN.py
--- a/parsers/MN.py
+++ b/parsers/MN.py
@@ -29,18 +29,18 @@
}
-def parse_json(web_json: dict) -> dict[str, Any]:
+def parse_json(web_json: dict, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:
"""
Parse the fetched JSON data to our query format according to JSON_QUERY_TO_SRC.
Example of expected JSON format present at URL:
- {"date":"2023-06-27 18:00:00","syssum":"869.37","sumnar":42.34,"sums":119.79,"energyimport":"49.58","t":"17"}
+ {"date":"2023-06-27 18:00:00","syssum":"869.37","sumnar":42.34,"sums":119.79,"energyimport":"49.58"}
"""
# Validate first if keys in fetched dict match expected keys
if set(JSON_QUERY_TO_SRC.values()) != set(web_json.keys()):
- raise ParserException(
- parser="MN.py",
- message=f"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.",
+ logger.error(
+ msg=f"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.",
+ extra={"zone_key": zone_key, "parser": "MN.py"},
)
if None in web_json.values():
@@ -52,7 +52,7 @@
# Then we can safely parse them
query_data = dict()
for query_key, src_key in JSON_QUERY_TO_SRC.items():
- if query_key == "time":
+ if "time" in query_key:
# convert to datetime
query_data[query_key] = datetime.fromisoformat(web_json[src_key]).replace(
tzinfo=TZ
@@ -64,7 +64,7 @@
return query_data
-def query(session: Session) -> dict[str, Any]:
+def query(session: Session, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:
"""
Query the JSON endpoint and parse it.
"""
@@ -79,7 +79,7 @@
# Read as JSON
response_json = target_response.json()
- query_result = parse_json(response_json)
+ query_result = parse_json(response_json, logger, zone_key)
return query_result
@@ -93,11 +93,11 @@
if target_datetime:
raise NotImplementedError("This parser is not yet able to parse past dates.")
- query_data = query(session)
+ query_data = query(session, logger, zone_key)
- # Calculated 'unknown' production from available data (consumption, import, solar, wind).
+ # Calculated 'unknown' production from available data (consumption, import, solar, wind, tpp).
# 'unknown' consists of 92.8% coal, 5.8% oil and 1.4% hydro as per 2020; sources: IEA and IRENA statistics.
- query_data["unknownMW"] = round(
+ query_data["leftoverMW"] = round(
query_data["consumptionMW"]
- query_data["importMW"]
- query_data["solarMW"]
@@ -105,11 +105,10 @@
13,
)
- prod_mix = ProductionMix(
- solar=query_data["solarMW"],
- wind=query_data["windMW"],
- unknown=query_data["unknownMW"],
- )
+ prod_mix = ProductionMix()
+ prod_mix.add_value("solar", query_data["solarMW"])
+ prod_mix.add_value("wind", query_data["windMW"])
+ prod_mix.add_value("unknown", query_data["leftoverMW"])
prod_breakdown_list = ProductionBreakdownList(logger)
prod_breakdown_list.append(
@@ -131,7 +130,7 @@
if target_datetime:
raise NotImplementedError("This parser is not yet able to parse past dates.")
- query_data = query(session)
+ query_data = query(session, logger, zone_key)
consumption_list = TotalConsumptionList(logger)
consumption_list.append(
| {"golden_diff": "diff --git a/parsers/MN.py b/parsers/MN.py\n--- a/parsers/MN.py\n+++ b/parsers/MN.py\n@@ -29,18 +29,18 @@\n }\n \n \n-def parse_json(web_json: dict) -> dict[str, Any]:\n+def parse_json(web_json: dict, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:\n \"\"\"\n Parse the fetched JSON data to our query format according to JSON_QUERY_TO_SRC.\n Example of expected JSON format present at URL:\n- {\"date\":\"2023-06-27 18:00:00\",\"syssum\":\"869.37\",\"sumnar\":42.34,\"sums\":119.79,\"energyimport\":\"49.58\",\"t\":\"17\"}\n+ {\"date\":\"2023-06-27 18:00:00\",\"syssum\":\"869.37\",\"sumnar\":42.34,\"sums\":119.79,\"energyimport\":\"49.58\"}\n \"\"\"\n \n # Validate first if keys in fetched dict match expected keys\n if set(JSON_QUERY_TO_SRC.values()) != set(web_json.keys()):\n- raise ParserException(\n- parser=\"MN.py\",\n- message=f\"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.\",\n+ logger.error(\n+ msg=f\"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.\",\n+ extra={\"zone_key\": zone_key, \"parser\": \"MN.py\"},\n )\n \n if None in web_json.values():\n@@ -52,7 +52,7 @@\n # Then we can safely parse them\n query_data = dict()\n for query_key, src_key in JSON_QUERY_TO_SRC.items():\n- if query_key == \"time\":\n+ if \"time\" in query_key:\n # convert to datetime\n query_data[query_key] = datetime.fromisoformat(web_json[src_key]).replace(\n tzinfo=TZ\n@@ -64,7 +64,7 @@\n return query_data\n \n \n-def query(session: Session) -> dict[str, Any]:\n+def query(session: Session, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:\n \"\"\"\n Query the JSON endpoint and parse it.\n \"\"\"\n@@ -79,7 +79,7 @@\n \n # Read as JSON\n response_json = target_response.json()\n- query_result = parse_json(response_json)\n+ query_result = parse_json(response_json, logger, zone_key)\n \n return query_result\n \n@@ -93,11 +93,11 @@\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n \n- query_data = query(session)\n+ query_data = query(session, logger, zone_key)\n \n- # Calculated 'unknown' production from available data (consumption, import, solar, wind).\n+ # Calculated 'unknown' production from available data (consumption, import, solar, wind, tpp).\n # 'unknown' consists of 92.8% coal, 5.8% oil and 1.4% hydro as per 2020; sources: IEA and IRENA statistics.\n- query_data[\"unknownMW\"] = round(\n+ query_data[\"leftoverMW\"] = round(\n query_data[\"consumptionMW\"]\n - query_data[\"importMW\"]\n - query_data[\"solarMW\"]\n@@ -105,11 +105,10 @@\n 13,\n )\n \n- prod_mix = ProductionMix(\n- solar=query_data[\"solarMW\"],\n- wind=query_data[\"windMW\"],\n- unknown=query_data[\"unknownMW\"],\n- )\n+ prod_mix = ProductionMix()\n+ prod_mix.add_value(\"solar\", query_data[\"solarMW\"])\n+ prod_mix.add_value(\"wind\", query_data[\"windMW\"])\n+ prod_mix.add_value(\"unknown\", query_data[\"leftoverMW\"])\n \n prod_breakdown_list = ProductionBreakdownList(logger)\n prod_breakdown_list.append(\n@@ -131,7 +130,7 @@\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n \n- query_data = query(session)\n+ query_data = query(session, logger, zone_key)\n \n consumption_list = TotalConsumptionList(logger)\n consumption_list.append(\n", "issue": "MN parser down\n## Description\r\nMongolia parser is down, but the bot hasn't open an issue. 
I am opening this one to alert the maintainers.\r\nIt seems this time the parser is down because of a change in the json that was being parsed:\r\n> raise ParserException(\r\nparsers.lib.exceptions.ParserException: MN.py Parser: Fetched keys from source dict_keys(['date', 'syssum', 'tpp', 'sumnar', 'sums', 'energyimport', 't']) do not match expected keys dict_values(['date', 'syssum', 'sumnar', 'sums', 'energyimport', 't']).\r\n\r\nA new key called tpp (thermal power plants?) has being added. The value of this new key doesn't match the previously calculated unknown production (so tpp plus other keys don't add up to consumption). What should be done to fix this? It seems an unknown source is being added.\r\n\r\nBy the way, a bit off-topic, but I have noticed that the Mongolia parser outputs global exchange data. We currently get the exchange with Russia from its parser, so we could calculate the exchange with China by substracting the other exchange. Is this possible?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom datetime import datetime\nfrom logging import Logger, getLogger\nfrom typing import Any\nfrom zoneinfo import ZoneInfo\n\nfrom requests import Response, Session\n\nfrom electricitymap.contrib.config import ZoneKey\nfrom electricitymap.contrib.lib.models.event_lists import (\n ProductionBreakdownList,\n TotalConsumptionList,\n)\nfrom electricitymap.contrib.lib.models.events import ProductionMix\nfrom parsers.lib.exceptions import ParserException\n\nNDC_GENERATION = \"https://disnews.energy.mn/test/convert.php\"\nTZ = ZoneInfo(\"Asia/Ulaanbaatar\") # UTC+8\n\n# Query fields to web API fields\nJSON_QUERY_TO_SRC = {\n \"time\": \"date\",\n \"consumptionMW\": \"syssum\",\n \"solarMW\": \"sumnar\",\n \"windMW\": \"sums\",\n \"importMW\": \"energyimport\", # positive = import\n \"temperatureC\": \"t\", # current temperature\n}\n\n\ndef parse_json(web_json: dict) -> dict[str, Any]:\n \"\"\"\n Parse the fetched JSON data to our query format according to JSON_QUERY_TO_SRC.\n Example of expected JSON format present at URL:\n {\"date\":\"2023-06-27 18:00:00\",\"syssum\":\"869.37\",\"sumnar\":42.34,\"sums\":119.79,\"energyimport\":\"49.58\",\"t\":\"17\"}\n \"\"\"\n\n # Validate first if keys in fetched dict match expected keys\n if set(JSON_QUERY_TO_SRC.values()) != set(web_json.keys()):\n raise ParserException(\n parser=\"MN.py\",\n message=f\"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.\",\n )\n\n if None in web_json.values():\n raise ParserException(\n parser=\"MN.py\",\n message=f\"Fetched values contain null. 
Fetched data: {web_json}.\",\n )\n\n # Then we can safely parse them\n query_data = dict()\n for query_key, src_key in JSON_QUERY_TO_SRC.items():\n if query_key == \"time\":\n # convert to datetime\n query_data[query_key] = datetime.fromisoformat(web_json[src_key]).replace(\n tzinfo=TZ\n )\n else:\n # or convert to float, might also be string\n query_data[query_key] = float(web_json[src_key])\n\n return query_data\n\n\ndef query(session: Session) -> dict[str, Any]:\n \"\"\"\n Query the JSON endpoint and parse it.\n \"\"\"\n\n target_response: Response = session.get(NDC_GENERATION)\n\n if not target_response.ok:\n raise ParserException(\n parser=\"MN.py\",\n message=f\"Data request did not succeed: {target_response.status_code}\",\n )\n\n # Read as JSON\n response_json = target_response.json()\n query_result = parse_json(response_json)\n\n return query_result\n\n\ndef fetch_production(\n zone_key: ZoneKey,\n session: Session = Session(),\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n):\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n\n query_data = query(session)\n\n # Calculated 'unknown' production from available data (consumption, import, solar, wind).\n # 'unknown' consists of 92.8% coal, 5.8% oil and 1.4% hydro as per 2020; sources: IEA and IRENA statistics.\n query_data[\"unknownMW\"] = round(\n query_data[\"consumptionMW\"]\n - query_data[\"importMW\"]\n - query_data[\"solarMW\"]\n - query_data[\"windMW\"],\n 13,\n )\n\n prod_mix = ProductionMix(\n solar=query_data[\"solarMW\"],\n wind=query_data[\"windMW\"],\n unknown=query_data[\"unknownMW\"],\n )\n\n prod_breakdown_list = ProductionBreakdownList(logger)\n prod_breakdown_list.append(\n datetime=query_data[\"time\"],\n zoneKey=zone_key,\n source=\"https://ndc.energy.mn/\",\n production=prod_mix,\n )\n\n return prod_breakdown_list.to_list()\n\n\ndef fetch_consumption(\n zone_key: ZoneKey,\n session: Session = Session(),\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n):\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n\n query_data = query(session)\n\n consumption_list = TotalConsumptionList(logger)\n consumption_list.append(\n datetime=query_data[\"time\"],\n zoneKey=zone_key,\n consumption=query_data[\"consumptionMW\"],\n source=\"https://ndc.energy.mn/\",\n )\n\n return consumption_list.to_list()\n\n\nif __name__ == \"__main__\":\n print(\"fetch_production() ->\")\n print(fetch_production(ZoneKey(\"MN\")))\n print(\"fetch_consumption() ->\")\n print(fetch_consumption(ZoneKey(\"MN\")))\n", "path": "parsers/MN.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom datetime import datetime\nfrom logging import Logger, getLogger\nfrom typing import Any\nfrom zoneinfo import ZoneInfo\n\nfrom requests import Response, Session\n\nfrom electricitymap.contrib.config import ZoneKey\nfrom electricitymap.contrib.lib.models.event_lists import (\n ProductionBreakdownList,\n TotalConsumptionList,\n)\nfrom electricitymap.contrib.lib.models.events import ProductionMix\nfrom parsers.lib.exceptions import ParserException\n\nNDC_GENERATION = \"https://disnews.energy.mn/test/convert.php\"\nTZ = ZoneInfo(\"Asia/Ulaanbaatar\") # UTC+8\n\n# Query fields to web API fields\nJSON_QUERY_TO_SRC = {\n \"time\": \"date\",\n \"consumptionMW\": \"syssum\",\n \"solarMW\": \"sumnar\",\n \"windMW\": \"sums\",\n \"importMW\": \"energyimport\", # positive = import\n 
\"temperatureC\": \"t\", # current temperature\n}\n\n\ndef parse_json(web_json: dict, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:\n \"\"\"\n Parse the fetched JSON data to our query format according to JSON_QUERY_TO_SRC.\n Example of expected JSON format present at URL:\n {\"date\":\"2023-06-27 18:00:00\",\"syssum\":\"869.37\",\"sumnar\":42.34,\"sums\":119.79,\"energyimport\":\"49.58\"}\n \"\"\"\n\n # Validate first if keys in fetched dict match expected keys\n if set(JSON_QUERY_TO_SRC.values()) != set(web_json.keys()):\n logger.error(\n msg=f\"Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.\",\n extra={\"zone_key\": zone_key, \"parser\": \"MN.py\"},\n )\n\n if None in web_json.values():\n raise ParserException(\n parser=\"MN.py\",\n message=f\"Fetched values contain null. Fetched data: {web_json}.\",\n )\n\n # Then we can safely parse them\n query_data = dict()\n for query_key, src_key in JSON_QUERY_TO_SRC.items():\n if \"time\" in query_key:\n # convert to datetime\n query_data[query_key] = datetime.fromisoformat(web_json[src_key]).replace(\n tzinfo=TZ\n )\n else:\n # or convert to float, might also be string\n query_data[query_key] = float(web_json[src_key])\n\n return query_data\n\n\ndef query(session: Session, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:\n \"\"\"\n Query the JSON endpoint and parse it.\n \"\"\"\n\n target_response: Response = session.get(NDC_GENERATION)\n\n if not target_response.ok:\n raise ParserException(\n parser=\"MN.py\",\n message=f\"Data request did not succeed: {target_response.status_code}\",\n )\n\n # Read as JSON\n response_json = target_response.json()\n query_result = parse_json(response_json, logger, zone_key)\n\n return query_result\n\n\ndef fetch_production(\n zone_key: ZoneKey,\n session: Session = Session(),\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n):\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n\n query_data = query(session, logger, zone_key)\n\n # Calculated 'unknown' production from available data (consumption, import, solar, wind, tpp).\n # 'unknown' consists of 92.8% coal, 5.8% oil and 1.4% hydro as per 2020; sources: IEA and IRENA statistics.\n query_data[\"leftoverMW\"] = round(\n query_data[\"consumptionMW\"]\n - query_data[\"importMW\"]\n - query_data[\"solarMW\"]\n - query_data[\"windMW\"],\n 13,\n )\n\n prod_mix = ProductionMix()\n prod_mix.add_value(\"solar\", query_data[\"solarMW\"])\n prod_mix.add_value(\"wind\", query_data[\"windMW\"])\n prod_mix.add_value(\"unknown\", query_data[\"leftoverMW\"])\n\n prod_breakdown_list = ProductionBreakdownList(logger)\n prod_breakdown_list.append(\n datetime=query_data[\"time\"],\n zoneKey=zone_key,\n source=\"https://ndc.energy.mn/\",\n production=prod_mix,\n )\n\n return prod_breakdown_list.to_list()\n\n\ndef fetch_consumption(\n zone_key: ZoneKey,\n session: Session = Session(),\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n):\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates.\")\n\n query_data = query(session, logger, zone_key)\n\n consumption_list = TotalConsumptionList(logger)\n consumption_list.append(\n datetime=query_data[\"time\"],\n zoneKey=zone_key,\n consumption=query_data[\"consumptionMW\"],\n source=\"https://ndc.energy.mn/\",\n )\n\n return consumption_list.to_list()\n\n\nif __name__ == \"__main__\":\n 
print(\"fetch_production() ->\")\n print(fetch_production(ZoneKey(\"MN\")))\n print(\"fetch_consumption() ->\")\n print(fetch_consumption(ZoneKey(\"MN\")))\n", "path": "parsers/MN.py"}]} | 1,965 | 1,006 |
gh_patches_debug_5273 | rasdani/github-patches | git_diff | crytic__slither-1339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
README is not correctly rendered on PyPi
### Describe the desired feature
The description on https://pypi.org/project/slither-analyzer/ is not being rendered as markdown. Add the line `long_description_content_type="text/markdown",` to the `setup.py` for it to render correctly in future releases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open("README.md", "r", encoding="utf-8") as f:
4 long_description = f.read()
5
6 setup(
7 name="slither-analyzer",
8 description="Slither is a Solidity static analysis framework written in Python 3.",
9 url="https://github.com/crytic/slither",
10 author="Trail of Bits",
11 version="0.8.3",
12 packages=find_packages(),
13 python_requires=">=3.8",
14 install_requires=[
15 "prettytable>=0.7.2",
16 "pysha3>=1.0.2",
17 # "crytic-compile>=0.2.3",
18 "crytic-compile",
19 ],
20 extras_require={
21 "dev": [
22 "black==22.3.0",
23 "pylint==2.13.4",
24 "pytest",
25 "pytest-cov",
26 "deepdiff",
27 "numpy",
28 "solc-select>=v1.0.0b1",
29 ]
30 },
31 dependency_links=["git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile"],
32 license="AGPL-3.0",
33 long_description=long_description,
34 entry_points={
35 "console_scripts": [
36 "slither = slither.__main__:main",
37 "slither-check-upgradeability = slither.tools.upgradeability.__main__:main",
38 "slither-find-paths = slither.tools.possible_paths.__main__:main",
39 "slither-simil = slither.tools.similarity.__main__:main",
40 "slither-flat = slither.tools.flattening.__main__:main",
41 "slither-format = slither.tools.slither_format.__main__:main",
42 "slither-check-erc = slither.tools.erc_conformance.__main__:main",
43 "slither-check-kspec = slither.tools.kspec_coverage.__main__:main",
44 "slither-prop = slither.tools.properties.__main__:main",
45 "slither-mutate = slither.tools.mutator.__main__:main",
46 "slither-read-storage = slither.tools.read_storage.__main__:main",
47 ]
48 },
49 )
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@
dependency_links=["git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile"],
license="AGPL-3.0",
long_description=long_description,
+ long_description_content_type="text/markdown",
entry_points={
"console_scripts": [
"slither = slither.__main__:main",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,6 +31,7 @@\n dependency_links=[\"git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile\"],\n license=\"AGPL-3.0\",\n long_description=long_description,\n+ long_description_content_type=\"text/markdown\",\n entry_points={\n \"console_scripts\": [\n \"slither = slither.__main__:main\",\n", "issue": "README is not correctly rendered on PyPi\n### Describe the desired feature\n\nThe description on https://pypi.org/project/slither-analyzer/ is not being rendered as markdown. Add the line `long_description_content_type=\"text/markdown\",` to the `setup.py` for it to render correctly in future releases.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"slither-analyzer\",\n description=\"Slither is a Solidity static analysis framework written in Python 3.\",\n url=\"https://github.com/crytic/slither\",\n author=\"Trail of Bits\",\n version=\"0.8.3\",\n packages=find_packages(),\n python_requires=\">=3.8\",\n install_requires=[\n \"prettytable>=0.7.2\",\n \"pysha3>=1.0.2\",\n # \"crytic-compile>=0.2.3\",\n \"crytic-compile\",\n ],\n extras_require={\n \"dev\": [\n \"black==22.3.0\",\n \"pylint==2.13.4\",\n \"pytest\",\n \"pytest-cov\",\n \"deepdiff\",\n \"numpy\",\n \"solc-select>=v1.0.0b1\",\n ]\n },\n dependency_links=[\"git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile\"],\n license=\"AGPL-3.0\",\n long_description=long_description,\n entry_points={\n \"console_scripts\": [\n \"slither = slither.__main__:main\",\n \"slither-check-upgradeability = slither.tools.upgradeability.__main__:main\",\n \"slither-find-paths = slither.tools.possible_paths.__main__:main\",\n \"slither-simil = slither.tools.similarity.__main__:main\",\n \"slither-flat = slither.tools.flattening.__main__:main\",\n \"slither-format = slither.tools.slither_format.__main__:main\",\n \"slither-check-erc = slither.tools.erc_conformance.__main__:main\",\n \"slither-check-kspec = slither.tools.kspec_coverage.__main__:main\",\n \"slither-prop = slither.tools.properties.__main__:main\",\n \"slither-mutate = slither.tools.mutator.__main__:main\",\n \"slither-read-storage = slither.tools.read_storage.__main__:main\",\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"slither-analyzer\",\n description=\"Slither is a Solidity static analysis framework written in Python 3.\",\n url=\"https://github.com/crytic/slither\",\n author=\"Trail of Bits\",\n version=\"0.8.3\",\n packages=find_packages(),\n python_requires=\">=3.8\",\n install_requires=[\n \"prettytable>=0.7.2\",\n \"pysha3>=1.0.2\",\n # \"crytic-compile>=0.2.3\",\n \"crytic-compile\",\n ],\n extras_require={\n \"dev\": [\n \"black==22.3.0\",\n \"pylint==2.13.4\",\n \"pytest\",\n \"pytest-cov\",\n \"deepdiff\",\n \"numpy\",\n \"solc-select>=v1.0.0b1\",\n ]\n },\n dependency_links=[\"git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile\"],\n license=\"AGPL-3.0\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n entry_points={\n \"console_scripts\": [\n \"slither = slither.__main__:main\",\n \"slither-check-upgradeability = slither.tools.upgradeability.__main__:main\",\n 
\"slither-find-paths = slither.tools.possible_paths.__main__:main\",\n \"slither-simil = slither.tools.similarity.__main__:main\",\n \"slither-flat = slither.tools.flattening.__main__:main\",\n \"slither-format = slither.tools.slither_format.__main__:main\",\n \"slither-check-erc = slither.tools.erc_conformance.__main__:main\",\n \"slither-check-kspec = slither.tools.kspec_coverage.__main__:main\",\n \"slither-prop = slither.tools.properties.__main__:main\",\n \"slither-mutate = slither.tools.mutator.__main__:main\",\n \"slither-read-storage = slither.tools.read_storage.__main__:main\",\n ]\n },\n)\n", "path": "setup.py"}]} | 894 | 111 |
gh_patches_debug_5135 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4845 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WebSocket view jumps to top on new message
The WebSocket view keeps jumping to the top every time a new message arrives. This makes it basically impossible to work with while the connection is open. I hold down arrow -> it scrolls a bit -> message arrives -> I'm back at the top
_Originally posted by @Prinzhorn in https://github.com/mitmproxy/mitmproxy/issues/4486#issuecomment-796578909_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/tools/console/searchable.py`
Content:
```
1 import urwid
2
3 from mitmproxy.tools.console import signals
4
5
6 class Highlight(urwid.AttrMap):
7
8 def __init__(self, t):
9 urwid.AttrMap.__init__(
10 self,
11 urwid.Text(t.text),
12 "focusfield",
13 )
14 self.backup = t
15
16
17 class Searchable(urwid.ListBox):
18
19 def __init__(self, contents):
20 self.walker = urwid.SimpleFocusListWalker(contents)
21 urwid.ListBox.__init__(self, self.walker)
22 self.search_offset = 0
23 self.current_highlight = None
24 self.search_term = None
25 self.last_search = None
26
27 def keypress(self, size, key):
28 if key == "/":
29 signals.status_prompt.send(
30 prompt = "Search for",
31 text = "",
32 callback = self.set_search
33 )
34 elif key == "n":
35 self.find_next(False)
36 elif key == "N":
37 self.find_next(True)
38 elif key == "m_start":
39 self.set_focus(0)
40 self.walker._modified()
41 elif key == "m_end":
42 self.set_focus(len(self.walker) - 1)
43 self.walker._modified()
44 else:
45 return super().keypress(size, key)
46
47 def set_search(self, text):
48 self.last_search = text
49 self.search_term = text or None
50 self.find_next(False)
51
52 def set_highlight(self, offset):
53 if self.current_highlight is not None:
54 old = self.body[self.current_highlight]
55 self.body[self.current_highlight] = old.backup
56 if offset is None:
57 self.current_highlight = None
58 else:
59 self.body[offset] = Highlight(self.body[offset])
60 self.current_highlight = offset
61
62 def get_text(self, w):
63 if isinstance(w, urwid.Text):
64 return w.text
65 elif isinstance(w, Highlight):
66 return w.backup.text
67 else:
68 return None
69
70 def find_next(self, backwards):
71 if not self.search_term:
72 if self.last_search:
73 self.search_term = self.last_search
74 else:
75 self.set_highlight(None)
76 return
77 # Start search at focus + 1
78 if backwards:
79 rng = range(len(self.body) - 1, -1, -1)
80 else:
81 rng = range(1, len(self.body) + 1)
82 for i in rng:
83 off = (self.focus_position + i) % len(self.body)
84 w = self.body[off]
85 txt = self.get_text(w)
86 if txt and self.search_term in txt:
87 self.set_highlight(off)
88 self.set_focus(off, coming_from="above")
89 self.body._modified()
90 return
91 else:
92 self.set_highlight(None)
93 signals.status_message.send(message="Search not found.", expire=1)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/tools/console/searchable.py b/mitmproxy/tools/console/searchable.py
--- a/mitmproxy/tools/console/searchable.py
+++ b/mitmproxy/tools/console/searchable.py
@@ -19,6 +19,7 @@
def __init__(self, contents):
self.walker = urwid.SimpleFocusListWalker(contents)
urwid.ListBox.__init__(self, self.walker)
+ self.set_focus(len(self.walker) - 1)
self.search_offset = 0
self.current_highlight = None
self.search_term = None
| {"golden_diff": "diff --git a/mitmproxy/tools/console/searchable.py b/mitmproxy/tools/console/searchable.py\n--- a/mitmproxy/tools/console/searchable.py\n+++ b/mitmproxy/tools/console/searchable.py\n@@ -19,6 +19,7 @@\n def __init__(self, contents):\n self.walker = urwid.SimpleFocusListWalker(contents)\n urwid.ListBox.__init__(self, self.walker)\n+ self.set_focus(len(self.walker) - 1)\n self.search_offset = 0\n self.current_highlight = None\n self.search_term = None\n", "issue": "WebSocket view jumps to top on new message\nThe WebSocket view keeps jumping to the top every time a new message arrives. This makes it basically impossible to work with while the connection is open. I hold down arrow -> it scrolls a bit -> message arrives -> I'm back at the top\r\n\r\n_Originally posted by @Prinzhorn in https://github.com/mitmproxy/mitmproxy/issues/4486#issuecomment-796578909_\n", "before_files": [{"content": "import urwid\n\nfrom mitmproxy.tools.console import signals\n\n\nclass Highlight(urwid.AttrMap):\n\n def __init__(self, t):\n urwid.AttrMap.__init__(\n self,\n urwid.Text(t.text),\n \"focusfield\",\n )\n self.backup = t\n\n\nclass Searchable(urwid.ListBox):\n\n def __init__(self, contents):\n self.walker = urwid.SimpleFocusListWalker(contents)\n urwid.ListBox.__init__(self, self.walker)\n self.search_offset = 0\n self.current_highlight = None\n self.search_term = None\n self.last_search = None\n\n def keypress(self, size, key):\n if key == \"/\":\n signals.status_prompt.send(\n prompt = \"Search for\",\n text = \"\",\n callback = self.set_search\n )\n elif key == \"n\":\n self.find_next(False)\n elif key == \"N\":\n self.find_next(True)\n elif key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker) - 1)\n self.walker._modified()\n else:\n return super().keypress(size, key)\n\n def set_search(self, text):\n self.last_search = text\n self.search_term = text or None\n self.find_next(False)\n\n def set_highlight(self, offset):\n if self.current_highlight is not None:\n old = self.body[self.current_highlight]\n self.body[self.current_highlight] = old.backup\n if offset is None:\n self.current_highlight = None\n else:\n self.body[offset] = Highlight(self.body[offset])\n self.current_highlight = offset\n\n def get_text(self, w):\n if isinstance(w, urwid.Text):\n return w.text\n elif isinstance(w, Highlight):\n return w.backup.text\n else:\n return None\n\n def find_next(self, backwards):\n if not self.search_term:\n if self.last_search:\n self.search_term = self.last_search\n else:\n self.set_highlight(None)\n return\n # Start search at focus + 1\n if backwards:\n rng = range(len(self.body) - 1, -1, -1)\n else:\n rng = range(1, len(self.body) + 1)\n for i in rng:\n off = (self.focus_position + i) % len(self.body)\n w = self.body[off]\n txt = self.get_text(w)\n if txt and self.search_term in txt:\n self.set_highlight(off)\n self.set_focus(off, coming_from=\"above\")\n self.body._modified()\n return\n else:\n self.set_highlight(None)\n signals.status_message.send(message=\"Search not found.\", expire=1)\n", "path": "mitmproxy/tools/console/searchable.py"}], "after_files": [{"content": "import urwid\n\nfrom mitmproxy.tools.console import signals\n\n\nclass Highlight(urwid.AttrMap):\n\n def __init__(self, t):\n urwid.AttrMap.__init__(\n self,\n urwid.Text(t.text),\n \"focusfield\",\n )\n self.backup = t\n\n\nclass Searchable(urwid.ListBox):\n\n def __init__(self, contents):\n self.walker = urwid.SimpleFocusListWalker(contents)\n 
urwid.ListBox.__init__(self, self.walker)\n self.set_focus(len(self.walker) - 1)\n self.search_offset = 0\n self.current_highlight = None\n self.search_term = None\n self.last_search = None\n\n def keypress(self, size, key):\n if key == \"/\":\n signals.status_prompt.send(\n prompt = \"Search for\",\n text = \"\",\n callback = self.set_search\n )\n elif key == \"n\":\n self.find_next(False)\n elif key == \"N\":\n self.find_next(True)\n elif key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker) - 1)\n self.walker._modified()\n else:\n return super().keypress(size, key)\n\n def set_search(self, text):\n self.last_search = text\n self.search_term = text or None\n self.find_next(False)\n\n def set_highlight(self, offset):\n if self.current_highlight is not None:\n old = self.body[self.current_highlight]\n self.body[self.current_highlight] = old.backup\n if offset is None:\n self.current_highlight = None\n else:\n self.body[offset] = Highlight(self.body[offset])\n self.current_highlight = offset\n\n def get_text(self, w):\n if isinstance(w, urwid.Text):\n return w.text\n elif isinstance(w, Highlight):\n return w.backup.text\n else:\n return None\n\n def find_next(self, backwards):\n if not self.search_term:\n if self.last_search:\n self.search_term = self.last_search\n else:\n self.set_highlight(None)\n return\n # Start search at focus + 1\n if backwards:\n rng = range(len(self.body) - 1, -1, -1)\n else:\n rng = range(1, len(self.body) + 1)\n for i in rng:\n off = (self.focus_position + i) % len(self.body)\n w = self.body[off]\n txt = self.get_text(w)\n if txt and self.search_term in txt:\n self.set_highlight(off)\n self.set_focus(off, coming_from=\"above\")\n self.body._modified()\n return\n else:\n self.set_highlight(None)\n signals.status_message.send(message=\"Search not found.\", expire=1)\n", "path": "mitmproxy/tools/console/searchable.py"}]} | 1,146 | 127 |
gh_patches_debug_58219 | rasdani/github-patches | git_diff | opsdroid__opsdroid-169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
arrow dep missing
Fresh install of ubuntu 16.04
```
$ sudo apt update && sudo apt install python3-pip
...
$ pip3 install opsdroid
...
$ opsdroid
Traceback (most recent call last):
File "/home/ubuntu/.local/bin/opsdroid", line 7, in <module>
from opsdroid.__main__ import main
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/__main__.py", line 8, in <module>
from opsdroid.core import OpsDroid
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/core.py", line 15, in <module>
from opsdroid.parsers.crontab import parse_crontab
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/parsers/crontab.py", line 6, in <module>
import arrow
ImportError: No module named 'arrow'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from opsdroid.const import __version__
5
6 PACKAGE_NAME = 'opsdroid'
7 HERE = os.path.abspath(os.path.dirname(__file__))
8
9 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',
10 'modules.*', 'docs', 'docs.*'])
11
12 REQUIRES = [
13 'pyyaml>=3.11,<4',
14 'aiohttp>=1.2.0,<2',
15 'pycron>=0.40',
16 ]
17
18 setup(
19 name=PACKAGE_NAME,
20 version=__version__,
21 license='GNU GENERAL PUBLIC LICENSE V3',
22 url='',
23 download_url='',
24 author='Jacob Tomlinson',
25 author_email='[email protected]',
26 description='An open source chat-ops bot.',
27 packages=PACKAGES,
28 include_package_data=True,
29 zip_safe=False,
30 platforms='any',
31 install_requires=REQUIRES,
32 test_suite='tests',
33 keywords=['bot', 'chatops'],
34 entry_points={
35 'console_scripts': [
36 'opsdroid = opsdroid.__main__:main'
37 ]
38 },
39 )
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,9 +10,10 @@
'modules.*', 'docs', 'docs.*'])
REQUIRES = [
- 'pyyaml>=3.11,<4',
- 'aiohttp>=1.2.0,<2',
- 'pycron>=0.40',
+ 'arrow==0.10.0',
+ 'aiohttp==2.1.0',
+ 'pycron==0.40',
+ 'pyyaml==3.12'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,9 +10,10 @@\n 'modules.*', 'docs', 'docs.*'])\n \n REQUIRES = [\n- 'pyyaml>=3.11,<4',\n- 'aiohttp>=1.2.0,<2',\n- 'pycron>=0.40',\n+ 'arrow==0.10.0',\n+ 'aiohttp==2.1.0',\n+ 'pycron==0.40',\n+ 'pyyaml==3.12'\n ]\n \n setup(\n", "issue": "arrow dep missing\nFresh install of ubuntu 16.04\r\n\r\n```\r\n$ sudo apt update && sudo apt install python3-pip\r\n...\r\n$ pip3 install opsdroid\r\n...\r\n$ opsdroid\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/.local/bin/opsdroid\", line 7, in <module>\r\n from opsdroid.__main__ import main\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/__main__.py\", line 8, in <module>\r\n from opsdroid.core import OpsDroid\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/core.py\", line 15, in <module>\r\n from opsdroid.parsers.crontab import parse_crontab\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/parsers/crontab.py\", line 6, in <module>\r\n import arrow\r\nImportError: No module named 'arrow'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom opsdroid.const import __version__\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\nREQUIRES = [\n 'pyyaml>=3.11,<4',\n 'aiohttp>=1.2.0,<2',\n 'pycron>=0.40',\n]\n\nsetup(\n name=PACKAGE_NAME,\n version=__version__,\n license='GNU GENERAL PUBLIC LICENSE V3',\n url='',\n download_url='',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source chat-ops bot.',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=['bot', 'chatops'],\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom opsdroid.const import __version__\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\nREQUIRES = [\n 'arrow==0.10.0',\n 'aiohttp==2.1.0',\n 'pycron==0.40',\n 'pyyaml==3.12'\n]\n\nsetup(\n name=PACKAGE_NAME,\n version=__version__,\n license='GNU GENERAL PUBLIC LICENSE V3',\n url='',\n download_url='',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source chat-ops bot.',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=['bot', 'chatops'],\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]} | 802 | 140 |
gh_patches_debug_31455 | rasdani/github-patches | git_diff | pypa__pipenv-3186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Quote command if parentheses exist
Thank you for contributing to Pipenv!
### The issue
Fixes #3168
### The fix
Quote the command if it contains `()`.
### The checklist
* [x] Associated issue
* [x] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix`, `.feature`, `.behavior`, `.doc`. `.vendor`. or `.trivial` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #.
<!--
### If this is a patch to the `vendor` directory…
Please try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.
A pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv’s integration to a new vendor feature). Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.
If your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/cmdparse.py`
Content:
```
1 import re
2 import shlex
3
4 import six
5
6
7 class ScriptEmptyError(ValueError):
8 pass
9
10
11 class Script(object):
12 """Parse a script line (in Pipfile's [scripts] section).
13
14 This always works in POSIX mode, even on Windows.
15 """
16
17 def __init__(self, command, args=None):
18 self._parts = [command]
19 if args:
20 self._parts.extend(args)
21
22 @classmethod
23 def parse(cls, value):
24 if isinstance(value, six.string_types):
25 value = shlex.split(value)
26 if not value:
27 raise ScriptEmptyError(value)
28 return cls(value[0], value[1:])
29
30 def __repr__(self):
31 return "Script({0!r})".format(self._parts)
32
33 @property
34 def command(self):
35 return self._parts[0]
36
37 @property
38 def args(self):
39 return self._parts[1:]
40
41 def extend(self, extra_args):
42 self._parts.extend(extra_args)
43
44 def cmdify(self):
45 """Encode into a cmd-executable string.
46
47 This re-implements CreateProcess's quoting logic to turn a list of
48 arguments into one single string for the shell to interpret.
49
50 * All double quotes are escaped with a backslash.
51 * Existing backslashes before a quote are doubled, so they are all
52 escaped properly.
53 * Backslashes elsewhere are left as-is; cmd will interpret them
54 literally.
55
56 The result is then quoted into a pair of double quotes to be grouped.
57
58 An argument is intentionally not quoted if it does not contain
59 whitespaces. This is done to be compatible with Windows built-in
60 commands that don't work well with quotes, e.g. everything with `echo`,
61 and DOS-style (forward slash) switches.
62
63 The intended use of this function is to pre-process an argument list
64 before passing it into ``subprocess.Popen(..., shell=True)``.
65
66 See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
67 """
68 return " ".join(
69 arg if not next(re.finditer(r'\s', arg), None)
70 else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
71 for arg in self._parts
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pipenv/cmdparse.py b/pipenv/cmdparse.py
--- a/pipenv/cmdparse.py
+++ b/pipenv/cmdparse.py
@@ -1,3 +1,4 @@
+import itertools
import re
import shlex
@@ -8,6 +9,12 @@
pass
+def _quote_if_contains(value, pattern):
+ if next(re.finditer(pattern, value), None):
+ return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
+ return value
+
+
class Script(object):
"""Parse a script line (in Pipfile's [scripts] section).
@@ -56,17 +63,21 @@
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
- whitespaces. This is done to be compatible with Windows built-in
+ foul characters. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
+ Foul characters include:
+
+ * Whitespaces.
+ * Parentheses in the command. (pypa/pipenv#3168)
+
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
- return " ".join(
- arg if not next(re.finditer(r'\s', arg), None)
- else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
- for arg in self._parts
- )
+ return " ".join(itertools.chain(
+ [_quote_if_contains(self.command, r'[\s()]')],
+ (_quote_if_contains(arg, r'\s') for arg in self.args),
+ ))
| {"golden_diff": "diff --git a/pipenv/cmdparse.py b/pipenv/cmdparse.py\n--- a/pipenv/cmdparse.py\n+++ b/pipenv/cmdparse.py\n@@ -1,3 +1,4 @@\n+import itertools\n import re\n import shlex\n \n@@ -8,6 +9,12 @@\n pass\n \n \n+def _quote_if_contains(value, pattern):\n+ if next(re.finditer(pattern, value), None):\n+ return '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', value))\n+ return value\n+\n+\n class Script(object):\n \"\"\"Parse a script line (in Pipfile's [scripts] section).\n \n@@ -56,17 +63,21 @@\n The result is then quoted into a pair of double quotes to be grouped.\n \n An argument is intentionally not quoted if it does not contain\n- whitespaces. This is done to be compatible with Windows built-in\n+ foul characters. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. everything with `echo`,\n and DOS-style (forward slash) switches.\n \n+ Foul characters include:\n+\n+ * Whitespaces.\n+ * Parentheses in the command. (pypa/pipenv#3168)\n+\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n \n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n \"\"\"\n- return \" \".join(\n- arg if not next(re.finditer(r'\\s', arg), None)\n- else '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', arg))\n- for arg in self._parts\n- )\n+ return \" \".join(itertools.chain(\n+ [_quote_if_contains(self.command, r'[\\s()]')],\n+ (_quote_if_contains(arg, r'\\s') for arg in self.args),\n+ ))\n", "issue": "Quote command if parentheses exist\nThank you for contributing to Pipenv!\r\n\r\n\r\n### The issue\r\n\r\nFixes #3168 \r\n\r\n### The fix\r\n\r\nQuote the command if it contains `()`.\r\n\r\n### The checklist\r\n\r\n* [x] Associated issue\r\n* [x] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix`, `.feature`, `.behavior`, `.doc`. `.vendor`. or `.trivial` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #.\r\n\r\n<!--\r\n### If this is a patch to the `vendor` directory\u2026\r\n\r\nPlease try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.\r\n\r\nA pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv\u2019s integration to a new vendor feature). 
Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.\r\n\r\nIf your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.\r\n-->\r\n\n", "before_files": [{"content": "import re\nimport shlex\n\nimport six\n\n\nclass ScriptEmptyError(ValueError):\n pass\n\n\nclass Script(object):\n \"\"\"Parse a script line (in Pipfile's [scripts] section).\n\n This always works in POSIX mode, even on Windows.\n \"\"\"\n\n def __init__(self, command, args=None):\n self._parts = [command]\n if args:\n self._parts.extend(args)\n\n @classmethod\n def parse(cls, value):\n if isinstance(value, six.string_types):\n value = shlex.split(value)\n if not value:\n raise ScriptEmptyError(value)\n return cls(value[0], value[1:])\n\n def __repr__(self):\n return \"Script({0!r})\".format(self._parts)\n\n @property\n def command(self):\n return self._parts[0]\n\n @property\n def args(self):\n return self._parts[1:]\n\n def extend(self, extra_args):\n self._parts.extend(extra_args)\n\n def cmdify(self):\n \"\"\"Encode into a cmd-executable string.\n\n This re-implements CreateProcess's quoting logic to turn a list of\n arguments into one single string for the shell to interpret.\n\n * All double quotes are escaped with a backslash.\n * Existing backslashes before a quote are doubled, so they are all\n escaped properly.\n * Backslashes elsewhere are left as-is; cmd will interpret them\n literally.\n\n The result is then quoted into a pair of double quotes to be grouped.\n\n An argument is intentionally not quoted if it does not contain\n whitespaces. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. 
everything with `echo`,\n and DOS-style (forward slash) switches.\n\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n\n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n \"\"\"\n return \" \".join(\n arg if not next(re.finditer(r'\\s', arg), None)\n else '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', arg))\n for arg in self._parts\n )\n", "path": "pipenv/cmdparse.py"}], "after_files": [{"content": "import itertools\nimport re\nimport shlex\n\nimport six\n\n\nclass ScriptEmptyError(ValueError):\n pass\n\n\ndef _quote_if_contains(value, pattern):\n if next(re.finditer(pattern, value), None):\n return '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', value))\n return value\n\n\nclass Script(object):\n \"\"\"Parse a script line (in Pipfile's [scripts] section).\n\n This always works in POSIX mode, even on Windows.\n \"\"\"\n\n def __init__(self, command, args=None):\n self._parts = [command]\n if args:\n self._parts.extend(args)\n\n @classmethod\n def parse(cls, value):\n if isinstance(value, six.string_types):\n value = shlex.split(value)\n if not value:\n raise ScriptEmptyError(value)\n return cls(value[0], value[1:])\n\n def __repr__(self):\n return \"Script({0!r})\".format(self._parts)\n\n @property\n def command(self):\n return self._parts[0]\n\n @property\n def args(self):\n return self._parts[1:]\n\n def extend(self, extra_args):\n self._parts.extend(extra_args)\n\n def cmdify(self):\n \"\"\"Encode into a cmd-executable string.\n\n This re-implements CreateProcess's quoting logic to turn a list of\n arguments into one single string for the shell to interpret.\n\n * All double quotes are escaped with a backslash.\n * Existing backslashes before a quote are doubled, so they are all\n escaped properly.\n * Backslashes elsewhere are left as-is; cmd will interpret them\n literally.\n\n The result is then quoted into a pair of double quotes to be grouped.\n\n An argument is intentionally not quoted if it does not contain\n foul characters. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. everything with `echo`,\n and DOS-style (forward slash) switches.\n\n Foul characters include:\n\n * Whitespaces.\n * Parentheses in the command. (pypa/pipenv#3168)\n\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n\n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n \"\"\"\n return \" \".join(itertools.chain(\n [_quote_if_contains(self.command, r'[\\s()]')],\n (_quote_if_contains(arg, r'\\s') for arg in self.args),\n ))\n", "path": "pipenv/cmdparse.py"}]} | 1,193 | 454 |
gh_patches_debug_35752 | rasdani/github-patches | git_diff | feast-dev__feast-2845 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect projects-list.json generated by feast ui when using Postgres as a data source.
## Expected Behavior
Correct generation of the projects-list.json when running feast ui.
## Current Behavior
The generated projects-list.json does not contain a name in the dataSources field, causing the parser to fail.
## Steps to reproduce
Set up feast with PostgreSQL as a data source.
### Specifications
- Version:
- Platform:
- Subsystem:
## Possible Solution
Adding name=self.name to to_proto() in postgres_source.py. And in general making the postgres_source.py file more similar to e.g., file_source.py.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py`
Content:
```
1 import json
2 from typing import Callable, Dict, Iterable, Optional, Tuple
3
4 from feast.data_source import DataSource
5 from feast.infra.utils.postgres.connection_utils import _get_conn
6 from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto
7 from feast.repo_config import RepoConfig
8 from feast.type_map import pg_type_code_to_pg_type, pg_type_to_feast_value_type
9 from feast.value_type import ValueType
10
11
12 class PostgreSQLSource(DataSource):
13 def __init__(
14 self,
15 name: str,
16 query: str,
17 timestamp_field: Optional[str] = "",
18 created_timestamp_column: Optional[str] = "",
19 field_mapping: Optional[Dict[str, str]] = None,
20 date_partition_column: Optional[str] = "",
21 ):
22 self._postgres_options = PostgreSQLOptions(name=name, query=query)
23
24 super().__init__(
25 name=name,
26 timestamp_field=timestamp_field,
27 created_timestamp_column=created_timestamp_column,
28 field_mapping=field_mapping,
29 date_partition_column=date_partition_column,
30 )
31
32 def __hash__(self):
33 return super().__hash__()
34
35 def __eq__(self, other):
36 if not isinstance(other, PostgreSQLSource):
37 raise TypeError(
38 "Comparisons should only involve PostgreSQLSource class objects."
39 )
40
41 return (
42 self._postgres_options._query == other._postgres_options._query
43 and self.timestamp_field == other.timestamp_field
44 and self.created_timestamp_column == other.created_timestamp_column
45 and self.field_mapping == other.field_mapping
46 )
47
48 @staticmethod
49 def from_proto(data_source: DataSourceProto):
50 assert data_source.HasField("custom_options")
51
52 postgres_options = json.loads(data_source.custom_options.configuration)
53 return PostgreSQLSource(
54 name=postgres_options["name"],
55 query=postgres_options["query"],
56 field_mapping=dict(data_source.field_mapping),
57 timestamp_field=data_source.timestamp_field,
58 created_timestamp_column=data_source.created_timestamp_column,
59 date_partition_column=data_source.date_partition_column,
60 )
61
62 def to_proto(self) -> DataSourceProto:
63 data_source_proto = DataSourceProto(
64 type=DataSourceProto.CUSTOM_SOURCE,
65 data_source_class_type="feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource",
66 field_mapping=self.field_mapping,
67 custom_options=self._postgres_options.to_proto(),
68 )
69
70 data_source_proto.timestamp_field = self.timestamp_field
71 data_source_proto.created_timestamp_column = self.created_timestamp_column
72 data_source_proto.date_partition_column = self.date_partition_column
73
74 return data_source_proto
75
76 def validate(self, config: RepoConfig):
77 pass
78
79 @staticmethod
80 def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:
81 return pg_type_to_feast_value_type
82
83 def get_table_column_names_and_types(
84 self, config: RepoConfig
85 ) -> Iterable[Tuple[str, str]]:
86 with _get_conn(config.offline_store) as conn, conn.cursor() as cur:
87 cur.execute(
88 f"SELECT * FROM ({self.get_table_query_string()}) AS sub LIMIT 0"
89 )
90 return (
91 (c.name, pg_type_code_to_pg_type(c.type_code)) for c in cur.description
92 )
93
94 def get_table_query_string(self) -> str:
95 return f"({self._postgres_options._query})"
96
97
98 class PostgreSQLOptions:
99 def __init__(self, name: str, query: Optional[str]):
100 self._name = name
101 self._query = query
102
103 @classmethod
104 def from_proto(cls, postgres_options_proto: DataSourceProto.CustomSourceOptions):
105 config = json.loads(postgres_options_proto.configuration.decode("utf8"))
106 postgres_options = cls(name=config["name"], query=config["query"])
107
108 return postgres_options
109
110 def to_proto(self) -> DataSourceProto.CustomSourceOptions:
111 postgres_options_proto = DataSourceProto.CustomSourceOptions(
112 configuration=json.dumps(
113 {"name": self._name, "query": self._query}
114 ).encode()
115 )
116
117 return postgres_options_proto
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
--- a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
+++ b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
@@ -18,6 +18,9 @@
created_timestamp_column: Optional[str] = "",
field_mapping: Optional[Dict[str, str]] = None,
date_partition_column: Optional[str] = "",
+ description: Optional[str] = "",
+ tags: Optional[Dict[str, str]] = None,
+ owner: Optional[str] = "",
):
self._postgres_options = PostgreSQLOptions(name=name, query=query)
@@ -27,6 +30,9 @@
created_timestamp_column=created_timestamp_column,
field_mapping=field_mapping,
date_partition_column=date_partition_column,
+ description=description,
+ tags=tags,
+ owner=owner,
)
def __hash__(self):
@@ -57,14 +63,21 @@
timestamp_field=data_source.timestamp_field,
created_timestamp_column=data_source.created_timestamp_column,
date_partition_column=data_source.date_partition_column,
+ description=data_source.description,
+ tags=dict(data_source.tags),
+ owner=data_source.owner,
)
def to_proto(self) -> DataSourceProto:
data_source_proto = DataSourceProto(
+ name=self.name,
type=DataSourceProto.CUSTOM_SOURCE,
data_source_class_type="feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource",
field_mapping=self.field_mapping,
custom_options=self._postgres_options.to_proto(),
+ description=self.description,
+ tags=self.tags,
+ owner=self.owner,
)
data_source_proto.timestamp_field = self.timestamp_field
| {"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n--- a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n+++ b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n@@ -18,6 +18,9 @@\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n+ description: Optional[str] = \"\",\n+ tags: Optional[Dict[str, str]] = None,\n+ owner: Optional[str] = \"\",\n ):\n self._postgres_options = PostgreSQLOptions(name=name, query=query)\n \n@@ -27,6 +30,9 @@\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n date_partition_column=date_partition_column,\n+ description=description,\n+ tags=tags,\n+ owner=owner,\n )\n \n def __hash__(self):\n@@ -57,14 +63,21 @@\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n+ description=data_source.description,\n+ tags=dict(data_source.tags),\n+ owner=data_source.owner,\n )\n \n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n+ name=self.name,\n type=DataSourceProto.CUSTOM_SOURCE,\n data_source_class_type=\"feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource\",\n field_mapping=self.field_mapping,\n custom_options=self._postgres_options.to_proto(),\n+ description=self.description,\n+ tags=self.tags,\n+ owner=self.owner,\n )\n \n data_source_proto.timestamp_field = self.timestamp_field\n", "issue": "Incorrect projects-list.json generated by feast ui when using Postgres as a data source.\n## Expected Behavior \r\nCorrect generation of the projects-list.json when running feast ui. \r\n## Current Behavior\r\nThe generated projects-list.json does not contain a name in the dataSources field, causing the parser to fail.\r\n## Steps to reproduce\r\nSetup feast with PostgreSQL as a data source.\r\n### Specifications\r\n\r\n- Version:\r\n- Platform:\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nAdding name=self.name to to_proto() in postgres_source.py. 
And in general making the postgres_source.py file more similar to e.g., file_source.py.\n", "before_files": [{"content": "import json\nfrom typing import Callable, Dict, Iterable, Optional, Tuple\n\nfrom feast.data_source import DataSource\nfrom feast.infra.utils.postgres.connection_utils import _get_conn\nfrom feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto\nfrom feast.repo_config import RepoConfig\nfrom feast.type_map import pg_type_code_to_pg_type, pg_type_to_feast_value_type\nfrom feast.value_type import ValueType\n\n\nclass PostgreSQLSource(DataSource):\n def __init__(\n self,\n name: str,\n query: str,\n timestamp_field: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n ):\n self._postgres_options = PostgreSQLOptions(name=name, query=query)\n\n super().__init__(\n name=name,\n timestamp_field=timestamp_field,\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n date_partition_column=date_partition_column,\n )\n\n def __hash__(self):\n return super().__hash__()\n\n def __eq__(self, other):\n if not isinstance(other, PostgreSQLSource):\n raise TypeError(\n \"Comparisons should only involve PostgreSQLSource class objects.\"\n )\n\n return (\n self._postgres_options._query == other._postgres_options._query\n and self.timestamp_field == other.timestamp_field\n and self.created_timestamp_column == other.created_timestamp_column\n and self.field_mapping == other.field_mapping\n )\n\n @staticmethod\n def from_proto(data_source: DataSourceProto):\n assert data_source.HasField(\"custom_options\")\n\n postgres_options = json.loads(data_source.custom_options.configuration)\n return PostgreSQLSource(\n name=postgres_options[\"name\"],\n query=postgres_options[\"query\"],\n field_mapping=dict(data_source.field_mapping),\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n )\n\n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.CUSTOM_SOURCE,\n data_source_class_type=\"feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource\",\n field_mapping=self.field_mapping,\n custom_options=self._postgres_options.to_proto(),\n )\n\n data_source_proto.timestamp_field = self.timestamp_field\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto\n\n def validate(self, config: RepoConfig):\n pass\n\n @staticmethod\n def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:\n return pg_type_to_feast_value_type\n\n def get_table_column_names_and_types(\n self, config: RepoConfig\n ) -> Iterable[Tuple[str, str]]:\n with _get_conn(config.offline_store) as conn, conn.cursor() as cur:\n cur.execute(\n f\"SELECT * FROM ({self.get_table_query_string()}) AS sub LIMIT 0\"\n )\n return (\n (c.name, pg_type_code_to_pg_type(c.type_code)) for c in cur.description\n )\n\n def get_table_query_string(self) -> str:\n return f\"({self._postgres_options._query})\"\n\n\nclass PostgreSQLOptions:\n def __init__(self, name: str, query: Optional[str]):\n self._name = name\n self._query = query\n\n @classmethod\n def from_proto(cls, postgres_options_proto: DataSourceProto.CustomSourceOptions):\n config = 
json.loads(postgres_options_proto.configuration.decode(\"utf8\"))\n postgres_options = cls(name=config[\"name\"], query=config[\"query\"])\n\n return postgres_options\n\n def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n postgres_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=json.dumps(\n {\"name\": self._name, \"query\": self._query}\n ).encode()\n )\n\n return postgres_options_proto\n", "path": "sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py"}], "after_files": [{"content": "import json\nfrom typing import Callable, Dict, Iterable, Optional, Tuple\n\nfrom feast.data_source import DataSource\nfrom feast.infra.utils.postgres.connection_utils import _get_conn\nfrom feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto\nfrom feast.repo_config import RepoConfig\nfrom feast.type_map import pg_type_code_to_pg_type, pg_type_to_feast_value_type\nfrom feast.value_type import ValueType\n\n\nclass PostgreSQLSource(DataSource):\n def __init__(\n self,\n name: str,\n query: str,\n timestamp_field: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n description: Optional[str] = \"\",\n tags: Optional[Dict[str, str]] = None,\n owner: Optional[str] = \"\",\n ):\n self._postgres_options = PostgreSQLOptions(name=name, query=query)\n\n super().__init__(\n name=name,\n timestamp_field=timestamp_field,\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n date_partition_column=date_partition_column,\n description=description,\n tags=tags,\n owner=owner,\n )\n\n def __hash__(self):\n return super().__hash__()\n\n def __eq__(self, other):\n if not isinstance(other, PostgreSQLSource):\n raise TypeError(\n \"Comparisons should only involve PostgreSQLSource class objects.\"\n )\n\n return (\n self._postgres_options._query == other._postgres_options._query\n and self.timestamp_field == other.timestamp_field\n and self.created_timestamp_column == other.created_timestamp_column\n and self.field_mapping == other.field_mapping\n )\n\n @staticmethod\n def from_proto(data_source: DataSourceProto):\n assert data_source.HasField(\"custom_options\")\n\n postgres_options = json.loads(data_source.custom_options.configuration)\n return PostgreSQLSource(\n name=postgres_options[\"name\"],\n query=postgres_options[\"query\"],\n field_mapping=dict(data_source.field_mapping),\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n description=data_source.description,\n tags=dict(data_source.tags),\n owner=data_source.owner,\n )\n\n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n name=self.name,\n type=DataSourceProto.CUSTOM_SOURCE,\n data_source_class_type=\"feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource\",\n field_mapping=self.field_mapping,\n custom_options=self._postgres_options.to_proto(),\n description=self.description,\n tags=self.tags,\n owner=self.owner,\n )\n\n data_source_proto.timestamp_field = self.timestamp_field\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto\n\n def validate(self, config: RepoConfig):\n pass\n\n @staticmethod\n def 
source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:\n return pg_type_to_feast_value_type\n\n def get_table_column_names_and_types(\n self, config: RepoConfig\n ) -> Iterable[Tuple[str, str]]:\n with _get_conn(config.offline_store) as conn, conn.cursor() as cur:\n cur.execute(\n f\"SELECT * FROM ({self.get_table_query_string()}) AS sub LIMIT 0\"\n )\n return (\n (c.name, pg_type_code_to_pg_type(c.type_code)) for c in cur.description\n )\n\n def get_table_query_string(self) -> str:\n return f\"({self._postgres_options._query})\"\n\n\nclass PostgreSQLOptions:\n def __init__(self, name: str, query: Optional[str]):\n self._name = name\n self._query = query\n\n @classmethod\n def from_proto(cls, postgres_options_proto: DataSourceProto.CustomSourceOptions):\n config = json.loads(postgres_options_proto.configuration.decode(\"utf8\"))\n postgres_options = cls(name=config[\"name\"], query=config[\"query\"])\n\n return postgres_options\n\n def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n postgres_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=json.dumps(\n {\"name\": self._name, \"query\": self._query}\n ).encode()\n )\n\n return postgres_options_proto\n", "path": "sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py"}]} | 1,510 | 438 |
gh_patches_debug_1634 | rasdani/github-patches | git_diff | coala__coala-4980 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compatibility.py: Add comment explaining JSONDecodeError is missing in Python 3.4
difficulty/newcomer
Opened via [gitter](https://gitter.im/coala/coala/?at=59098da68fcce56b205cd7e0) by @jayvdb
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/misc/Compatibility.py`
Content:
```
1 import json
2 try:
3 JSONDecodeError = json.decoder.JSONDecodeError
4 except AttributeError: # pragma Python 3.5,3.6: no cover
5 JSONDecodeError = ValueError
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/coalib/misc/Compatibility.py b/coalib/misc/Compatibility.py
--- a/coalib/misc/Compatibility.py
+++ b/coalib/misc/Compatibility.py
@@ -1,5 +1,6 @@
import json
try:
+ # JSONDecodeError class is available since Python 3.5.x.
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError: # pragma Python 3.5,3.6: no cover
JSONDecodeError = ValueError
| {"golden_diff": "diff --git a/coalib/misc/Compatibility.py b/coalib/misc/Compatibility.py\n--- a/coalib/misc/Compatibility.py\n+++ b/coalib/misc/Compatibility.py\n@@ -1,5 +1,6 @@\n import json\n try:\n+ # JSONDecodeError class is available since Python 3.5.x.\n JSONDecodeError = json.decoder.JSONDecodeError\n except AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "issue": "Compatibility.py: Add comment explaining JSONDecodeError is missing in Python 3.4\ndifficulty/newcomer\n\nOpened via [gitter](https://gitter.im/coala/coala/?at=59098da68fcce56b205cd7e0) by @jayvdb\n", "before_files": [{"content": "import json\ntry:\n JSONDecodeError = json.decoder.JSONDecodeError\nexcept AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "path": "coalib/misc/Compatibility.py"}], "after_files": [{"content": "import json\ntry:\n # JSONDecodeError class is available since Python 3.5.x.\n JSONDecodeError = json.decoder.JSONDecodeError\nexcept AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "path": "coalib/misc/Compatibility.py"}]} | 376 | 109 |
gh_patches_debug_727 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Integrate Forums into challenges
Navigating to the forum of a challenge currently takes the participant outside of the challenge environment. Navigating back to the challenge is not possible through the breadcrumbs on the forum page and instead requires going via the Challenge tab and searching for the respective Challenge again. It would be nicer if the forums were visually integrated into the challenge page layout and if the breadcrumbs reflected their nesting in the challenge rather than their nesting under all forum on GC.
See here: https://github.com/DIAGNijmegen/rse-roadmap/issues/83#issuecomment-919250835
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/forum_conversation/templatetags/forum_extras.py`
Content:
```
1 from actstream.models import Follow
2 from django import template
3 from django.contrib.contenttypes.models import ContentType
4
5 from grandchallenge.notifications.forms import FollowForm
6
7 register = template.Library()
8
9
10 @register.simple_tag
11 def get_follow_object_pk(user, follow_object):
12 object_follows_for_user = Follow.objects.filter(
13 user=user,
14 content_type=ContentType.objects.get(
15 app_label=follow_object._meta.app_label,
16 model=follow_object._meta.model_name,
17 ),
18 ).all()
19
20 if not object_follows_for_user:
21 current_follow_object = []
22 else:
23 current_follow_object = []
24 for obj in object_follows_for_user:
25 if not obj.follow_object:
26 continue
27 elif obj.follow_object.id == follow_object.id:
28 current_follow_object = obj.pk
29 return current_follow_object
30
31
32 @register.simple_tag
33 def follow_form(*, user, object_id, content_type):
34 return FollowForm(
35 user=user,
36 initial={
37 "object_id": object_id,
38 "content_type": content_type,
39 "actor_only": False,
40 },
41 )
42
43
44 @register.simple_tag()
45 def get_content_type(follow_object):
46 try:
47 ct = ContentType.objects.get(
48 app_label=follow_object._meta.app_label,
49 model=follow_object._meta.model_name,
50 )
51 except AttributeError:
52 ct = None
53 return ct
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
@@ -51,3 +51,9 @@
except AttributeError:
ct = None
return ct
+
+
[email protected]_tag()
+def is_participant(user, challenge):
+ if challenge.is_participant(user):
+ return True
| {"golden_diff": "diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n@@ -51,3 +51,9 @@\n except AttributeError:\r\n ct = None\r\n return ct\r\n+\r\n+\r\[email protected]_tag()\r\n+def is_participant(user, challenge):\r\n+ if challenge.is_participant(user):\r\n+ return True\n", "issue": "Integrate Forums into challenges \nNavigating to the forum of a challenge currently takes the participant outside of the challenge environment. Navigating back to the challenge is not possible through the breadcrumbs on the forum page and instead requires going via the Challenge tab and searching for the respective Challenge again. It would be nicer if the forums were visually integrated into the challenge page layout and if the breadcrumbs reflected their nesting in the challenge rather than their nesting under all forum on GC. \r\n\r\nSee here: https://github.com/DIAGNijmegen/rse-roadmap/issues/83#issuecomment-919250835\r\n\n", "before_files": [{"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n\r\n if not object_follows_for_user:\r\n current_follow_object = []\r\n else:\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if not obj.follow_object:\r\n continue\r\n elif obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n try:\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n except AttributeError:\r\n ct = None\r\n return ct\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}], "after_files": [{"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n\r\n if not object_follows_for_user:\r\n current_follow_object = []\r\n else:\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if not obj.follow_object:\r\n continue\r\n elif obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef 
follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n try:\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n except AttributeError:\r\n ct = None\r\n return ct\r\n\r\n\r\[email protected]_tag()\r\ndef is_participant(user, challenge):\r\n if challenge.is_participant(user):\r\n return True\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}]} | 794 | 134 |
gh_patches_debug_10509 | rasdani/github-patches | git_diff | openfun__richie-2035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cookiecutter bootstrap failure
## Bug Report
**Problematic Behavior**
The `nightly round` job warns us that there is a problem with cookiecutter template.
**Additional context/Screenshots**
[Add any other context about the problem here. If applicable, add screenshots to help explain.](https://app.circleci.com/pipelines/github/openfun/richie/6840/workflows/7b6bd5f9-e2d4-4ef1-8e54-4562a521d50d/jobs/183180)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py`
Content:
```
1 """
2 {{cookiecutter.site}} urls
3 """
4 from django.conf import settings
5 from django.conf.urls.i18n import i18n_patterns
6 from django.contrib import admin
7 from django.contrib.sitemaps.views import sitemap
8 from django.contrib.staticfiles.urls import staticfiles_urlpatterns
9 from django.urls import include, path, re_path
10 from django.views.generic import TemplateView
11 from django.views.static import serve
12
13 from cms.sitemaps import CMSSitemap
14 from richie.apps.courses.urls import (
15 redirects_urlpatterns as courses_redirects_urlpatterns,
16 urlpatterns as courses_urlpatterns,
17 )
18 from richie.apps.search.urls import urlpatterns as search_urlpatterns
19 from richie.plugins.urls import urlpatterns as plugins_urlpatterns
20
21 # For now, we use URLPathVersioning to be consistent with fonzie. Fonzie uses it
22 # because DRF OpenAPI only supports URLPathVersioning for now. See fonzie
23 # API_PREFIX config for more information.
24 API_PREFIX = r"v(?P<version>[0-9]+\.[0-9]+)"
25
26 admin.autodiscover()
27 admin.site.enable_nav_sidebar = False
28
29 urlpatterns = [
30 path(r"sitemap.xml", sitemap, {"sitemaps": {"cmspages": CMSSitemap}}),
31 re_path(
32 rf"api/{API_PREFIX:s}/",
33 include([*courses_urlpatterns, *search_urlpatterns, *plugins_urlpatterns]),
34 ),
35 path(r"", include("filer.server.urls")),
36 path(r"django-check-seo/", include("django_check_seo.urls")),
37 ]
38
39 urlpatterns += i18n_patterns(
40 path(r"admin/", admin.site.urls),
41 path(r"accounts/", include("django.contrib.auth.urls")),
42 path(r"", include("cms.urls")), # NOQA
43 )
44
45 # This is only needed when using runserver.
46 if settings.DEBUG:
47 urlpatterns = (
48 [
49 path(
50 r"styleguide/",
51 TemplateView.as_view(
52 template_name="richie/styleguide/index.html",
53 extra_context={"STYLEGUIDE": settings.STYLEGUIDE},
54 ),
55 name="styleguide",
56 ),
57 path(
58 r"media/<path:path>",
59 serve,
60 {"document_root": settings.MEDIA_ROOT, "show_indexes": True},
61 ),
62 ]
63 + staticfiles_urlpatterns()
64 + urlpatterns
65 )
66
67 handler400 = "richie.apps.core.views.error.error_400_view_handler"
68 handler403 = "richie.apps.core.views.error.error_403_view_handler"
69 handler404 = "richie.apps.core.views.error.error_404_view_handler"
70 handler500 = "richie.apps.core.views.error.error_500_view_handler"
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
--- a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
+++ b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
@@ -11,10 +11,7 @@
from django.views.static import serve
from cms.sitemaps import CMSSitemap
-from richie.apps.courses.urls import (
- redirects_urlpatterns as courses_redirects_urlpatterns,
- urlpatterns as courses_urlpatterns,
-)
+from richie.apps.courses.urls import urlpatterns as courses_urlpatterns
from richie.apps.search.urls import urlpatterns as search_urlpatterns
from richie.plugins.urls import urlpatterns as plugins_urlpatterns
| {"golden_diff": "diff --git a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n--- a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n+++ b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n@@ -11,10 +11,7 @@\n from django.views.static import serve\n \n from cms.sitemaps import CMSSitemap\n-from richie.apps.courses.urls import (\n- redirects_urlpatterns as courses_redirects_urlpatterns,\n- urlpatterns as courses_urlpatterns,\n-)\n+from richie.apps.courses.urls import urlpatterns as courses_urlpatterns\n from richie.apps.search.urls import urlpatterns as search_urlpatterns\n from richie.plugins.urls import urlpatterns as plugins_urlpatterns\n", "issue": "Cookiecutter bootstrap failure\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nThe `nightly round` job warns us that there is a problem with cookiecutter template.\r\n\r\n**Additional context/Screenshots**\r\n[Add any other context about the problem here. If applicable, add screenshots to help explain.](https://app.circleci.com/pipelines/github/openfun/richie/6840/workflows/7b6bd5f9-e2d4-4ef1-8e54-4562a521d50d/jobs/183180)\r\n\n", "before_files": [{"content": "\"\"\"\n{{cookiecutter.site}} urls\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.urls import include, path, re_path\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve\n\nfrom cms.sitemaps import CMSSitemap\nfrom richie.apps.courses.urls import (\n redirects_urlpatterns as courses_redirects_urlpatterns,\n urlpatterns as courses_urlpatterns,\n)\nfrom richie.apps.search.urls import urlpatterns as search_urlpatterns\nfrom richie.plugins.urls import urlpatterns as plugins_urlpatterns\n\n# For now, we use URLPathVersioning to be consistent with fonzie. Fonzie uses it\n# because DRF OpenAPI only supports URLPathVersioning for now. 
See fonzie\n# API_PREFIX config for more information.\nAPI_PREFIX = r\"v(?P<version>[0-9]+\\.[0-9]+)\"\n\nadmin.autodiscover()\nadmin.site.enable_nav_sidebar = False\n\nurlpatterns = [\n path(r\"sitemap.xml\", sitemap, {\"sitemaps\": {\"cmspages\": CMSSitemap}}),\n re_path(\n rf\"api/{API_PREFIX:s}/\",\n include([*courses_urlpatterns, *search_urlpatterns, *plugins_urlpatterns]),\n ),\n path(r\"\", include(\"filer.server.urls\")),\n path(r\"django-check-seo/\", include(\"django_check_seo.urls\")),\n]\n\nurlpatterns += i18n_patterns(\n path(r\"admin/\", admin.site.urls),\n path(r\"accounts/\", include(\"django.contrib.auth.urls\")),\n path(r\"\", include(\"cms.urls\")), # NOQA\n)\n\n# This is only needed when using runserver.\nif settings.DEBUG:\n urlpatterns = (\n [\n path(\n r\"styleguide/\",\n TemplateView.as_view(\n template_name=\"richie/styleguide/index.html\",\n extra_context={\"STYLEGUIDE\": settings.STYLEGUIDE},\n ),\n name=\"styleguide\",\n ),\n path(\n r\"media/<path:path>\",\n serve,\n {\"document_root\": settings.MEDIA_ROOT, \"show_indexes\": True},\n ),\n ]\n + staticfiles_urlpatterns()\n + urlpatterns\n )\n\nhandler400 = \"richie.apps.core.views.error.error_400_view_handler\"\nhandler403 = \"richie.apps.core.views.error.error_403_view_handler\"\nhandler404 = \"richie.apps.core.views.error.error_404_view_handler\"\nhandler500 = \"richie.apps.core.views.error.error_500_view_handler\"\n", "path": "cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py"}], "after_files": [{"content": "\"\"\"\n{{cookiecutter.site}} urls\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.urls import include, path, re_path\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve\n\nfrom cms.sitemaps import CMSSitemap\nfrom richie.apps.courses.urls import urlpatterns as courses_urlpatterns\nfrom richie.apps.search.urls import urlpatterns as search_urlpatterns\nfrom richie.plugins.urls import urlpatterns as plugins_urlpatterns\n\n# For now, we use URLPathVersioning to be consistent with fonzie. Fonzie uses it\n# because DRF OpenAPI only supports URLPathVersioning for now. 
See fonzie\n# API_PREFIX config for more information.\nAPI_PREFIX = r\"v(?P<version>[0-9]+\\.[0-9]+)\"\n\nadmin.autodiscover()\nadmin.site.enable_nav_sidebar = False\n\nurlpatterns = [\n path(r\"sitemap.xml\", sitemap, {\"sitemaps\": {\"cmspages\": CMSSitemap}}),\n re_path(\n rf\"api/{API_PREFIX:s}/\",\n include([*courses_urlpatterns, *search_urlpatterns, *plugins_urlpatterns]),\n ),\n path(r\"\", include(\"filer.server.urls\")),\n path(r\"django-check-seo/\", include(\"django_check_seo.urls\")),\n]\n\nurlpatterns += i18n_patterns(\n path(r\"admin/\", admin.site.urls),\n path(r\"accounts/\", include(\"django.contrib.auth.urls\")),\n path(r\"\", include(\"cms.urls\")), # NOQA\n)\n\n# This is only needed when using runserver.\nif settings.DEBUG:\n urlpatterns = (\n [\n path(\n r\"styleguide/\",\n TemplateView.as_view(\n template_name=\"richie/styleguide/index.html\",\n extra_context={\"STYLEGUIDE\": settings.STYLEGUIDE},\n ),\n name=\"styleguide\",\n ),\n path(\n r\"media/<path:path>\",\n serve,\n {\"document_root\": settings.MEDIA_ROOT, \"show_indexes\": True},\n ),\n ]\n + staticfiles_urlpatterns()\n + urlpatterns\n )\n\nhandler400 = \"richie.apps.core.views.error.error_400_view_handler\"\nhandler403 = \"richie.apps.core.views.error.error_403_view_handler\"\nhandler404 = \"richie.apps.core.views.error.error_404_view_handler\"\nhandler500 = \"richie.apps.core.views.error.error_500_view_handler\"\n", "path": "cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py"}]} | 1,121 | 253 |