problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
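Each row below is one patch-generation task: `prompt` carries the issue text plus the relevant source files, `golden_diff` the reference patch, and `verification_info` a JSON string bundling the diff, the issue, and the before/after file contents. The sketch below shows one way to load and unpack a row with the Hugging Face `datasets` library; the repo ID is taken from the `source` column and the `train` split name is an assumption, not something stated in this preview.

```python
import json

from datasets import load_dataset

# The repo ID comes from the `source` column above; the split name is assumed.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"], row["num_tokens_diff"])

# `verification_info` is a JSON string; its keys match what the rows below show:
# "golden_diff", "issue", "before_files", "after_files".
info = json.loads(row["verification_info"])
print(info["issue"].splitlines()[0])          # first line of the issue text
for f in info["before_files"]:
    print(f["path"], "->", len(f["content"]), "chars")
```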
gh_patches_debug_51314
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-2643
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
module 'skimage.filters' has no attribute 'denoise_tv_chambolle'
There are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48)
Thus `from skimage.filters import *` gives:
```
AttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/filters/__init__.py`
Content:
```
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import gaussian
3 from .edges import (sobel, sobel_h, sobel_v,
4 scharr, scharr_h, scharr_v,
5 prewitt, prewitt_h, prewitt_v,
6 roberts, roberts_pos_diag, roberts_neg_diag,
7 laplace)
8 from ._rank_order import rank_order
9 from ._gabor import gabor_kernel, gabor
10 from ._frangi import frangi, hessian
11 from .thresholding import (threshold_local,
12 threshold_adaptive, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 try_all_threshold)
17 from . import rank
18 from .rank import median
19
20 from .._shared.utils import deprecated, copy_func
21
22
23 gaussian_filter = copy_func(gaussian, name='gaussian_filter')
24 gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)
25 gabor_filter = copy_func(gabor, name='gabor_filter')
26 gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)
27
28 __all__ = ['inverse',
29 'wiener',
30 'LPIFilter2D',
31 'gaussian',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'denoise_tv_chambolle',
47 'denoise_bilateral',
48 'denoise_tv_bregman',
49 'rank_order',
50 'gabor_kernel',
51 'gabor',
52 'try_all_threshold',
53 'frangi',
54 'hessian',
55 'threshold_adaptive',
56 'threshold_otsu',
57 'threshold_yen',
58 'threshold_isodata',
59 'threshold_li',
60 'threshold_minimum',
61 'threshold_mean',
62 'threshold_niblack',
63 'threshold_sauvola',
64 'threshold_triangle',
65 'rank']
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,9 +43,6 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
- 'denoise_tv_chambolle',
- 'denoise_bilateral',
- 'denoise_tv_bregman',
'rank_order',
'gabor_kernel',
'gabor',
|
{"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,9 +43,6 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n- 'denoise_tv_chambolle',\n- 'denoise_bilateral',\n- 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "module 'skimage.filters' has no attribute 'denoise_tv_chambolle' \nThere are a couple of undefined symbols in [`filters` module](https://github.com/scikit-image/scikit-image/blob/master/skimage/filters/__init__.py#L46-L48)\r\n\r\nThus `from skimage.filters import *` gives:\r\n\r\n```\r\nAttributeError: module 'skimage.filters' has no attribute 'denoise_tv_chambolle'\r\n```\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom ._frangi import frangi, hessian\nfrom .thresholding import (threshold_local,\n threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n try_all_threshold)\nfrom . import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated, copy_func\n\n\ngaussian_filter = copy_func(gaussian, name='gaussian_filter')\ngaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)\ngabor_filter = copy_func(gabor, name='gabor_filter')\ngabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'denoise_tv_chambolle',\n 'denoise_bilateral',\n 'denoise_tv_bregman',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'frangi',\n 'hessian',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'rank']\n", "path": "skimage/filters/__init__.py"}], "after_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import gaussian\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom ._frangi import frangi, hessian\nfrom .thresholding import (threshold_local,\n threshold_adaptive, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n try_all_threshold)\nfrom . 
import rank\nfrom .rank import median\n\nfrom .._shared.utils import deprecated, copy_func\n\n\ngaussian_filter = copy_func(gaussian, name='gaussian_filter')\ngaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)\ngabor_filter = copy_func(gabor, name='gabor_filter')\ngabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)\n\n__all__ = ['inverse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'frangi',\n 'hessian',\n 'threshold_adaptive',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'rank']\n", "path": "skimage/filters/__init__.py"}]}
| 972 | 125 |
gh_patches_debug_32234
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-2252
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[REFACTOR]: remove dead code in modin/pandas/index folder
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Modin installed from (source or binary)**:
- **Modin version**:
- **Python version**:
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/pandas/index/partitioned_index.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14
15 class PartitionedIndex(object):
16
17 _index_lengths_cache = None
18
19 def _get_partition_lengths(self):
20 if self._index_lengths_cache is None:
21 self._index_lengths_cache = [
22 obj.apply(len).get() for obj in self.index_partitions[:0]
23 ]
24 return self._index_lengths_cache
25
26 def _set_partition_lengths(self, new_value):
27 self._partition_length_cache = new_value
28
29 index_lengths = property(_get_partition_lengths, _set_partition_lengths)
30
31 def __getitem__(self, key):
32 cls = type(self)
33 return cls(self.index_partitions[key])
34
35
36 class RayPartitionedIndex(PartitionedIndex):
37 def __init__(self, index_partitions):
38 self.index_partitions = index_partitions
39
```
Path: `modin/pandas/index/__init__.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py
deleted file mode 100644
--- a/modin/pandas/index/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# Licensed to Modin Development Team under one or more contributor license agreements.
-# See the NOTICE file distributed with this work for additional information regarding
-# copyright ownership. The Modin Development Team licenses this file to you under the
-# Apache License, Version 2.0 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
diff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py
deleted file mode 100644
--- a/modin/pandas/index/partitioned_index.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to Modin Development Team under one or more contributor license agreements.
-# See the NOTICE file distributed with this work for additional information regarding
-# copyright ownership. The Modin Development Team licenses this file to you under the
-# Apache License, Version 2.0 (the "License"); you may not use this file except in
-# compliance with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under
-# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific language
-# governing permissions and limitations under the License.
-
-
-class PartitionedIndex(object):
-
- _index_lengths_cache = None
-
- def _get_partition_lengths(self):
- if self._index_lengths_cache is None:
- self._index_lengths_cache = [
- obj.apply(len).get() for obj in self.index_partitions[:0]
- ]
- return self._index_lengths_cache
-
- def _set_partition_lengths(self, new_value):
- self._partition_length_cache = new_value
-
- index_lengths = property(_get_partition_lengths, _set_partition_lengths)
-
- def __getitem__(self, key):
- cls = type(self)
- return cls(self.index_partitions[key])
-
-
-class RayPartitionedIndex(PartitionedIndex):
- def __init__(self, index_partitions):
- self.index_partitions = index_partitions
|
{"golden_diff": "diff --git a/modin/pandas/index/__init__.py b/modin/pandas/index/__init__.py\ndeleted file mode 100644\n--- a/modin/pandas/index/__init__.py\n+++ /dev/null\n@@ -1,12 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\ndiff --git a/modin/pandas/index/partitioned_index.py b/modin/pandas/index/partitioned_index.py\ndeleted file mode 100644\n--- a/modin/pandas/index/partitioned_index.py\n+++ /dev/null\n@@ -1,38 +0,0 @@\n-# Licensed to Modin Development Team under one or more contributor license agreements.\n-# See the NOTICE file distributed with this work for additional information regarding\n-# copyright ownership. The Modin Development Team licenses this file to you under the\n-# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n-# compliance with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software distributed under\n-# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n-# ANY KIND, either express or implied. See the License for the specific language\n-# governing permissions and limitations under the License.\n-\n-\n-class PartitionedIndex(object):\n-\n- _index_lengths_cache = None\n-\n- def _get_partition_lengths(self):\n- if self._index_lengths_cache is None:\n- self._index_lengths_cache = [\n- obj.apply(len).get() for obj in self.index_partitions[:0]\n- ]\n- return self._index_lengths_cache\n-\n- def _set_partition_lengths(self, new_value):\n- self._partition_length_cache = new_value\n-\n- index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n-\n- def __getitem__(self, key):\n- cls = type(self)\n- return cls(self.index_partitions[key])\n-\n-\n-class RayPartitionedIndex(PartitionedIndex):\n- def __init__(self, index_partitions):\n- self.index_partitions = index_partitions\n", "issue": "[REFACTOR]: remove dead code in modin/pandas/index folder\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Modin installed from (source or binary)**:\r\n- **Modin version**:\r\n- **Python version**:\r\n- **Exact command to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. 
Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\nclass PartitionedIndex(object):\n\n _index_lengths_cache = None\n\n def _get_partition_lengths(self):\n if self._index_lengths_cache is None:\n self._index_lengths_cache = [\n obj.apply(len).get() for obj in self.index_partitions[:0]\n ]\n return self._index_lengths_cache\n\n def _set_partition_lengths(self, new_value):\n self._partition_length_cache = new_value\n\n index_lengths = property(_get_partition_lengths, _set_partition_lengths)\n\n def __getitem__(self, key):\n cls = type(self)\n return cls(self.index_partitions[key])\n\n\nclass RayPartitionedIndex(PartitionedIndex):\n def __init__(self, index_partitions):\n self.index_partitions = index_partitions\n", "path": "modin/pandas/index/partitioned_index.py"}, {"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n", "path": "modin/pandas/index/__init__.py"}], "after_files": [{"content": null, "path": "modin/pandas/index/partitioned_index.py"}, {"content": null, "path": "modin/pandas/index/__init__.py"}]}
| 1,072 | 659 |
gh_patches_debug_9364
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-2258
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Azure VM - We are not getting power state
The VM data we are getting back does not tell you if the VM is running or not.
I think perhaps you have to tell the `list_all` api what you want - we want `instanceview`
https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview
Not sure how this happens via SDK.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_azure/c7n_azure/resources/vm.py`
Content:
```
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from c7n_azure.query import QueryResourceManager
16 from c7n_azure.provider import resources
17 from c7n.filters.core import ValueFilter, type_schema
18
19 @resources.register('vm')
20 class VirtualMachine(QueryResourceManager):
21
22 class resource_type(object):
23 service = 'azure.mgmt.compute'
24 client = 'ComputeManagementClient'
25 enum_spec = ('virtual_machines', 'list_all')
26 id = 'id'
27 name = 'name'
28 default_report_fields = (
29 'name',
30 'location',
31 'resourceGroup',
32 'properties.hardwareProfile.vmSize',
33 )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py
--- a/tools/c7n_azure/c7n_azure/resources/vm.py
+++ b/tools/c7n_azure/c7n_azure/resources/vm.py
@@ -31,3 +31,15 @@
'resourceGroup',
'properties.hardwareProfile.vmSize',
)
+
+@VirtualMachine.filter_registry.register('instance-view')
+class InstanceViewFilter(ValueFilter):
+ schema = type_schema('instance-view', rinherit=ValueFilter.schema)
+
+ def __call__(self, i):
+ if 'instanceView' not in i:
+ client = self.manager.get_client()
+ instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view
+ i['instanceView'] = instance.serialize()
+
+ return super(InstanceViewFilter, self).__call__(i['instanceView'])
|
{"golden_diff": "diff --git a/tools/c7n_azure/c7n_azure/resources/vm.py b/tools/c7n_azure/c7n_azure/resources/vm.py\n--- a/tools/c7n_azure/c7n_azure/resources/vm.py\n+++ b/tools/c7n_azure/c7n_azure/resources/vm.py\n@@ -31,3 +31,15 @@\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n+\[email protected]_registry.register('instance-view')\n+class InstanceViewFilter(ValueFilter):\n+ schema = type_schema('instance-view', rinherit=ValueFilter.schema)\n+\n+ def __call__(self, i):\n+ if 'instanceView' not in i:\n+ client = self.manager.get_client()\n+ instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view\n+ i['instanceView'] = instance.serialize()\n+\n+ return super(InstanceViewFilter, self).__call__(i['instanceView'])\n", "issue": "Azure VM - We are not getting power state\nThe VM data we are getting back does not tell you if the VM is running or not.\r\n\r\nI think perhaps you have to tell the `list_all` api what you want - we want `instanceview`\r\n\r\nhttps://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/instanceview\r\n\r\nNot sure how this happens via SDK.\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.query import QueryResourceManager\nfrom c7n_azure.provider import resources\nfrom c7n.filters.core import ValueFilter, type_schema\n\[email protected]('vm')\nclass VirtualMachine(QueryResourceManager):\n\n class resource_type(object):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('virtual_machines', 'list_all')\n id = 'id'\n name = 'name'\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n", "path": "tools/c7n_azure/c7n_azure/resources/vm.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom c7n_azure.query import QueryResourceManager\nfrom c7n_azure.provider import resources\nfrom c7n.filters.core import ValueFilter, type_schema\n\[email protected]('vm')\nclass VirtualMachine(QueryResourceManager):\n\n class resource_type(object):\n service = 'azure.mgmt.compute'\n client = 'ComputeManagementClient'\n enum_spec = ('virtual_machines', 'list_all')\n id = 'id'\n name = 'name'\n default_report_fields = (\n 'name',\n 'location',\n 'resourceGroup',\n 'properties.hardwareProfile.vmSize',\n )\n\[email 
protected]_registry.register('instance-view')\nclass InstanceViewFilter(ValueFilter):\n schema = type_schema('instance-view', rinherit=ValueFilter.schema)\n\n def __call__(self, i):\n if 'instanceView' not in i:\n client = self.manager.get_client()\n instance = client.virtual_machines.get(i['resourceGroup'], i['name'], expand='instanceview').instance_view\n i['instanceView'] = instance.serialize()\n\n return super(InstanceViewFilter, self).__call__(i['instanceView'])\n", "path": "tools/c7n_azure/c7n_azure/resources/vm.py"}]}
| 666 | 223 |
gh_patches_debug_50933
|
rasdani/github-patches
|
git_diff
|
apache__airflow-15117
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove 'user_id', 'role_id' from User and Role in OpenAPI schema
Would be good to remove the 'id' of both User and Role schemas from what is dumped in REST API endpoints. ID of User and Role table are sensitive data that would be fine to hide from the endpoints
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/api_connexion/schemas/user_schema.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from typing import List, NamedTuple
18
19 from flask_appbuilder.security.sqla.models import User
20 from marshmallow import Schema, fields
21 from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
22
23 from airflow.api_connexion.parameters import validate_istimezone
24 from airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema
25
26
27 class UserCollectionItemSchema(SQLAlchemySchema):
28 """user collection item schema"""
29
30 class Meta:
31 """Meta"""
32
33 model = User
34 dateformat = "iso"
35
36 user_id = auto_field('id', dump_only=True)
37 first_name = auto_field()
38 last_name = auto_field()
39 username = auto_field()
40 active = auto_field(dump_only=True)
41 email = auto_field()
42 last_login = auto_field(dump_only=True)
43 login_count = auto_field(dump_only=True)
44 fail_login_count = auto_field(dump_only=True)
45 roles = fields.List(fields.Nested(RoleSchema, only=('name',)))
46 created_on = auto_field(validate=validate_istimezone, dump_only=True)
47 changed_on = auto_field(validate=validate_istimezone, dump_only=True)
48
49
50 class UserSchema(UserCollectionItemSchema):
51 """User schema"""
52
53 password = auto_field(load_only=True)
54
55
56 class UserCollection(NamedTuple):
57 """User collection"""
58
59 users: List[User]
60 total_entries: int
61
62
63 class UserCollectionSchema(Schema):
64 """User collection schema"""
65
66 users = fields.List(fields.Nested(UserCollectionItemSchema))
67 total_entries = fields.Int()
68
69
70 user_collection_item_schema = UserCollectionItemSchema()
71 user_schema = UserSchema()
72 user_collection_schema = UserCollectionSchema()
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py
--- a/airflow/api_connexion/schemas/user_schema.py
+++ b/airflow/api_connexion/schemas/user_schema.py
@@ -33,7 +33,6 @@
model = User
dateformat = "iso"
- user_id = auto_field('id', dump_only=True)
first_name = auto_field()
last_name = auto_field()
username = auto_field()
|
{"golden_diff": "diff --git a/airflow/api_connexion/schemas/user_schema.py b/airflow/api_connexion/schemas/user_schema.py\n--- a/airflow/api_connexion/schemas/user_schema.py\n+++ b/airflow/api_connexion/schemas/user_schema.py\n@@ -33,7 +33,6 @@\n model = User\n dateformat = \"iso\"\n \n- user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n", "issue": "Remove 'user_id', 'role_id' from User and Role in OpenAPI schema \nWould be good to remove the 'id' of both User and Role schemas from what is dumped in REST API endpoints. ID of User and Role table are sensitive data that would be fine to hide from the endpoints\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, NamedTuple\n\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import Schema, fields\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema\n\n\nclass UserCollectionItemSchema(SQLAlchemySchema):\n \"\"\"user collection item schema\"\"\"\n\n class Meta:\n \"\"\"Meta\"\"\"\n\n model = User\n dateformat = \"iso\"\n\n user_id = auto_field('id', dump_only=True)\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n active = auto_field(dump_only=True)\n email = auto_field()\n last_login = auto_field(dump_only=True)\n login_count = auto_field(dump_only=True)\n fail_login_count = auto_field(dump_only=True)\n roles = fields.List(fields.Nested(RoleSchema, only=('name',)))\n created_on = auto_field(validate=validate_istimezone, dump_only=True)\n changed_on = auto_field(validate=validate_istimezone, dump_only=True)\n\n\nclass UserSchema(UserCollectionItemSchema):\n \"\"\"User schema\"\"\"\n\n password = auto_field(load_only=True)\n\n\nclass UserCollection(NamedTuple):\n \"\"\"User collection\"\"\"\n\n users: List[User]\n total_entries: int\n\n\nclass UserCollectionSchema(Schema):\n \"\"\"User collection schema\"\"\"\n\n users = fields.List(fields.Nested(UserCollectionItemSchema))\n total_entries = fields.Int()\n\n\nuser_collection_item_schema = UserCollectionItemSchema()\nuser_schema = UserSchema()\nuser_collection_schema = UserCollectionSchema()\n", "path": "airflow/api_connexion/schemas/user_schema.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import List, NamedTuple\n\nfrom flask_appbuilder.security.sqla.models import User\nfrom marshmallow import Schema, fields\nfrom marshmallow_sqlalchemy import SQLAlchemySchema, auto_field\n\nfrom airflow.api_connexion.parameters import validate_istimezone\nfrom airflow.api_connexion.schemas.role_and_permission_schema import RoleSchema\n\n\nclass UserCollectionItemSchema(SQLAlchemySchema):\n \"\"\"user collection item schema\"\"\"\n\n class Meta:\n \"\"\"Meta\"\"\"\n\n model = User\n dateformat = \"iso\"\n\n first_name = auto_field()\n last_name = auto_field()\n username = auto_field()\n active = auto_field(dump_only=True)\n email = auto_field()\n last_login = auto_field(dump_only=True)\n login_count = auto_field(dump_only=True)\n fail_login_count = auto_field(dump_only=True)\n roles = fields.List(fields.Nested(RoleSchema, only=('name',)))\n created_on = auto_field(validate=validate_istimezone, dump_only=True)\n changed_on = auto_field(validate=validate_istimezone, dump_only=True)\n\n\nclass UserSchema(UserCollectionItemSchema):\n \"\"\"User schema\"\"\"\n\n password = auto_field(load_only=True)\n\n\nclass UserCollection(NamedTuple):\n \"\"\"User collection\"\"\"\n\n users: List[User]\n total_entries: int\n\n\nclass UserCollectionSchema(Schema):\n \"\"\"User collection schema\"\"\"\n\n users = fields.List(fields.Nested(UserCollectionItemSchema))\n total_entries = fields.Int()\n\n\nuser_collection_item_schema = UserCollectionItemSchema()\nuser_schema = UserSchema()\nuser_collection_schema = UserCollectionSchema()\n", "path": "airflow/api_connexion/schemas/user_schema.py"}]}
| 997 | 115 |
gh_patches_debug_19410
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-1418
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Downloading from Oboom.com without premium ERROR
It can't download the file. I get the Error "recaptcha html not found".
Everything is up2date...:(
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/OboomCom.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Test links:
4 # https://www.oboom.com/B7CYZIEB/10Mio.dat
5
6 import re
7
8 from module.common.json_layer import json_loads
9 from module.plugins.Hoster import Hoster
10 from module.plugins.internal.CaptchaService import ReCaptcha
11
12
13 class OboomCom(Hoster):
14 __name__ = "OboomCom"
15 __type__ = "hoster"
16 __version__ = "0.31"
17
18 __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>\w{8})'
19
20 __description__ = """oboom.com hoster plugin"""
21 __license__ = "GPLv3"
22 __authors__ = [("stanley", "[email protected]")]
23
24
25 RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"
26
27
28 def setup(self):
29 self.chunkLimit = 1
30 self.multiDL = self.resumeDownload = self.premium
31
32
33 def process(self, pyfile):
34 self.pyfile.url.replace(".com/#id=", ".com/#")
35 self.pyfile.url.replace(".com/#/", ".com/#")
36 self.getFileId(self.pyfile.url)
37 self.getSessionToken()
38 self.getFileInfo(self.sessionToken, self.fileId)
39 self.pyfile.name = self.fileName
40 self.pyfile.size = self.fileSize
41 if not self.premium:
42 self.solveCaptcha()
43 self.getDownloadTicket()
44 self.download("https://%s/1.0/dlh" % self.downloadDomain, get={"ticket": self.downloadTicket, "http_errors": 0})
45
46
47 def loadUrl(self, url, get=None):
48 if get is None:
49 get = dict()
50 return json_loads(self.load(url, get, decode=True))
51
52
53 def getFileId(self, url):
54 self.fileId = re.match(OboomCom.__pattern__, url).group('ID')
55
56
57 def getSessionToken(self):
58 if self.premium:
59 accountInfo = self.account.getAccountInfo(self.user, True)
60 if "session" in accountInfo:
61 self.sessionToken = accountInfo['session']
62 else:
63 self.fail(_("Could not retrieve premium session"))
64 else:
65 apiUrl = "https://www.oboom.com/1.0/guestsession"
66 result = self.loadUrl(apiUrl)
67 if result[0] == 200:
68 self.sessionToken = result[1]
69 else:
70 self.fail(_("Could not retrieve token for guest session. Error code: %s") % result[0])
71
72
73 def solveCaptcha(self):
74 recaptcha = ReCaptcha(self)
75
76 for _i in xrange(5):
77 response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)
78 apiUrl = "https://www.oboom.com/1.0/download/ticket"
79 params = {"recaptcha_challenge_field": challenge,
80 "recaptcha_response_field": response,
81 "download_id": self.fileId,
82 "token": self.sessionToken}
83 result = self.loadUrl(apiUrl, params)
84
85 if result[0] == 200:
86 self.downloadToken = result[1]
87 self.downloadAuth = result[2]
88 self.correctCaptcha()
89 self.setWait(30)
90 self.wait()
91 break
92
93 elif result[0] == 400:
94 if result[1] == "incorrect-captcha-sol":
95 self.invalidCaptcha()
96 elif result[1] == "captcha-timeout":
97 self.invalidCaptcha()
98 elif result[1] == "forbidden":
99 self.retry(5, 15 * 60, _("Service unavailable"))
100
101 elif result[0] == 403:
102 if result[1] == -1: # another download is running
103 self.setWait(15 * 60)
104 else:
105 self.setWait(result[1], True)
106 self.wait()
107 self.retry(5)
108 else:
109 self.invalidCaptcha()
110 self.fail(_("Received invalid captcha 5 times"))
111
112
113 def getFileInfo(self, token, fileId):
114 apiUrl = "https://api.oboom.com/1.0/info"
115 params = {"token": token, "items": fileId, "http_errors": 0}
116
117 result = self.loadUrl(apiUrl, params)
118 if result[0] == 200:
119 item = result[1][0]
120 if item['state'] == "online":
121 self.fileSize = item['size']
122 self.fileName = item['name']
123 else:
124 self.offline()
125 else:
126 self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1]))
127
128
129 def getDownloadTicket(self):
130 apiUrl = "https://api.oboom.com/1/dl"
131 params = {"item": self.fileId, "http_errors": 0}
132 if self.premium:
133 params['token'] = self.sessionToken
134 else:
135 params['token'] = self.downloadToken
136 params['auth'] = self.downloadAuth
137
138 result = self.loadUrl(apiUrl, params)
139 if result[0] == 200:
140 self.downloadDomain = result[1]
141 self.downloadTicket = result[2]
142 elif result[0] == 421:
143 self.retry(wait_time=result[2] + 60, reason=_("Connection limit exceeded"))
144 else:
145 self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/module/plugins/hoster/OboomCom.py b/module/plugins/hoster/OboomCom.py
--- a/module/plugins/hoster/OboomCom.py
+++ b/module/plugins/hoster/OboomCom.py
@@ -13,9 +13,9 @@
class OboomCom(Hoster):
__name__ = "OboomCom"
__type__ = "hoster"
- __version__ = "0.31"
+ __version__ = "0.32"
- __pattern__ = r'https?://(?:www\.)?oboom\.com/(#(id=|/)?)?(?P<ID>\w{8})'
+ __pattern__ = r'https?://(?:www\.)?oboom\.com/(?:#(?:id=|/)?)?(?P<ID>\w{8})'
__description__ = """oboom.com hoster plugin"""
__license__ = "GPLv3"
@@ -33,6 +33,7 @@
def process(self, pyfile):
self.pyfile.url.replace(".com/#id=", ".com/#")
self.pyfile.url.replace(".com/#/", ".com/#")
+ self.html = self.load(pyfile.url)
self.getFileId(self.pyfile.url)
self.getSessionToken()
self.getFileInfo(self.sessionToken, self.fileId)
|
{"golden_diff": "diff --git a/module/plugins/hoster/OboomCom.py b/module/plugins/hoster/OboomCom.py\n--- a/module/plugins/hoster/OboomCom.py\n+++ b/module/plugins/hoster/OboomCom.py\n@@ -13,9 +13,9 @@\n class OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.31\"\n+ __version__ = \"0.32\"\n \n- __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(#(id=|/)?)?(?P<ID>\\w{8})'\n+ __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(?:#(?:id=|/)?)?(?P<ID>\\w{8})'\n \n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n@@ -33,6 +33,7 @@\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n+ self.html = self.load(pyfile.url)\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n", "issue": "Downloading from Oboom.com without premium ERROR\nIt can't download the file. I get the Error \"recaptcha html not found\".\nEverything is up2date...:(\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Test links:\n# https://www.oboom.com/B7CYZIEB/10Mio.dat\n\nimport re\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Hoster import Hoster\nfrom module.plugins.internal.CaptchaService import ReCaptcha\n\n\nclass OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n __version__ = \"0.31\"\n\n __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(#(id=|/)?)?(?P<ID>\\w{8})'\n\n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"stanley\", \"[email protected]\")]\n\n\n RECAPTCHA_KEY = \"6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX\"\n\n\n def setup(self):\n self.chunkLimit = 1\n self.multiDL = self.resumeDownload = self.premium\n\n\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n self.pyfile.name = self.fileName\n self.pyfile.size = self.fileSize\n if not self.premium:\n self.solveCaptcha()\n self.getDownloadTicket()\n self.download(\"https://%s/1.0/dlh\" % self.downloadDomain, get={\"ticket\": self.downloadTicket, \"http_errors\": 0})\n\n\n def loadUrl(self, url, get=None):\n if get is None:\n get = dict()\n return json_loads(self.load(url, get, decode=True))\n\n\n def getFileId(self, url):\n self.fileId = re.match(OboomCom.__pattern__, url).group('ID')\n\n\n def getSessionToken(self):\n if self.premium:\n accountInfo = self.account.getAccountInfo(self.user, True)\n if \"session\" in accountInfo:\n self.sessionToken = accountInfo['session']\n else:\n self.fail(_(\"Could not retrieve premium session\"))\n else:\n apiUrl = \"https://www.oboom.com/1.0/guestsession\"\n result = self.loadUrl(apiUrl)\n if result[0] == 200:\n self.sessionToken = result[1]\n else:\n self.fail(_(\"Could not retrieve token for guest session. 
Error code: %s\") % result[0])\n\n\n def solveCaptcha(self):\n recaptcha = ReCaptcha(self)\n\n for _i in xrange(5):\n response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)\n apiUrl = \"https://www.oboom.com/1.0/download/ticket\"\n params = {\"recaptcha_challenge_field\": challenge,\n \"recaptcha_response_field\": response,\n \"download_id\": self.fileId,\n \"token\": self.sessionToken}\n result = self.loadUrl(apiUrl, params)\n\n if result[0] == 200:\n self.downloadToken = result[1]\n self.downloadAuth = result[2]\n self.correctCaptcha()\n self.setWait(30)\n self.wait()\n break\n\n elif result[0] == 400:\n if result[1] == \"incorrect-captcha-sol\":\n self.invalidCaptcha()\n elif result[1] == \"captcha-timeout\":\n self.invalidCaptcha()\n elif result[1] == \"forbidden\":\n self.retry(5, 15 * 60, _(\"Service unavailable\"))\n\n elif result[0] == 403:\n if result[1] == -1: # another download is running\n self.setWait(15 * 60)\n else:\n self.setWait(result[1], True)\n self.wait()\n self.retry(5)\n else:\n self.invalidCaptcha()\n self.fail(_(\"Received invalid captcha 5 times\"))\n\n\n def getFileInfo(self, token, fileId):\n apiUrl = \"https://api.oboom.com/1.0/info\"\n params = {\"token\": token, \"items\": fileId, \"http_errors\": 0}\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n item = result[1][0]\n if item['state'] == \"online\":\n self.fileSize = item['size']\n self.fileName = item['name']\n else:\n self.offline()\n else:\n self.fail(_(\"Could not retrieve file info. Error code %s: %s\") % (result[0], result[1]))\n\n\n def getDownloadTicket(self):\n apiUrl = \"https://api.oboom.com/1/dl\"\n params = {\"item\": self.fileId, \"http_errors\": 0}\n if self.premium:\n params['token'] = self.sessionToken\n else:\n params['token'] = self.downloadToken\n params['auth'] = self.downloadAuth\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n self.downloadDomain = result[1]\n self.downloadTicket = result[2]\n elif result[0] == 421:\n self.retry(wait_time=result[2] + 60, reason=_(\"Connection limit exceeded\"))\n else:\n self.fail(_(\"Could not retrieve download ticket. 
Error code: %s\") % result[0])\n", "path": "module/plugins/hoster/OboomCom.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Test links:\n# https://www.oboom.com/B7CYZIEB/10Mio.dat\n\nimport re\n\nfrom module.common.json_layer import json_loads\nfrom module.plugins.Hoster import Hoster\nfrom module.plugins.internal.CaptchaService import ReCaptcha\n\n\nclass OboomCom(Hoster):\n __name__ = \"OboomCom\"\n __type__ = \"hoster\"\n __version__ = \"0.32\"\n\n __pattern__ = r'https?://(?:www\\.)?oboom\\.com/(?:#(?:id=|/)?)?(?P<ID>\\w{8})'\n\n __description__ = \"\"\"oboom.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"stanley\", \"[email protected]\")]\n\n\n RECAPTCHA_KEY = \"6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX\"\n\n\n def setup(self):\n self.chunkLimit = 1\n self.multiDL = self.resumeDownload = self.premium\n\n\n def process(self, pyfile):\n self.pyfile.url.replace(\".com/#id=\", \".com/#\")\n self.pyfile.url.replace(\".com/#/\", \".com/#\")\n self.html = self.load(pyfile.url)\n self.getFileId(self.pyfile.url)\n self.getSessionToken()\n self.getFileInfo(self.sessionToken, self.fileId)\n self.pyfile.name = self.fileName\n self.pyfile.size = self.fileSize\n if not self.premium:\n self.solveCaptcha()\n self.getDownloadTicket()\n self.download(\"https://%s/1.0/dlh\" % self.downloadDomain, get={\"ticket\": self.downloadTicket, \"http_errors\": 0})\n\n\n def loadUrl(self, url, get=None):\n if get is None:\n get = dict()\n return json_loads(self.load(url, get, decode=True))\n\n\n def getFileId(self, url):\n self.fileId = re.match(OboomCom.__pattern__, url).group('ID')\n\n\n def getSessionToken(self):\n if self.premium:\n accountInfo = self.account.getAccountInfo(self.user, True)\n if \"session\" in accountInfo:\n self.sessionToken = accountInfo['session']\n else:\n self.fail(_(\"Could not retrieve premium session\"))\n else:\n apiUrl = \"https://www.oboom.com/1.0/guestsession\"\n result = self.loadUrl(apiUrl)\n if result[0] == 200:\n self.sessionToken = result[1]\n else:\n self.fail(_(\"Could not retrieve token for guest session. 
Error code: %s\") % result[0])\n\n\n def solveCaptcha(self):\n recaptcha = ReCaptcha(self)\n\n for _i in xrange(5):\n response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)\n apiUrl = \"https://www.oboom.com/1.0/download/ticket\"\n params = {\"recaptcha_challenge_field\": challenge,\n \"recaptcha_response_field\": response,\n \"download_id\": self.fileId,\n \"token\": self.sessionToken}\n result = self.loadUrl(apiUrl, params)\n\n if result[0] == 200:\n self.downloadToken = result[1]\n self.downloadAuth = result[2]\n self.correctCaptcha()\n self.setWait(30)\n self.wait()\n break\n\n elif result[0] == 400:\n if result[1] == \"incorrect-captcha-sol\":\n self.invalidCaptcha()\n elif result[1] == \"captcha-timeout\":\n self.invalidCaptcha()\n elif result[1] == \"forbidden\":\n self.retry(5, 15 * 60, _(\"Service unavailable\"))\n\n elif result[0] == 403:\n if result[1] == -1: # another download is running\n self.setWait(15 * 60)\n else:\n self.setWait(result[1], True)\n self.wait()\n self.retry(5)\n else:\n self.invalidCaptcha()\n self.fail(_(\"Received invalid captcha 5 times\"))\n\n\n def getFileInfo(self, token, fileId):\n apiUrl = \"https://api.oboom.com/1.0/info\"\n params = {\"token\": token, \"items\": fileId, \"http_errors\": 0}\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n item = result[1][0]\n if item['state'] == \"online\":\n self.fileSize = item['size']\n self.fileName = item['name']\n else:\n self.offline()\n else:\n self.fail(_(\"Could not retrieve file info. Error code %s: %s\") % (result[0], result[1]))\n\n\n def getDownloadTicket(self):\n apiUrl = \"https://api.oboom.com/1/dl\"\n params = {\"item\": self.fileId, \"http_errors\": 0}\n if self.premium:\n params['token'] = self.sessionToken\n else:\n params['token'] = self.downloadToken\n params['auth'] = self.downloadAuth\n\n result = self.loadUrl(apiUrl, params)\n if result[0] == 200:\n self.downloadDomain = result[1]\n self.downloadTicket = result[2]\n elif result[0] == 421:\n self.retry(wait_time=result[2] + 60, reason=_(\"Connection limit exceeded\"))\n else:\n self.fail(_(\"Could not retrieve download ticket. Error code: %s\") % result[0])\n", "path": "module/plugins/hoster/OboomCom.py"}]}
| 1,876 | 296 |
gh_patches_debug_7405
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-823
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urllib instrumentation fails for local file access
When reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217
urllib instrumentation fails for local file access
When reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.
https://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict, Sequence
16
17 from wrapt import ObjectProxy
18
19 from opentelemetry import context, trace
20
21 # pylint: disable=unused-import
22 # pylint: disable=E0611
23 from opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401
24 from opentelemetry.propagate import extract
25 from opentelemetry.trace import StatusCode
26
27
28 def extract_attributes_from_object(
29 obj: any, attributes: Sequence[str], existing: Dict[str, str] = None
30 ) -> Dict[str, str]:
31 extracted = {}
32 if existing:
33 extracted.update(existing)
34 for attr in attributes:
35 value = getattr(obj, attr, None)
36 if value is not None:
37 extracted[attr] = str(value)
38 return extracted
39
40
41 def http_status_to_status_code(
42 status: int,
43 allow_redirect: bool = True,
44 server_span: bool = False,
45 ) -> StatusCode:
46 """Converts an HTTP status code to an OpenTelemetry canonical status code
47
48 Args:
49 status (int): HTTP status code
50 """
51 # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
52 if status < 100:
53 return StatusCode.ERROR
54 if status <= 299:
55 return StatusCode.UNSET
56 if status <= 399 and allow_redirect:
57 return StatusCode.UNSET
58 if status <= 499 and server_span:
59 return StatusCode.UNSET
60 return StatusCode.ERROR
61
62
63 def unwrap(obj, attr: str):
64 """Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it
65
66 Args:
67 obj: Object that holds a reference to the wrapped function
68 attr (str): Name of the wrapped function
69 """
70 func = getattr(obj, attr, None)
71 if func and isinstance(func, ObjectProxy) and hasattr(func, "__wrapped__"):
72 setattr(obj, attr, func.__wrapped__)
73
74
75 def _start_internal_or_server_span(
76 tracer, span_name, start_time, context_carrier, context_getter
77 ):
78 """Returns internal or server span along with the token which can be used by caller to reset context
79
80
81 Args:
82 tracer : tracer in use by given instrumentation library
83 name (string): name of the span
84 start_time : start time of the span
85 context_carrier : object which contains values that are
86 used to construct a Context. This object
87 must be paired with an appropriate getter
88 which understands how to extract a value from it.
89 context_getter : an object which contains a get function that can retrieve zero
90 or more values from the carrier and a keys function that can get all the keys
91 from carrier.
92 """
93
94 token = ctx = span_kind = None
95 if trace.get_current_span() is trace.INVALID_SPAN:
96 ctx = extract(context_carrier, getter=context_getter)
97 token = context.attach(ctx)
98 span_kind = trace.SpanKind.SERVER
99 else:
100 ctx = context.get_current()
101 span_kind = trace.SpanKind.INTERNAL
102 span = tracer.start_span(
103 name=span_name,
104 context=ctx,
105 kind=span_kind,
106 start_time=start_time,
107 )
108 return span, token
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py
@@ -49,6 +49,9 @@
status (int): HTTP status code
"""
# See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status
+ if not isinstance(status, int):
+ return StatusCode.UNSET
+
if status < 100:
return StatusCode.ERROR
if status <= 299:
|
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py\n@@ -49,6 +49,9 @@\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n+ if not isinstance(status, int):\n+ return StatusCode.UNSET\n+\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n", "issue": "urllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\nurllib instrumentation fails for local file access\nWhen reading local files the status code is not specified and is None. This isn't handled by the instrumentation and causes an exception.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python-contrib/blob/444e0a13127304d3a04ccd44445b2e6caed3f770/instrumentation/opentelemetry-instrumentation-urllib/src/opentelemetry/instrumentation/urllib/__init__.py#L212-L217\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence\n\nfrom wrapt import ObjectProxy\n\nfrom opentelemetry import context, trace\n\n# pylint: disable=unused-import\n# pylint: disable=E0611\nfrom opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.trace import StatusCode\n\n\ndef extract_attributes_from_object(\n obj: any, attributes: Sequence[str], existing: Dict[str, str] = None\n) -> Dict[str, str]:\n extracted = {}\n if existing:\n extracted.update(existing)\n for attr in attributes:\n value = getattr(obj, attr, None)\n if value is not None:\n extracted[attr] = str(value)\n return extracted\n\n\ndef http_status_to_status_code(\n status: int,\n allow_redirect: bool = True,\n server_span: bool = False,\n) -> StatusCode:\n \"\"\"Converts an HTTP status code to an OpenTelemetry canonical status code\n\n Args:\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n return StatusCode.UNSET\n if status <= 399 and allow_redirect:\n return StatusCode.UNSET\n if status <= 499 and server_span:\n return StatusCode.UNSET\n return StatusCode.ERROR\n\n\ndef unwrap(obj, attr: str):\n \"\"\"Given a function that was wrapped by 
wrapt.wrap_function_wrapper, unwrap it\n\n Args:\n obj: Object that holds a reference to the wrapped function\n attr (str): Name of the wrapped function\n \"\"\"\n func = getattr(obj, attr, None)\n if func and isinstance(func, ObjectProxy) and hasattr(func, \"__wrapped__\"):\n setattr(obj, attr, func.__wrapped__)\n\n\ndef _start_internal_or_server_span(\n tracer, span_name, start_time, context_carrier, context_getter\n):\n \"\"\"Returns internal or server span along with the token which can be used by caller to reset context\n\n\n Args:\n tracer : tracer in use by given instrumentation library\n name (string): name of the span\n start_time : start time of the span\n context_carrier : object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context_getter : an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n \"\"\"\n\n token = ctx = span_kind = None\n if trace.get_current_span() is trace.INVALID_SPAN:\n ctx = extract(context_carrier, getter=context_getter)\n token = context.attach(ctx)\n span_kind = trace.SpanKind.SERVER\n else:\n ctx = context.get_current()\n span_kind = trace.SpanKind.INTERNAL\n span = tracer.start_span(\n name=span_name,\n context=ctx,\n kind=span_kind,\n start_time=start_time,\n )\n return span, token\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence\n\nfrom wrapt import ObjectProxy\n\nfrom opentelemetry import context, trace\n\n# pylint: disable=unused-import\n# pylint: disable=E0611\nfrom opentelemetry.context import _SUPPRESS_INSTRUMENTATION_KEY # noqa: F401\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.trace import StatusCode\n\n\ndef extract_attributes_from_object(\n obj: any, attributes: Sequence[str], existing: Dict[str, str] = None\n) -> Dict[str, str]:\n extracted = {}\n if existing:\n extracted.update(existing)\n for attr in attributes:\n value = getattr(obj, attr, None)\n if value is not None:\n extracted[attr] = str(value)\n return extracted\n\n\ndef http_status_to_status_code(\n status: int,\n allow_redirect: bool = True,\n server_span: bool = False,\n) -> StatusCode:\n \"\"\"Converts an HTTP status code to an OpenTelemetry canonical status code\n\n Args:\n status (int): HTTP status code\n \"\"\"\n # See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status\n if not isinstance(status, int):\n return StatusCode.UNSET\n\n if status < 100:\n return StatusCode.ERROR\n if status <= 299:\n return StatusCode.UNSET\n if status <= 399 and allow_redirect:\n return StatusCode.UNSET\n if status <= 499 and server_span:\n return StatusCode.UNSET\n return 
StatusCode.ERROR\n\n\ndef unwrap(obj, attr: str):\n \"\"\"Given a function that was wrapped by wrapt.wrap_function_wrapper, unwrap it\n\n Args:\n obj: Object that holds a reference to the wrapped function\n attr (str): Name of the wrapped function\n \"\"\"\n func = getattr(obj, attr, None)\n if func and isinstance(func, ObjectProxy) and hasattr(func, \"__wrapped__\"):\n setattr(obj, attr, func.__wrapped__)\n\n\ndef _start_internal_or_server_span(\n tracer, span_name, start_time, context_carrier, context_getter\n):\n \"\"\"Returns internal or server span along with the token which can be used by caller to reset context\n\n\n Args:\n tracer : tracer in use by given instrumentation library\n name (string): name of the span\n start_time : start time of the span\n context_carrier : object which contains values that are\n used to construct a Context. This object\n must be paired with an appropriate getter\n which understands how to extract a value from it.\n context_getter : an object which contains a get function that can retrieve zero\n or more values from the carrier and a keys function that can get all the keys\n from carrier.\n \"\"\"\n\n token = ctx = span_kind = None\n if trace.get_current_span() is trace.INVALID_SPAN:\n ctx = extract(context_carrier, getter=context_getter)\n token = context.attach(ctx)\n span_kind = trace.SpanKind.SERVER\n else:\n ctx = context.get_current()\n span_kind = trace.SpanKind.INTERNAL\n span = tracer.start_span(\n name=span_name,\n context=ctx,\n kind=span_kind,\n start_time=start_time,\n )\n return span, token\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/utils.py"}]}
| 1,567 | 166 |
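A quick way to see what this patch buys: `urllib` reports `None` as the status code for `file://` URLs, and in Python 3 `None < 100` raises `TypeError`, which is the crash the issue describes. The added `isinstance` guard maps any non-integer status to `UNSET` instead. A runnable sketch of the patched helper, using a stand-in enum so it does not require opentelemetry to be installed:

```python
from enum import Enum

class StatusCode(Enum):          # stand-in for opentelemetry.trace.StatusCode
    UNSET = 0
    ERROR = 2

def http_status_to_status_code(status, allow_redirect=True, server_span=False):
    if not isinstance(status, int):   # guard added by the patch
        return StatusCode.UNSET       # e.g. file:// responses report None
    if status < 100:
        return StatusCode.ERROR
    if status <= 299:
        return StatusCode.UNSET
    if status <= 399 and allow_redirect:
        return StatusCode.UNSET
    if status <= 499 and server_span:
        return StatusCode.UNSET
    return StatusCode.ERROR

print(http_status_to_status_code(None))   # StatusCode.UNSET, no TypeError
print(http_status_to_status_code(404))    # StatusCode.ERROR
```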
gh_patches_debug_8569
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-1091
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI: Problem with `gen_cli.py`
see: https://app.circleci.com/pipelines/github/mne-tools/mne-bids/4785/workflows/21ad6804-1cc2-42dd-9133-f24de2ea3db5/jobs/6923
```
Traceback (most recent call last):
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py", line 94, in emit
results.append(listener.handler(self.app, *args))
File "/home/circleci/project/doc/sphinxext/gen_cli.py", line 84, in generate_cli_rst
output[0], output[2] = output[2], output[0]
IndexError: list index out of range
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/cmd/build.py", line 276, in build_main
app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py", line 262, in __init__
self._init_builder()
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py", line 335, in _init_builder
self.events.emit('builder-inited')
File "/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py", line 105, in emit
raise ExtensionError(__("Handler %r for event %r threw an exception") %
sphinx.errors.ExtensionError: Handler <function generate_cli_rst at 0x7fe9bf90c160> for event 'builder-inited' threw an exception (exception: list index out of range)
```
https://github.com/mne-tools/mne-bids/blob/46b0a5300ed5c17ca93b8bbf1d9542069597ef62/doc/sphinxext/gen_cli.py#L1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/sphinxext/gen_cli.py`
Content:
```
1 """Custom sphinx extension to generate docs for the command line interface.
2
3 Inspired by MNE-Python's `gen_commands.py`
4 see: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py
5 """
6 # Authors: Eric Larson <[email protected]>
7 # Alexandre Gramfort <[email protected]>
8 # Stefan Appelhoff <[email protected]>
9 #
10 # License: BSD-3-Clause
11 import os
12 import glob
13 from os import path as op
14 import subprocess
15 import sys
16
17 import sphinx.util
18 from mne.utils import run_subprocess, _replace_md5
19
20
21 def setup(app):
22 """Set up the app."""
23 app.connect('builder-inited', generate_cli_rst)
24
25
26 # Header markings go:
27 # 1. =/= : Page title
28 # 2. = : Command name
29 # 3. -/- : Command description
30 # 4. - : Command sections (Examples, Notes)
31
32 header = """\
33 :orphan:
34
35 .. _python_cli:
36
37 =====================================
38 MNE-BIDS Command Line Interface (CLI)
39 =====================================
40
41 Here we list the MNE-BIDS tools that you can use from the command line.
42
43 """
44
45 command_rst = """
46
47 .. _gen_%s:
48
49 %s
50 %s
51
52 .. rst-class:: callout
53
54 %s
55
56 """
57
58
59 def generate_cli_rst(app=None):
60 """Generate the command line interface docs."""
61 out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))
62 if not op.isdir(out_dir):
63 os.mkdir(out_dir)
64 out_fname = op.join(out_dir, 'cli.rst.new')
65
66 cli_path = op.abspath(
67 op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))
68 fnames = sorted([
69 op.basename(fname)
70 for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])
71 iterator = sphinx.util.status_iterator(
72 fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))
73 with open(out_fname, 'w', encoding='utf-8') as f:
74 f.write(header)
75 for fname in iterator:
76 cmd_name = fname[:-3]
77 run_name = op.join(cli_path, fname)
78 output, _ = run_subprocess([sys.executable, run_name, '--help'],
79 stdout=subprocess.PIPE,
80 stderr=subprocess.PIPE, verbose=False)
81 output = output.splitlines()
82
83 # Swap usage and title lines
84 output[0], output[2] = output[2], output[0]
85
86 # Add header marking
87 for idx in (1, 0):
88 output.insert(idx, '-' * len(output[0]))
89
90 # Add code styling for the "Usage: " line
91 for li, line in enumerate(output):
92 if line.startswith('Usage: mne_bids '):
93 output[li] = 'Usage: ``%s``' % line[7:]
94 break
95
96 # Turn "Options:" into field list
97 if 'Options:' in output:
98 ii = output.index('Options:')
99 output[ii] = 'Options'
100 output.insert(ii + 1, '-------')
101 output.insert(ii + 2, '')
102 output.insert(ii + 3, '.. rst-class:: field-list cmd-list')
103 output.insert(ii + 4, '')
104 output = '\n'.join(output)
105 f.write(command_rst % (cmd_name,
106 cmd_name.replace('mne_bids_', 'mne_bids '),
107 '=' * len(cmd_name),
108 output))
109 _replace_md5(out_fname)
110 print('[Done]')
111
112
113 # This is useful for testing/iterating to see what the result looks like
114 if __name__ == '__main__':
115 generate_cli_rst()
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/sphinxext/gen_cli.py b/doc/sphinxext/gen_cli.py
--- a/doc/sphinxext/gen_cli.py
+++ b/doc/sphinxext/gen_cli.py
@@ -76,8 +76,7 @@
cmd_name = fname[:-3]
run_name = op.join(cli_path, fname)
output, _ = run_subprocess([sys.executable, run_name, '--help'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, verbose=False)
+ verbose=False)
output = output.splitlines()
# Swap usage and title lines
|
{"golden_diff": "diff --git a/doc/sphinxext/gen_cli.py b/doc/sphinxext/gen_cli.py\n--- a/doc/sphinxext/gen_cli.py\n+++ b/doc/sphinxext/gen_cli.py\n@@ -76,8 +76,7 @@\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE, verbose=False)\n+ verbose=False)\n output = output.splitlines()\n \n # Swap usage and title lines\n", "issue": "CI: Problem with `gen_cli.py`\nsee: https://app.circleci.com/pipelines/github/mne-tools/mne-bids/4785/workflows/21ad6804-1cc2-42dd-9133-f24de2ea3db5/jobs/6923\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py\", line 94, in emit\r\n results.append(listener.handler(self.app, *args))\r\n File \"/home/circleci/project/doc/sphinxext/gen_cli.py\", line 84, in generate_cli_rst\r\n output[0], output[2] = output[2], output[0]\r\nIndexError: list index out of range\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/cmd/build.py\", line 276, in build_main\r\n app = Sphinx(args.sourcedir, args.confdir, args.outputdir,\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py\", line 262, in __init__\r\n self._init_builder()\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/application.py\", line 335, in _init_builder\r\n self.events.emit('builder-inited')\r\n File \"/home/circleci/mne_bids_env/lib/python3.9/site-packages/sphinx/events.py\", line 105, in emit\r\n raise ExtensionError(__(\"Handler %r for event %r threw an exception\") %\r\nsphinx.errors.ExtensionError: Handler <function generate_cli_rst at 0x7fe9bf90c160> for event 'builder-inited' threw an exception (exception: list index out of range)\r\n```\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/46b0a5300ed5c17ca93b8bbf1d9542069597ef62/doc/sphinxext/gen_cli.py#L1\n", "before_files": [{"content": "\"\"\"Custom sphinx extension to generate docs for the command line interface.\n\nInspired by MNE-Python's `gen_commands.py`\nsee: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py\n\"\"\"\n# Authors: Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\nimport os\nimport glob\nfrom os import path as op\nimport subprocess\nimport sys\n\nimport sphinx.util\nfrom mne.utils import run_subprocess, _replace_md5\n\n\ndef setup(app):\n \"\"\"Set up the app.\"\"\"\n app.connect('builder-inited', generate_cli_rst)\n\n\n# Header markings go:\n# 1. =/= : Page title\n# 2. = : Command name\n# 3. -/- : Command description\n# 4. - : Command sections (Examples, Notes)\n\nheader = \"\"\"\\\n:orphan:\n\n.. _python_cli:\n\n=====================================\nMNE-BIDS Command Line Interface (CLI)\n=====================================\n\nHere we list the MNE-BIDS tools that you can use from the command line.\n\n\"\"\"\n\ncommand_rst = \"\"\"\n\n.. _gen_%s:\n\n%s\n%s\n\n.. 
rst-class:: callout\n\n%s\n\n\"\"\"\n\n\ndef generate_cli_rst(app=None):\n \"\"\"Generate the command line interface docs.\"\"\"\n out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))\n if not op.isdir(out_dir):\n os.mkdir(out_dir)\n out_fname = op.join(out_dir, 'cli.rst.new')\n\n cli_path = op.abspath(\n op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))\n fnames = sorted([\n op.basename(fname)\n for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])\n iterator = sphinx.util.status_iterator(\n fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))\n with open(out_fname, 'w', encoding='utf-8') as f:\n f.write(header)\n for fname in iterator:\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, verbose=False)\n output = output.splitlines()\n\n # Swap usage and title lines\n output[0], output[2] = output[2], output[0]\n\n # Add header marking\n for idx in (1, 0):\n output.insert(idx, '-' * len(output[0]))\n\n # Add code styling for the \"Usage: \" line\n for li, line in enumerate(output):\n if line.startswith('Usage: mne_bids '):\n output[li] = 'Usage: ``%s``' % line[7:]\n break\n\n # Turn \"Options:\" into field list\n if 'Options:' in output:\n ii = output.index('Options:')\n output[ii] = 'Options'\n output.insert(ii + 1, '-------')\n output.insert(ii + 2, '')\n output.insert(ii + 3, '.. rst-class:: field-list cmd-list')\n output.insert(ii + 4, '')\n output = '\\n'.join(output)\n f.write(command_rst % (cmd_name,\n cmd_name.replace('mne_bids_', 'mne_bids '),\n '=' * len(cmd_name),\n output))\n _replace_md5(out_fname)\n print('[Done]')\n\n\n# This is useful for testing/iterating to see what the result looks like\nif __name__ == '__main__':\n generate_cli_rst()\n", "path": "doc/sphinxext/gen_cli.py"}], "after_files": [{"content": "\"\"\"Custom sphinx extension to generate docs for the command line interface.\n\nInspired by MNE-Python's `gen_commands.py`\nsee: github.com/mne-tools/mne-python/blob/main/doc/sphinxext/gen_commands.py\n\"\"\"\n# Authors: Eric Larson <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\nimport os\nimport glob\nfrom os import path as op\nimport subprocess\nimport sys\n\nimport sphinx.util\nfrom mne.utils import run_subprocess, _replace_md5\n\n\ndef setup(app):\n \"\"\"Set up the app.\"\"\"\n app.connect('builder-inited', generate_cli_rst)\n\n\n# Header markings go:\n# 1. =/= : Page title\n# 2. = : Command name\n# 3. -/- : Command description\n# 4. - : Command sections (Examples, Notes)\n\nheader = \"\"\"\\\n:orphan:\n\n.. _python_cli:\n\n=====================================\nMNE-BIDS Command Line Interface (CLI)\n=====================================\n\nHere we list the MNE-BIDS tools that you can use from the command line.\n\n\"\"\"\n\ncommand_rst = \"\"\"\n\n.. _gen_%s:\n\n%s\n%s\n\n.. 
rst-class:: callout\n\n%s\n\n\"\"\"\n\n\ndef generate_cli_rst(app=None):\n \"\"\"Generate the command line interface docs.\"\"\"\n out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated'))\n if not op.isdir(out_dir):\n os.mkdir(out_dir)\n out_fname = op.join(out_dir, 'cli.rst.new')\n\n cli_path = op.abspath(\n op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands'))\n fnames = sorted([\n op.basename(fname)\n for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))])\n iterator = sphinx.util.status_iterator(\n fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames))\n with open(out_fname, 'w', encoding='utf-8') as f:\n f.write(header)\n for fname in iterator:\n cmd_name = fname[:-3]\n run_name = op.join(cli_path, fname)\n output, _ = run_subprocess([sys.executable, run_name, '--help'],\n verbose=False)\n output = output.splitlines()\n\n # Swap usage and title lines\n output[0], output[2] = output[2], output[0]\n\n # Add header marking\n for idx in (1, 0):\n output.insert(idx, '-' * len(output[0]))\n\n # Add code styling for the \"Usage: \" line\n for li, line in enumerate(output):\n if line.startswith('Usage: mne_bids '):\n output[li] = 'Usage: ``%s``' % line[7:]\n break\n\n # Turn \"Options:\" into field list\n if 'Options:' in output:\n ii = output.index('Options:')\n output[ii] = 'Options'\n output.insert(ii + 1, '-------')\n output.insert(ii + 2, '')\n output.insert(ii + 3, '.. rst-class:: field-list cmd-list')\n output.insert(ii + 4, '')\n output = '\\n'.join(output)\n f.write(command_rst % (cmd_name,\n cmd_name.replace('mne_bids_', 'mne_bids '),\n '=' * len(cmd_name),\n output))\n _replace_md5(out_fname)\n print('[Done]')\n\n\n# This is useful for testing/iterating to see what the result looks like\nif __name__ == '__main__':\n generate_cli_rst()\n", "path": "doc/sphinxext/gen_cli.py"}]}
| 1,833 | 126 |
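The traceback bottoms out at `output[0], output[2] = output[2], output[0]`, which only works if the captured `--help` text has at least three lines. The patch drops the explicit `stdout=subprocess.PIPE` / `stderr=subprocess.PIPE` arguments so MNE's `run_subprocess` can capture the output itself (presumably the extra kwargs conflicted with its own capturing and left the string empty). A hypothetical reproduction of just that failure mode, independent of MNE:

```python
# If the captured help text comes back empty, the title/usage swap has
# nothing to index and raises the IndexError seen in the CI log.
captured_help = ""                      # what the doc build effectively received
output = captured_help.splitlines()     # -> []

try:
    output[0], output[2] = output[2], output[0]
except IndexError as err:
    print("IndexError:", err)           # list index out of range
```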
gh_patches_debug_19589
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-839
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)
Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/elasticbeanstalk.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7
8
9 WebServer = "WebServer"
10 Worker = "Worker"
11 WebServerType = "Standard"
12 WorkerType = "SQS/HTTP"
13
14
15 class SourceBundle(AWSProperty):
16 props = {
17 'S3Bucket': (basestring, True),
18 'S3Key': (basestring, True),
19 }
20
21
22 class SourceConfiguration(AWSProperty):
23 props = {
24 'ApplicationName': (basestring, True),
25 'TemplateName': (basestring, True),
26 }
27
28
29 class OptionSettings(AWSProperty):
30 props = {
31 'Namespace': (basestring, True),
32 'OptionName': (basestring, True),
33 'Value': (basestring, True),
34 }
35
36
37 class Application(AWSObject):
38 resource_type = "AWS::ElasticBeanstalk::Application"
39
40 props = {
41 'ApplicationName': (basestring, False),
42 'Description': (basestring, False),
43 }
44
45
46 class ApplicationVersion(AWSObject):
47 resource_type = "AWS::ElasticBeanstalk::ApplicationVersion"
48
49 props = {
50 'ApplicationName': (basestring, True),
51 'Description': (basestring, False),
52 'SourceBundle': (SourceBundle, False),
53 }
54
55
56 class ConfigurationTemplate(AWSObject):
57 resource_type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
58
59 props = {
60 'ApplicationName': (basestring, True),
61 'Description': (basestring, False),
62 'EnvironmentId': (basestring, False),
63 'OptionSettings': ([OptionSettings], False),
64 'SolutionStackName': (basestring, False),
65 'SourceConfiguration': (SourceConfiguration, False),
66 }
67
68
69 def validate_tier_name(name):
70 valid_names = [WebServer, Worker]
71 if name not in valid_names:
72 raise ValueError('Tier name needs to be one of %r' % valid_names)
73 return name
74
75
76 def validate_tier_type(tier_type):
77 valid_types = [WebServerType, WorkerType]
78 if tier_type not in valid_types:
79 raise ValueError('Tier type needs to be one of %r' % valid_types)
80 return tier_type
81
82
83 class Tier(AWSProperty):
84 props = {
85 'Name': (validate_tier_name, False),
86 'Type': (validate_tier_type, False),
87 'Version': (basestring, False),
88 }
89
90
91 class Environment(AWSObject):
92 resource_type = "AWS::ElasticBeanstalk::Environment"
93
94 props = {
95 'ApplicationName': (basestring, True),
96 'CNAMEPrefix': (basestring, False),
97 'Description': (basestring, False),
98 'EnvironmentName': (basestring, False),
99 'OptionSettings': ([OptionSettings], False),
100 'SolutionStackName': (basestring, False),
101 'Tags': (Tags, False),
102 'TemplateName': (basestring, False),
103 'Tier': (Tier, False),
104 'VersionLabel': (basestring, False),
105 }
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py
--- a/troposphere/elasticbeanstalk.py
+++ b/troposphere/elasticbeanstalk.py
@@ -61,6 +61,7 @@
'Description': (basestring, False),
'EnvironmentId': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'SourceConfiguration': (SourceConfiguration, False),
}
@@ -97,6 +98,7 @@
'Description': (basestring, False),
'EnvironmentName': (basestring, False),
'OptionSettings': ([OptionSettings], False),
+ 'PlatformArn': (basestring, False),
'SolutionStackName': (basestring, False),
'Tags': (Tags, False),
'TemplateName': (basestring, False),
|
{"golden_diff": "diff --git a/troposphere/elasticbeanstalk.py b/troposphere/elasticbeanstalk.py\n--- a/troposphere/elasticbeanstalk.py\n+++ b/troposphere/elasticbeanstalk.py\n@@ -61,6 +61,7 @@\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n@@ -97,6 +98,7 @@\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n+ 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n", "issue": "Use the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n[AWS::ElasticBeanstalk::ConfigurationTemplate](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-beanstalk-configurationtemplate.html) and [AWS::ElasticBeanstalk::Environment](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html)\r\nUse the PlatformArn property to specify a custom platform for Elastic Beanstalk.\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, 
False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\n\n\nWebServer = \"WebServer\"\nWorker = \"Worker\"\nWebServerType = \"Standard\"\nWorkerType = \"SQS/HTTP\"\n\n\nclass SourceBundle(AWSProperty):\n props = {\n 'S3Bucket': (basestring, True),\n 'S3Key': (basestring, True),\n }\n\n\nclass SourceConfiguration(AWSProperty):\n props = {\n 'ApplicationName': (basestring, True),\n 'TemplateName': (basestring, True),\n }\n\n\nclass OptionSettings(AWSProperty):\n props = {\n 'Namespace': (basestring, True),\n 'OptionName': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass Application(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Application\"\n\n props = {\n 'ApplicationName': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass ApplicationVersion(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ApplicationVersion\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'SourceBundle': (SourceBundle, False),\n }\n\n\nclass ConfigurationTemplate(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'Description': (basestring, False),\n 'EnvironmentId': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'SourceConfiguration': (SourceConfiguration, False),\n }\n\n\ndef validate_tier_name(name):\n valid_names = [WebServer, Worker]\n if name not in valid_names:\n raise ValueError('Tier name needs to be one of %r' % valid_names)\n return name\n\n\ndef validate_tier_type(tier_type):\n valid_types = [WebServerType, WorkerType]\n if tier_type not in valid_types:\n raise ValueError('Tier type needs to be one of %r' % valid_types)\n return tier_type\n\n\nclass Tier(AWSProperty):\n props = {\n 'Name': (validate_tier_name, False),\n 'Type': (validate_tier_type, False),\n 'Version': (basestring, False),\n }\n\n\nclass Environment(AWSObject):\n resource_type = \"AWS::ElasticBeanstalk::Environment\"\n\n props = {\n 'ApplicationName': (basestring, True),\n 'CNAMEPrefix': (basestring, False),\n 'Description': (basestring, False),\n 'EnvironmentName': (basestring, False),\n 'OptionSettings': ([OptionSettings], False),\n 'PlatformArn': (basestring, False),\n 'SolutionStackName': (basestring, False),\n 'Tags': (Tags, False),\n 'TemplateName': (basestring, False),\n 'Tier': (Tier, False),\n 'VersionLabel': (basestring, False),\n }\n", "path": "troposphere/elasticbeanstalk.py"}]}
| 1,262 | 212 |
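With the two `PlatformArn` entries added, a template can point an Elastic Beanstalk environment at a custom platform directly. A usage sketch against the patched classes; the ARN string is a placeholder, not a real platform:

```python
from troposphere import Template
from troposphere.elasticbeanstalk import Application, Environment

t = Template()
t.add_resource(Application("MyApp", ApplicationName="my-app"))
t.add_resource(Environment(
    "MyEnv",
    ApplicationName="my-app",
    PlatformArn="arn:aws:elasticbeanstalk:us-east-1::platform/Example/1.0.0",
))
print(t.to_json())
```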
gh_patches_debug_26636
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-456
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter archived and draft projects from the wagtail frontpage selection element
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/cms/models.py`
Content:
```
1 from django.db import models
2 from django.forms import widgets
3 from modelcluster.fields import ParentalKey
4 from modelcluster.models import ClusterableModel
5 from wagtail.wagtailadmin import edit_handlers
6 from wagtail.wagtailcore import blocks
7 from wagtail.wagtailcore import fields
8 from wagtail.wagtailcore.models import Orderable
9 from wagtail.wagtailcore.models import Page
10 from wagtail.wagtailforms.models import AbstractEmailForm
11 from wagtail.wagtailforms.models import AbstractFormField
12 from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
13 from wagtail.wagtailsnippets.models import register_snippet
14
15 from adhocracy4.projects.models import Project
16
17 from . import emails
18
19
20 class SimplePage(Page):
21 body = fields.RichTextField(blank=True)
22
23 content_panels = [
24 edit_handlers.FieldPanel('title'),
25 edit_handlers.FieldPanel('body'),
26 ]
27
28 subpage_types = []
29
30
31 class ProjectSelectionBlock(blocks.ChooserBlock):
32 target_model = Project
33 widget = widgets.Select
34
35 def value_for_form(self, value):
36 if isinstance(value, Project):
37 return value.pk
38 return value
39
40
41 class ProjectsWrapperBlock(blocks.StructBlock):
42 title = blocks.CharBlock(max_length=80)
43 projects = blocks.ListBlock(
44 ProjectSelectionBlock(label='Project'),
45 )
46
47 class Meta:
48 template = 'meinberlin_cms/blocks/projects_block.html'
49
50
51 class CallToActionBlock(blocks.StructBlock):
52 body = blocks.RichTextBlock()
53 link = blocks.CharBlock()
54 link_text = blocks.CharBlock(max_length=50, label='Link Text')
55
56 class Meta:
57 template = 'meinberlin_cms/blocks/cta_block.html'
58
59
60 class ColumnsBlock(blocks.StructBlock):
61 columns_count = blocks.ChoiceBlock(choices=[
62 (2, 'Two columns'),
63 (3, 'Three columns'),
64 (4, 'Four columns'),
65 ], default=2)
66
67 columns = blocks.ListBlock(
68 blocks.RichTextBlock(label='Column body'),
69 )
70
71 class Meta:
72 template = 'meinberlin_cms/blocks/columns_block.html'
73
74
75 class HomePage(Page):
76 body = fields.StreamField([
77 ('paragraph', blocks.RichTextBlock(
78 template='meinberlin_cms/blocks/richtext_block.html'
79 )),
80 ('call_to_action', CallToActionBlock()),
81 ('columns_text', ColumnsBlock()),
82 ('projects', ProjectsWrapperBlock()),
83 ])
84
85 subtitle = models.CharField(max_length=120)
86
87 header_image = models.ForeignKey(
88 'wagtailimages.Image',
89 null=True,
90 blank=False,
91 on_delete=models.SET_NULL,
92 related_name='+'
93 )
94
95 content_panels = Page.content_panels + [
96 edit_handlers.FieldPanel('subtitle'),
97 ImageChooserPanel('header_image'),
98 edit_handlers.StreamFieldPanel('body'),
99 ]
100
101
102 class MenuItem(models.Model):
103 title = models.CharField(max_length=255)
104 link_page = models.ForeignKey('wagtailcore.Page')
105
106 @property
107 def url(self):
108 return self.link_page.url
109
110 def __str__(self):
111 return self.title
112
113 panels = [
114 edit_handlers.FieldPanel('title'),
115 edit_handlers.PageChooserPanel('link_page')
116 ]
117
118
119 @register_snippet
120 class NavigationMenu(ClusterableModel):
121 title = models.CharField(max_length=255, null=False, blank=False)
122
123 def __str__(self):
124 return self.title
125
126 panels = [
127 edit_handlers.FieldPanel('title'),
128 edit_handlers.InlinePanel('items')
129 ]
130
131
132 class NavigationMenuItem(Orderable, MenuItem):
133 parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')
134
135
136 class EmailFormField(AbstractFormField):
137 page = ParentalKey('EmailFormPage', related_name='form_fields')
138
139
140 class EmailFormPage(AbstractEmailForm):
141 intro = fields.RichTextField(
142 help_text='Introduction text shown above the form'
143 )
144 thank_you = fields.RichTextField(
145 help_text='Text shown after form submission',
146 )
147 email_content = models.CharField(
148 max_length=200,
149 help_text='Email content message',
150 )
151 attach_as = models.CharField(
152 max_length=3,
153 choices=(
154 ('csv', 'CSV Document'),
155 ('txt', 'Text'),
156 ),
157 default='csv',
158 help_text='Form results are send in this document format',
159 )
160
161 content_panels = AbstractEmailForm.content_panels + [
162 edit_handlers.MultiFieldPanel([
163 edit_handlers.FieldPanel('intro', classname='full'),
164 edit_handlers.FieldPanel('thank_you', classname='full'),
165 ], 'Page'),
166 edit_handlers.MultiFieldPanel([
167 edit_handlers.FieldPanel('to_address'),
168 edit_handlers.FieldPanel('subject'),
169 edit_handlers.FieldPanel('email_content', classname='full'),
170 edit_handlers.FieldPanel('attach_as'),
171 ], 'Email'),
172 edit_handlers.InlinePanel('form_fields', label='Form fields'),
173 ]
174
175 def send_mail(self, form):
176 self.form = form
177 if self.attach_as == 'csv':
178 emails.CsvFormEmail.send(self)
179 elif self.attach_as == 'txt':
180 emails.TextFormEmail.send(self)
181
182 @property
183 def field_values(self):
184 fields = {}
185 for field in self.form:
186 value = field.value()
187 if isinstance(value, list):
188 value = ', '.join(value)
189 fields[field.label] = value
190 return fields
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/cms/models.py b/apps/cms/models.py
--- a/apps/cms/models.py
+++ b/apps/cms/models.py
@@ -1,5 +1,6 @@
+from django import forms
from django.db import models
-from django.forms import widgets
+from django.utils.functional import cached_property
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin import edit_handlers
@@ -30,13 +31,30 @@
class ProjectSelectionBlock(blocks.ChooserBlock):
target_model = Project
- widget = widgets.Select
+ widget = forms.widgets.Select
+
+ @cached_property
+ def field(self):
+ return forms.ModelChoiceField(
+ queryset=self.target_model.objects.filter(
+ is_draft=False,
+ is_archived=False,
+ is_public=True),
+ widget=self.widget,
+ required=self._required,
+ help_text=self._help_text)
def value_for_form(self, value):
if isinstance(value, Project):
return value.pk
return value
+ def value_from_form(self, value):
+ # if project became unavailable (unpublished), selection will become an
+ # empty string and cause a server error on save, so we give a fallback
+ value = value or None
+ return super().value_from_form(value)
+
class ProjectsWrapperBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=80)
|
{"golden_diff": "diff --git a/apps/cms/models.py b/apps/cms/models.py\n--- a/apps/cms/models.py\n+++ b/apps/cms/models.py\n@@ -1,5 +1,6 @@\n+from django import forms\n from django.db import models\n-from django.forms import widgets\n+from django.utils.functional import cached_property\n from modelcluster.fields import ParentalKey\n from modelcluster.models import ClusterableModel\n from wagtail.wagtailadmin import edit_handlers\n@@ -30,13 +31,30 @@\n \n class ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n- widget = widgets.Select\n+ widget = forms.widgets.Select\n+\n+ @cached_property\n+ def field(self):\n+ return forms.ModelChoiceField(\n+ queryset=self.target_model.objects.filter(\n+ is_draft=False,\n+ is_archived=False,\n+ is_public=True),\n+ widget=self.widget,\n+ required=self._required,\n+ help_text=self._help_text)\n \n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n \n+ def value_from_form(self, value):\n+ # if project became unavailable (unpublished), selection will become an\n+ # empty string and cause a server error on save, so we give a fallback\n+ value = value or None\n+ return super().value_from_form(value)\n+\n \n class ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n", "issue": "Filter archived and draft projects from the wagtail frontpage selection element\n\n", "before_files": [{"content": "from django.db import models\nfrom django.forms import widgets\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.wagtailadmin import edit_handlers\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore import fields\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailforms.models import AbstractEmailForm\nfrom wagtail.wagtailforms.models import AbstractFormField\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom adhocracy4.projects.models import Project\n\nfrom . 
import emails\n\n\nclass SimplePage(Page):\n body = fields.RichTextField(blank=True)\n\n content_panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.FieldPanel('body'),\n ]\n\n subpage_types = []\n\n\nclass ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n widget = widgets.Select\n\n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n\n\nclass ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n projects = blocks.ListBlock(\n ProjectSelectionBlock(label='Project'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/projects_block.html'\n\n\nclass CallToActionBlock(blocks.StructBlock):\n body = blocks.RichTextBlock()\n link = blocks.CharBlock()\n link_text = blocks.CharBlock(max_length=50, label='Link Text')\n\n class Meta:\n template = 'meinberlin_cms/blocks/cta_block.html'\n\n\nclass ColumnsBlock(blocks.StructBlock):\n columns_count = blocks.ChoiceBlock(choices=[\n (2, 'Two columns'),\n (3, 'Three columns'),\n (4, 'Four columns'),\n ], default=2)\n\n columns = blocks.ListBlock(\n blocks.RichTextBlock(label='Column body'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/columns_block.html'\n\n\nclass HomePage(Page):\n body = fields.StreamField([\n ('paragraph', blocks.RichTextBlock(\n template='meinberlin_cms/blocks/richtext_block.html'\n )),\n ('call_to_action', CallToActionBlock()),\n ('columns_text', ColumnsBlock()),\n ('projects', ProjectsWrapperBlock()),\n ])\n\n subtitle = models.CharField(max_length=120)\n\n header_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n edit_handlers.FieldPanel('subtitle'),\n ImageChooserPanel('header_image'),\n edit_handlers.StreamFieldPanel('body'),\n ]\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n\n\nclass EmailFormField(AbstractFormField):\n page = ParentalKey('EmailFormPage', related_name='form_fields')\n\n\nclass EmailFormPage(AbstractEmailForm):\n intro = fields.RichTextField(\n help_text='Introduction text shown above the form'\n )\n thank_you = fields.RichTextField(\n help_text='Text shown after form submission',\n )\n email_content = models.CharField(\n max_length=200,\n help_text='Email content message',\n )\n attach_as = models.CharField(\n max_length=3,\n choices=(\n ('csv', 'CSV Document'),\n ('txt', 'Text'),\n ),\n default='csv',\n help_text='Form results are send in this document format',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('intro', classname='full'),\n edit_handlers.FieldPanel('thank_you', classname='full'),\n ], 'Page'),\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('to_address'),\n 
edit_handlers.FieldPanel('subject'),\n edit_handlers.FieldPanel('email_content', classname='full'),\n edit_handlers.FieldPanel('attach_as'),\n ], 'Email'),\n edit_handlers.InlinePanel('form_fields', label='Form fields'),\n ]\n\n def send_mail(self, form):\n self.form = form\n if self.attach_as == 'csv':\n emails.CsvFormEmail.send(self)\n elif self.attach_as == 'txt':\n emails.TextFormEmail.send(self)\n\n @property\n def field_values(self):\n fields = {}\n for field in self.form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n fields[field.label] = value\n return fields\n", "path": "apps/cms/models.py"}], "after_files": [{"content": "from django import forms\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.wagtailadmin import edit_handlers\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore import fields\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailforms.models import AbstractEmailForm\nfrom wagtail.wagtailforms.models import AbstractFormField\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\n\nclass SimplePage(Page):\n body = fields.RichTextField(blank=True)\n\n content_panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.FieldPanel('body'),\n ]\n\n subpage_types = []\n\n\nclass ProjectSelectionBlock(blocks.ChooserBlock):\n target_model = Project\n widget = forms.widgets.Select\n\n @cached_property\n def field(self):\n return forms.ModelChoiceField(\n queryset=self.target_model.objects.filter(\n is_draft=False,\n is_archived=False,\n is_public=True),\n widget=self.widget,\n required=self._required,\n help_text=self._help_text)\n\n def value_for_form(self, value):\n if isinstance(value, Project):\n return value.pk\n return value\n\n def value_from_form(self, value):\n # if project became unavailable (unpublished), selection will become an\n # empty string and cause a server error on save, so we give a fallback\n value = value or None\n return super().value_from_form(value)\n\n\nclass ProjectsWrapperBlock(blocks.StructBlock):\n title = blocks.CharBlock(max_length=80)\n projects = blocks.ListBlock(\n ProjectSelectionBlock(label='Project'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/projects_block.html'\n\n\nclass CallToActionBlock(blocks.StructBlock):\n body = blocks.RichTextBlock()\n link = blocks.CharBlock()\n link_text = blocks.CharBlock(max_length=50, label='Link Text')\n\n class Meta:\n template = 'meinberlin_cms/blocks/cta_block.html'\n\n\nclass ColumnsBlock(blocks.StructBlock):\n columns_count = blocks.ChoiceBlock(choices=[\n (2, 'Two columns'),\n (3, 'Three columns'),\n (4, 'Four columns'),\n ], default=2)\n\n columns = blocks.ListBlock(\n blocks.RichTextBlock(label='Column body'),\n )\n\n class Meta:\n template = 'meinberlin_cms/blocks/columns_block.html'\n\n\nclass HomePage(Page):\n body = fields.StreamField([\n ('paragraph', blocks.RichTextBlock(\n template='meinberlin_cms/blocks/richtext_block.html'\n )),\n ('call_to_action', CallToActionBlock()),\n ('columns_text', ColumnsBlock()),\n ('projects', ProjectsWrapperBlock()),\n ])\n\n subtitle = models.CharField(max_length=120)\n\n header_image = models.ForeignKey(\n 'wagtailimages.Image',\n 
null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n edit_handlers.FieldPanel('subtitle'),\n ImageChooserPanel('header_image'),\n edit_handlers.StreamFieldPanel('body'),\n ]\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n\n\nclass EmailFormField(AbstractFormField):\n page = ParentalKey('EmailFormPage', related_name='form_fields')\n\n\nclass EmailFormPage(AbstractEmailForm):\n intro = fields.RichTextField(\n help_text='Introduction text shown above the form'\n )\n thank_you = fields.RichTextField(\n help_text='Text shown after form submission',\n )\n email_content = models.CharField(\n max_length=200,\n help_text='Email content message',\n )\n attach_as = models.CharField(\n max_length=3,\n choices=(\n ('csv', 'CSV Document'),\n ('txt', 'Text'),\n ),\n default='csv',\n help_text='Form results are send in this document format',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('intro', classname='full'),\n edit_handlers.FieldPanel('thank_you', classname='full'),\n ], 'Page'),\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('to_address'),\n edit_handlers.FieldPanel('subject'),\n edit_handlers.FieldPanel('email_content', classname='full'),\n edit_handlers.FieldPanel('attach_as'),\n ], 'Email'),\n edit_handlers.InlinePanel('form_fields', label='Form fields'),\n ]\n\n def send_mail(self, form):\n self.form = form\n if self.attach_as == 'csv':\n emails.CsvFormEmail.send(self)\n elif self.attach_as == 'txt':\n emails.TextFormEmail.send(self)\n\n @property\n def field_values(self):\n fields = {}\n for field in self.form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n fields[field.label] = value\n return fields\n", "path": "apps/cms/models.py"}]}
| 1,936 | 321 |
gh_patches_debug_8055
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-1497
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
setup.py lacks appropriate metadata for differing python versions
Please see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318
Basically, it appears the METADATA in the resulting installation differs depending on the installation method.
I've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.
However, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.
Can anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 import os
6 import re
7 import codecs
8
9 base_path = os.path.dirname(__file__)
10
11 # Get the version (borrowed from SQLAlchemy)
12 with open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:
13 VERSION = re.compile(r".*__version__ = '(.*?)'",
14 re.S).match(fp.read()).group(1)
15
16 with codecs.open('README.rst', encoding='utf-8') as fp:
17 readme = fp.read()
18 with codecs.open('CHANGES.rst', encoding='utf-8') as fp:
19 changes = fp.read()
20 version = VERSION
21
22 setup(name='urllib3',
23 version=version,
24 description="HTTP library with thread-safe connection pooling, file post, and more.",
25 long_description=u'\n\n'.join([readme, changes]),
26 classifiers=[
27 'Environment :: Web Environment',
28 'Intended Audience :: Developers',
29 'License :: OSI Approved :: MIT License',
30 'Operating System :: OS Independent',
31 'Programming Language :: Python',
32 'Programming Language :: Python :: 2',
33 'Programming Language :: Python :: 2.7',
34 'Programming Language :: Python :: 3',
35 'Programming Language :: Python :: 3.4',
36 'Programming Language :: Python :: 3.5',
37 'Programming Language :: Python :: 3.6',
38 'Programming Language :: Python :: 3.7',
39 'Programming Language :: Python :: 3.8',
40 'Programming Language :: Python :: Implementation :: CPython',
41 'Programming Language :: Python :: Implementation :: PyPy',
42 'Topic :: Internet :: WWW/HTTP',
43 'Topic :: Software Development :: Libraries',
44 ],
45 keywords='urllib httplib threadsafe filepost http https ssl pooling',
46 author='Andrey Petrov',
47 author_email='[email protected]',
48 url='https://urllib3.readthedocs.io/',
49 license='MIT',
50 packages=['urllib3',
51 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',
52 'urllib3.packages.backports', 'urllib3.packages.rfc3986',
53 'urllib3.contrib', 'urllib3.contrib._securetransport',
54 'urllib3.util'],
55 package_dir={'': 'src'},
56 requires=[],
57 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
58 tests_require=[
59 # These are a less-specific subset of dev-requirements.txt, for the
60 # convenience of distro package maintainers.
61 'pytest',
62 'mock',
63 'tornado',
64 ],
65 test_suite='test',
66 extras_require={
67 'secure': [
68 'pyOpenSSL >= 0.14',
69 'cryptography>=1.3.4',
70 'idna>=2.0.0',
71 'certifi',
72 "ipaddress",
73 ],
74 'socks': [
75 'PySocks>=1.5.6,<2.0,!=1.5.7',
76 ]
77 },
78 )
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,11 +65,11 @@
test_suite='test',
extras_require={
'secure': [
- 'pyOpenSSL >= 0.14',
+ 'pyOpenSSL>=0.14',
'cryptography>=1.3.4',
'idna>=2.0.0',
'certifi',
- "ipaddress",
+ "ipaddress; python_version=='2.7'",
],
'socks': [
'PySocks>=1.5.6,<2.0,!=1.5.7',
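The patch above relies on PEP 508 environment markers so that the `ipaddress` backport is only declared for Python 2.7, which keeps the generated METADATA consistent across installation methods. As a minimal, hedged illustration (the package name and version below are placeholders, not taken from urllib3), an extras declaration with such a marker looks like this:

```python
# Hypothetical minimal setup.py showing a PEP 508 environment marker.
# Only the marker syntax is the point; names and versions are illustrative.
from setuptools import setup

setup(
    name="example-package",
    version="0.0.1",
    extras_require={
        "secure": [
            "pyOpenSSL>=0.14",
            # Installed only on Python 2.7, so Python 3 metadata omits it.
            "ipaddress; python_version=='2.7'",
        ],
    },
)
```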
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,11 +65,11 @@\n test_suite='test',\n extras_require={\n 'secure': [\n- 'pyOpenSSL >= 0.14',\n+ 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n- \"ipaddress\",\n+ \"ipaddress; python_version=='2.7'\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n", "issue": "setup.py lacks appropriate metadata for differing python versions\nPlease see this issue for the full context: https://github.com/NixOS/nixpkgs/issues/46318\r\n\r\nBasically, it appears the METADATA in the resulting installation differs depending on the installation method.\r\n\r\nI've done some minimal patching to `setup.py` to include the same `python_version` constraints that appear in `setup.cfg` and it appears to fix the issues with regards to METADATA.\r\n\r\nHowever, I'm not very experienced in python packaging and am surprised that no one else has run into this issue before me.\r\n\r\nCan anyone confirm that there is a mismatch here and that adding additional constraints to `setup.py` would be appropriate? I'll go ahead and get a PR together in the meantime.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.packages.rfc3986',\n 'urllib3.contrib', 'urllib3.contrib._securetransport',\n 'urllib3.util'],\n package_dir={'': 'src'},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'pytest',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL >= 0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress\",\n ],\n 'socks': [\n 
'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, 'src', 'urllib3', '__init__.py')) as fp:\n VERSION = re.compile(r\".*__version__ = '(.*?)'\",\n re.S).match(fp.read()).group(1)\n\nwith codecs.open('README.rst', encoding='utf-8') as fp:\n readme = fp.read()\nwith codecs.open('CHANGES.rst', encoding='utf-8') as fp:\n changes = fp.read()\nversion = VERSION\n\nsetup(name='urllib3',\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u'\\n\\n'.join([readme, changes]),\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='urllib httplib threadsafe filepost http https ssl pooling',\n author='Andrey Petrov',\n author_email='[email protected]',\n url='https://urllib3.readthedocs.io/',\n license='MIT',\n packages=['urllib3',\n 'urllib3.packages', 'urllib3.packages.ssl_match_hostname',\n 'urllib3.packages.backports', 'urllib3.packages.rfc3986',\n 'urllib3.contrib', 'urllib3.contrib._securetransport',\n 'urllib3.util'],\n package_dir={'': 'src'},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n tests_require=[\n # These are a less-specific subset of dev-requirements.txt, for the\n # convenience of distro package maintainers.\n 'pytest',\n 'mock',\n 'tornado',\n ],\n test_suite='test',\n extras_require={\n 'secure': [\n 'pyOpenSSL>=0.14',\n 'cryptography>=1.3.4',\n 'idna>=2.0.0',\n 'certifi',\n \"ipaddress; python_version=='2.7'\",\n ],\n 'socks': [\n 'PySocks>=1.5.6,<2.0,!=1.5.7',\n ]\n },\n )\n", "path": "setup.py"}]}
| 1,249 | 151 |
gh_patches_debug_3178
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1810
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace reward point redemption dropdown with number input field
If a user selects an option, a new line is added and the selection spans two rows. This looks wrong.
A user can insert custom options. If the user inputs something invalid like "abcdef" or an empty string, only part of the "Please select" placeholder is visible. This looks wrong as well.
Replace reward point redemption dropdown with number input field
If a user selects an option, a new line is added and the selection spans two rows. This looks wrong.
A user can insert custom options. If the user inputs something invalid like "abcdef" or an empty string, only part of the "Please select" placeholder is visible. This looks wrong as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/rewards/views.py`
Content:
```
1 from datetime import datetime
2
3 from django.contrib import messages
4 from django.core.exceptions import BadRequest, SuspiciousOperation
5 from django.http import HttpResponse
6 from django.shortcuts import get_object_or_404, redirect, render
7 from django.utils.translation import get_language
8 from django.utils.translation import gettext as _
9 from django.views.decorators.http import require_POST
10
11 from evap.evaluation.auth import manager_required, reward_user_required
12 from evap.evaluation.models import Semester
13 from evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x
14 from evap.rewards.exporters import RewardsExporter
15 from evap.rewards.forms import RewardPointRedemptionEventForm
16 from evap.rewards.models import (
17 NoPointsSelected,
18 NotEnoughPoints,
19 RedemptionEventExpired,
20 RewardPointGranting,
21 RewardPointRedemption,
22 RewardPointRedemptionEvent,
23 SemesterActivation,
24 )
25 from evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions
26 from evap.staff.views import semester_view
27
28
29 @reward_user_required
30 def index(request):
31 if request.method == "POST":
32 redemptions = {}
33 try:
34 for key, value in request.POST.items():
35 if key.startswith("points-"):
36 event_id = int(key.rpartition("-")[2])
37 redemptions[event_id] = int(value)
38 except ValueError as e:
39 raise BadRequest from e
40
41 try:
42 save_redemptions(request, redemptions)
43 messages.success(request, _("You successfully redeemed your points."))
44 except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:
45 messages.warning(request, error)
46
47 total_points_available = reward_points_of_user(request.user)
48 reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)
49 reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)
50 events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by("date")
51
52 reward_point_actions = []
53 for granting in reward_point_grantings:
54 reward_point_actions.append(
55 (granting.granting_time, _("Reward for") + " " + granting.semester.name, granting.value, "")
56 )
57 for redemption in reward_point_redemptions:
58 reward_point_actions.append((redemption.redemption_time, redemption.event.name, "", redemption.value))
59
60 reward_point_actions.sort(key=lambda action: action[0], reverse=True)
61
62 template_data = dict(
63 reward_point_actions=reward_point_actions,
64 total_points_available=total_points_available,
65 events=events,
66 point_selection=range(0, total_points_available + 1),
67 )
68 return render(request, "rewards_index.html", template_data)
69
70
71 @manager_required
72 def reward_point_redemption_events(request):
73 upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by("date")
74 past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by("-date")
75 template_data = dict(upcoming_events=upcoming_events, past_events=past_events)
76 return render(request, "rewards_reward_point_redemption_events.html", template_data)
77
78
79 @manager_required
80 def reward_point_redemption_event_create(request):
81 event = RewardPointRedemptionEvent()
82 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
83
84 if form.is_valid():
85 form.save()
86 messages.success(request, _("Successfully created event."))
87 return redirect("rewards:reward_point_redemption_events")
88
89 return render(request, "rewards_reward_point_redemption_event_form.html", dict(form=form))
90
91
92 @manager_required
93 def reward_point_redemption_event_edit(request, event_id):
94 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
95 form = RewardPointRedemptionEventForm(request.POST or None, instance=event)
96
97 if form.is_valid():
98 event = form.save()
99
100 messages.success(request, _("Successfully updated event."))
101 return redirect("rewards:reward_point_redemption_events")
102
103 return render(request, "rewards_reward_point_redemption_event_form.html", dict(event=event, form=form))
104
105
106 @require_POST
107 @manager_required
108 def reward_point_redemption_event_delete(request):
109 event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, "event_id")
110
111 if not event.can_delete:
112 raise SuspiciousOperation("Deleting redemption event not allowed")
113 event.delete()
114 return HttpResponse() # 200 OK
115
116
117 @manager_required
118 def reward_point_redemption_event_export(request, event_id):
119 event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)
120
121 filename = _("RewardPoints") + f"-{event.date}-{event.name}-{get_language()}.xls"
122 response = AttachmentResponse(filename, content_type="application/vnd.ms-excel")
123
124 RewardsExporter().export(response, event.redemptions_by_user())
125
126 return response
127
128
129 @manager_required
130 def semester_activation(request, semester_id, active):
131 semester = get_object_or_404(Semester, id=semester_id)
132 active = active == "on"
133
134 SemesterActivation.objects.update_or_create(semester=semester, defaults={"is_active": active})
135 if active:
136 grant_eligible_reward_points_for_semester(request, semester)
137
138 return semester_view(request=request, semester_id=semester_id)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/rewards/views.py b/evap/rewards/views.py
--- a/evap/rewards/views.py
+++ b/evap/rewards/views.py
@@ -63,7 +63,6 @@
reward_point_actions=reward_point_actions,
total_points_available=total_points_available,
events=events,
- point_selection=range(0, total_points_available + 1),
)
return render(request, "rewards_index.html", template_data)
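The view-side change only drops the `point_selection` range that fed the dropdown; the corresponding template and form change is not shown in this record. As a rough sketch of what a number-input replacement could look like, with form and field names invented purely for illustration, a bounded Django `IntegerField` renders as `<input type="number">`:

```python
# Illustrative only: not part of the actual patch, which touches the view.
# Assumes the template swaps the <select> for a bounded number input.
from django import forms

def make_points_form(total_points_available):
    class RedemptionPointsForm(forms.Form):
        points = forms.IntegerField(
            min_value=0,
            max_value=total_points_available,
            widget=forms.NumberInput(attrs={"min": 0, "step": 1}),
        )
    return RedemptionPointsForm
```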
|
{"golden_diff": "diff --git a/evap/rewards/views.py b/evap/rewards/views.py\n--- a/evap/rewards/views.py\n+++ b/evap/rewards/views.py\n@@ -63,7 +63,6 @@\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n- point_selection=range(0, total_points_available + 1),\n )\n return render(request, \"rewards_index.html\", template_data)\n", "issue": "Replace reward point redemption dropdown with number input field\nIf a user selects an option, a new line is added and the selection spans two rows. This looks wrong.\r\n\r\nA user can insert custom options. If the user inputs something invalid like \"abcdef\" or an empty string, only parts of \"Please select\"-placeholder is visible. This looks wrong as well.\nReplace reward point redemption dropdown with number input field\nIf a user selects an option, a new line is added and the selection spans two rows. This looks wrong.\r\n\r\nA user can insert custom options. If the user inputs something invalid like \"abcdef\" or an empty string, only parts of \"Please select\"-placeholder is visible. This looks wrong as well.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import BadRequest, SuspiciousOperation\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.evaluation.auth import manager_required, reward_user_required\nfrom evap.evaluation.models import Semester\nfrom evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x\nfrom evap.rewards.exporters import RewardsExporter\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.models import (\n NoPointsSelected,\n NotEnoughPoints,\n RedemptionEventExpired,\n RewardPointGranting,\n RewardPointRedemption,\n RewardPointRedemptionEvent,\n SemesterActivation,\n)\nfrom evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions\nfrom evap.staff.views import semester_view\n\n\n@reward_user_required\ndef index(request):\n if request.method == \"POST\":\n redemptions = {}\n try:\n for key, value in request.POST.items():\n if key.startswith(\"points-\"):\n event_id = int(key.rpartition(\"-\")[2])\n redemptions[event_id] = int(value)\n except ValueError as e:\n raise BadRequest from e\n\n try:\n save_redemptions(request, redemptions)\n messages.success(request, _(\"You successfully redeemed your points.\"))\n except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:\n messages.warning(request, error)\n\n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n\n reward_point_actions = []\n for granting in reward_point_grantings:\n reward_point_actions.append(\n (granting.granting_time, _(\"Reward for\") + \" \" + granting.semester.name, granting.value, \"\")\n )\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, \"\", redemption.value))\n\n 
reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n template_data = dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n point_selection=range(0, total_points_available + 1),\n )\n return render(request, \"rewards_index.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by(\"-date\")\n template_data = dict(upcoming_events=upcoming_events, past_events=past_events)\n return render(request, \"rewards_reward_point_redemption_events.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(form=form))\n\n\n@manager_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(event=event, form=form))\n\n\n@require_POST\n@manager_required\ndef reward_point_redemption_event_delete(request):\n event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, \"event_id\")\n\n if not event.can_delete:\n raise SuspiciousOperation(\"Deleting redemption event not allowed\")\n event.delete()\n return HttpResponse() # 200 OK\n\n\n@manager_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\") + f\"-{event.date}-{event.name}-{get_language()}.xls\"\n response = AttachmentResponse(filename, content_type=\"application/vnd.ms-excel\")\n\n RewardsExporter().export(response, event.redemptions_by_user())\n\n return response\n\n\n@manager_required\ndef semester_activation(request, semester_id, active):\n semester = get_object_or_404(Semester, id=semester_id)\n active = active == \"on\"\n\n SemesterActivation.objects.update_or_create(semester=semester, defaults={\"is_active\": active})\n if active:\n grant_eligible_reward_points_for_semester(request, semester)\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import BadRequest, SuspiciousOperation\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom evap.evaluation.auth import manager_required, reward_user_required\nfrom evap.evaluation.models import Semester\nfrom 
evap.evaluation.tools import AttachmentResponse, get_object_from_dict_pk_entry_or_logged_40x\nfrom evap.rewards.exporters import RewardsExporter\nfrom evap.rewards.forms import RewardPointRedemptionEventForm\nfrom evap.rewards.models import (\n NoPointsSelected,\n NotEnoughPoints,\n RedemptionEventExpired,\n RewardPointGranting,\n RewardPointRedemption,\n RewardPointRedemptionEvent,\n SemesterActivation,\n)\nfrom evap.rewards.tools import grant_eligible_reward_points_for_semester, reward_points_of_user, save_redemptions\nfrom evap.staff.views import semester_view\n\n\n@reward_user_required\ndef index(request):\n if request.method == \"POST\":\n redemptions = {}\n try:\n for key, value in request.POST.items():\n if key.startswith(\"points-\"):\n event_id = int(key.rpartition(\"-\")[2])\n redemptions[event_id] = int(value)\n except ValueError as e:\n raise BadRequest from e\n\n try:\n save_redemptions(request, redemptions)\n messages.success(request, _(\"You successfully redeemed your points.\"))\n except (NoPointsSelected, NotEnoughPoints, RedemptionEventExpired) as error:\n messages.warning(request, error)\n\n total_points_available = reward_points_of_user(request.user)\n reward_point_grantings = RewardPointGranting.objects.filter(user_profile=request.user)\n reward_point_redemptions = RewardPointRedemption.objects.filter(user_profile=request.user)\n events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n\n reward_point_actions = []\n for granting in reward_point_grantings:\n reward_point_actions.append(\n (granting.granting_time, _(\"Reward for\") + \" \" + granting.semester.name, granting.value, \"\")\n )\n for redemption in reward_point_redemptions:\n reward_point_actions.append((redemption.redemption_time, redemption.event.name, \"\", redemption.value))\n\n reward_point_actions.sort(key=lambda action: action[0], reverse=True)\n\n template_data = dict(\n reward_point_actions=reward_point_actions,\n total_points_available=total_points_available,\n events=events,\n )\n return render(request, \"rewards_index.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_events(request):\n upcoming_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__gte=datetime.now()).order_by(\"date\")\n past_events = RewardPointRedemptionEvent.objects.filter(redeem_end_date__lt=datetime.now()).order_by(\"-date\")\n template_data = dict(upcoming_events=upcoming_events, past_events=past_events)\n return render(request, \"rewards_reward_point_redemption_events.html\", template_data)\n\n\n@manager_required\ndef reward_point_redemption_event_create(request):\n event = RewardPointRedemptionEvent()\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n form.save()\n messages.success(request, _(\"Successfully created event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(form=form))\n\n\n@manager_required\ndef reward_point_redemption_event_edit(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n form = RewardPointRedemptionEventForm(request.POST or None, instance=event)\n\n if form.is_valid():\n event = form.save()\n\n messages.success(request, _(\"Successfully updated event.\"))\n return redirect(\"rewards:reward_point_redemption_events\")\n\n return render(request, \"rewards_reward_point_redemption_event_form.html\", dict(event=event, 
form=form))\n\n\n@require_POST\n@manager_required\ndef reward_point_redemption_event_delete(request):\n event = get_object_from_dict_pk_entry_or_logged_40x(RewardPointRedemptionEvent, request.POST, \"event_id\")\n\n if not event.can_delete:\n raise SuspiciousOperation(\"Deleting redemption event not allowed\")\n event.delete()\n return HttpResponse() # 200 OK\n\n\n@manager_required\ndef reward_point_redemption_event_export(request, event_id):\n event = get_object_or_404(RewardPointRedemptionEvent, id=event_id)\n\n filename = _(\"RewardPoints\") + f\"-{event.date}-{event.name}-{get_language()}.xls\"\n response = AttachmentResponse(filename, content_type=\"application/vnd.ms-excel\")\n\n RewardsExporter().export(response, event.redemptions_by_user())\n\n return response\n\n\n@manager_required\ndef semester_activation(request, semester_id, active):\n semester = get_object_or_404(Semester, id=semester_id)\n active = active == \"on\"\n\n SemesterActivation.objects.update_or_create(semester=semester, defaults={\"is_active\": active})\n if active:\n grant_eligible_reward_points_for_semester(request, semester)\n\n return semester_view(request=request, semester_id=semester_id)\n", "path": "evap/rewards/views.py"}]}
| 1,901 | 107 |
gh_patches_debug_18891
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-3198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `safely_reserve_a_username`
This function keeps using a cursor after an `IntegrityError` exception is raised, which is invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.
Fix `safely_reserve_a_username`
This function keeps using a cursor after an `IntegrityError` exception is raised, which is invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/utils/username.py`
Content:
```
1 from psycopg2 import IntegrityError
2 import random
3
4
5 class FailedToReserveUsername(Exception): pass
6 class RanOutOfUsernameAttempts(Exception): pass
7
8
9 def gen_random_usernames():
10 """Yield random 12-hex-digit unicodes.
11 """
12 while 1:
13 yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')
14
15
16 def insert_into_participants(cursor, username):
17 return cursor.one( "INSERT INTO participants (username, username_lower) "
18 "VALUES (%s, %s) RETURNING username"
19 , (username, username.lower())
20 )
21
22
23 def safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,
24 reserve=insert_into_participants):
25 """Safely reserve a username.
26
27 :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`
28 transaction
29 :param gen_usernames: a generator of usernames to try
30 :param reserve: a function that takes the cursor and does the SQL
31 stuff
32 :database: one ``INSERT`` on average
33 :returns: a 12-hex-digit unicode
34 :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found
35 within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username
36 generator runs out first
37
38 The returned value is guaranteed to have been reserved in the database.
39
40 """
41 seatbelt = 0
42 for username in gen_usernames():
43 seatbelt += 1
44 if seatbelt > 100:
45 raise FailedToReserveUsername
46
47 try:
48 check = reserve(cursor, username)
49 except IntegrityError: # Collision, try again with another value.
50 continue
51 else:
52 assert check == username
53 break
54 else:
55 raise RanOutOfUsernameAttempts
56 return username
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py
--- a/gratipay/utils/username.py
+++ b/gratipay/utils/username.py
@@ -38,6 +38,8 @@
The returned value is guaranteed to have been reserved in the database.
"""
+ cursor.execute("SAVEPOINT safely_reserve_a_username")
+
seatbelt = 0
for username in gen_usernames():
seatbelt += 1
@@ -47,10 +49,13 @@
try:
check = reserve(cursor, username)
except IntegrityError: # Collision, try again with another value.
+ cursor.execute("ROLLBACK TO safely_reserve_a_username")
continue
else:
assert check == username
break
else:
raise RanOutOfUsernameAttempts
+
+ cursor.execute("RELEASE safely_reserve_a_username")
return username
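The fix works because a PostgreSQL SAVEPOINT scopes the failure to the single INSERT: after `ROLLBACK TO`, the enclosing transaction and its cursor remain usable. A standalone sketch of the same pattern, using illustrative table and savepoint names rather than the project's own helpers:

```python
# Standalone sketch of the savepoint pattern; names are illustrative.
from psycopg2 import IntegrityError

def try_reserve(cursor, username):
    cursor.execute("SAVEPOINT reserve_username")
    try:
        cursor.execute(
            "INSERT INTO participants (username, username_lower) VALUES (%s, %s)",
            (username, username.lower()),
        )
    except IntegrityError:
        # Only the work since the savepoint is rolled back; the cursor stays valid.
        cursor.execute("ROLLBACK TO reserve_username")
        return False
    cursor.execute("RELEASE reserve_username")
    return True
```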
|
{"golden_diff": "diff --git a/gratipay/utils/username.py b/gratipay/utils/username.py\n--- a/gratipay/utils/username.py\n+++ b/gratipay/utils/username.py\n@@ -38,6 +38,8 @@\n The returned value is guaranteed to have been reserved in the database.\n \n \"\"\"\n+ cursor.execute(\"SAVEPOINT safely_reserve_a_username\")\n+\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n@@ -47,10 +49,13 @@\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n+ cursor.execute(\"ROLLBACK TO safely_reserve_a_username\")\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n+\n+ cursor.execute(\"RELEASE safely_reserve_a_username\")\n return username\n", "issue": "Fix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\nFix `safely_reserve_a_username`\nThis function keeps using a cursor after an `IntegrityError` exception is raised, that's invalid and raises another exception. See https://github.com/gratipay/gratipay.com/pull/2752#issuecomment-65266388.\n\n", "before_files": [{"content": "from psycopg2 import IntegrityError\nimport random\n\n\nclass FailedToReserveUsername(Exception): pass\nclass RanOutOfUsernameAttempts(Exception): pass\n\n\ndef gen_random_usernames():\n \"\"\"Yield random 12-hex-digit unicodes.\n \"\"\"\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n\n\ndef insert_into_participants(cursor, username):\n return cursor.one( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s) RETURNING username\"\n , (username, username.lower())\n )\n\n\ndef safely_reserve_a_username(cursor, gen_usernames=gen_random_usernames,\n reserve=insert_into_participants):\n \"\"\"Safely reserve a username.\n\n :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`\n transaction\n :param gen_usernames: a generator of usernames to try\n :param reserve: a function that takes the cursor and does the SQL\n stuff\n :database: one ``INSERT`` on average\n :returns: a 12-hex-digit unicode\n :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found\n within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username\n generator runs out first\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n if seatbelt > 100:\n raise FailedToReserveUsername\n\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n return username\n", "path": "gratipay/utils/username.py"}], "after_files": [{"content": "from psycopg2 import IntegrityError\nimport random\n\n\nclass FailedToReserveUsername(Exception): pass\nclass RanOutOfUsernameAttempts(Exception): pass\n\n\ndef gen_random_usernames():\n \"\"\"Yield random 12-hex-digit unicodes.\n \"\"\"\n while 1:\n yield hex(int(random.random() * 16**12))[2:].zfill(12).decode('ASCII')\n\n\ndef insert_into_participants(cursor, username):\n return cursor.one( \"INSERT INTO participants (username, username_lower) \"\n \"VALUES (%s, %s) RETURNING username\"\n , (username, username.lower())\n )\n\n\ndef safely_reserve_a_username(cursor, 
gen_usernames=gen_random_usernames,\n reserve=insert_into_participants):\n \"\"\"Safely reserve a username.\n\n :param cursor: a :py:class:`psycopg2.cursor` managed as a :py:mod:`postgres`\n transaction\n :param gen_usernames: a generator of usernames to try\n :param reserve: a function that takes the cursor and does the SQL\n stuff\n :database: one ``INSERT`` on average\n :returns: a 12-hex-digit unicode\n :raises: :py:class:`FailedToReserveUsername` if no acceptable username is found\n within 100 attempts, or :py:class:`RanOutOfUsernameAttempts` if the username\n generator runs out first\n\n The returned value is guaranteed to have been reserved in the database.\n\n \"\"\"\n cursor.execute(\"SAVEPOINT safely_reserve_a_username\")\n\n seatbelt = 0\n for username in gen_usernames():\n seatbelt += 1\n if seatbelt > 100:\n raise FailedToReserveUsername\n\n try:\n check = reserve(cursor, username)\n except IntegrityError: # Collision, try again with another value.\n cursor.execute(\"ROLLBACK TO safely_reserve_a_username\")\n continue\n else:\n assert check == username\n break\n else:\n raise RanOutOfUsernameAttempts\n\n cursor.execute(\"RELEASE safely_reserve_a_username\")\n return username\n", "path": "gratipay/utils/username.py"}]}
| 920 | 198 |
gh_patches_debug_31158
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-3189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export does not include read date
**Describe the bug**
When exporting data into a CSV file, several fields are exported, but `read date` is not one of them, despite being extremely valuable.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Profile'
2. Click on 'Export to CSV'
3. Download CSV file
4. Open CSV file
**Expected behavior**
A column containing read date should be included among the current ones
**Instance**
bookwyrm.social
---
**Desktop (please complete the following information):**
- OS: KDE Neon
- Browser Firefox, Chromium
- Version
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/preferences/export.py`
Content:
```
1 """ Let users export their book data """
2 from datetime import timedelta
3 import csv
4 import io
5
6 from django.contrib.auth.decorators import login_required
7 from django.core.paginator import Paginator
8 from django.db.models import Q
9 from django.http import HttpResponse
10 from django.template.response import TemplateResponse
11 from django.utils import timezone
12 from django.views import View
13 from django.utils.decorators import method_decorator
14 from django.shortcuts import redirect
15
16 from bookwyrm import models
17 from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob
18 from bookwyrm.settings import PAGE_LENGTH
19
20 # pylint: disable=no-self-use
21 @method_decorator(login_required, name="dispatch")
22 class Export(View):
23 """Let users export data"""
24
25 def get(self, request):
26 """Request csv file"""
27 return TemplateResponse(request, "preferences/export.html")
28
29 def post(self, request):
30 """Download the csv file of a user's book data"""
31 books = models.Edition.viewer_aware_objects(request.user)
32 books_shelves = books.filter(Q(shelves__user=request.user)).distinct()
33 books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()
34 books_review = books.filter(Q(review__user=request.user)).distinct()
35 books_comment = books.filter(Q(comment__user=request.user)).distinct()
36 books_quotation = books.filter(Q(quotation__user=request.user)).distinct()
37
38 books = set(
39 list(books_shelves)
40 + list(books_readthrough)
41 + list(books_review)
42 + list(books_comment)
43 + list(books_quotation)
44 )
45
46 csv_string = io.StringIO()
47 writer = csv.writer(csv_string)
48
49 deduplication_fields = [
50 f.name
51 for f in models.Edition._meta.get_fields() # pylint: disable=protected-access
52 if getattr(f, "deduplication_field", False)
53 ]
54 fields = (
55 ["title", "author_text"]
56 + deduplication_fields
57 + ["rating", "review_name", "review_cw", "review_content"]
58 )
59 writer.writerow(fields)
60
61 for book in books:
62 # I think this is more efficient than doing a subquery in the view? but idk
63 review_rating = (
64 models.Review.objects.filter(
65 user=request.user, book=book, rating__isnull=False
66 )
67 .order_by("-published_date")
68 .first()
69 )
70
71 book.rating = review_rating.rating if review_rating else None
72
73 review = (
74 models.Review.objects.filter(
75 user=request.user, book=book, content__isnull=False
76 )
77 .order_by("-published_date")
78 .first()
79 )
80 if review:
81 book.review_name = review.name
82 book.review_cw = review.content_warning
83 book.review_content = review.raw_content
84 writer.writerow([getattr(book, field, "") or "" for field in fields])
85
86 return HttpResponse(
87 csv_string.getvalue(),
88 content_type="text/csv",
89 headers={
90 "Content-Disposition": 'attachment; filename="bookwyrm-export.csv"'
91 },
92 )
93
94
95 # pylint: disable=no-self-use
96 @method_decorator(login_required, name="dispatch")
97 class ExportUser(View):
98 """Let users export user data to import into another Bookwyrm instance"""
99
100 def get(self, request):
101 """Request tar file"""
102
103 jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(
104 "-created_date"
105 )
106 site = models.SiteSettings.objects.get()
107 hours = site.user_import_time_limit
108 allowed = (
109 jobs.first().created_date < timezone.now() - timedelta(hours=hours)
110 if jobs.first()
111 else True
112 )
113 next_available = (
114 jobs.first().created_date + timedelta(hours=hours) if not allowed else False
115 )
116 paginated = Paginator(jobs, PAGE_LENGTH)
117 page = paginated.get_page(request.GET.get("page"))
118 data = {
119 "jobs": page,
120 "next_available": next_available,
121 "page_range": paginated.get_elided_page_range(
122 page.number, on_each_side=2, on_ends=1
123 ),
124 }
125
126 return TemplateResponse(request, "preferences/export-user.html", data)
127
128 def post(self, request):
129 """Download the json file of a user's data"""
130
131 job = BookwyrmExportJob.objects.create(user=request.user)
132 job.start_job()
133
134 return redirect("prefs-user-export")
135
136
137 @method_decorator(login_required, name="dispatch")
138 class ExportArchive(View):
139 """Serve the archive file"""
140
141 def get(self, request, archive_id):
142 """download user export file"""
143 export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)
144 return HttpResponse(
145 export.export_data,
146 content_type="application/gzip",
147 headers={
148 "Content-Disposition": 'attachment; filename="bookwyrm-account-export.tar.gz"' # pylint: disable=line-too-long
149 },
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py
--- a/bookwyrm/views/preferences/export.py
+++ b/bookwyrm/views/preferences/export.py
@@ -17,7 +17,7 @@
from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob
from bookwyrm.settings import PAGE_LENGTH
-# pylint: disable=no-self-use
+# pylint: disable=no-self-use,too-many-locals
@method_decorator(login_required, name="dispatch")
class Export(View):
"""Let users export data"""
@@ -54,6 +54,7 @@
fields = (
["title", "author_text"]
+ deduplication_fields
+ + ["start_date", "finish_date", "stopped_date"]
+ ["rating", "review_name", "review_cw", "review_content"]
)
writer.writerow(fields)
@@ -70,6 +71,24 @@
book.rating = review_rating.rating if review_rating else None
+ readthrough = (
+ models.ReadThrough.objects.filter(user=request.user, book=book)
+ .order_by("-start_date", "-finish_date")
+ .first()
+ )
+ if readthrough:
+ book.start_date = (
+ readthrough.start_date.date() if readthrough.start_date else None
+ )
+ book.finish_date = (
+ readthrough.finish_date.date() if readthrough.finish_date else None
+ )
+ book.stopped_date = (
+ readthrough.stopped_date.date()
+ if readthrough.stopped_date
+ else None
+ )
+
review = (
models.Review.objects.filter(
user=request.user, book=book, content__isnull=False
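The added block stores the latest ReadThrough's dates directly on the book object, so the existing `getattr(book, field, "")` row construction emits them once the new column names appear in `fields`. A compressed, illustration-only restatement of that idea (the helper name is invented here, not part of the patch):

```python
# Illustration of the idea behind the patch; the helper name is invented.
def attach_read_dates(book, readthrough):
    """readthrough: the user's most recent ReadThrough for this book, or None."""
    for field in ("start_date", "finish_date", "stopped_date"):
        value = getattr(readthrough, field, None) if readthrough else None
        setattr(book, field, value.date() if value else None)
```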
|
{"golden_diff": "diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py\n--- a/bookwyrm/views/preferences/export.py\n+++ b/bookwyrm/views/preferences/export.py\n@@ -17,7 +17,7 @@\n from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\n from bookwyrm.settings import PAGE_LENGTH\n \n-# pylint: disable=no-self-use\n+# pylint: disable=no-self-use,too-many-locals\n @method_decorator(login_required, name=\"dispatch\")\n class Export(View):\n \"\"\"Let users export data\"\"\"\n@@ -54,6 +54,7 @@\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n+ + [\"start_date\", \"finish_date\", \"stopped_date\"]\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n@@ -70,6 +71,24 @@\n \n book.rating = review_rating.rating if review_rating else None\n \n+ readthrough = (\n+ models.ReadThrough.objects.filter(user=request.user, book=book)\n+ .order_by(\"-start_date\", \"-finish_date\")\n+ .first()\n+ )\n+ if readthrough:\n+ book.start_date = (\n+ readthrough.start_date.date() if readthrough.start_date else None\n+ )\n+ book.finish_date = (\n+ readthrough.finish_date.date() if readthrough.finish_date else None\n+ )\n+ book.stopped_date = (\n+ readthrough.stopped_date.date()\n+ if readthrough.stopped_date\n+ else None\n+ )\n+\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n", "issue": "CSV export does not include read date\n**Describe the bug**\r\nWhen exporting data into a CSV file, several fields are exported, but `read date` is not one of them, despite being exremelly valuable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Profile'\r\n2. Click on 'Export to CSV'\r\n3. Download CSV file\r\n4. 
Open CSV file\r\n\r\n**Expected behavior**\r\nA column containing read date should be included among the current ones\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: KDE Neon\r\n - Browser Firefox, Chromium\r\n - Version \r\n\n", "before_files": [{"content": "\"\"\" Let users export their book data \"\"\"\nfrom datetime import timedelta\nimport csv\nimport io\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\n\nfrom bookwyrm import models\nfrom bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\nfrom bookwyrm.settings import PAGE_LENGTH\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Export(View):\n \"\"\"Let users export data\"\"\"\n\n def get(self, request):\n \"\"\"Request csv file\"\"\"\n return TemplateResponse(request, \"preferences/export.html\")\n\n def post(self, request):\n \"\"\"Download the csv file of a user's book data\"\"\"\n books = models.Edition.viewer_aware_objects(request.user)\n books_shelves = books.filter(Q(shelves__user=request.user)).distinct()\n books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()\n books_review = books.filter(Q(review__user=request.user)).distinct()\n books_comment = books.filter(Q(comment__user=request.user)).distinct()\n books_quotation = books.filter(Q(quotation__user=request.user)).distinct()\n\n books = set(\n list(books_shelves)\n + list(books_readthrough)\n + list(books_review)\n + list(books_comment)\n + list(books_quotation)\n )\n\n csv_string = io.StringIO()\n writer = csv.writer(csv_string)\n\n deduplication_fields = [\n f.name\n for f in models.Edition._meta.get_fields() # pylint: disable=protected-access\n if getattr(f, \"deduplication_field\", False)\n ]\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n\n for book in books:\n # I think this is more efficient than doing a subquery in the view? 
but idk\n review_rating = (\n models.Review.objects.filter(\n user=request.user, book=book, rating__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n\n book.rating = review_rating.rating if review_rating else None\n\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n if review:\n book.review_name = review.name\n book.review_cw = review.content_warning\n book.review_content = review.raw_content\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n\n return HttpResponse(\n csv_string.getvalue(),\n content_type=\"text/csv\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-export.csv\"'\n },\n )\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportUser(View):\n \"\"\"Let users export user data to import into another Bookwyrm instance\"\"\"\n\n def get(self, request):\n \"\"\"Request tar file\"\"\"\n\n jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n )\n site = models.SiteSettings.objects.get()\n hours = site.user_import_time_limit\n allowed = (\n jobs.first().created_date < timezone.now() - timedelta(hours=hours)\n if jobs.first()\n else True\n )\n next_available = (\n jobs.first().created_date + timedelta(hours=hours) if not allowed else False\n )\n paginated = Paginator(jobs, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"jobs\": page,\n \"next_available\": next_available,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"preferences/export-user.html\", data)\n\n def post(self, request):\n \"\"\"Download the json file of a user's data\"\"\"\n\n job = BookwyrmExportJob.objects.create(user=request.user)\n job.start_job()\n\n return redirect(\"prefs-user-export\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportArchive(View):\n \"\"\"Serve the archive file\"\"\"\n\n def get(self, request, archive_id):\n \"\"\"download user export file\"\"\"\n export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)\n return HttpResponse(\n export.export_data,\n content_type=\"application/gzip\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-account-export.tar.gz\"' # pylint: disable=line-too-long\n },\n )\n", "path": "bookwyrm/views/preferences/export.py"}], "after_files": [{"content": "\"\"\" Let users export their book data \"\"\"\nfrom datetime import timedelta\nimport csv\nimport io\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\n\nfrom bookwyrm import models\nfrom bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\nfrom bookwyrm.settings import PAGE_LENGTH\n\n# pylint: disable=no-self-use,too-many-locals\n@method_decorator(login_required, name=\"dispatch\")\nclass Export(View):\n \"\"\"Let users export data\"\"\"\n\n def get(self, request):\n \"\"\"Request csv file\"\"\"\n return TemplateResponse(request, \"preferences/export.html\")\n\n def post(self, request):\n \"\"\"Download the csv file of a user's book 
data\"\"\"\n books = models.Edition.viewer_aware_objects(request.user)\n books_shelves = books.filter(Q(shelves__user=request.user)).distinct()\n books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()\n books_review = books.filter(Q(review__user=request.user)).distinct()\n books_comment = books.filter(Q(comment__user=request.user)).distinct()\n books_quotation = books.filter(Q(quotation__user=request.user)).distinct()\n\n books = set(\n list(books_shelves)\n + list(books_readthrough)\n + list(books_review)\n + list(books_comment)\n + list(books_quotation)\n )\n\n csv_string = io.StringIO()\n writer = csv.writer(csv_string)\n\n deduplication_fields = [\n f.name\n for f in models.Edition._meta.get_fields() # pylint: disable=protected-access\n if getattr(f, \"deduplication_field\", False)\n ]\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n + [\"start_date\", \"finish_date\", \"stopped_date\"]\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n\n for book in books:\n # I think this is more efficient than doing a subquery in the view? but idk\n review_rating = (\n models.Review.objects.filter(\n user=request.user, book=book, rating__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n\n book.rating = review_rating.rating if review_rating else None\n\n readthrough = (\n models.ReadThrough.objects.filter(user=request.user, book=book)\n .order_by(\"-start_date\", \"-finish_date\")\n .first()\n )\n if readthrough:\n book.start_date = (\n readthrough.start_date.date() if readthrough.start_date else None\n )\n book.finish_date = (\n readthrough.finish_date.date() if readthrough.finish_date else None\n )\n book.stopped_date = (\n readthrough.stopped_date.date()\n if readthrough.stopped_date\n else None\n )\n\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n if review:\n book.review_name = review.name\n book.review_cw = review.content_warning\n book.review_content = review.raw_content\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n\n return HttpResponse(\n csv_string.getvalue(),\n content_type=\"text/csv\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-export.csv\"'\n },\n )\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportUser(View):\n \"\"\"Let users export user data to import into another Bookwyrm instance\"\"\"\n\n def get(self, request):\n \"\"\"Request tar file\"\"\"\n\n jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n )\n site = models.SiteSettings.objects.get()\n hours = site.user_import_time_limit\n allowed = (\n jobs.first().created_date < timezone.now() - timedelta(hours=hours)\n if jobs.first()\n else True\n )\n next_available = (\n jobs.first().created_date + timedelta(hours=hours) if not allowed else False\n )\n paginated = Paginator(jobs, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"jobs\": page,\n \"next_available\": next_available,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"preferences/export-user.html\", data)\n\n def post(self, request):\n \"\"\"Download the json file of a user's data\"\"\"\n\n job = BookwyrmExportJob.objects.create(user=request.user)\n job.start_job()\n\n return 
redirect(\"prefs-user-export\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportArchive(View):\n \"\"\"Serve the archive file\"\"\"\n\n def get(self, request, archive_id):\n \"\"\"download user export file\"\"\"\n export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)\n return HttpResponse(\n export.export_data,\n content_type=\"application/gzip\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-account-export.tar.gz\"' # pylint: disable=line-too-long\n },\n )\n", "path": "bookwyrm/views/preferences/export.py"}]}
| 1,808 | 380 |
gh_patches_debug_5570
|
rasdani/github-patches
|
git_diff
|
mindsdb__lightwood-40
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModuleNotFoundError: No module named '_lzma'
I've tried to test lightwood with [home rentals
example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.
Screenshot: (image not included)
It looks like _lzma is a dependency of pandas, but it should be included with the Python 3.x standard library.
--- END ISSUE ---
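The diagnosis above can be confirmed independently of lightwood; a minimal check (an illustrative sketch, assuming a Unix-like Python 3 install) is:

```python
# If this import raises ModuleNotFoundError: No module named '_lzma', the
# interpreter was compiled without liblzma headers (e.g. xz-devel/liblzma-dev);
# rebuilding Python after installing them restores pandas' .xz support.
import lzma

print(lzma.open)  # reaching this line means the _lzma extension is present
```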
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/__about__.py`
Content:
```
1 __title__ = 'lightwood'
2 __package_name__ = 'mindsdb'
3 __version__ = '0.9.0'
4 __description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
5 __email__ = "[email protected]"
6 __author__ = 'MindsDB Inc'
7 __github__ = 'https://github.com/mindsdb/lightwood'
8 __pypi__ = 'https://pypi.org/project/lightwood'
9 __license__ = 'MIT'
10 __copyright__ = 'Copyright 2019- mindsdb'
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lightwood/__about__.py b/lightwood/__about__.py
--- a/lightwood/__about__.py
+++ b/lightwood/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'lightwood'
__package_name__ = 'mindsdb'
-__version__ = '0.9.0'
+__version__ = '0.9.1'
__description__ = "Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
|
{"golden_diff": "diff --git a/lightwood/__about__.py b/lightwood/__about__.py\n--- a/lightwood/__about__.py\n+++ b/lightwood/__about__.py\n@@ -1,6 +1,6 @@\n __title__ = 'lightwood'\n __package_name__ = 'mindsdb'\n-__version__ = '0.9.0'\n+__version__ = '0.9.1'\n __description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n __email__ = \"[email protected]\"\n __author__ = 'MindsDB Inc'\n", "issue": "ModuleNotFoundError: No module named '_lzma'\nI've tried to test lightwood with [home rentals\r\n example](https://github.com/mindsdb/lightwood/blob/master/docs/examples/home_rentals.py) but got ModuleNotFoundError: No module named '_lzma'.\r\n\r\nScreenshot:\r\n\r\n\r\nIt looks like _lzma is a dependency to pandas, but it should be included with Python 3.x version.\r\n\r\n\n", "before_files": [{"content": "__title__ = 'lightwood'\n__package_name__ = 'mindsdb'\n__version__ = '0.9.0'\n__description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/lightwood'\n__pypi__ = 'https://pypi.org/project/lightwood'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019- mindsdb'\n", "path": "lightwood/__about__.py"}], "after_files": [{"content": "__title__ = 'lightwood'\n__package_name__ = 'mindsdb'\n__version__ = '0.9.1'\n__description__ = \"Lightwood's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/lightwood'\n__pypi__ = 'https://pypi.org/project/lightwood'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019- mindsdb'\n", "path": "lightwood/__about__.py"}]}
| 571 | 139 |
gh_patches_debug_713
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-1826
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Agate type inference is too clever
### Describe the bug
We’re trying to set a value from a {% call statement %}; within the call, one line is SELECT 0 AS my_value... and dbt then treats it as a boolean (false) in the returned values.
The same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value it treats it like a number (as it should).
### Steps To Reproduce
Create a call statement that selects 0 or 1; false and true, respectively, will be returned.
### Expected behavior
0, or 1 to be returned, as integers.
### Screenshots and log output
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [x] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
installed version: 0.15.0-a1
latest version: 0.14.2
Your version of dbt is ahead of the latest release!
```
FYI, we run a fork, but that shouldn't have affected anything here.
**The operating system you're using:**
Mojave
**The output of `python --version`:**
Python 3.7.1
### Additional context
We'd love a quick fix for this, even if it's ugly!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/clients/agate_helper.py`
Content:
```
1 from codecs import BOM_UTF8
2
3 import agate
4 import json
5
6
7 BOM = BOM_UTF8.decode('utf-8') # '\ufeff'
8
9 DEFAULT_TYPE_TESTER = agate.TypeTester(types=[
10 agate.data_types.Number(null_values=('null', '')),
11 agate.data_types.TimeDelta(null_values=('null', '')),
12 agate.data_types.Date(null_values=('null', '')),
13 agate.data_types.DateTime(null_values=('null', '')),
14 agate.data_types.Boolean(true_values=('true',),
15 false_values=('false',),
16 null_values=('null', '')),
17 agate.data_types.Text(null_values=('null', ''))
18 ])
19
20
21 def table_from_data(data, column_names):
22 "Convert list of dictionaries into an Agate table"
23
24 # The agate table is generated from a list of dicts, so the column order
25 # from `data` is not preserved. We can use `select` to reorder the columns
26 #
27 # If there is no data, create an empty table with the specified columns
28
29 if len(data) == 0:
30 return agate.Table([], column_names=column_names)
31 else:
32 table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)
33 return table.select(column_names)
34
35
36 def table_from_data_flat(data, column_names):
37 "Convert list of dictionaries into an Agate table"
38
39 rows = []
40 for _row in data:
41 row = []
42 for value in list(_row.values()):
43 if isinstance(value, (dict, list, tuple)):
44 row.append(json.dumps(value))
45 else:
46 row.append(value)
47 rows.append(row)
48
49 return agate.Table(rows, column_names)
50
51
52 def empty_table():
53 "Returns an empty Agate table. To be used in place of None"
54
55 return agate.Table(rows=[])
56
57
58 def as_matrix(table):
59 "Return an agate table as a matrix of data sans columns"
60
61 return [r.values() for r in table.rows.values()]
62
63
64 def from_csv(abspath):
65 with open(abspath, encoding='utf-8') as fp:
66 if fp.read(1) != BOM:
67 fp.seek(0)
68 return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py
--- a/core/dbt/clients/agate_helper.py
+++ b/core/dbt/clients/agate_helper.py
@@ -46,7 +46,7 @@
row.append(value)
rows.append(row)
- return agate.Table(rows, column_names)
+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)
def empty_table():
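To see why the one-line change works, here is a small illustrative sketch (assuming the `agate` package; output comments describe the expected, not guaranteed, result) contrasting default inference with an explicit `TypeTester`:

```python
import agate

rows = [(0,), (1,)]

# With no column_types, agate's default TypeTester tries Boolean before Number,
# so a column holding only 0/1 is likely inferred as Boolean -- the bug above.
inferred = agate.Table(rows, ['my_value'])
print([type(t).__name__ for t in inferred.column_types])

# An explicit tester (like dbt's DEFAULT_TYPE_TESTER) keeps the column numeric.
tester = agate.TypeTester(types=[
    agate.data_types.Number(null_values=('null', '')),
    agate.data_types.Text(null_values=('null', '')),
])
forced = agate.Table(rows, ['my_value'], column_types=tester)
print([type(t).__name__ for t in forced.column_types])  # expected: ['Number']
```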
|
{"golden_diff": "diff --git a/core/dbt/clients/agate_helper.py b/core/dbt/clients/agate_helper.py\n--- a/core/dbt/clients/agate_helper.py\n+++ b/core/dbt/clients/agate_helper.py\n@@ -46,7 +46,7 @@\n row.append(value)\n rows.append(row)\n \n- return agate.Table(rows, column_names)\n+ return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)\n \n \n def empty_table():\n", "issue": "Agate type inference is too clever\n### Describe the bug\r\nWe\u2019re trying to set a value from a {% call statement %} and within the call, one line is SELECT 0 AS my_value...and it then treats it as a boolean (false) in the returned values. \r\n\r\nThe same happens if we try SELECT 1 AS my_value, but as soon as we do SELECT 2 AS my_value it treats it like a number (as it should).\r\n\r\n### Steps To Reproduce\r\nCreate a call statement that selects 0, or 1. false, and true respectively will be returned.\r\n\r\n### Expected behavior\r\n0, or 1 to be returned, as integers.\r\n\r\n### Screenshots and log output\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [x] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ninstalled version: 0.15.0-a1\r\n latest version: 0.14.2\r\nYour version of dbt is ahead of the latest release!\r\n```\r\n\r\nFYI, we run a fork, but that shouldn't have affected anything here.\r\n\r\n**The operating system you're using:**\r\n\r\nMojave\r\n\r\n**The output of `python --version`:**\r\n\r\nPython 3.7.1\r\n\r\n### Additional context\r\nWe'd love a quick fix for this, even if it's ugly!\r\n\n", "before_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport json\n\n\nBOM = BOM_UTF8.decode('utf-8') # '\\ufeff'\n\nDEFAULT_TYPE_TESTER = agate.TypeTester(types=[\n agate.data_types.Number(null_values=('null', '')),\n agate.data_types.TimeDelta(null_values=('null', '')),\n agate.data_types.Date(null_values=('null', '')),\n agate.data_types.DateTime(null_values=('null', '')),\n agate.data_types.Boolean(true_values=('true',),\n false_values=('false',),\n null_values=('null', '')),\n agate.data_types.Text(null_values=('null', ''))\n])\n\n\ndef table_from_data(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n rows = []\n for _row in data:\n row = []\n for value in list(_row.values()):\n if isinstance(value, (dict, list, tuple)):\n row.append(json.dumps(value))\n else:\n row.append(value)\n rows.append(row)\n\n return agate.Table(rows, column_names)\n\n\ndef empty_table():\n \"Returns an empty Agate table. 
To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath):\n with open(abspath, encoding='utf-8') as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)\n", "path": "core/dbt/clients/agate_helper.py"}], "after_files": [{"content": "from codecs import BOM_UTF8\n\nimport agate\nimport json\n\n\nBOM = BOM_UTF8.decode('utf-8') # '\\ufeff'\n\nDEFAULT_TYPE_TESTER = agate.TypeTester(types=[\n agate.data_types.Number(null_values=('null', '')),\n agate.data_types.TimeDelta(null_values=('null', '')),\n agate.data_types.Date(null_values=('null', '')),\n agate.data_types.DateTime(null_values=('null', '')),\n agate.data_types.Boolean(true_values=('true',),\n false_values=('false',),\n null_values=('null', '')),\n agate.data_types.Text(null_values=('null', ''))\n])\n\n\ndef table_from_data(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n # The agate table is generated from a list of dicts, so the column order\n # from `data` is not preserved. We can use `select` to reorder the columns\n #\n # If there is no data, create an empty table with the specified columns\n\n if len(data) == 0:\n return agate.Table([], column_names=column_names)\n else:\n table = agate.Table.from_object(data, column_types=DEFAULT_TYPE_TESTER)\n return table.select(column_names)\n\n\ndef table_from_data_flat(data, column_names):\n \"Convert list of dictionaries into an Agate table\"\n\n rows = []\n for _row in data:\n row = []\n for value in list(_row.values()):\n if isinstance(value, (dict, list, tuple)):\n row.append(json.dumps(value))\n else:\n row.append(value)\n rows.append(row)\n\n return agate.Table(rows, column_names, column_types=DEFAULT_TYPE_TESTER)\n\n\ndef empty_table():\n \"Returns an empty Agate table. To be used in place of None\"\n\n return agate.Table(rows=[])\n\n\ndef as_matrix(table):\n \"Return an agate table as a matrix of data sans columns\"\n\n return [r.values() for r in table.rows.values()]\n\n\ndef from_csv(abspath):\n with open(abspath, encoding='utf-8') as fp:\n if fp.read(1) != BOM:\n fp.seek(0)\n return agate.Table.from_csv(fp, column_types=DEFAULT_TYPE_TESTER)\n", "path": "core/dbt/clients/agate_helper.py"}]}
| 1,205 | 106 |
gh_patches_debug_1031
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-3857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[beta][v20] Signing up/logging in/searching with an emoji causes a 500
Server: Beta
Version: v20-RC3/d3fd8af
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Go to the sign-up page and enter a username such as: 👚 test
2. Fill in the other fields.
3. Submit the form.
4. Observe a 500 error.
Note: You can reproduce the same error by trying to log in with the same username or by searching for a member's username.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/misc.py`
Content:
```
1 # coding: utf-8
2 import hashlib
3 import re
4
5 THUMB_MAX_WIDTH = 80
6 THUMB_MAX_HEIGHT = 80
7
8 MEDIUM_MAX_WIDTH = 200
9 MEDIUM_MAX_HEIGHT = 200
10
11
12 def compute_hash(filenames):
13 """returns a md5 hexdigest of group of files to check if they have change"""
14 md5_hash = hashlib.md5()
15 for filename in filenames:
16 if filename:
17 file_handle = open(filename, 'rb')
18 must_continue = True
19 while must_continue:
20 read_bytes = file_handle.read(8096)
21 if not read_bytes:
22 must_continue = False
23 else:
24 md5_hash.update(read_bytes)
25 return md5_hash.hexdigest()
26
27
28 def content_has_changed(filenames, md5):
29 return md5 != compute_hash(filenames)
30
31
32 def has_changed(instance, field, manager='objects'):
33 """Returns true if a field has changed in a model May be used in a
34 model.save() method."""
35 if not instance.pk:
36 return True
37 manager = getattr(instance.__class__, manager)
38 old = getattr(manager.get(pk=instance.pk), field)
39 return not getattr(instance, field) == old
40
41
42 def convert_camel_to_underscore(camel_case):
43 """
44 Converts a name in camel case to underscore.
45 """
46 s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel_case)
47 return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
48
49
50 def contains_utf8mb4(s):
51 """
52 This string contains at least one character of more than 3 bytes
53 """
54 if not isinstance(s, unicode):
55 s = unicode(s, 'utf-8')
56 return not all(len(c.encode('utf-8')) <= 3 for c in s)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/utils/misc.py b/zds/utils/misc.py
--- a/zds/utils/misc.py
+++ b/zds/utils/misc.py
@@ -53,4 +53,5 @@
"""
if not isinstance(s, unicode):
s = unicode(s, 'utf-8')
- return not all(len(c.encode('utf-8')) <= 3 for c in s)
+ re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
+ return s != re_pattern.sub(u'\uFFFD', s)
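A short usage sketch of the patched helper (written in Python 3 syntax for readability; the project code above is Python 2, hence `unicode`):

```python
import re

def contains_utf8mb4(s):
    # Characters outside the Basic Multilingual Plane (most emoji) need 4 bytes
    # in UTF-8, which MySQL's legacy 'utf8' charset cannot store; detecting them
    # up front avoids the 500 reported above.
    re_pattern = re.compile(u'[^\u0000-\uD7FF\uE000-\uFFFF]', re.UNICODE)
    return s != re_pattern.sub(u'\uFFFD', s)

print(contains_utf8mb4(u'plain pseudo'))     # False
print(contains_utf8mb4(u'\U0001F45A test'))  # True: the emoji from the report
```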
|
{"golden_diff": "diff --git a/zds/utils/misc.py b/zds/utils/misc.py\n--- a/zds/utils/misc.py\n+++ b/zds/utils/misc.py\n@@ -53,4 +53,5 @@\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n- return not all(len(c.encode('utf-8')) <= 3 for c in s)\n+ re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n+ return s != re_pattern.sub(u'\\uFFFD', s)\n", "issue": "[beta][v20] S'inscrire/se connecter/chercher avec un emoji provoque une 500\nServeur : Beta\nVersion : v20-RC3/d3fd8af\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. Rendez-vous \u00e0 la page d'inscription et renseigner un pseudo du type : \ud83d\udc5a test\n2. Remplissez les autres champs.\n3. Soumettez le formulaire.\n4. Constatez une erreur 500.\n\nNote : Vous pouvez reproduire la m\u00eame erreur en tentant de vous connecter avec le m\u00eame pseudo ou en faisant une recherche sur le pseudo d'un membre.\n\n", "before_files": [{"content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n return not all(len(c.encode('utf-8')) <= 3 for c in s)\n", "path": "zds/utils/misc.py"}], "after_files": [{"content": "# coding: utf-8\nimport hashlib\nimport re\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, 'rb')\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager='objects'):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n 
Converts a name in camel case to underscore.\n \"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, unicode):\n s = unicode(s, 'utf-8')\n re_pattern = re.compile(u'[^\\u0000-\\uD7FF\\uE000-\\uFFFF]', re.UNICODE)\n return s != re_pattern.sub(u'\\uFFFD', s)\n", "path": "zds/utils/misc.py"}]}
| 947 | 134 |
gh_patches_debug_39012
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-6031
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Orthogonal initializer dimensions error
Orthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.
e.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`
e.g. 2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`
System Info:
Chainer: 5.0.0rc1
NumPy: 1.14.2
CuPy:
CuPy Version : 5.0.0rc1
CUDA Root : /usr/local/cuda
CUDA Build Version : 8000
CUDA Driver Version : 9020
CUDA Runtime Version : 8000
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2213
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/initializers/orthogonal.py`
Content:
```
1 import numpy
2
3 from chainer import backend
4 from chainer import initializer
5 from chainer import utils
6
7
8 # Original code forked from MIT licensed keras project
9 # https://github.com/fchollet/keras/blob/master/keras/initializations.py
10
11 class Orthogonal(initializer.Initializer):
12 """Initializes array with an orthogonal system.
13
14 This initializer first makes a matrix of the same shape as the
15 array to be initialized whose elements are drawn independently from
16 standard Gaussian distribution.
17 Next, it applies QR decomposition to (the transpose of) the matrix.
18 To make the decomposition (almost surely) unique, we require the diagonal
19 of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,
20 https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).
21 Then, it initializes the array with the (semi-)orthogonal matrix Q.
22 Finally, the array is multiplied by the constant ``scale``.
23
24 If the ``ndim`` of the input array is more than 2, we consider the array
25 to be a matrix by concatenating all axes except the first one.
26
27 The number of vectors consisting of the orthogonal system
28 (i.e. first element of the shape of the array) must be equal to or smaller
29 than the dimension of each vector (i.e. second element of the shape of
30 the array).
31
32 Attributes:
33 scale (float): A constant to be multiplied by.
34 dtype: Data type specifier.
35
36 Reference: Saxe et al., https://arxiv.org/abs/1312.6120
37
38 """
39
40 def __init__(self, scale=1.1, dtype=None):
41 self.scale = scale
42 super(Orthogonal, self).__init__(dtype)
43
44 # TODO(Kenta Oono)
45 # How do we treat overcomplete base-system case?
46 def __call__(self, array):
47 if self.dtype is not None:
48 assert array.dtype == self.dtype
49 xp = backend.get_array_module(array)
50 if not array.shape: # 0-dim case
51 array[...] = self.scale * (2 * numpy.random.randint(2) - 1)
52 elif not array.size:
53 raise ValueError('Array to be initialized must be non-empty.')
54 else:
55 # numpy.prod returns float value when the argument is empty.
56 flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
57 if flat_shape[0] > flat_shape[1]:
58 raise ValueError('Cannot make orthogonal system because'
59 ' # of vectors ({}) is larger than'
60 ' that of dimensions ({})'.format(
61 flat_shape[0], flat_shape[1]))
62 a = numpy.random.normal(size=flat_shape)
63 # cupy.linalg.qr requires cusolver in CUDA 8+
64 q, r = numpy.linalg.qr(a.T)
65 q *= numpy.copysign(self.scale, numpy.diag(r))
66 array[...] = xp.asarray(q.T.reshape(array.shape))
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py
--- a/chainer/initializers/orthogonal.py
+++ b/chainer/initializers/orthogonal.py
@@ -5,6 +5,14 @@
from chainer import utils
+_orthogonal_constraints = { # (assert emb., assert proj.)
+ 'auto': (False, False),
+ 'projection': (False, True),
+ 'embedding': (True, False),
+ 'basis': (True, True),
+}
+
+
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
@@ -32,13 +40,24 @@
Attributes:
scale (float): A constant to be multiplied by.
dtype: Data type specifier.
+ mode (str): Assertion on the initialized shape.
+ ``'auto'`` (default), ``'projection'`` (before v7),
+ ``'embedding'``, or ``'basis'``.
Reference: Saxe et al., https://arxiv.org/abs/1312.6120
"""
- def __init__(self, scale=1.1, dtype=None):
+ def __init__(self, scale=1.1, dtype=None, mode='auto'):
self.scale = scale
+ self.mode = mode
+ try:
+ self._checks = _orthogonal_constraints[mode]
+ except KeyError:
+ raise ValueError(
+ 'Invalid mode: {}. Choose from {}.'.format(
+ repr(mode),
+ ', '.join(repr(m) for m in _orthogonal_constraints)))
super(Orthogonal, self).__init__(dtype)
# TODO(Kenta Oono)
@@ -53,14 +72,22 @@
raise ValueError('Array to be initialized must be non-empty.')
else:
# numpy.prod returns float value when the argument is empty.
- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))
- if flat_shape[0] > flat_shape[1]:
- raise ValueError('Cannot make orthogonal system because'
- ' # of vectors ({}) is larger than'
- ' that of dimensions ({})'.format(
- flat_shape[0], flat_shape[1]))
- a = numpy.random.normal(size=flat_shape)
+ out_dim = len(array)
+ in_dim = utils.size_of_shape(array.shape[1:])
+ if (in_dim > out_dim and self._checks[0]) or (
+ in_dim < out_dim and self._checks[1]):
+ raise ValueError(
+ 'Cannot make orthogonal {}.'
+ 'shape = {}, interpreted as '
+ '{}-dim input and {}-dim output.'.format(
+ self.mode, array.shape, in_dim, out_dim))
+ transpose = in_dim > out_dim
+ a = numpy.random.normal(size=(out_dim, in_dim))
+ if transpose:
+ a = a.T
# cupy.linalg.qr requires cusolver in CUDA 8+
- q, r = numpy.linalg.qr(a.T)
+ q, r = numpy.linalg.qr(a)
q *= numpy.copysign(self.scale, numpy.diag(r))
- array[...] = xp.asarray(q.T.reshape(array.shape))
+ if transpose:
+ q = q.T
+ array[...] = xp.asarray(q.reshape(array.shape))
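The essence of the fix as a standalone NumPy sketch (illustrative only; the shape follows the `L.Linear(64, 128)` example from the report, whose weight matrix is 128x64):

```python
import numpy as np

def orthogonal_like(shape, scale=1.1):
    # Flatten trailing axes, run QR on the "fat" orientation, and transpose
    # back if needed, instead of raising when rows exceed columns.
    out_dim, in_dim = shape[0], int(np.prod(shape[1:]))
    transpose = in_dim > out_dim
    a = np.random.normal(size=(out_dim, in_dim))
    q, r = np.linalg.qr(a.T if transpose else a)
    q *= np.copysign(scale, np.diag(r))
    return (q.T if transpose else q).reshape(shape)

w = orthogonal_like((128, 64))  # previously raised ValueError
print(np.allclose(w.T @ w, np.eye(64) * 1.1 ** 2, atol=1e-6))  # scaled orthonormal columns
```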
|
{"golden_diff": "diff --git a/chainer/initializers/orthogonal.py b/chainer/initializers/orthogonal.py\n--- a/chainer/initializers/orthogonal.py\n+++ b/chainer/initializers/orthogonal.py\n@@ -5,6 +5,14 @@\n from chainer import utils\n \n \n+_orthogonal_constraints = { # (assert emb., assert proj.)\n+ 'auto': (False, False),\n+ 'projection': (False, True),\n+ 'embedding': (True, False),\n+ 'basis': (True, True),\n+}\n+\n+\n # Original code forked from MIT licensed keras project\n # https://github.com/fchollet/keras/blob/master/keras/initializations.py\n \n@@ -32,13 +40,24 @@\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n+ mode (str): Assertion on the initialized shape.\n+ ``'auto'`` (default), ``'projection'`` (before v7),\n+ ``'embedding'``, or ``'basis'``.\n \n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n \n \"\"\"\n \n- def __init__(self, scale=1.1, dtype=None):\n+ def __init__(self, scale=1.1, dtype=None, mode='auto'):\n self.scale = scale\n+ self.mode = mode\n+ try:\n+ self._checks = _orthogonal_constraints[mode]\n+ except KeyError:\n+ raise ValueError(\n+ 'Invalid mode: {}. Choose from {}.'.format(\n+ repr(mode),\n+ ', '.join(repr(m) for m in _orthogonal_constraints)))\n super(Orthogonal, self).__init__(dtype)\n \n # TODO(Kenta Oono)\n@@ -53,14 +72,22 @@\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n- flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n- if flat_shape[0] > flat_shape[1]:\n- raise ValueError('Cannot make orthogonal system because'\n- ' # of vectors ({}) is larger than'\n- ' that of dimensions ({})'.format(\n- flat_shape[0], flat_shape[1]))\n- a = numpy.random.normal(size=flat_shape)\n+ out_dim = len(array)\n+ in_dim = utils.size_of_shape(array.shape[1:])\n+ if (in_dim > out_dim and self._checks[0]) or (\n+ in_dim < out_dim and self._checks[1]):\n+ raise ValueError(\n+ 'Cannot make orthogonal {}.'\n+ 'shape = {}, interpreted as '\n+ '{}-dim input and {}-dim output.'.format(\n+ self.mode, array.shape, in_dim, out_dim))\n+ transpose = in_dim > out_dim\n+ a = numpy.random.normal(size=(out_dim, in_dim))\n+ if transpose:\n+ a = a.T\n # cupy.linalg.qr requires cusolver in CUDA 8+\n- q, r = numpy.linalg.qr(a.T)\n+ q, r = numpy.linalg.qr(a)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n- array[...] = xp.asarray(q.T.reshape(array.shape))\n+ if transpose:\n+ q = q.T\n+ array[...] = xp.asarray(q.reshape(array.shape))\n", "issue": "Orthogonal initializer dimensions error\nOrthogonal Initializer throws an error whenever the first dimension is larger than the second. This could be fixed by transposing the array rather than throwing an error.\r\n\r\n\r\ne.g. 1. ` dense = L.Linear(64,128, initialW=initializers.Orthogonal())`\r\n\r\n\r\n\r\n\r\ne.g. 
2 `initializers.generate_array(initializers.Orthogonal(), (20,10), numpy, 'f')`\r\n\r\n\r\n\r\n\r\nSystem Info:\r\nChainer: 5.0.0rc1\r\nNumPy: 1.14.2\r\nCuPy:\r\n CuPy Version : 5.0.0rc1\r\n CUDA Root : /usr/local/cuda\r\n CUDA Build Version : 8000\r\n CUDA Driver Version : 9020\r\n CUDA Runtime Version : 8000\r\n cuDNN Build Version : 7102\r\n cuDNN Version : 7102\r\n NCCL Build Version : 2213\r\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import backend\nfrom chainer import initializer\nfrom chainer import utils\n\n\n# Original code forked from MIT licensed keras project\n# https://github.com/fchollet/keras/blob/master/keras/initializations.py\n\nclass Orthogonal(initializer.Initializer):\n \"\"\"Initializes array with an orthogonal system.\n\n This initializer first makes a matrix of the same shape as the\n array to be initialized whose elements are drawn independently from\n standard Gaussian distribution.\n Next, it applies QR decomposition to (the transpose of) the matrix.\n To make the decomposition (almost surely) unique, we require the diagonal\n of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,\n https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).\n Then, it initializes the array with the (semi-)orthogonal matrix Q.\n Finally, the array is multiplied by the constant ``scale``.\n\n If the ``ndim`` of the input array is more than 2, we consider the array\n to be a matrix by concatenating all axes except the first one.\n\n The number of vectors consisting of the orthogonal system\n (i.e. first element of the shape of the array) must be equal to or smaller\n than the dimension of each vector (i.e. second element of the shape of\n the array).\n\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n\n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n\n \"\"\"\n\n def __init__(self, scale=1.1, dtype=None):\n self.scale = scale\n super(Orthogonal, self).__init__(dtype)\n\n # TODO(Kenta Oono)\n # How do we treat overcomplete base-system case?\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n xp = backend.get_array_module(array)\n if not array.shape: # 0-dim case\n array[...] = self.scale * (2 * numpy.random.randint(2) - 1)\n elif not array.size:\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n flat_shape = (len(array), utils.size_of_shape(array.shape[1:]))\n if flat_shape[0] > flat_shape[1]:\n raise ValueError('Cannot make orthogonal system because'\n ' # of vectors ({}) is larger than'\n ' that of dimensions ({})'.format(\n flat_shape[0], flat_shape[1]))\n a = numpy.random.normal(size=flat_shape)\n # cupy.linalg.qr requires cusolver in CUDA 8+\n q, r = numpy.linalg.qr(a.T)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n array[...] 
= xp.asarray(q.T.reshape(array.shape))\n", "path": "chainer/initializers/orthogonal.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import backend\nfrom chainer import initializer\nfrom chainer import utils\n\n\n_orthogonal_constraints = { # (assert emb., assert proj.)\n 'auto': (False, False),\n 'projection': (False, True),\n 'embedding': (True, False),\n 'basis': (True, True),\n}\n\n\n# Original code forked from MIT licensed keras project\n# https://github.com/fchollet/keras/blob/master/keras/initializations.py\n\nclass Orthogonal(initializer.Initializer):\n \"\"\"Initializes array with an orthogonal system.\n\n This initializer first makes a matrix of the same shape as the\n array to be initialized whose elements are drawn independently from\n standard Gaussian distribution.\n Next, it applies QR decomposition to (the transpose of) the matrix.\n To make the decomposition (almost surely) unique, we require the diagonal\n of the triangular matrix R to be non-negative (see e.g. Edelman & Rao,\n https://web.eecs.umich.edu/~rajnrao/Acta05rmt.pdf).\n Then, it initializes the array with the (semi-)orthogonal matrix Q.\n Finally, the array is multiplied by the constant ``scale``.\n\n If the ``ndim`` of the input array is more than 2, we consider the array\n to be a matrix by concatenating all axes except the first one.\n\n The number of vectors consisting of the orthogonal system\n (i.e. first element of the shape of the array) must be equal to or smaller\n than the dimension of each vector (i.e. second element of the shape of\n the array).\n\n Attributes:\n scale (float): A constant to be multiplied by.\n dtype: Data type specifier.\n mode (str): Assertion on the initialized shape.\n ``'auto'`` (default), ``'projection'`` (before v7),\n ``'embedding'``, or ``'basis'``.\n\n Reference: Saxe et al., https://arxiv.org/abs/1312.6120\n\n \"\"\"\n\n def __init__(self, scale=1.1, dtype=None, mode='auto'):\n self.scale = scale\n self.mode = mode\n try:\n self._checks = _orthogonal_constraints[mode]\n except KeyError:\n raise ValueError(\n 'Invalid mode: {}. Choose from {}.'.format(\n repr(mode),\n ', '.join(repr(m) for m in _orthogonal_constraints)))\n super(Orthogonal, self).__init__(dtype)\n\n # TODO(Kenta Oono)\n # How do we treat overcomplete base-system case?\n def __call__(self, array):\n if self.dtype is not None:\n assert array.dtype == self.dtype\n xp = backend.get_array_module(array)\n if not array.shape: # 0-dim case\n array[...] = self.scale * (2 * numpy.random.randint(2) - 1)\n elif not array.size:\n raise ValueError('Array to be initialized must be non-empty.')\n else:\n # numpy.prod returns float value when the argument is empty.\n out_dim = len(array)\n in_dim = utils.size_of_shape(array.shape[1:])\n if (in_dim > out_dim and self._checks[0]) or (\n in_dim < out_dim and self._checks[1]):\n raise ValueError(\n 'Cannot make orthogonal {}.'\n 'shape = {}, interpreted as '\n '{}-dim input and {}-dim output.'.format(\n self.mode, array.shape, in_dim, out_dim))\n transpose = in_dim > out_dim\n a = numpy.random.normal(size=(out_dim, in_dim))\n if transpose:\n a = a.T\n # cupy.linalg.qr requires cusolver in CUDA 8+\n q, r = numpy.linalg.qr(a)\n q *= numpy.copysign(self.scale, numpy.diag(r))\n if transpose:\n q = q.T\n array[...] = xp.asarray(q.reshape(array.shape))\n", "path": "chainer/initializers/orthogonal.py"}]}
| 1,402 | 773 |
gh_patches_debug_18049
|
rasdani/github-patches
|
git_diff
|
PrefectHQ__prefect-9390
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter by work pool not filtering the "dot" graph
### First check
- [X] I added a descriptive title to this issue.
- [X] I used the GitHub search to find a similar issue and didn't find it.
- [X] I searched the Prefect documentation for this issue.
- [X] I checked that this issue is related to Prefect and not one of its dependencies.
### Bug summary
When filtering by "work pool" in the flows screen, the "dots graph" is not filtered.
The "main-pool" is a pool I've just created, and there are no runs associated to it.
### Reproduction
```python3
Create a new "work pool".
Make sure it's empty and has no runs in it.
Go to the flows screen, and filter by this work pool.
You'll see that all the dots on the graph remain although they are not related to that work pool.
```
### Error
_No response_
### Versions
```Text
Prefect cloud
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/server/api/ui/flow_runs.py`
Content:
```
1 import datetime
2 from typing import List
3 from uuid import UUID
4
5 from fastapi import Body, Depends
6 from pydantic import Field
7
8 import prefect.server.schemas as schemas
9 from prefect.logging import get_logger
10 from prefect.server import models
11 from prefect.server.database.dependencies import provide_database_interface
12 from prefect.server.database.interface import PrefectDBInterface
13 from prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel
14 from prefect.server.utilities.server import PrefectRouter
15
16 logger = get_logger("server.api.ui.flow_runs")
17
18 router = PrefectRouter(prefix="/ui/flow_runs", tags=["Flow Runs", "UI"])
19
20
21 class SimpleFlowRun(PrefectBaseModel):
22 id: UUID = Field(default=..., description="The flow run id.")
23 state_type: schemas.states.StateType = Field(
24 default=..., description="The state type."
25 )
26 timestamp: DateTimeTZ = Field(
27 default=...,
28 description=(
29 "The start time of the run, or the expected start time "
30 "if it hasn't run yet."
31 ),
32 )
33 duration: datetime.timedelta = Field(
34 default=..., description="The total run time of the run."
35 )
36 lateness: datetime.timedelta = Field(
37 default=..., description="The delay between the expected and actual start time."
38 )
39
40
41 @router.post("/history")
42 async def read_flow_run_history(
43 sort: schemas.sorting.FlowRunSort = Body(
44 schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC
45 ),
46 limit: int = Body(1000, le=1000),
47 offset: int = Body(0, ge=0),
48 flows: schemas.filters.FlowFilter = None,
49 flow_runs: schemas.filters.FlowRunFilter = None,
50 task_runs: schemas.filters.TaskRunFilter = None,
51 deployments: schemas.filters.DeploymentFilter = None,
52 db: PrefectDBInterface = Depends(provide_database_interface),
53 ) -> List[SimpleFlowRun]:
54 columns = [
55 db.FlowRun.id,
56 db.FlowRun.state_type,
57 db.FlowRun.start_time,
58 db.FlowRun.expected_start_time,
59 db.FlowRun.total_run_time,
60 # Although it isn't returned, we need to select
61 # this field in order to compute `estimated_run_time`
62 db.FlowRun.state_timestamp,
63 ]
64 async with db.session_context() as session:
65 result = await models.flow_runs.read_flow_runs(
66 columns=columns,
67 flow_filter=flows,
68 flow_run_filter=flow_runs,
69 task_run_filter=task_runs,
70 deployment_filter=deployments,
71 sort=sort,
72 limit=limit,
73 offset=offset,
74 session=session,
75 )
76 return [
77 SimpleFlowRun(
78 id=r.id,
79 state_type=r.state_type,
80 timestamp=r.start_time or r.expected_start_time,
81 duration=r.estimated_run_time,
82 lateness=r.estimated_start_time_delta,
83 )
84 for r in result
85 ]
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py
--- a/src/prefect/server/api/ui/flow_runs.py
+++ b/src/prefect/server/api/ui/flow_runs.py
@@ -49,6 +49,7 @@
flow_runs: schemas.filters.FlowRunFilter = None,
task_runs: schemas.filters.TaskRunFilter = None,
deployments: schemas.filters.DeploymentFilter = None,
+ work_pools: schemas.filters.WorkPoolFilter = None,
db: PrefectDBInterface = Depends(provide_database_interface),
) -> List[SimpleFlowRun]:
columns = [
@@ -68,6 +69,7 @@
flow_run_filter=flow_runs,
task_run_filter=task_runs,
deployment_filter=deployments,
+ work_pool_filter=work_pools,
sort=sort,
limit=limit,
offset=offset,
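With the filter threaded through, the UI history endpoint can be scoped to a pool. A hedged example request (assumes a local default Prefect server on port 4200 and the standard `WorkPoolFilter` shape; field names may differ between versions):

```python
import httpx

# Only runs belonging to "main-pool" should now be returned for the dot graph.
payload = {
    "sort": "EXPECTED_START_TIME_DESC",
    "limit": 1000,
    "offset": 0,
    "work_pools": {"name": {"any_": ["main-pool"]}},
}
resp = httpx.post("http://127.0.0.1:4200/api/ui/flow_runs/history", json=payload)
print(resp.json())
```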
|
{"golden_diff": "diff --git a/src/prefect/server/api/ui/flow_runs.py b/src/prefect/server/api/ui/flow_runs.py\n--- a/src/prefect/server/api/ui/flow_runs.py\n+++ b/src/prefect/server/api/ui/flow_runs.py\n@@ -49,6 +49,7 @@\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n+ work_pools: schemas.filters.WorkPoolFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n ) -> List[SimpleFlowRun]:\n columns = [\n@@ -68,6 +69,7 @@\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n+ work_pool_filter=work_pools,\n sort=sort,\n limit=limit,\n offset=offset,\n", "issue": "Filter by work pool not filtering the \"dot\" graph\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nWhen filtering by \"work pool\" in the flows screen, the \"dots graph\" is not filtered.\r\nThe \"main-pool\" is a pool I've just created, and there are no runs associated to it.\r\n\r\n\r\n\r\n\n\n### Reproduction\n\n```python3\nCreate a new \"work flow\".\r\nMake sure it's empty and has no runs in it.\r\nGo to the flows screen, and filter by this work pool.\r\nYou'll see that all the dots on the graph remain although they are not related to that work pool.\n```\n\n\n### Error\n\n_No response_\n\n### Versions\n\n```Text\nPrefect cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom fastapi import Body, Depends\nfrom pydantic import Field\n\nimport prefect.server.schemas as schemas\nfrom prefect.logging import get_logger\nfrom prefect.server import models\nfrom prefect.server.database.dependencies import provide_database_interface\nfrom prefect.server.database.interface import PrefectDBInterface\nfrom prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel\nfrom prefect.server.utilities.server import PrefectRouter\n\nlogger = get_logger(\"server.api.ui.flow_runs\")\n\nrouter = PrefectRouter(prefix=\"/ui/flow_runs\", tags=[\"Flow Runs\", \"UI\"])\n\n\nclass SimpleFlowRun(PrefectBaseModel):\n id: UUID = Field(default=..., description=\"The flow run id.\")\n state_type: schemas.states.StateType = Field(\n default=..., description=\"The state type.\"\n )\n timestamp: DateTimeTZ = Field(\n default=...,\n description=(\n \"The start time of the run, or the expected start time \"\n \"if it hasn't run yet.\"\n ),\n )\n duration: datetime.timedelta = Field(\n default=..., description=\"The total run time of the run.\"\n )\n lateness: datetime.timedelta = Field(\n default=..., description=\"The delay between the expected and actual start time.\"\n )\n\n\[email protected](\"/history\")\nasync def read_flow_run_history(\n sort: schemas.sorting.FlowRunSort = Body(\n schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC\n ),\n limit: int = Body(1000, le=1000),\n offset: int = Body(0, ge=0),\n flows: schemas.filters.FlowFilter = None,\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[SimpleFlowRun]:\n columns = [\n 
db.FlowRun.id,\n db.FlowRun.state_type,\n db.FlowRun.start_time,\n db.FlowRun.expected_start_time,\n db.FlowRun.total_run_time,\n # Although it isn't returned, we need to select\n # this field in order to compute `estimated_run_time`\n db.FlowRun.state_timestamp,\n ]\n async with db.session_context() as session:\n result = await models.flow_runs.read_flow_runs(\n columns=columns,\n flow_filter=flows,\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n sort=sort,\n limit=limit,\n offset=offset,\n session=session,\n )\n return [\n SimpleFlowRun(\n id=r.id,\n state_type=r.state_type,\n timestamp=r.start_time or r.expected_start_time,\n duration=r.estimated_run_time,\n lateness=r.estimated_start_time_delta,\n )\n for r in result\n ]\n", "path": "src/prefect/server/api/ui/flow_runs.py"}], "after_files": [{"content": "import datetime\nfrom typing import List\nfrom uuid import UUID\n\nfrom fastapi import Body, Depends\nfrom pydantic import Field\n\nimport prefect.server.schemas as schemas\nfrom prefect.logging import get_logger\nfrom prefect.server import models\nfrom prefect.server.database.dependencies import provide_database_interface\nfrom prefect.server.database.interface import PrefectDBInterface\nfrom prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel\nfrom prefect.server.utilities.server import PrefectRouter\n\nlogger = get_logger(\"server.api.ui.flow_runs\")\n\nrouter = PrefectRouter(prefix=\"/ui/flow_runs\", tags=[\"Flow Runs\", \"UI\"])\n\n\nclass SimpleFlowRun(PrefectBaseModel):\n id: UUID = Field(default=..., description=\"The flow run id.\")\n state_type: schemas.states.StateType = Field(\n default=..., description=\"The state type.\"\n )\n timestamp: DateTimeTZ = Field(\n default=...,\n description=(\n \"The start time of the run, or the expected start time \"\n \"if it hasn't run yet.\"\n ),\n )\n duration: datetime.timedelta = Field(\n default=..., description=\"The total run time of the run.\"\n )\n lateness: datetime.timedelta = Field(\n default=..., description=\"The delay between the expected and actual start time.\"\n )\n\n\[email protected](\"/history\")\nasync def read_flow_run_history(\n sort: schemas.sorting.FlowRunSort = Body(\n schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC\n ),\n limit: int = Body(1000, le=1000),\n offset: int = Body(0, ge=0),\n flows: schemas.filters.FlowFilter = None,\n flow_runs: schemas.filters.FlowRunFilter = None,\n task_runs: schemas.filters.TaskRunFilter = None,\n deployments: schemas.filters.DeploymentFilter = None,\n work_pools: schemas.filters.WorkPoolFilter = None,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[SimpleFlowRun]:\n columns = [\n db.FlowRun.id,\n db.FlowRun.state_type,\n db.FlowRun.start_time,\n db.FlowRun.expected_start_time,\n db.FlowRun.total_run_time,\n # Although it isn't returned, we need to select\n # this field in order to compute `estimated_run_time`\n db.FlowRun.state_timestamp,\n ]\n async with db.session_context() as session:\n result = await models.flow_runs.read_flow_runs(\n columns=columns,\n flow_filter=flows,\n flow_run_filter=flow_runs,\n task_run_filter=task_runs,\n deployment_filter=deployments,\n work_pool_filter=work_pools,\n sort=sort,\n limit=limit,\n offset=offset,\n session=session,\n )\n return [\n SimpleFlowRun(\n id=r.id,\n state_type=r.state_type,\n timestamp=r.start_time or r.expected_start_time,\n duration=r.estimated_run_time,\n lateness=r.estimated_start_time_delta,\n )\n for r in result\n ]\n", 
"path": "src/prefect/server/api/ui/flow_runs.py"}]}
| 1,369 | 203 |
gh_patches_debug_4840
|
rasdani/github-patches
|
git_diff
|
vega__altair-1192
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect description of an example
https://altair-viz.github.io/gallery/scatter_linked_brush.html
The title of the page says "Faceted Scatter Plot with Linked Brushing".
But the example is a concatenated view, not a faceted view.
(The data points are shown twice in the visualization, not split by a category.)
--- END ISSUE ---
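For context on the distinction the report draws, a small illustrative sketch (same cars dataset): the `|` operator concatenates two independent charts, while encoding a `column` channel is what actually facets:

```python
import altair as alt
from vega_datasets import data

cars = data.cars()
base = alt.Chart(cars).mark_point().encode(x='Horsepower', y='Miles_per_Gallon')

concatenated = base | base.encode(x='Acceleration')  # what the gallery example does
faceted = base.encode(column='Origin')               # a genuinely faceted (small-multiple) view
```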
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v2/examples/scatter_linked_brush.py`
Content:
```
1 """
2 Faceted Scatter Plot with Linked Brushing
3 -----------------------------------------
4 This is an example of using an interval selection to control the color of
5 points across multiple facets.
6 """
7 # category: interactive charts
8 import altair as alt
9 from vega_datasets import data
10
11 cars = data.cars()
12
13 brush = alt.selection(type='interval', resolve='global')
14
15 base = alt.Chart(cars).mark_point().encode(
16 y='Miles_per_Gallon',
17 color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))
18 ).add_selection(
19 brush
20 ).properties(
21 width=250,
22 height=250
23 )
24
25 base.encode(x='Horsepower') | base.encode(x='Acceleration')
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py
--- a/altair/vegalite/v2/examples/scatter_linked_brush.py
+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py
@@ -1,8 +1,8 @@
"""
-Faceted Scatter Plot with Linked Brushing
------------------------------------------
+Multi-panel Scatter Plot with Linked Brushing
+---------------------------------------------
This is an example of using an interval selection to control the color of
-points across multiple facets.
+points across multiple panels.
"""
# category: interactive charts
import altair as alt
|
{"golden_diff": "diff --git a/altair/vegalite/v2/examples/scatter_linked_brush.py b/altair/vegalite/v2/examples/scatter_linked_brush.py\n--- a/altair/vegalite/v2/examples/scatter_linked_brush.py\n+++ b/altair/vegalite/v2/examples/scatter_linked_brush.py\n@@ -1,8 +1,8 @@\n \"\"\"\n-Faceted Scatter Plot with Linked Brushing\n------------------------------------------\n+Multi-panel Scatter Plot with Linked Brushing\n+---------------------------------------------\n This is an example of using an interval selection to control the color of\n-points across multiple facets.\n+points across multiple panels.\n \"\"\"\n # category: interactive charts\n import altair as alt\n", "issue": "Incorrect description of an example\nhttps://altair-viz.github.io/gallery/scatter_linked_brush.html\r\n\r\nThe title of the page says \"Faceted Scatter Plot with Linked Brushing\".\r\nBut the example is a concatenated view, not a faceted view. \r\n(The data points are shown twice in the visualization, not split by a category.) \n", "before_files": [{"content": "\"\"\"\nFaceted Scatter Plot with Linked Brushing\n-----------------------------------------\nThis is an example of using an interval selection to control the color of\npoints across multiple facets.\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\n\nbrush = alt.selection(type='interval', resolve='global')\n\nbase = alt.Chart(cars).mark_point().encode(\n y='Miles_per_Gallon',\n color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))\n).add_selection(\n brush\n).properties(\n width=250,\n height=250\n)\n\nbase.encode(x='Horsepower') | base.encode(x='Acceleration')\n", "path": "altair/vegalite/v2/examples/scatter_linked_brush.py"}], "after_files": [{"content": "\"\"\"\nMulti-panel Scatter Plot with Linked Brushing\n---------------------------------------------\nThis is an example of using an interval selection to control the color of\npoints across multiple panels.\n\"\"\"\n# category: interactive charts\nimport altair as alt\nfrom vega_datasets import data\n\ncars = data.cars()\n\nbrush = alt.selection(type='interval', resolve='global')\n\nbase = alt.Chart(cars).mark_point().encode(\n y='Miles_per_Gallon',\n color=alt.condition(brush, 'Origin', alt.ColorValue('gray'))\n).add_selection(\n brush\n).properties(\n width=250,\n height=250\n)\n\nbase.encode(x='Horsepower') | base.encode(x='Acceleration')\n", "path": "altair/vegalite/v2/examples/scatter_linked_brush.py"}]}
| 535 | 151 |
gh_patches_debug_39588
|
rasdani/github-patches
|
git_diff
|
google__turbinia-1002
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve exception handling in FileSystemTimelineJob
Under certain conditions, dfvfs may throw exceptions that are not currently being handled:
```
dfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/workers/file_system_timeline.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2022 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Task to run dfimagetools FileEntryLister on disk partitions."""
16
17 from __future__ import unicode_literals
18
19 import os
20
21 from turbinia import TurbiniaException
22 from turbinia.workers import TurbiniaTask
23 from turbinia.evidence import EvidenceState as state
24 from turbinia.evidence import BodyFile
25
26 if TurbiniaTask.check_worker_role():
27 try:
28 from dfvfs.helpers import volume_scanner
29 from dfvfs.lib import errors as dfvfs_errors
30 from dfimagetools import file_entry_lister
31 except ImportError as exception:
32 message = 'Could not import libraries: {0!s}'.format(exception)
33 raise TurbiniaException(message)
34
35
36 class FileSystemTimelineTask(TurbiniaTask):
37
38 REQUIRED_STATES = [state.ATTACHED]
39
40 TASK_CONFIG = {'partitions': ['all']}
41
42 def run(self, evidence, result):
43 """Task to execute (dfimagetools) FileEntryLister.
44
45 Args:
46 evidence (Evidence object): The evidence we will process.
47 result (TurbiniaTaskResult): The object to place task results into.
48
49 Returns:
50 TurbiniaTaskResult object.
51 """
52 bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')
53 output_evidence = BodyFile(source_path=bodyfile_output)
54 number_of_entries = 0
55
56 # Set things up for the FileEntryLister client. We will scan all
57 # partitions in the volume.
58 volume_scanner_options = volume_scanner.VolumeScannerOptions()
59 volume_scanner_options.partitions = self.task_config.get('partitions')
60
61 # Create the FileEntryLister client and generate the path specs
62 # for all available partitions.
63 entry_lister = file_entry_lister.FileEntryLister()
64 base_path_specs = entry_lister.GetBasePathSpecs(
65 evidence.device_path, options=volume_scanner_options)
66
67 # Iterate over all file entries and generate the output in bodyfile
68 # format.
69 try:
70 with open(bodyfile_output, 'w') as file_object:
71 for file_entry, path_segments in entry_lister.ListFileEntries(
72 base_path_specs):
73 bodyfile_entries = entry_lister.GetBodyfileEntries(
74 file_entry, path_segments)
75 for bodyfile_entry in bodyfile_entries:
76 file_object.write(bodyfile_entry)
77 file_object.write('\n')
78 number_of_entries += 1
79 output_evidence.number_of_entries = number_of_entries
80 result.add_evidence(output_evidence, evidence.config)
81 status = 'Generated file system timeline containing [{0:d}] entries'.format(
82 number_of_entries)
83 result.close(self, success=True, status=status)
84 except dfvfs_errors.ScannerError as exception:
85 result.log('Error generating bodyfile {0!s}'.format(exception))
86 status = 'Unable to generate bodyfile using provided evidence data.'
87 result.close(self, success=False, status=status)
88 raise TurbiniaException(
89 'Could not process volume: {0!s}'.format(exception))
90
91 return result
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py
--- a/turbinia/workers/file_system_timeline.py
+++ b/turbinia/workers/file_system_timeline.py
@@ -34,6 +34,7 @@
class FileSystemTimelineTask(TurbiniaTask):
+ """Task to generate file system timelines. """
REQUIRED_STATES = [state.ATTACHED]
@@ -61,31 +62,49 @@
# Create the FileEntryLister client and generate the path specs
# for all available partitions.
entry_lister = file_entry_lister.FileEntryLister()
- base_path_specs = entry_lister.GetBasePathSpecs(
- evidence.device_path, options=volume_scanner_options)
+ try:
+ base_path_specs = entry_lister.GetBasePathSpecs(
+ evidence.device_path, options=volume_scanner_options)
+ except dfvfs_errors.ScannerError as exception:
+ status = 'Unable to open evidence: {0!s}'.format(exception)
+ result.close(self, success=False, status=status)
# Iterate over all file entries and generate the output in bodyfile
# format.
try:
- with open(bodyfile_output, 'w') as file_object:
- for file_entry, path_segments in entry_lister.ListFileEntries(
- base_path_specs):
- bodyfile_entries = entry_lister.GetBodyfileEntries(
- file_entry, path_segments)
- for bodyfile_entry in bodyfile_entries:
- file_object.write(bodyfile_entry)
- file_object.write('\n')
- number_of_entries += 1
- output_evidence.number_of_entries = number_of_entries
- result.add_evidence(output_evidence, evidence.config)
- status = 'Generated file system timeline containing [{0:d}] entries'.format(
- number_of_entries)
- result.close(self, success=True, status=status)
- except dfvfs_errors.ScannerError as exception:
- result.log('Error generating bodyfile {0!s}'.format(exception))
- status = 'Unable to generate bodyfile using provided evidence data.'
+ file_entries = None
+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:
+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))
+ while file_entries:
+ try:
+ _, (file_entry, path_segments) = next(file_entries)
+ bodyfile_entries = entry_lister.GetBodyfileEntries(
+ file_entry, path_segments)
+ for bodyfile_entry in bodyfile_entries:
+ file_object.write(bodyfile_entry)
+ file_object.write('\n')
+ number_of_entries += 1
+ except StopIteration:
+ break
+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
+ dfvfs_errors.MountPointError,
+ dfvfs_errors.PathSpecError) as exception:
+ status = 'Unable to process file entry: {0!s}'.format(exception)
+ result.log(status)
+
+ if number_of_entries > 0:
+ output_evidence.number_of_entries = number_of_entries
+ result.add_evidence(output_evidence, evidence.config)
+ status = 'Generated file system timeline containing [{0:d}] entries'.format(
+ number_of_entries)
+ result.close(self, success=True, status=status)
+ else:
+ status = 'Unable to process any file entries.'
+ result.close(self, success=False, status=status)
+
+ except IOError as exception:
+ status = 'Unable to create bodyfile local output file: {0!s}'.format(
+ exception)
result.close(self, success=False, status=status)
- raise TurbiniaException(
- 'Could not process volume: {0!s}'.format(exception))
return result
|
{"golden_diff": "diff --git a/turbinia/workers/file_system_timeline.py b/turbinia/workers/file_system_timeline.py\n--- a/turbinia/workers/file_system_timeline.py\n+++ b/turbinia/workers/file_system_timeline.py\n@@ -34,6 +34,7 @@\n \n \n class FileSystemTimelineTask(TurbiniaTask):\n+ \"\"\"Task to generate file system timelines. \"\"\"\n \n REQUIRED_STATES = [state.ATTACHED]\n \n@@ -61,31 +62,49 @@\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n- base_path_specs = entry_lister.GetBasePathSpecs(\n- evidence.device_path, options=volume_scanner_options)\n+ try:\n+ base_path_specs = entry_lister.GetBasePathSpecs(\n+ evidence.device_path, options=volume_scanner_options)\n+ except dfvfs_errors.ScannerError as exception:\n+ status = 'Unable to open evidence: {0!s}'.format(exception)\n+ result.close(self, success=False, status=status)\n \n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n- with open(bodyfile_output, 'w') as file_object:\n- for file_entry, path_segments in entry_lister.ListFileEntries(\n- base_path_specs):\n- bodyfile_entries = entry_lister.GetBodyfileEntries(\n- file_entry, path_segments)\n- for bodyfile_entry in bodyfile_entries:\n- file_object.write(bodyfile_entry)\n- file_object.write('\\n')\n- number_of_entries += 1\n- output_evidence.number_of_entries = number_of_entries\n- result.add_evidence(output_evidence, evidence.config)\n- status = 'Generated file system timeline containing [{0:d}] entries'.format(\n- number_of_entries)\n- result.close(self, success=True, status=status)\n- except dfvfs_errors.ScannerError as exception:\n- result.log('Error generating bodyfile {0!s}'.format(exception))\n- status = 'Unable to generate bodyfile using provided evidence data.'\n+ file_entries = None\n+ with open(bodyfile_output, 'w', encoding='utf-8') as file_object:\n+ file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))\n+ while file_entries:\n+ try:\n+ _, (file_entry, path_segments) = next(file_entries)\n+ bodyfile_entries = entry_lister.GetBodyfileEntries(\n+ file_entry, path_segments)\n+ for bodyfile_entry in bodyfile_entries:\n+ file_object.write(bodyfile_entry)\n+ file_object.write('\\n')\n+ number_of_entries += 1\n+ except StopIteration:\n+ break\n+ except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,\n+ dfvfs_errors.MountPointError,\n+ dfvfs_errors.PathSpecError) as exception:\n+ status = 'Unable to process file entry: {0!s}'.format(exception)\n+ result.log(status)\n+\n+ if number_of_entries > 0:\n+ output_evidence.number_of_entries = number_of_entries\n+ result.add_evidence(output_evidence, evidence.config)\n+ status = 'Generated file system timeline containing [{0:d}] entries'.format(\n+ number_of_entries)\n+ result.close(self, success=True, status=status)\n+ else:\n+ status = 'Unable to process any file entries.'\n+ result.close(self, success=False, status=status)\n+\n+ except IOError as exception:\n+ status = 'Unable to create bodyfile local output file: {0!s}'.format(\n+ exception)\n result.close(self, success=False, status=status)\n- raise TurbiniaException(\n- 'Could not process volume: {0!s}'.format(exception))\n \n return result\n", "issue": "Improve exception handling in FileSystemTimelineJob\nUnder certain conditions, dfvfs may throw exceptions that are not currently being handked:\r\n\r\n```\r\ndfvfs.lib.errors.BackEndError: Unable to open file system with error: pyfsext_volume_open_file_object: 
unable to open volume. libfsext_superblock_read_data: unsupported read-only compatible features flags: 0xff000003. libfsext_superblock_read_file_io_handle: unable to read superblock at offset: 1024 (0x00000400). libfsext_internal_volume_read_block_groups: unable to read superblock: 0 at offset: 1024 (0x00000400). libfsext_internal_volume_open_read: unable to read block groups. libfsext_volume_open_file_io_handle: unable to read from file IO handle.\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to run dfimagetools FileEntryLister on disk partitions.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom turbinia import TurbiniaException\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import BodyFile\n\nif TurbiniaTask.check_worker_role():\n try:\n from dfvfs.helpers import volume_scanner\n from dfvfs.lib import errors as dfvfs_errors\n from dfimagetools import file_entry_lister\n except ImportError as exception:\n message = 'Could not import libraries: {0!s}'.format(exception)\n raise TurbiniaException(message)\n\n\nclass FileSystemTimelineTask(TurbiniaTask):\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {'partitions': ['all']}\n\n def run(self, evidence, result):\n \"\"\"Task to execute (dfimagetools) FileEntryLister.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')\n output_evidence = BodyFile(source_path=bodyfile_output)\n number_of_entries = 0\n\n # Set things up for the FileEntryLister client. 
We will scan all\n # partitions in the volume.\n volume_scanner_options = volume_scanner.VolumeScannerOptions()\n volume_scanner_options.partitions = self.task_config.get('partitions')\n\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n base_path_specs = entry_lister.GetBasePathSpecs(\n evidence.device_path, options=volume_scanner_options)\n\n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n with open(bodyfile_output, 'w') as file_object:\n for file_entry, path_segments in entry_lister.ListFileEntries(\n base_path_specs):\n bodyfile_entries = entry_lister.GetBodyfileEntries(\n file_entry, path_segments)\n for bodyfile_entry in bodyfile_entries:\n file_object.write(bodyfile_entry)\n file_object.write('\\n')\n number_of_entries += 1\n output_evidence.number_of_entries = number_of_entries\n result.add_evidence(output_evidence, evidence.config)\n status = 'Generated file system timeline containing [{0:d}] entries'.format(\n number_of_entries)\n result.close(self, success=True, status=status)\n except dfvfs_errors.ScannerError as exception:\n result.log('Error generating bodyfile {0!s}'.format(exception))\n status = 'Unable to generate bodyfile using provided evidence data.'\n result.close(self, success=False, status=status)\n raise TurbiniaException(\n 'Could not process volume: {0!s}'.format(exception))\n\n return result\n", "path": "turbinia/workers/file_system_timeline.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Task to run dfimagetools FileEntryLister on disk partitions.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport os\n\nfrom turbinia import TurbiniaException\nfrom turbinia.workers import TurbiniaTask\nfrom turbinia.evidence import EvidenceState as state\nfrom turbinia.evidence import BodyFile\n\nif TurbiniaTask.check_worker_role():\n try:\n from dfvfs.helpers import volume_scanner\n from dfvfs.lib import errors as dfvfs_errors\n from dfimagetools import file_entry_lister\n except ImportError as exception:\n message = 'Could not import libraries: {0!s}'.format(exception)\n raise TurbiniaException(message)\n\n\nclass FileSystemTimelineTask(TurbiniaTask):\n \"\"\"Task to generate file system timelines. \"\"\"\n\n REQUIRED_STATES = [state.ATTACHED]\n\n TASK_CONFIG = {'partitions': ['all']}\n\n def run(self, evidence, result):\n \"\"\"Task to execute (dfimagetools) FileEntryLister.\n\n Args:\n evidence (Evidence object): The evidence we will process.\n result (TurbiniaTaskResult): The object to place task results into.\n\n Returns:\n TurbiniaTaskResult object.\n \"\"\"\n bodyfile_output = os.path.join(self.output_dir, 'file_system.bodyfile')\n output_evidence = BodyFile(source_path=bodyfile_output)\n number_of_entries = 0\n\n # Set things up for the FileEntryLister client. 
We will scan all\n # partitions in the volume.\n volume_scanner_options = volume_scanner.VolumeScannerOptions()\n volume_scanner_options.partitions = self.task_config.get('partitions')\n\n # Create the FileEntryLister client and generate the path specs\n # for all available partitions.\n entry_lister = file_entry_lister.FileEntryLister()\n try:\n base_path_specs = entry_lister.GetBasePathSpecs(\n evidence.device_path, options=volume_scanner_options)\n except dfvfs_errors.ScannerError as exception:\n status = 'Unable to open evidence: {0!s}'.format(exception)\n result.close(self, success=False, status=status)\n\n # Iterate over all file entries and generate the output in bodyfile\n # format.\n try:\n file_entries = None\n with open(bodyfile_output, 'w', encoding='utf-8') as file_object:\n file_entries = enumerate(entry_lister.ListFileEntries(base_path_specs))\n while file_entries:\n try:\n _, (file_entry, path_segments) = next(file_entries)\n bodyfile_entries = entry_lister.GetBodyfileEntries(\n file_entry, path_segments)\n for bodyfile_entry in bodyfile_entries:\n file_object.write(bodyfile_entry)\n file_object.write('\\n')\n number_of_entries += 1\n except StopIteration:\n break\n except (dfvfs_errors.AccessError, dfvfs_errors.BackEndError,\n dfvfs_errors.MountPointError,\n dfvfs_errors.PathSpecError) as exception:\n status = 'Unable to process file entry: {0!s}'.format(exception)\n result.log(status)\n\n if number_of_entries > 0:\n output_evidence.number_of_entries = number_of_entries\n result.add_evidence(output_evidence, evidence.config)\n status = 'Generated file system timeline containing [{0:d}] entries'.format(\n number_of_entries)\n result.close(self, success=True, status=status)\n else:\n status = 'Unable to process any file entries.'\n result.close(self, success=False, status=status)\n\n except IOError as exception:\n status = 'Unable to create bodyfile local output file: {0!s}'.format(\n exception)\n result.close(self, success=False, status=status)\n\n return result\n", "path": "turbinia/workers/file_system_timeline.py"}]}
| 1,434 | 849 |
gh_patches_debug_24183
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6460
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tifffile deprecation warning on test_tifffile.py
## Description
<!--
(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)
-->
## Way to reproduce
```python
run pytest on skimage/io/tests/test_tifffile.py
```
Will show deprecation warning issues
```bash
skimage/io/tests/test_tifffile.py ................................... [100%]
=============================== warnings summary ===============================
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]
skimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]
/scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.
result = tif.write(
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/io/_plugins/tifffile_plugin.py`
Content:
```
1 __all__ = ['imread', 'imsave']
2
3 from tifffile import imwrite as imsave, imread as tifffile_imread
4
5
6 def imread(fname, **kwargs):
7 """Load a tiff image from file.
8
9 Parameters
10 ----------
11 fname : str or file
12 File name or file-like-object.
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by the tifffile library [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] https://pypi.org/project/tifffile/
25
26 """
27 if 'img_num' in kwargs:
28 kwargs['key'] = kwargs.pop('img_num')
29
30 return tifffile_imread(fname, **kwargs)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -1,6 +1,50 @@
+from tifffile import imread as tifffile_imread
+from tifffile import imwrite as tifffile_imwrite
+
__all__ = ['imread', 'imsave']
-from tifffile import imwrite as imsave, imread as tifffile_imread
+
+def imsave(fname, arr, **kwargs):
+ """Load a tiff image to file.
+
+ Parameters
+ ----------
+ fname : str or file
+ File name or file-like object.
+ arr : ndarray
+ The array to write.
+ kwargs : keyword pairs, optional
+ Additional keyword arguments to pass through (see ``tifffile``'s
+ ``imwrite`` function).
+
+ Notes
+ -----
+ Provided by the tifffile library [1]_, and supports many
+ advanced image types including multi-page and floating-point.
+
+ This implementation will set ``photometric='RGB'`` when writing if the first
+ or last axis of `arr` has length 3 or 4. To override this, explicitly
+ pass the ``photometric`` kwarg.
+
+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the
+ first axis of arr has length 3 or 4. To override this, explicitly
+ specify the ``planarconfig`` kwarg.
+
+ References
+ ----------
+ .. [1] https://pypi.org/project/tifffile/
+
+ """
+ if arr.shape[0] in [3, 4]:
+ if 'planarconfig' not in kwargs:
+ kwargs['planarconfig'] = 'SEPARATE'
+ rgb = True
+ else:
+ rgb = arr.shape[-1] in [3, 4]
+ if rgb and 'photometric' not in kwargs:
+ kwargs['photometric'] = 'RGB'
+
+ return tifffile_imwrite(fname, arr, **kwargs)
def imread(fname, **kwargs):
|
{"golden_diff": "diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,6 +1,50 @@\n+from tifffile import imread as tifffile_imread\n+from tifffile import imwrite as tifffile_imwrite\n+\n __all__ = ['imread', 'imsave']\n \n-from tifffile import imwrite as imsave, imread as tifffile_imread\n+\n+def imsave(fname, arr, **kwargs):\n+ \"\"\"Load a tiff image to file.\n+\n+ Parameters\n+ ----------\n+ fname : str or file\n+ File name or file-like object.\n+ arr : ndarray\n+ The array to write.\n+ kwargs : keyword pairs, optional\n+ Additional keyword arguments to pass through (see ``tifffile``'s\n+ ``imwrite`` function).\n+\n+ Notes\n+ -----\n+ Provided by the tifffile library [1]_, and supports many\n+ advanced image types including multi-page and floating-point.\n+\n+ This implementation will set ``photometric='RGB'`` when writing if the first\n+ or last axis of `arr` has length 3 or 4. To override this, explicitly\n+ pass the ``photometric`` kwarg.\n+\n+ This implementation will set ``planarconfig='SEPARATE'`` when writing if the\n+ first axis of arr has length 3 or 4. To override this, explicitly\n+ specify the ``planarconfig`` kwarg.\n+\n+ References\n+ ----------\n+ .. [1] https://pypi.org/project/tifffile/\n+\n+ \"\"\"\n+ if arr.shape[0] in [3, 4]:\n+ if 'planarconfig' not in kwargs:\n+ kwargs['planarconfig'] = 'SEPARATE'\n+ rgb = True\n+ else:\n+ rgb = arr.shape[-1] in [3, 4]\n+ if rgb and 'photometric' not in kwargs:\n+ kwargs['photometric'] = 'RGB'\n+\n+ return tifffile_imwrite(fname, arr, **kwargs)\n \n \n def imread(fname, **kwargs):\n", "issue": "tifffile deprecation warning on test_tifffile.py\n## Description\r\n\r\n<!--\r\n(Note: for guidance on how to use `scikit-image`, please post instead on https://forum.image.sc/tag/scikit-image)\r\n-->\r\n\r\n## Way to reproduce\r\n```python\r\nrun pytest on skimage/io/tests/test_tifffile.py\r\n```\r\nWill show deprecation warning issues\r\n```bash\r\nskimage/io/tests/test_tifffile.py ................................... [100%]\r\n\r\n=============================== warnings summary ===============================\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float32' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float32-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float32-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float32' are stored as RGB with contiguous samples. 
Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-int16-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-int16-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'int16' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape1]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape1]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 3) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[False-float64-shape2]\r\nskimage/io/tests/test_tifffile.py::TestSave::test_imsave_roundtrip[True-float64-shape2]\r\n /scikit-image/.venv/lib/python3.10/site-packages/tifffile/tifffile.py:1155: DeprecationWarning: <tifffile.TiffWriter.write> data with shape (10, 10, 4) and dtype 'float64' are stored as RGB with contiguous samples. Future versions will store such data as MINISBLACK in separate pages by default, unless the 'photometric' parameter is specified.\r\n result = tif.write(\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nfrom tifffile import imwrite as imsave, imread as tifffile_imread\n\n\ndef imread(fname, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. 
[1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n return tifffile_imread(fname, **kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}], "after_files": [{"content": "from tifffile import imread as tifffile_imread\nfrom tifffile import imwrite as tifffile_imwrite\n\n__all__ = ['imread', 'imsave']\n\n\ndef imsave(fname, arr, **kwargs):\n \"\"\"Load a tiff image to file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like object.\n arr : ndarray\n The array to write.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imwrite`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating-point.\n\n This implementation will set ``photometric='RGB'`` when writing if the first\n or last axis of `arr` has length 3 or 4. To override this, explicitly\n pass the ``photometric`` kwarg.\n\n This implementation will set ``planarconfig='SEPARATE'`` when writing if the\n first axis of arr has length 3 or 4. To override this, explicitly\n specify the ``planarconfig`` kwarg.\n\n References\n ----------\n .. [1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if arr.shape[0] in [3, 4]:\n if 'planarconfig' not in kwargs:\n kwargs['planarconfig'] = 'SEPARATE'\n rgb = True\n else:\n rgb = arr.shape[-1] in [3, 4]\n if rgb and 'photometric' not in kwargs:\n kwargs['photometric'] = 'RGB'\n\n return tifffile_imwrite(fname, arr, **kwargs)\n\n\ndef imread(fname, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by the tifffile library [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] https://pypi.org/project/tifffile/\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n return tifffile_imread(fname, **kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]}
| 1,653 | 526 |
gh_patches_debug_34329
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1417
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documents: This backend doesn't support absolute paths. (Update to 1.0b2)
## 1. Bug since Wagtail update
I recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access a document I uploaded via a `wagtaildocs.Document` field, I get the following error:
```
NotImplementedError at /documents/3/headphones.svg
This backend doesn't support absolute paths.
```
The field is specified as:
```
svg_mask = models.ForeignKey(
verbose_name=u"Mask (SVG)",
to='wagtaildocs.Document',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
```
Is this a wagtail bug or a configuration error?
## 2. Unexpected behaviour
I would like to serve the SVG as `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:
```
mimetypes.add_type("image/svg+xml", ".svg", True)
mimetypes.add_type("image/svg+xml", ".svgz", True)
```
Yet (in 1.0b1) it always got served as a stream. I couldn't test it in 1.0b2 yet, because of the above problem.
Thank you in advance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtaildocs/views/serve.py`
Content:
```
1 from django.shortcuts import get_object_or_404
2 from django.conf import settings
3
4 from wagtail.utils.sendfile import sendfile
5 from wagtail.utils import sendfile_streaming_backend
6
7 from wagtail.wagtaildocs.models import Document, document_served
8
9
10 def serve(request, document_id, document_filename):
11 doc = get_object_or_404(Document, id=document_id)
12
13 # Send document_served signal
14 document_served.send(sender=Document, instance=doc, request=request)
15
16 if hasattr(settings, 'SENDFILE_BACKEND'):
17 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
18 else:
19 # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
20 return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py
--- a/wagtail/wagtaildocs/views/serve.py
+++ b/wagtail/wagtaildocs/views/serve.py
@@ -1,5 +1,9 @@
from django.shortcuts import get_object_or_404
from django.conf import settings
+from django.http import StreamingHttpResponse, BadHeaderError
+
+from unidecode import unidecode
+from wsgiref.util import FileWrapper
from wagtail.utils.sendfile import sendfile
from wagtail.utils import sendfile_streaming_backend
@@ -13,8 +17,40 @@
# Send document_served signal
document_served.send(sender=Document, instance=doc, request=request)
- if hasattr(settings, 'SENDFILE_BACKEND'):
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)
+ try:
+ local_path = doc.file.path
+ except NotImplementedError:
+ local_path = None
+
+ if local_path:
+
+ # Use wagtail.utils.sendfile to serve the file;
+ # this provides support for mimetypes, if-modified-since and django-sendfile backends
+
+ if hasattr(settings, 'SENDFILE_BACKEND'):
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)
+ else:
+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
else:
- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND
- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)
+
+ # We are using a storage backend which does not expose filesystem paths
+ # (e.g. storages.backends.s3boto.S3BotoStorage).
+ # Fall back on pre-sendfile behaviour of reading the file content and serving it
+ # as a StreamingHttpResponse
+
+ wrapper = FileWrapper(doc.file)
+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')
+
+ try:
+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename
+ except BadHeaderError:
+ # Unicode filenames can fail on Django <1.8, Python 2 due to
+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name
+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)
+
+ # FIXME: storage backends are not guaranteed to implement 'size'
+ response['Content-Length'] = doc.file.size
+
+ return response
|
{"golden_diff": "diff --git a/wagtail/wagtaildocs/views/serve.py b/wagtail/wagtaildocs/views/serve.py\n--- a/wagtail/wagtaildocs/views/serve.py\n+++ b/wagtail/wagtaildocs/views/serve.py\n@@ -1,5 +1,9 @@\n from django.shortcuts import get_object_or_404\n from django.conf import settings\n+from django.http import StreamingHttpResponse, BadHeaderError\n+\n+from unidecode import unidecode\n+from wsgiref.util import FileWrapper\n \n from wagtail.utils.sendfile import sendfile\n from wagtail.utils import sendfile_streaming_backend\n@@ -13,8 +17,40 @@\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n \n- if hasattr(settings, 'SENDFILE_BACKEND'):\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n+ try:\n+ local_path = doc.file.path\n+ except NotImplementedError:\n+ local_path = None\n+\n+ if local_path:\n+\n+ # Use wagtail.utils.sendfile to serve the file;\n+ # this provides support for mimetypes, if-modified-since and django-sendfile backends\n+\n+ if hasattr(settings, 'SENDFILE_BACKEND'):\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)\n+ else:\n+ # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n+ return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n else:\n- # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n- return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n+\n+ # We are using a storage backend which does not expose filesystem paths\n+ # (e.g. storages.backends.s3boto.S3BotoStorage).\n+ # Fall back on pre-sendfile behaviour of reading the file content and serving it\n+ # as a StreamingHttpResponse\n+\n+ wrapper = FileWrapper(doc.file)\n+ response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')\n+\n+ try:\n+ response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename\n+ except BadHeaderError:\n+ # Unicode filenames can fail on Django <1.8, Python 2 due to\n+ # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name\n+ response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)\n+\n+ # FIXME: storage backends are not guaranteed to implement 'size'\n+ response['Content-Length'] = doc.file.size\n+\n+ return response\n", "issue": "Documents: This backend doesn't support absolute paths. (Update to 1.0b2)\n## 1. Bug since Wagtail update\n\nI recently upgraded to 1.0b2 (from 1.0b1) and now, when I try to access I document I uploaded via a `wagtaildocs.Document` field, I get the following error:\n\n```\nNotImplementedError at /documents/3/headphones.svg\nThis backend doesn't support absolute paths.\n```\n\nThe field is specified as:\n\n```\nsvg_mask = models.ForeignKey(\n verbose_name=u\"Mask (SVG)\",\n to='wagtaildocs.Document',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n)\n```\n\nIs this a wagtail bug or a configuration error?\n## 2. Unexpected behaviour\n\nI would like to serve the svg as a `image/svg+xml`, not as a stream (which was the case in 1.0b1). I set the mimetype in my settings as follows:\n\n```\nmimetypes.add_type(\"image/svg+xml\", \".svg\", True)\nmimetypes.add_type(\"image/svg+xml\", \".svgz\", True)\n```\n\nYet (in 1.0b1) it always got served as a stream. 
I couldn't test it in 1.0b2 yet, because of the above problem.\n\nThank you in advance.\n\n", "before_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom django.conf import settings\n\nfrom wagtail.utils.sendfile import sendfile\nfrom wagtail.utils import sendfile_streaming_backend\n\nfrom wagtail.wagtaildocs.models import Document, document_served\n\n\ndef serve(request, document_id, document_filename):\n doc = get_object_or_404(Document, id=document_id)\n\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n\n if hasattr(settings, 'SENDFILE_BACKEND'):\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename)\n else:\n # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n return sendfile(request, doc.file.path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n", "path": "wagtail/wagtaildocs/views/serve.py"}], "after_files": [{"content": "from django.shortcuts import get_object_or_404\nfrom django.conf import settings\nfrom django.http import StreamingHttpResponse, BadHeaderError\n\nfrom unidecode import unidecode\nfrom wsgiref.util import FileWrapper\n\nfrom wagtail.utils.sendfile import sendfile\nfrom wagtail.utils import sendfile_streaming_backend\n\nfrom wagtail.wagtaildocs.models import Document, document_served\n\n\ndef serve(request, document_id, document_filename):\n doc = get_object_or_404(Document, id=document_id)\n\n # Send document_served signal\n document_served.send(sender=Document, instance=doc, request=request)\n\n try:\n local_path = doc.file.path\n except NotImplementedError:\n local_path = None\n\n if local_path:\n\n # Use wagtail.utils.sendfile to serve the file;\n # this provides support for mimetypes, if-modified-since and django-sendfile backends\n\n if hasattr(settings, 'SENDFILE_BACKEND'):\n return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename)\n else:\n # Fallback to streaming backend if user hasn't specified SENDFILE_BACKEND\n return sendfile(request, local_path, attachment=True, attachment_filename=doc.filename, backend=sendfile_streaming_backend.sendfile)\n\n else:\n\n # We are using a storage backend which does not expose filesystem paths\n # (e.g. storages.backends.s3boto.S3BotoStorage).\n # Fall back on pre-sendfile behaviour of reading the file content and serving it\n # as a StreamingHttpResponse\n\n wrapper = FileWrapper(doc.file)\n response = StreamingHttpResponse(wrapper, content_type='application/octet-stream')\n\n try:\n response['Content-Disposition'] = 'attachment; filename=%s' % doc.filename\n except BadHeaderError:\n # Unicode filenames can fail on Django <1.8, Python 2 due to\n # https://code.djangoproject.com/ticket/20889 - try with an ASCIIfied version of the name\n response['Content-Disposition'] = 'attachment; filename=%s' % unidecode(doc.filename)\n\n # FIXME: storage backends are not guaranteed to implement 'size'\n response['Content-Length'] = doc.file.size\n\n return response\n", "path": "wagtail/wagtaildocs/views/serve.py"}]}
| 780 | 637 |
gh_patches_debug_5789
|
rasdani/github-patches
|
git_diff
|
weni-ai__bothub-engine-145
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Just email console backend in development mode
When EMAIL_HOST is set and DEBUG is True, email still goes to the console
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/settings.py`
Content:
```
1 import os
2 import dj_database_url
3
4 from decouple import config
5
6
7 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = config('SECRET_KEY')
13
14 # SECURITY WARNING: don't run with debug turned on in production!
15 DEBUG = config('DEBUG', default=False, cast=bool)
16
17 ALLOWED_HOSTS = config(
18 'ALLOWED_HOSTS',
19 default='*',
20 cast=lambda v: [s.strip() for s in v.split(',')])
21
22
23 # Application definition
24
25 INSTALLED_APPS = [
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 'django.contrib.staticfiles',
32 'rest_framework',
33 'rest_framework.authtoken',
34 'django_filters',
35 'corsheaders',
36 'bothub.authentication',
37 'bothub.common',
38 'bothub.api',
39 ]
40
41 MIDDLEWARE = [
42 'django.middleware.security.SecurityMiddleware',
43 'whitenoise.middleware.WhiteNoiseMiddleware',
44 'django.contrib.sessions.middleware.SessionMiddleware',
45 'corsheaders.middleware.CorsMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'bothub.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.template.context_processors.debug',
63 'django.template.context_processors.request',
64 'django.contrib.auth.context_processors.auth',
65 'django.contrib.messages.context_processors.messages',
66 ],
67 },
68 },
69 ]
70
71 WSGI_APPLICATION = 'bothub.wsgi.application'
72
73
74 # Database
75
76 DATABASES = {}
77 DATABASES['default'] = dj_database_url.parse(
78 config(
79 'DEFAULT_DATABASE',
80 default='sqlite:///db.sqlite3'))
81
82
83 # Auth
84
85 AUTH_USER_MODEL = 'authentication.User'
86
87
88 # Password validation
89
90 AUTH_PASSWORD_VALIDATORS = [
91 {
92 'NAME': 'django.contrib.auth.password_validation.' +
93 'UserAttributeSimilarityValidator',
94 },
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'MinimumLengthValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'NumericPasswordValidator',
106 },
107 ]
108
109
110 # Internationalization
111
112 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
113
114 TIME_ZONE = config('TIME_ZONE', default='UTC')
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122
123 # Static files (CSS, JavaScript, Images)
124
125 STATIC_URL = '/static/'
126
127 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131
132 # rest framework
133
134 REST_FRAMEWORK = {
135 'DEFAULT_AUTHENTICATION_CLASSES': [
136 'rest_framework.authentication.TokenAuthentication',
137 ],
138 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
139 'LimitOffsetPagination',
140 'PAGE_SIZE': 20,
141 'DEFAULT_FILTER_BACKENDS': [
142 'django_filters.rest_framework.DjangoFilterBackend',
143 ],
144 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
145 }
146
147
148 # cors headers
149
150 CORS_ORIGIN_ALLOW_ALL = True
151 CORS_URLS_REGEX = r'^/api/.*$'
152
153
154 # mail
155
156 envvar_EMAIL_HOST = config('EMAIL_HOST', default=None)
157
158 ADMINS = config(
159 'ADMINS',
160 default='',
161 cast=lambda v: [
162 (
163 s.strip().split('|')[0],
164 s.strip().split('|')[1],
165 ) for s in v.split(',')] if v else [])
166 EMAIL_SUBJECT_PREFIX = '[bothub] '
167 DEFAULT_FROM_EMAIL = config(
168 'DEFAULT_FROM_EMAIL',
169 default='webmaster@localhost')
170 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
171
172 if not DEBUG and envvar_EMAIL_HOST:
173 EMAIL_HOST = envvar_EMAIL_HOST
174 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
175 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
176 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
177 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
178 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
179 else:
180 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
181
182
183 # webapp
184
185 BOTHUB_WEBAPP_BASE_URL = config(
186 'BOTHUB_WEBAPP_BASE_URL',
187 default='http://localhost:8080/')
188
189
190 # NLP
191
192 BOTHUB_NLP_BASE_URL = config(
193 'BOTHUB_NLP_BASE_URL',
194 default='http://localhost:8001/')
195
196
197 # CSRF
198
199 CSRF_COOKIE_DOMAIN = config(
200 'CSRF_COOKIE_DOMAIN',
201 default=None)
202
203 CSRF_COOKIE_SECURE = config(
204 'CSRF_COOKIE_SECURE',
205 default=False,
206 cast=bool)
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -169,7 +169,7 @@
default='webmaster@localhost')
SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
-if not DEBUG and envvar_EMAIL_HOST:
+if envvar_EMAIL_HOST:
EMAIL_HOST = envvar_EMAIL_HOST
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
|
{"golden_diff": "diff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -169,7 +169,7 @@\n default='webmaster@localhost')\n SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n \n-if not DEBUG and envvar_EMAIL_HOST:\n+if envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n", "issue": "Just email console backend in development mode\nWhen EMAIL_HOST is setted and DEBUG is True email continue on console\n", "before_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif not DEBUG and envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}], "after_files": [{"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}]}
| 1,958 | 124 |
gh_patches_debug_4929
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-1283
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add search by github handle to API persons endpoint
Would it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/filters.py`
Content:
```
1 from django_filters import rest_framework as filters
2
3 from workshops.filters import AMYFilterSet
4 from workshops.models import Event, Task, Tag, Person, Badge
5
6
7 def filter_tag_by_name(queryset, name, values):
8 tags = Tag.objects.filter(name__in=values)
9 for tag in tags:
10 queryset = queryset.filter(tags=tag)
11 return queryset
12
13
14 class EventFilter(filters.FilterSet):
15 start_after = filters.DateFilter(name='start', lookup_expr='gte')
16 start_before = filters.DateFilter(name='start', lookup_expr='lte')
17 end_after = filters.DateFilter(name='end', lookup_expr='gte')
18 end_before = filters.DateFilter(name='end', lookup_expr='lte')
19 TAG_CHOICES = Tag.objects.all().values_list('name', 'name')
20 tag = filters.MultipleChoiceFilter(
21 choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,
22 )
23
24 class Meta:
25 model = Event
26 fields = (
27 'completed', 'tag',
28 'start', 'start_before', 'start_after',
29 'end', 'end_before', 'end_after',
30 )
31 order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')
32
33
34 class TaskFilter(filters.FilterSet):
35 role = filters.CharFilter(name='role__name')
36
37 class Meta:
38 model = Task
39 fields = (
40 'role',
41 )
42
43
44 def filter_instructors(queryset, name, value):
45 instructor_badges = Badge.objects.instructor_badges()
46 if value is True:
47 return queryset.filter(badges__in=instructor_badges)
48 elif value is False:
49 return queryset.exclude(badges__in=instructor_badges)
50 else:
51 return queryset
52
53
54 class PersonFilter(filters.FilterSet):
55 is_instructor = filters.BooleanFilter(method=filter_instructors,
56 label='Is instructor?')
57
58 class Meta:
59 model = Person
60 fields = (
61 'badges', 'username', 'personal', 'middle', 'family', 'email',
62 'may_contact', 'publish_profile',
63 )
64 order_by = (
65 'lastname', '-lastname', 'firstname', '-firstname', 'email',
66 '-email',
67 )
68
69 def get_order_by(self, order_value):
70 if order_value == 'firstname':
71 return ['personal', 'middle', 'family']
72 elif order_value == '-firstname':
73 return ['-personal', '-middle', '-family']
74 elif order_value == 'lastname':
75 return ['family', 'middle', 'personal']
76 elif order_value == '-lastname':
77 return ['-family', '-middle', '-personal']
78 return super().get_order_by(order_value)
79
80
81 class InstructorsOverTimeFilter(AMYFilterSet):
82 badges = filters.ModelMultipleChoiceFilter(
83 queryset=Badge.objects.instructor_badges(),
84 label='Badges',
85 lookup_expr='in',
86 )
87
88 class Meta:
89 model = Person
90 fields = [
91 'badges',
92 ]
93
94
95 class WorkshopsOverTimeFilter(AMYFilterSet):
96 tags = filters.ModelMultipleChoiceFilter(
97 queryset=Tag.objects.all(),
98 label='Events with at least one of the following tags:',
99 )
100
101 class Meta:
102 model = Event
103 fields = [
104 'tags',
105 ]
106
107
108 class LearnersOverTimeFilter(AMYFilterSet):
109 tags = filters.ModelMultipleChoiceFilter(
110 queryset=Tag.objects.all(),
111 label='Events with all the following tags:',
112 conjoined=True,
113 )
114
115 class Meta:
116 model = Event
117 fields = [
118 'tags',
119 ]
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/filters.py b/api/filters.py
--- a/api/filters.py
+++ b/api/filters.py
@@ -59,7 +59,7 @@
model = Person
fields = (
'badges', 'username', 'personal', 'middle', 'family', 'email',
- 'may_contact', 'publish_profile',
+ 'may_contact', 'publish_profile', 'github',
)
order_by = (
'lastname', '-lastname', 'firstname', '-firstname', 'email',
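For illustration only, a minimal sketch of what listing `github` in `PersonFilter.Meta.fields` buys (django-filter generates an exact-match filter for it; the handle and the direct FilterSet usage below are illustrative, not taken from the AMY test suite):

```python
# Hedged sketch: exercising the filter class directly rather than through the API view.
from api.filters import PersonFilter
from workshops.models import Person

queryset = Person.objects.all()
filtered = PersonFilter({"github": "fmichonneau"}, queryset=queryset).qs

# Equivalent to Person.objects.filter(github="fmichonneau"), which is what
# /api/v1/persons/?github=fmichonneau now narrows results down to.
print(filtered.count())
```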
|
{"golden_diff": "diff --git a/api/filters.py b/api/filters.py\n--- a/api/filters.py\n+++ b/api/filters.py\n@@ -59,7 +59,7 @@\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n- 'may_contact', 'publish_profile',\n+ 'may_contact', 'publish_profile', 'github',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n", "issue": "Add search by github handle to API persons endpoint\nWould it be possible to add the functionality to search for people using their github handle through the API? i.e. `https://amy.software-carpentry.org/api/v1/persons/?github=fmichonneau`\n", "before_files": [{"content": "from django_filters import rest_framework as filters\n\nfrom workshops.filters import AMYFilterSet\nfrom workshops.models import Event, Task, Tag, Person, Badge\n\n\ndef filter_tag_by_name(queryset, name, values):\n tags = Tag.objects.filter(name__in=values)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n return queryset\n\n\nclass EventFilter(filters.FilterSet):\n start_after = filters.DateFilter(name='start', lookup_expr='gte')\n start_before = filters.DateFilter(name='start', lookup_expr='lte')\n end_after = filters.DateFilter(name='end', lookup_expr='gte')\n end_before = filters.DateFilter(name='end', lookup_expr='lte')\n TAG_CHOICES = Tag.objects.all().values_list('name', 'name')\n tag = filters.MultipleChoiceFilter(\n choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,\n )\n\n class Meta:\n model = Event\n fields = (\n 'completed', 'tag',\n 'start', 'start_before', 'start_after',\n 'end', 'end_before', 'end_after',\n )\n order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')\n\n\nclass TaskFilter(filters.FilterSet):\n role = filters.CharFilter(name='role__name')\n\n class Meta:\n model = Task\n fields = (\n 'role',\n )\n\n\ndef filter_instructors(queryset, name, value):\n instructor_badges = Badge.objects.instructor_badges()\n if value is True:\n return queryset.filter(badges__in=instructor_badges)\n elif value is False:\n return queryset.exclude(badges__in=instructor_badges)\n else:\n return queryset\n\n\nclass PersonFilter(filters.FilterSet):\n is_instructor = filters.BooleanFilter(method=filter_instructors,\n label='Is instructor?')\n\n class Meta:\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n 'may_contact', 'publish_profile',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n '-email',\n )\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass InstructorsOverTimeFilter(AMYFilterSet):\n badges = filters.ModelMultipleChoiceFilter(\n queryset=Badge.objects.instructor_badges(),\n label='Badges',\n lookup_expr='in',\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n\n\nclass WorkshopsOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with at least one of the following tags:',\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n\n\nclass LearnersOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with all 
the following tags:',\n conjoined=True,\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n", "path": "api/filters.py"}], "after_files": [{"content": "from django_filters import rest_framework as filters\n\nfrom workshops.filters import AMYFilterSet\nfrom workshops.models import Event, Task, Tag, Person, Badge\n\n\ndef filter_tag_by_name(queryset, name, values):\n tags = Tag.objects.filter(name__in=values)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n return queryset\n\n\nclass EventFilter(filters.FilterSet):\n start_after = filters.DateFilter(name='start', lookup_expr='gte')\n start_before = filters.DateFilter(name='start', lookup_expr='lte')\n end_after = filters.DateFilter(name='end', lookup_expr='gte')\n end_before = filters.DateFilter(name='end', lookup_expr='lte')\n TAG_CHOICES = Tag.objects.all().values_list('name', 'name')\n tag = filters.MultipleChoiceFilter(\n choices=TAG_CHOICES, name='tags', method=filter_tag_by_name,\n )\n\n class Meta:\n model = Event\n fields = (\n 'completed', 'tag',\n 'start', 'start_before', 'start_after',\n 'end', 'end_before', 'end_after',\n )\n order_by = ('-slug', 'slug', 'start', '-start', 'end', '-end')\n\n\nclass TaskFilter(filters.FilterSet):\n role = filters.CharFilter(name='role__name')\n\n class Meta:\n model = Task\n fields = (\n 'role',\n )\n\n\ndef filter_instructors(queryset, name, value):\n instructor_badges = Badge.objects.instructor_badges()\n if value is True:\n return queryset.filter(badges__in=instructor_badges)\n elif value is False:\n return queryset.exclude(badges__in=instructor_badges)\n else:\n return queryset\n\n\nclass PersonFilter(filters.FilterSet):\n is_instructor = filters.BooleanFilter(method=filter_instructors,\n label='Is instructor?')\n\n class Meta:\n model = Person\n fields = (\n 'badges', 'username', 'personal', 'middle', 'family', 'email',\n 'may_contact', 'publish_profile', 'github',\n )\n order_by = (\n 'lastname', '-lastname', 'firstname', '-firstname', 'email',\n '-email',\n )\n\n def get_order_by(self, order_value):\n if order_value == 'firstname':\n return ['personal', 'middle', 'family']\n elif order_value == '-firstname':\n return ['-personal', '-middle', '-family']\n elif order_value == 'lastname':\n return ['family', 'middle', 'personal']\n elif order_value == '-lastname':\n return ['-family', '-middle', '-personal']\n return super().get_order_by(order_value)\n\n\nclass InstructorsOverTimeFilter(AMYFilterSet):\n badges = filters.ModelMultipleChoiceFilter(\n queryset=Badge.objects.instructor_badges(),\n label='Badges',\n lookup_expr='in',\n )\n\n class Meta:\n model = Person\n fields = [\n 'badges',\n ]\n\n\nclass WorkshopsOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with at least one of the following tags:',\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n\n\nclass LearnersOverTimeFilter(AMYFilterSet):\n tags = filters.ModelMultipleChoiceFilter(\n queryset=Tag.objects.all(),\n label='Events with all the following tags:',\n conjoined=True,\n )\n\n class Meta:\n model = Event\n fields = [\n 'tags',\n ]\n", "path": "api/filters.py"}]}
| 1,340 | 115 |
gh_patches_debug_14474
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__aimmo-512
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make logout button on AI:MMO
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/urls.py`
Content:
```
1 from django.conf.urls import url
2 from django.contrib.auth import views as auth_views
3 from django.contrib.auth.decorators import login_required
4 from django.views.generic import TemplateView
5 from django.views.generic import RedirectView
6
7 from players import views
8
9 urlpatterns = [
10 url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
11
12 url(r'^accounts/login/$', auth_views.login),
13
14 url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
15 url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
16 url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),
17 url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),
18 url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),
19 url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),
20
21 url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),
22 url(r'^api/games/$', views.list_games, name='aimmo/games'),
23 url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),
24 url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),
25 url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),
26
27 url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls
28 url(r'^games/new/$', views.add_game, name='aimmo/new_game'),
29
30 # TODO: this is a quickfix for redirecting for the Unity resources
31 url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',
32 RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),
33
34 url(r'^socket.io/socket.io.js',
35 RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),
36 ]
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/players/urls.py b/players/urls.py
--- a/players/urls.py
+++ b/players/urls.py
@@ -10,6 +10,8 @@
url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),
url(r'^accounts/login/$', auth_views.login),
+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),
+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),
url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),
url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),
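For illustration, a small sketch of how the two new named routes can be used once this URLconf is active (assumes the patterns are included under the project's `ROOT_URLCONF`; on the Django versions of that era `reverse` may need to be imported from `django.core.urlresolvers` instead of `django.urls`):

```python
# Hedged sketch: resolving the new logout endpoints by name.
from django.urls import reverse

logout_url = reverse('aimmo/logout')            # -> /accounts/logout/
success_url = reverse('aimmo/logout_success')   # -> /accounts/logout_success/

# A logout button in a template could then point at the named route:
#   <a href="{% url 'aimmo/logout' %}">Log out</a>
```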
|
{"golden_diff": "diff --git a/players/urls.py b/players/urls.py\n--- a/players/urls.py\n+++ b/players/urls.py\n@@ -10,6 +10,8 @@\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n \n url(r'^accounts/login/$', auth_views.login),\n+ url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),\n+ url(r'^accounts/logout_success/$', TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),\n \n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n", "issue": "Make logout button on AI:MMO\n\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom django.views.generic import RedirectView\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^accounts/login/$', auth_views.login),\n\n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),\n url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),\n\n url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.list_games, name='aimmo/games'),\n url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),\n url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),\n url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n url(r'^games/new/$', views.add_game, name='aimmo/new_game'),\n\n # TODO: this is a quickfix for redirecting for the Unity resources\n url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',\n RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),\n\n url(r'^socket.io/socket.io.js',\n RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),\n]\n", "path": "players/urls.py"}], "after_files": [{"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import TemplateView\nfrom django.views.generic import RedirectView\n\nfrom players import views\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='players/home.html'), name='aimmo/home'),\n\n url(r'^accounts/login/$', auth_views.login),\n url(r'^accounts/logout/$', auth_views.logout, {'next_page' : 'aimmo/logout_success'}, name='aimmo/logout'),\n url(r'^accounts/logout_success/$', 
TemplateView.as_view(template_name='registration/success_logout.html'), name='aimmo/logout_success'),\n\n url(r'^program/(?P<id>[0-9]+)/$', login_required(views.ProgramView.as_view()), name='aimmo/program'),\n url(r'^program_level/(?P<num>[0-9]+)/$', login_required(views.program_level), name='aimmo/program_level'),\n url(r'^watch/(?P<id>[0-9]+)/$', login_required(views.watch_game), name='aimmo/watch'),\n url(r'^watch_level/(?P<num>[0-9]+)/$', login_required(views.watch_level), name='aimmo/watch_level'),\n url(r'^statistics/$', TemplateView.as_view(template_name='players/statistics.html'), name='aimmo/statistics'),\n url(r'^game_ide/$', TemplateView.as_view(template_name='players/game_ide.html'), name='aimmo/game_ide'),\n\n url(r'^api/code/(?P<id>[0-9]+)/$', views.code, name='aimmo/code'),\n url(r'^api/games/$', views.list_games, name='aimmo/games'),\n url(r'^api/games/(?P<id>[0-9]+)/$', views.get_game, name='aimmo/game_details'),\n url(r'^api/games/(?P<id>[0-9]+)/complete/$', views.mark_game_complete, name='aimmo/complete_game'),\n url(r'^api/games/(?P<game_id>[0-9]+)/current_avatar/$', views.current_avatar_in_game, name='aimmo/current_avatar_in_game'),\n\n url(r'^jsreverse/$', 'django_js_reverse.views.urls_js', name='aimmo/js_reverse'), # TODO: Pull request to make django_js_reverse.urls\n url(r'^games/new/$', views.add_game, name='aimmo/new_game'),\n\n # TODO: this is a quickfix for redirecting for the Unity resources\n url(r'^watch/(?P<id>[0-9]+)/(?P<resource>.[0-9A-Za-z/.]+)$',\n RedirectView.as_view(url='/static/unity/%(resource)s', permanent=False)),\n\n url(r'^socket.io/socket.io.js',\n RedirectView.as_view(url='https://cdnjs.cloudflare.com/ajax/libs/socket.io/1.7.4/socket.io.min.js', permanent=False)),\n]\n", "path": "players/urls.py"}]}
| 887 | 197 |
gh_patches_debug_173
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-2038
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parsl v1.1.0 Release Checklist
## Checklist
Please edit the checklist if I've missed any items.
### Documentation updates :
- [x] Update docs to point at 1.1.0 as the latest
- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.
- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]
- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes
- [x] Add `Beta` tags to components/features that are not yet stable.
### Testing :
- [ ] All testing should be green on Travis
- [x] Update all configs in `parsl/parsl/configs` to match current best practices
- [x] Update all test configs in `parsl/parsl/test/configs`
- [x] Test notebooks/tutorials and basic tests on a Mac
- [ ] Post news update on the website about release
- [x] Site testing:
- [x] Bridges2(PSC) [YY]
- [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse
- [x] Cori (NERSC) [YY/Yadu]
- [x] Stampede2 (TACC) [Yadu]
- [ ] ~~Frontera (TACC)~~ [Yadu, no access]
- [x] Theta (ALCF) [YY]
- [x] Bluewaters (NCSA) [ZZ]
- [x] Summit (ORNL) [Yadu]
- [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]
- [x] Midway (RCC, UChicago) [YY]
- [x] Open Science Grid
- [x] AWS
- [x] Kubernetes [ZZ]
- [x] NSCC Singapore [ZZ]
- [ ] Ad-Hoc clusters [YY]
### Release Tagging and pushing to PyPI
I'll make an updated alpha to smoothen installation and site testing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/version.py`
Content:
```
1 """Set module version.
2
3 <Major>.<Minor>.<maintenance>[alpha/beta/..]
4 Alphas will be numbered like this -> 0.4.0a0
5 """
6 VERSION = '1.1.0a1'
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/version.py b/parsl/version.py
--- a/parsl/version.py
+++ b/parsl/version.py
@@ -3,4 +3,4 @@
<Major>.<Minor>.<maintenance>[alpha/beta/..]
Alphas will be numbered like this -> 0.4.0a0
"""
-VERSION = '1.1.0a1'
+VERSION = '1.1.0'
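As a quick sanity check of the bump, using only what `parsl/version.py` itself defines:

```python
# Hedged sketch: confirm the release version string is in place.
from parsl.version import VERSION

assert VERSION == '1.1.0'
```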
|
{"golden_diff": "diff --git a/parsl/version.py b/parsl/version.py\n--- a/parsl/version.py\n+++ b/parsl/version.py\n@@ -3,4 +3,4 @@\n <Major>.<Minor>.<maintenance>[alpha/beta/..]\n Alphas will be numbered like this -> 0.4.0a0\n \"\"\"\n-VERSION = '1.1.0a1'\n+VERSION = '1.1.0'\n", "issue": "Parsl v1.1.0 Release Checklist\n## Checklist\r\n\r\nPlease edit the checklist if I've missed any items. \r\n\r\n### Documentation updates :\r\n\r\n- [x] Update docs to point at 1.1.0 as the latest\r\n- [x] Make sure docs are not broken on readthedocs, since a broken doc build will stick on as stable till next release.\r\n- [x] Update changelog with summary of changes since 0.9.0 [@benclifford to take a crack at this]\r\n- [ ] Update Parsl tutorial repo with a 1.1.0 branch that folds in changes \r\n- [x] Add `Beta` tags to components/features that are not yet stable.\r\n\r\n\r\n### Testing :\r\n\r\n- [ ] All testing should be green on Travis\r\n- [x] Update all configs in `parsl/parsl/configs` to match current best practices\r\n- [x] Update all test configs in `parsl/parsl/test/configs`\r\n- [x] Test notebooks/tutorials and basic tests on a Mac\r\n- [ ] Post news update on the website about release\r\n\r\n- [x] Site testing:\r\n - [x] Bridges2(PSC) [YY]\r\n - [ ] ~~Comet (SDSC)~~ Machine is getting replaced by Expanse\r\n - [x] Cori (NERSC) [YY/Yadu]\r\n - [x] Stampede2 (TACC) [Yadu]\r\n - [ ] ~~Frontera (TACC)~~ [Yadu, no access]\r\n - [x] Theta (ALCF) [YY]\r\n - [x] Bluewaters (NCSA) [ZZ]\r\n - [x] Summit (ORNL) [Yadu]\r\n - [ ] ~~CC-IN2P3 (French Grid)~~ [Yadu]\r\n - [x] Midway (RCC, UChicago) [YY]\r\n - [x] Open Science Grid\r\n - [x] AWS\r\n - [x] Kubernetes [ZZ]\r\n - [x] NSCC Singapore [ZZ]\r\n - [ ] Ad-Hoc clusters [YY]\r\n\r\n### Release Tagging and pushing to PyPI\r\n\r\nI'll make an updated alpha to smoothen installation and site testing.\r\n\n", "before_files": [{"content": "\"\"\"Set module version.\n\n<Major>.<Minor>.<maintenance>[alpha/beta/..]\nAlphas will be numbered like this -> 0.4.0a0\n\"\"\"\nVERSION = '1.1.0a1'\n", "path": "parsl/version.py"}], "after_files": [{"content": "\"\"\"Set module version.\n\n<Major>.<Minor>.<maintenance>[alpha/beta/..]\nAlphas will be numbered like this -> 0.4.0a0\n\"\"\"\nVERSION = '1.1.0'\n", "path": "parsl/version.py"}]}
| 799 | 97 |
gh_patches_debug_6931
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-28775
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve integrations documentation in help center
We should:
- Rename /help/bots-and-integrations to "Bots overview" everywhere (sidebar, page title, page URL).
- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled "Integration overview"
- Cross-link as appropriate, both in related articles and in the content of the pages.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/url_redirects.py`
Content:
```
1 from dataclasses import dataclass
2 from typing import List
3
4
5 @dataclass
6 class URLRedirect:
7 old_url: str
8 new_url: str
9
10
11 API_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
12 # Add URL redirects for REST API documentation here:
13 URLRedirect("/api/delete-stream", "/api/archive-stream"),
14 ]
15
16 POLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
17 # Add URL redirects for policy documentation here:
18 URLRedirect("/privacy/", "/policies/privacy"),
19 URLRedirect("/terms/", "/policies/terms"),
20 ]
21
22 HELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [
23 # Add URL redirects for help center documentation here:
24 URLRedirect("/help/pm-mention-alert-notifications", "/help/dm-mention-alert-notifications"),
25 URLRedirect("/help/restrict-private-messages", "/help/restrict-direct-messages"),
26 URLRedirect("/help/reading-pms", "/help/reading-dms"),
27 URLRedirect("/help/private-messages", "/help/direct-messages"),
28 URLRedirect("/help/configure-who-can-edit-topics", "/help/restrict-moving-messages"),
29 URLRedirect(
30 "/help/configure-message-editing-and-deletion",
31 "/help/restrict-message-editing-and-deletion",
32 ),
33 URLRedirect("/help/restrict-visibility-of-email-addresses", "/help/configure-email-visibility"),
34 URLRedirect("/help/change-default-view", "/help/configure-default-view"),
35 URLRedirect("/help/recent-topics", "/help/recent-conversations"),
36 URLRedirect(
37 "/help/add-custom-profile-fields",
38 "/help/custom-profile-fields",
39 ),
40 URLRedirect(
41 "/help/enable-enter-to-send",
42 "/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message",
43 ),
44 URLRedirect(
45 "/help/change-the-default-language-for-your-organization",
46 "/help/configure-organization-language",
47 ),
48 URLRedirect("/help/delete-a-stream", "/help/archive-a-stream"),
49 URLRedirect("/help/change-the-topic-of-a-message", "/help/rename-a-topic"),
50 URLRedirect("/help/configure-missed-message-emails", "/help/email-notifications"),
51 URLRedirect("/help/add-an-alert-word", "/help/dm-mention-alert-notifications#alert-words"),
52 URLRedirect("/help/test-mobile-notifications", "/help/mobile-notifications"),
53 URLRedirect(
54 "/help/troubleshooting-desktop-notifications",
55 "/help/desktop-notifications#troubleshooting-desktop-notifications",
56 ),
57 URLRedirect(
58 "/help/change-notification-sound", "/help/desktop-notifications#change-notification-sound"
59 ),
60 URLRedirect("/help/configure-message-notification-emails", "/help/email-notifications"),
61 URLRedirect("/help/disable-new-login-emails", "/help/email-notifications#new-login-emails"),
62 # The `help/about-streams-and-topics` redirect is particularly important,
63 # because the old URL appears in links from Welcome Bot messages.
64 URLRedirect("/help/about-streams-and-topics", "/help/streams-and-topics"),
65 URLRedirect("/help/community-topic-edits", "/help/restrict-moving-messages"),
66 URLRedirect(
67 "/help/only-allow-admins-to-add-emoji", "/help/custom-emoji#change-who-can-add-custom-emoji"
68 ),
69 URLRedirect(
70 "/help/configure-who-can-add-custom-emoji",
71 "/help/custom-emoji#change-who-can-add-custom-emoji",
72 ),
73 URLRedirect("/help/add-custom-emoji", "/help/custom-emoji"),
74 URLRedirect("/help/night-mode", "/help/dark-theme"),
75 URLRedirect("/help/enable-emoticon-translations", "/help/configure-emoticon-translations"),
76 URLRedirect("/help/web-public-streams", "/help/public-access-option"),
77 URLRedirect("/help/starting-a-new-private-thread", "/help/starting-a-new-direct-message"),
78 URLRedirect("/help/edit-or-delete-a-message", "/help/delete-a-message"),
79 URLRedirect("/help/start-a-new-topic", "/help/starting-a-new-topic"),
80 URLRedirect("/help/configure-default-view", "/help/configure-home-view"),
81 URLRedirect("/help/reading-topics", "/help/reading-conversations"),
82 URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
83 URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
84 ]
85
86 LANDING_PAGE_REDIRECTS = [
87 # Add URL redirects for corporate landing pages here.
88 URLRedirect("/new-user/", "/hello/"),
89 URLRedirect("/developer-community/", "/development-community"),
90 URLRedirect("/for/companies/", "/for/business"),
91 URLRedirect("/for/working-groups-and-communities/", "/for/communities"),
92 ]
93
94 DOCUMENTATION_REDIRECTS = (
95 API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py
--- a/zerver/lib/url_redirects.py
+++ b/zerver/lib/url_redirects.py
@@ -81,6 +81,7 @@
URLRedirect("/help/reading-topics", "/help/reading-conversations"),
URLRedirect("/help/finding-a-topic-to-read", "/help/finding-a-conversation-to-read"),
URLRedirect("/help/view-and-browse-images", "/help/view-images-and-videos"),
+ URLRedirect("/help/bots-and-integrations", "/help/bots-overview"),
]
LANDING_PAGE_REDIRECTS = [
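For illustration, what the added entry provides, grounded in the module shown above (`DOCUMENTATION_REDIRECTS` already concatenates the help-center list):

```python
# Hedged sketch, not part of the Zulip test suite: the renamed article is now
# reachable from the old help-center URL via the redirect map.
from zerver.lib.url_redirects import DOCUMENTATION_REDIRECTS

mapping = {r.old_url: r.new_url for r in DOCUMENTATION_REDIRECTS}
assert mapping["/help/bots-and-integrations"] == "/help/bots-overview"
```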
|
{"golden_diff": "diff --git a/zerver/lib/url_redirects.py b/zerver/lib/url_redirects.py\n--- a/zerver/lib/url_redirects.py\n+++ b/zerver/lib/url_redirects.py\n@@ -81,6 +81,7 @@\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n+ URLRedirect(\"/help/bots-and-integrations\", \"/help/bots-overview\"),\n ]\n \n LANDING_PAGE_REDIRECTS = [\n", "issue": "Improve integrations documentation in help center\nWe should:\r\n- Rename /help/bots-and-integrations to \"Bots overview\" everywhere (sidebar, page title, page URL).\r\n- Add a copy of https://zulip.com/api/integrations-overview as the second page in the Bots & integrations section, titled \"Integration overview\"\r\n- Cross-link as appropriate, both in related articles and in the content of the pages.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n \"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", 
\"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", \"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom typing import List\n\n\n@dataclass\nclass URLRedirect:\n old_url: str\n new_url: str\n\n\nAPI_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for REST API documentation here:\n URLRedirect(\"/api/delete-stream\", \"/api/archive-stream\"),\n]\n\nPOLICY_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for policy documentation here:\n URLRedirect(\"/privacy/\", \"/policies/privacy\"),\n URLRedirect(\"/terms/\", \"/policies/terms\"),\n]\n\nHELP_DOCUMENTATION_REDIRECTS: List[URLRedirect] = [\n # Add URL redirects for help center documentation here:\n URLRedirect(\"/help/pm-mention-alert-notifications\", \"/help/dm-mention-alert-notifications\"),\n URLRedirect(\"/help/restrict-private-messages\", \"/help/restrict-direct-messages\"),\n URLRedirect(\"/help/reading-pms\", \"/help/reading-dms\"),\n URLRedirect(\"/help/private-messages\", \"/help/direct-messages\"),\n URLRedirect(\"/help/configure-who-can-edit-topics\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/configure-message-editing-and-deletion\",\n \"/help/restrict-message-editing-and-deletion\",\n ),\n URLRedirect(\"/help/restrict-visibility-of-email-addresses\", \"/help/configure-email-visibility\"),\n URLRedirect(\"/help/change-default-view\", \"/help/configure-default-view\"),\n URLRedirect(\"/help/recent-topics\", \"/help/recent-conversations\"),\n URLRedirect(\n 
\"/help/add-custom-profile-fields\",\n \"/help/custom-profile-fields\",\n ),\n URLRedirect(\n \"/help/enable-enter-to-send\",\n \"/help/mastering-the-compose-box#toggle-between-ctrl-enter-and-enter-to-send-a-message\",\n ),\n URLRedirect(\n \"/help/change-the-default-language-for-your-organization\",\n \"/help/configure-organization-language\",\n ),\n URLRedirect(\"/help/delete-a-stream\", \"/help/archive-a-stream\"),\n URLRedirect(\"/help/change-the-topic-of-a-message\", \"/help/rename-a-topic\"),\n URLRedirect(\"/help/configure-missed-message-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/add-an-alert-word\", \"/help/dm-mention-alert-notifications#alert-words\"),\n URLRedirect(\"/help/test-mobile-notifications\", \"/help/mobile-notifications\"),\n URLRedirect(\n \"/help/troubleshooting-desktop-notifications\",\n \"/help/desktop-notifications#troubleshooting-desktop-notifications\",\n ),\n URLRedirect(\n \"/help/change-notification-sound\", \"/help/desktop-notifications#change-notification-sound\"\n ),\n URLRedirect(\"/help/configure-message-notification-emails\", \"/help/email-notifications\"),\n URLRedirect(\"/help/disable-new-login-emails\", \"/help/email-notifications#new-login-emails\"),\n # The `help/about-streams-and-topics` redirect is particularly important,\n # because the old URL appears in links from Welcome Bot messages.\n URLRedirect(\"/help/about-streams-and-topics\", \"/help/streams-and-topics\"),\n URLRedirect(\"/help/community-topic-edits\", \"/help/restrict-moving-messages\"),\n URLRedirect(\n \"/help/only-allow-admins-to-add-emoji\", \"/help/custom-emoji#change-who-can-add-custom-emoji\"\n ),\n URLRedirect(\n \"/help/configure-who-can-add-custom-emoji\",\n \"/help/custom-emoji#change-who-can-add-custom-emoji\",\n ),\n URLRedirect(\"/help/add-custom-emoji\", \"/help/custom-emoji\"),\n URLRedirect(\"/help/night-mode\", \"/help/dark-theme\"),\n URLRedirect(\"/help/enable-emoticon-translations\", \"/help/configure-emoticon-translations\"),\n URLRedirect(\"/help/web-public-streams\", \"/help/public-access-option\"),\n URLRedirect(\"/help/starting-a-new-private-thread\", \"/help/starting-a-new-direct-message\"),\n URLRedirect(\"/help/edit-or-delete-a-message\", \"/help/delete-a-message\"),\n URLRedirect(\"/help/start-a-new-topic\", \"/help/starting-a-new-topic\"),\n URLRedirect(\"/help/configure-default-view\", \"/help/configure-home-view\"),\n URLRedirect(\"/help/reading-topics\", \"/help/reading-conversations\"),\n URLRedirect(\"/help/finding-a-topic-to-read\", \"/help/finding-a-conversation-to-read\"),\n URLRedirect(\"/help/view-and-browse-images\", \"/help/view-images-and-videos\"),\n URLRedirect(\"/help/bots-and-integrations\", \"/help/bots-overview\"),\n]\n\nLANDING_PAGE_REDIRECTS = [\n # Add URL redirects for corporate landing pages here.\n URLRedirect(\"/new-user/\", \"/hello/\"),\n URLRedirect(\"/developer-community/\", \"/development-community\"),\n URLRedirect(\"/for/companies/\", \"/for/business\"),\n URLRedirect(\"/for/working-groups-and-communities/\", \"/for/communities\"),\n]\n\nDOCUMENTATION_REDIRECTS = (\n API_DOCUMENTATION_REDIRECTS + POLICY_DOCUMENTATION_REDIRECTS + HELP_DOCUMENTATION_REDIRECTS\n)\n", "path": "zerver/lib/url_redirects.py"}]}
| 1,574 | 139 |
gh_patches_debug_1008
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-4892
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
**Bug description**
Please enter a clear and concise description of what the bug is.
When I execute:
```sh
$python -m parlai eval_model --task fromfile:parlaiformat\
--fromfile_datapath "${test_set_path}" \
-mf zoo:saferdialogues/model\
-bs 1\
--world-logs $test_set_path.SafeRDialog_parlai.jsonl\
--no-cuda
```
It reports:
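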
```sh
16:13:53 | Overriding opt["task"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)
16:13:53 | Overriding opt["no_cuda"] to True (previously: False)
>>>using / style agent path
>>>finally module name: parlai.agents.transformer.generator
16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict
16:13:53 | num words = 8008
Traceback (most recent call last):
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 18, in <module>
main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py", line 14, in main
superscript_main()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 325, in superscript_main
return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py", line 108, in _run_from_parser_and_opt
return script.run()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 265, in run
return eval_model(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py", line 233, in eval_model
agent = create_agent(opt, requireModelExists=True)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 468, in create_agent
model = create_agent_from_opt_file(opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py", line 421, in create_agent_from_opt_file
return model_class(opt_from_file)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py", line 462, in __init__
super().__init__(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 783, in __init__
self.dict = self.build_dictionary()
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py", line 862, in build_dictionary
d = self.dictionary_class()(self.opt)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py", line 322, in __init__
self.bpe = bpe_factory(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 68, in bpe_factory
bpe_helper = HuggingFaceBpeHelper(opt, shared)
File "/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py", line 841, in __init__
raise IOError(
OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.
```
and the parlai version is: `1.6.0`.
**Reproduction steps**
Enter steps to reproduce the behavior.
**Expected behavior**
Give a clear and concise description of what you expected to happen.
**Logs**
Please paste the command line output:
```
Output goes here
```
**Additional context**
Add any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/zoo/saferdialogues/build.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 """
8 Blender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.
9 """
10
11 from parlai.core.build_data import download_models
12
13
14 def download(datapath):
15 opt = {'datapath': datapath}
16 version = 'v0.1'
17 fnames = [f'models_{version}.tar.gz']
18 download_models(
19 opt,
20 fnames,
21 model_folder='saferdialogues',
22 version=version,
23 use_model_type=False,
24 )
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py
--- a/parlai/zoo/saferdialogues/build.py
+++ b/parlai/zoo/saferdialogues/build.py
@@ -13,7 +13,7 @@
def download(datapath):
opt = {'datapath': datapath}
- version = 'v0.1'
+ version = 'v0.2'
fnames = [f'models_{version}.tar.gz']
download_models(
opt,
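For illustration, a minimal sketch of pulling the repackaged model after the bump (the datapath below is an arbitrary example; `download_models` takes care of fetching and unpacking `models_v0.2.tar.gz` into the `saferdialogues` model folder):

```python
# Hedged sketch: re-run the zoo download so the fixed archive replaces the old one.
from parlai.zoo.saferdialogues.build import download

download('/tmp/parlai_data')
```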
|
{"golden_diff": "diff --git a/parlai/zoo/saferdialogues/build.py b/parlai/zoo/saferdialogues/build.py\n--- a/parlai/zoo/saferdialogues/build.py\n+++ b/parlai/zoo/saferdialogues/build.py\n@@ -13,7 +13,7 @@\n \n def download(datapath):\n opt = {'datapath': datapath}\n- version = 'v0.1'\n+ version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n", "issue": "OSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\n**Bug description**\r\n\r\nPlease enter a clear and concise description of what the bug is.\r\n\r\nWhen I execute:\r\n```sh\r\n$python -m parlai eval_model --task fromfile:parlaiformat\\\r\n --fromfile_datapath \"${test_set_path}\" \\\r\n -mf zoo:saferdialogues/model\\\r\n -bs 1\\\r\n --world-logs $test_set_path.SafeRDialog_parlai.jsonl\\\r\n --no-cuda\r\n```\r\nIt report:\r\n\r\n```sh\r\n16:13:53 | Overriding opt[\"task\"] to fromfile:parlaiformat (previously: internal:safety_failures_with_recovery,internal:bst_sf_modified)\r\n16:13:53 | Overriding opt[\"no_cuda\"] to True (previously: False)\r\n>>>using / style agent path\r\n>>>finally module name: parlai.agents.transformer.generator\r\n16:13:53 | loading dictionary from /home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/data/models/saferdialogues/model.dict\r\n16:13:53 | num words = 8008\r\nTraceback (most recent call last):\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 18, in <module>\r\n main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/__main__.py\", line 14, in main\r\n superscript_main()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 325, in superscript_main\r\n return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/script.py\", line 108, in _run_from_parser_and_opt\r\n return script.run()\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 265, in run\r\n return eval_model(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/scripts/eval_model.py\", line 233, in eval_model\r\n agent = create_agent(opt, requireModelExists=True)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 468, in create_agent\r\n model = create_agent_from_opt_file(opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/agents.py\", line 421, in create_agent_from_opt_file\r\n return model_class(opt_from_file)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_generator_agent.py\", line 462, in __init__\r\n super().__init__(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 783, in __init__\r\n self.dict = self.build_dictionary()\r\n 
File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/torch_agent.py\", line 862, in build_dictionary\r\n d = self.dictionary_class()(self.opt)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/core/dict.py\", line 322, in __init__\r\n self.bpe = bpe_factory(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 68, in bpe_factory\r\n bpe_helper = HuggingFaceBpeHelper(opt, shared)\r\n File \"/home/liangzi/anaconda3/envs/opendomaindialogue/lib/python3.8/site-packages/parlai/utils/bpe.py\", line 841, in __init__\r\n raise IOError(\r\nOSError: File /checkpoint/meganu/projects/safety_failures/recovery/model_templates/blender_3B/model.dict-vocab.json does not exist. --bpe-vocab must be pretrained.\r\n```\r\n\r\nand the parlai version is: `1.6.0`.\r\n\r\n**Reproduction steps**\r\nEnter steps to reproduce the behavior.\r\n\r\n**Expected behavior**\r\nGive a clear and concise description of what you expected to happen.\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nOutput goes here\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. (like proxy settings, network setup, overall goals, etc.)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.1'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nBlender 2.7B model fine-tuned on the SaFeRDialogues and BST (without persona) tasks.\n\"\"\"\n\nfrom parlai.core.build_data import download_models\n\n\ndef download(datapath):\n opt = {'datapath': datapath}\n version = 'v0.2'\n fnames = [f'models_{version}.tar.gz']\n download_models(\n opt,\n fnames,\n model_folder='saferdialogues',\n version=version,\n use_model_type=False,\n )\n", "path": "parlai/zoo/saferdialogues/build.py"}]}
| 1,751 | 135 |
gh_patches_debug_17633
|
rasdani/github-patches
|
git_diff
|
nextcloud__appstore-523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
App Generator creates faulty info.xml when using umlauts (special characters äöü)
Problem: nextcloud/apps/foo/appinfo/info.xml is missing the closing **</info>** tag; the generated file ends with just **</**
## Details
* Browser and browser version: Firefox 55.0.3
## Steps to reproduce
Steps to reproduce:
- visit https://apps.nextcloud.com/developer/apps/generate
- enter the required information; in summary and description enter "This is a test app äöü"
- click generate and download.
- look at the generated info.xml
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/scaffolding/archive.py`
Content:
```
1 import re
2 import tarfile
3 from io import BytesIO, StringIO
4 from typing import Dict
5 from os.path import join, isdir, relpath
6 from os import walk
7
8 from django.template import Context
9 from django.template import Template
10
11 from nextcloudappstore.core.facades import resolve_file_relative_path
12 from nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES
13
14
15 def build_files(args: Dict[str, str]) -> Dict[str, str]:
16 platform = int(args['platform']) # prevent path traversal
17 vars = {
18 'id': args['name'].lower(),
19 'summary': args['summary'],
20 'description': args['description'],
21 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),
22 'namespace': args['name'],
23 'author_name': args['author_name'],
24 'author_mail': args['author_email'],
25 'author_homepage': args['author_homepage'],
26 'issue_tracker': args['issue_tracker'],
27 'categories': args['categories'],
28 'nextcloud_version': platform
29 }
30 vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))
31 relative_base = 'app-templates/%i/app/' % platform
32 base = resolve_file_relative_path(__file__, relative_base)
33
34 context = Context({'app': vars})
35 result = {}
36 if isdir(base):
37 for root, dirs, files in walk(base):
38 for file in files:
39 file_path = join(root, file)
40 rel_file_path = '%s/%s' % (
41 vars['id'], relpath(file_path, base)
42 )
43 with open(file_path) as f:
44 t = Template(f.read())
45 result[rel_file_path] = t.render(context)
46
47 return result
48
49
50 def build_archive(parameters: Dict[str, str]) -> BytesIO:
51 buffer = BytesIO()
52 with tarfile.open(fileobj=buffer, mode='w:gz') as f:
53 files = build_files(parameters)
54 for path, contents in files.items():
55 info = tarfile.TarInfo(path)
56 info.size = len(contents)
57 f.addfile(info, BytesIO(contents.encode()))
58 buffer.seek(0)
59 return buffer
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py
--- a/nextcloudappstore/scaffolding/archive.py
+++ b/nextcloudappstore/scaffolding/archive.py
@@ -1,9 +1,9 @@
import re
import tarfile
-from io import BytesIO, StringIO
-from typing import Dict
-from os.path import join, isdir, relpath
+from io import BytesIO
from os import walk
+from os.path import join, isdir, relpath
+from typing import Dict
from django.template import Context
from django.template import Template
@@ -53,7 +53,8 @@
files = build_files(parameters)
for path, contents in files.items():
info = tarfile.TarInfo(path)
- info.size = len(contents)
- f.addfile(info, BytesIO(contents.encode()))
+ encoded_content = contents.encode()
+ info.size = len(encoded_content)
+ f.addfile(info, BytesIO(encoded_content))
buffer.seek(0)
return buffer
|
{"golden_diff": "diff --git a/nextcloudappstore/scaffolding/archive.py b/nextcloudappstore/scaffolding/archive.py\n--- a/nextcloudappstore/scaffolding/archive.py\n+++ b/nextcloudappstore/scaffolding/archive.py\n@@ -1,9 +1,9 @@\n import re\n import tarfile\n-from io import BytesIO, StringIO\n-from typing import Dict\n-from os.path import join, isdir, relpath\n+from io import BytesIO\n from os import walk\n+from os.path import join, isdir, relpath\n+from typing import Dict\n \n from django.template import Context\n from django.template import Template\n@@ -53,7 +53,8 @@\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n- info.size = len(contents)\n- f.addfile(info, BytesIO(contents.encode()))\n+ encoded_content = contents.encode()\n+ info.size = len(encoded_content)\n+ f.addfile(info, BytesIO(encoded_content))\n buffer.seek(0)\n return buffer\n", "issue": "App Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\nApp Generator creates faulty info.xml when using umlauts (special characters \u00e4\u00f6\u00fc)\nProblem: nextcloud/apps/foo/appinfo/info.xml misses the closing **</info>** tag. Just generates **</**\r\n\r\n## Details\r\n\r\n* Browser and browser version: Firefox 55.0.3\r\n\r\n## Steps to reproduce\r\n\r\nSteps to reproduce:\r\n\r\n- visit https://apps.nextcloud.com/developer/apps/generate\r\n- enter the required information; in summary and description enter \"This is a test app \u00e4\u00f6\u00fc\"\r\n- click generate and download.\r\n- look at the generated info.xml\r\n\r\n\n", "before_files": [{"content": "import re\nimport tarfile\nfrom io import BytesIO, StringIO\nfrom typing import Dict\nfrom os.path import join, isdir, relpath\nfrom os import walk\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = 
t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n info.size = len(contents)\n f.addfile(info, BytesIO(contents.encode()))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/scaffolding/archive.py"}], "after_files": [{"content": "import re\nimport tarfile\nfrom io import BytesIO\nfrom os import walk\nfrom os.path import join, isdir, relpath\nfrom typing import Dict\n\nfrom django.template import Context\nfrom django.template import Template\n\nfrom nextcloudappstore.core.facades import resolve_file_relative_path\nfrom nextcloudappstore.settings.base import APP_SCAFFOLDING_PROFILES\n\n\ndef build_files(args: Dict[str, str]) -> Dict[str, str]:\n platform = int(args['platform']) # prevent path traversal\n vars = {\n 'id': args['name'].lower(),\n 'summary': args['summary'],\n 'description': args['description'],\n 'name': ' '.join(re.findall(r'[A-Z][^A-Z]*', args['name'])),\n 'namespace': args['name'],\n 'author_name': args['author_name'],\n 'author_mail': args['author_email'],\n 'author_homepage': args['author_homepage'],\n 'issue_tracker': args['issue_tracker'],\n 'categories': args['categories'],\n 'nextcloud_version': platform\n }\n vars.update(APP_SCAFFOLDING_PROFILES.get(platform, {}))\n relative_base = 'app-templates/%i/app/' % platform\n base = resolve_file_relative_path(__file__, relative_base)\n\n context = Context({'app': vars})\n result = {}\n if isdir(base):\n for root, dirs, files in walk(base):\n for file in files:\n file_path = join(root, file)\n rel_file_path = '%s/%s' % (\n vars['id'], relpath(file_path, base)\n )\n with open(file_path) as f:\n t = Template(f.read())\n result[rel_file_path] = t.render(context)\n\n return result\n\n\ndef build_archive(parameters: Dict[str, str]) -> BytesIO:\n buffer = BytesIO()\n with tarfile.open(fileobj=buffer, mode='w:gz') as f:\n files = build_files(parameters)\n for path, contents in files.items():\n info = tarfile.TarInfo(path)\n encoded_content = contents.encode()\n info.size = len(encoded_content)\n f.addfile(info, BytesIO(encoded_content))\n buffer.seek(0)\n return buffer\n", "path": "nextcloudappstore/scaffolding/archive.py"}]}
| 1,099 | 234 |
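The failure mode in the record above comes down to one line: `info.size = len(contents)` counts characters, but tar member sizes are byte counts, so every multi-byte UTF-8 character (ä, ö, ü) makes the declared size too small and `tarfile` silently truncates the member, which is how the closing `</info>` tag got cut off. A standalone sketch of the difference and of the corrected pattern (illustrative, not code from the repository):

```python
import io
import tarfile

contents = "<info>This is a test app äöü</info>\n"
print(len(contents), len(contents.encode("utf-8")))  # 36 characters, but 39 bytes

buffer = io.BytesIO()
with tarfile.open(fileobj=buffer, mode="w:gz") as tar:
    info = tarfile.TarInfo("appinfo/info.xml")
    encoded = contents.encode("utf-8")
    info.size = len(encoded)  # size must be the byte length; len(contents) would truncate the member
    tar.addfile(info, io.BytesIO(encoded))
```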
gh_patches_debug_41254
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-3029
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove request.exception if the excview tween fails to handle the exception
Pyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.
I'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a "normal" response.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/tweens.py`
Content:
```
1 import sys
2
3 from pyramid.compat import reraise
4 from pyramid.exceptions import PredicateMismatch
5 from pyramid.interfaces import (
6 IExceptionViewClassifier,
7 IRequest,
8 )
9
10 from zope.interface import providedBy
11 from pyramid.view import _call_view
12
13 def excview_tween_factory(handler, registry):
14 """ A :term:`tween` factory which produces a tween that catches an
15 exception raised by downstream tweens (or the main Pyramid request
16 handler) and, if possible, converts it into a Response using an
17 :term:`exception view`."""
18
19 def excview_tween(request):
20 attrs = request.__dict__
21 try:
22 response = handler(request)
23 except Exception as exc:
24 # WARNING: do not assign the result of sys.exc_info() to a local
25 # var here, doing so will cause a leak. We used to actually
26 # explicitly delete both "exception" and "exc_info" from ``attrs``
27 # in a ``finally:`` clause below, but now we do not because these
28 # attributes are useful to upstream tweens. This actually still
29 # apparently causes a reference cycle, but it is broken
30 # successfully by the garbage collector (see
31 # https://github.com/Pylons/pyramid/issues/1223).
32 attrs['exc_info'] = sys.exc_info()
33 attrs['exception'] = exc
34 # clear old generated request.response, if any; it may
35 # have been mutated by the view, and its state is not
36 # sane (e.g. caching headers)
37 if 'response' in attrs:
38 del attrs['response']
39 # we use .get instead of .__getitem__ below due to
40 # https://github.com/Pylons/pyramid/issues/700
41 request_iface = attrs.get('request_iface', IRequest)
42 provides = providedBy(exc)
43 try:
44 response = _call_view(
45 registry,
46 request,
47 exc,
48 provides,
49 '',
50 view_classifier=IExceptionViewClassifier,
51 request_iface=request_iface.combined
52 )
53
54 # if views matched but did not pass predicates, squash the error
55 # and re-raise the original exception
56 except PredicateMismatch:
57 response = None
58
59 # re-raise the original exception as no exception views were
60 # able to handle the error
61 if response is None:
62 reraise(*attrs['exc_info'])
63
64 return response
65
66 return excview_tween
67
68 MAIN = 'MAIN'
69 INGRESS = 'INGRESS'
70 EXCVIEW = 'pyramid.tweens.excview_tween_factory'
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/tweens.py b/pyramid/tweens.py
--- a/pyramid/tweens.py
+++ b/pyramid/tweens.py
@@ -10,6 +10,50 @@
from zope.interface import providedBy
from pyramid.view import _call_view
+def _error_handler(request, exc):
+ # NOTE: we do not need to delete exc_info because this function
+ # should never be in the call stack of the exception
+ exc_info = sys.exc_info()
+
+ attrs = request.__dict__
+ attrs['exc_info'] = exc_info
+ attrs['exception'] = exc
+ # clear old generated request.response, if any; it may
+ # have been mutated by the view, and its state is not
+ # sane (e.g. caching headers)
+ if 'response' in attrs:
+ del attrs['response']
+ # we use .get instead of .__getitem__ below due to
+ # https://github.com/Pylons/pyramid/issues/700
+ request_iface = attrs.get('request_iface', IRequest)
+ provides = providedBy(exc)
+ try:
+ response = _call_view(
+ request.registry,
+ request,
+ exc,
+ provides,
+ '',
+ view_classifier=IExceptionViewClassifier,
+ request_iface=request_iface.combined
+ )
+
+ # if views matched but did not pass predicates then treat the
+ # same as not finding any matching views
+ except PredicateMismatch:
+ response = None
+
+ # re-raise the original exception as no exception views were
+ # able to handle the error
+ if response is None:
+ if 'exception' in attrs:
+ del attrs['exception']
+ if 'exc_info' in attrs:
+ del attrs['exc_info']
+ reraise(*exc_info)
+
+ return response
+
def excview_tween_factory(handler, registry):
""" A :term:`tween` factory which produces a tween that catches an
exception raised by downstream tweens (or the main Pyramid request
@@ -17,50 +61,10 @@
:term:`exception view`."""
def excview_tween(request):
- attrs = request.__dict__
try:
response = handler(request)
except Exception as exc:
- # WARNING: do not assign the result of sys.exc_info() to a local
- # var here, doing so will cause a leak. We used to actually
- # explicitly delete both "exception" and "exc_info" from ``attrs``
- # in a ``finally:`` clause below, but now we do not because these
- # attributes are useful to upstream tweens. This actually still
- # apparently causes a reference cycle, but it is broken
- # successfully by the garbage collector (see
- # https://github.com/Pylons/pyramid/issues/1223).
- attrs['exc_info'] = sys.exc_info()
- attrs['exception'] = exc
- # clear old generated request.response, if any; it may
- # have been mutated by the view, and its state is not
- # sane (e.g. caching headers)
- if 'response' in attrs:
- del attrs['response']
- # we use .get instead of .__getitem__ below due to
- # https://github.com/Pylons/pyramid/issues/700
- request_iface = attrs.get('request_iface', IRequest)
- provides = providedBy(exc)
- try:
- response = _call_view(
- registry,
- request,
- exc,
- provides,
- '',
- view_classifier=IExceptionViewClassifier,
- request_iface=request_iface.combined
- )
-
- # if views matched but did not pass predicates, squash the error
- # and re-raise the original exception
- except PredicateMismatch:
- response = None
-
- # re-raise the original exception as no exception views were
- # able to handle the error
- if response is None:
- reraise(*attrs['exc_info'])
-
+ response = _error_handler(request, exc)
return response
return excview_tween
|
{"golden_diff": "diff --git a/pyramid/tweens.py b/pyramid/tweens.py\n--- a/pyramid/tweens.py\n+++ b/pyramid/tweens.py\n@@ -10,6 +10,50 @@\n from zope.interface import providedBy\n from pyramid.view import _call_view\n \n+def _error_handler(request, exc):\n+ # NOTE: we do not need to delete exc_info because this function\n+ # should never be in the call stack of the exception\n+ exc_info = sys.exc_info()\n+\n+ attrs = request.__dict__\n+ attrs['exc_info'] = exc_info\n+ attrs['exception'] = exc\n+ # clear old generated request.response, if any; it may\n+ # have been mutated by the view, and its state is not\n+ # sane (e.g. caching headers)\n+ if 'response' in attrs:\n+ del attrs['response']\n+ # we use .get instead of .__getitem__ below due to\n+ # https://github.com/Pylons/pyramid/issues/700\n+ request_iface = attrs.get('request_iface', IRequest)\n+ provides = providedBy(exc)\n+ try:\n+ response = _call_view(\n+ request.registry,\n+ request,\n+ exc,\n+ provides,\n+ '',\n+ view_classifier=IExceptionViewClassifier,\n+ request_iface=request_iface.combined\n+ )\n+\n+ # if views matched but did not pass predicates then treat the\n+ # same as not finding any matching views\n+ except PredicateMismatch:\n+ response = None\n+\n+ # re-raise the original exception as no exception views were\n+ # able to handle the error\n+ if response is None:\n+ if 'exception' in attrs:\n+ del attrs['exception']\n+ if 'exc_info' in attrs:\n+ del attrs['exc_info']\n+ reraise(*exc_info)\n+\n+ return response\n+\n def excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n@@ -17,50 +61,10 @@\n :term:`exception view`.\"\"\"\n \n def excview_tween(request):\n- attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n- # WARNING: do not assign the result of sys.exc_info() to a local\n- # var here, doing so will cause a leak. We used to actually\n- # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n- # in a ``finally:`` clause below, but now we do not because these\n- # attributes are useful to upstream tweens. This actually still\n- # apparently causes a reference cycle, but it is broken\n- # successfully by the garbage collector (see\n- # https://github.com/Pylons/pyramid/issues/1223).\n- attrs['exc_info'] = sys.exc_info()\n- attrs['exception'] = exc\n- # clear old generated request.response, if any; it may\n- # have been mutated by the view, and its state is not\n- # sane (e.g. 
caching headers)\n- if 'response' in attrs:\n- del attrs['response']\n- # we use .get instead of .__getitem__ below due to\n- # https://github.com/Pylons/pyramid/issues/700\n- request_iface = attrs.get('request_iface', IRequest)\n- provides = providedBy(exc)\n- try:\n- response = _call_view(\n- registry,\n- request,\n- exc,\n- provides,\n- '',\n- view_classifier=IExceptionViewClassifier,\n- request_iface=request_iface.combined\n- )\n-\n- # if views matched but did not pass predicates, squash the error\n- # and re-raise the original exception\n- except PredicateMismatch:\n- response = None\n-\n- # re-raise the original exception as no exception views were\n- # able to handle the error\n- if response is None:\n- reraise(*attrs['exc_info'])\n-\n+ response = _error_handler(request, exc)\n return response\n \n return excview_tween\n", "issue": "remove request.exception if the excview tween fails to handle the exception\nPyramid 1.9 makes `request.exception` and `request.exc_info` a little more important as I've moved the pyramid_tm tween over the excview and in general would advocate to move most tweens over the excview. With that in mind it's currently not possible to test `request.exception` to see if the response was rendered in relation to that exception - the excview tween sets the exception even if it failed to squash it (attempted to render an excview and couldn't find one). Ideally the exception would be related to the response that was generated when it was squashed. This would be more explicit if we used `response.exception` to indicate the response is from a squashed exception but I think that's a larger change.\r\n\r\nI'm proposing to remove `request.exception` and `request.exc_info` in the excview tween if it reraises the original exception. This makes introspection `request.exception` more reliable by upstream tweens that want to know what the squashed exception was... Of course any raised exception should be more interesting than the original `request.exception` but if the tween receives a response then they can see if it is a response generated by a squashed exception or if it is a \"normal\" response.\n", "before_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n attrs = request.__dict__\n try:\n response = handler(request)\n except Exception as exc:\n # WARNING: do not assign the result of sys.exc_info() to a local\n # var here, doing so will cause a leak. We used to actually\n # explicitly delete both \"exception\" and \"exc_info\" from ``attrs``\n # in a ``finally:`` clause below, but now we do not because these\n # attributes are useful to upstream tweens. This actually still\n # apparently causes a reference cycle, but it is broken\n # successfully by the garbage collector (see\n # https://github.com/Pylons/pyramid/issues/1223).\n attrs['exc_info'] = sys.exc_info()\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. 
caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates, squash the error\n # and re-raise the original exception\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n reraise(*attrs['exc_info'])\n\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}], "after_files": [{"content": "import sys\n\nfrom pyramid.compat import reraise\nfrom pyramid.exceptions import PredicateMismatch\nfrom pyramid.interfaces import (\n IExceptionViewClassifier,\n IRequest,\n )\n\nfrom zope.interface import providedBy\nfrom pyramid.view import _call_view\n\ndef _error_handler(request, exc):\n # NOTE: we do not need to delete exc_info because this function\n # should never be in the call stack of the exception\n exc_info = sys.exc_info()\n\n attrs = request.__dict__\n attrs['exc_info'] = exc_info\n attrs['exception'] = exc\n # clear old generated request.response, if any; it may\n # have been mutated by the view, and its state is not\n # sane (e.g. caching headers)\n if 'response' in attrs:\n del attrs['response']\n # we use .get instead of .__getitem__ below due to\n # https://github.com/Pylons/pyramid/issues/700\n request_iface = attrs.get('request_iface', IRequest)\n provides = providedBy(exc)\n try:\n response = _call_view(\n request.registry,\n request,\n exc,\n provides,\n '',\n view_classifier=IExceptionViewClassifier,\n request_iface=request_iface.combined\n )\n\n # if views matched but did not pass predicates then treat the\n # same as not finding any matching views\n except PredicateMismatch:\n response = None\n\n # re-raise the original exception as no exception views were\n # able to handle the error\n if response is None:\n if 'exception' in attrs:\n del attrs['exception']\n if 'exc_info' in attrs:\n del attrs['exc_info']\n reraise(*exc_info)\n\n return response\n\ndef excview_tween_factory(handler, registry):\n \"\"\" A :term:`tween` factory which produces a tween that catches an\n exception raised by downstream tweens (or the main Pyramid request\n handler) and, if possible, converts it into a Response using an\n :term:`exception view`.\"\"\"\n\n def excview_tween(request):\n try:\n response = handler(request)\n except Exception as exc:\n response = _error_handler(request, exc)\n return response\n\n return excview_tween\n\nMAIN = 'MAIN'\nINGRESS = 'INGRESS'\nEXCVIEW = 'pyramid.tweens.excview_tween_factory'\n", "path": "pyramid/tweens.py"}]}
| 1,225 | 960 |
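The practical effect of the patch above is that `request.exception` and `request.exc_info` survive only when an exception view actually produced the response, so tweens registered over the excview tween can rely on them. A rough sketch of such an upstream tween; the factory name, header name and registration line are invented for illustration and are not part of Pyramid or of the patch:

```python
from pyramid.tweens import EXCVIEW


def audit_tween_factory(handler, registry):
    def audit_tween(request):
        response = handler(request)  # the excview tween runs downstream of this one
        exc = getattr(request, 'exception', None)
        if exc is not None:
            # Only reached when an exception view squashed `exc` and rendered `response`.
            response.headers['X-Squashed-Exception'] = type(exc).__name__
        return response
    return audit_tween


# Registration in the app's configuration, placing this tween over the excview tween:
# config.add_tween('myapp.tweens.audit_tween_factory', over=EXCVIEW)
```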
gh_patches_debug_668
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-388
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
timeline wrong way?
the phases in the timeline seem to be sorted in the wrong direction:


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/phases/models.py`
Content:
```
1 from django.core.exceptions import ValidationError
2 from django.db import models
3 from django.utils import timezone
4 from django.utils.translation import ugettext as _
5
6 from euth.modules import models as modules_models
7
8 from . import content
9 from .validators import validate_content
10
11
12 class PhasesQuerySet(models.QuerySet):
13
14 def active_phases(self):
15 now = timezone.now()
16 return self.filter(start_date__lte=now, end_date__gt=now)
17
18
19 class Phase(models.Model):
20 name = models.CharField(max_length=80)
21 description = models.TextField(max_length=300)
22 type = models.CharField(max_length=128, validators=[validate_content])
23 module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)
24 start_date = models.DateTimeField(blank=True, null=True)
25 end_date = models.DateTimeField(blank=True, null=True)
26
27 objects = PhasesQuerySet.as_manager()
28
29 def __str__(self):
30 return '{} ({})'.format(self.name, self.type)
31
32 def content(self):
33 return content[self.type]
34
35 def clean(self):
36 if self.end_date and self.start_date:
37 if self.end_date < self.start_date:
38 raise ValidationError({
39 'end_date': _('End date can not be smaller'
40 'than the start date.')
41 })
42 super().clean()
43
44 @property
45 def view(self):
46 return content[self.type].view
47
48 def has_feature(self, feature, model):
49 return content[self.type].has_feature(feature, model)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/phases/models.py b/euth/phases/models.py
--- a/euth/phases/models.py
+++ b/euth/phases/models.py
@@ -26,6 +26,9 @@
objects = PhasesQuerySet.as_manager()
+ class Meta:
+ ordering = ['type']
+
def __str__(self):
return '{} ({})'.format(self.name, self.type)
|
{"golden_diff": "diff --git a/euth/phases/models.py b/euth/phases/models.py\n--- a/euth/phases/models.py\n+++ b/euth/phases/models.py\n@@ -26,6 +26,9 @@\n \n objects = PhasesQuerySet.as_manager()\n \n+ class Meta:\n+ ordering = ['type']\n+\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n", "issue": "timeline wrong way?\nthe phases in the timeline seem to be sorted in the wrong direction:\n\n\n\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom euth.modules import models as modules_models\n\nfrom . import content\nfrom .validators import validate_content\n\n\nclass PhasesQuerySet(models.QuerySet):\n\n def active_phases(self):\n now = timezone.now()\n return self.filter(start_date__lte=now, end_date__gt=now)\n\n\nclass Phase(models.Model):\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=300)\n type = models.CharField(max_length=128, validators=[validate_content])\n module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n objects = PhasesQuerySet.as_manager()\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n\n def content(self):\n return content[self.type]\n\n def clean(self):\n if self.end_date and self.start_date:\n if self.end_date < self.start_date:\n raise ValidationError({\n 'end_date': _('End date can not be smaller'\n 'than the start date.')\n })\n super().clean()\n\n @property\n def view(self):\n return content[self.type].view\n\n def has_feature(self, feature, model):\n return content[self.type].has_feature(feature, model)\n", "path": "euth/phases/models.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom euth.modules import models as modules_models\n\nfrom . import content\nfrom .validators import validate_content\n\n\nclass PhasesQuerySet(models.QuerySet):\n\n def active_phases(self):\n now = timezone.now()\n return self.filter(start_date__lte=now, end_date__gt=now)\n\n\nclass Phase(models.Model):\n name = models.CharField(max_length=80)\n description = models.TextField(max_length=300)\n type = models.CharField(max_length=128, validators=[validate_content])\n module = models.ForeignKey(modules_models.Module, on_delete=models.CASCADE)\n start_date = models.DateTimeField(blank=True, null=True)\n end_date = models.DateTimeField(blank=True, null=True)\n\n objects = PhasesQuerySet.as_manager()\n\n class Meta:\n ordering = ['type']\n\n def __str__(self):\n return '{} ({})'.format(self.name, self.type)\n\n def content(self):\n return content[self.type]\n\n def clean(self):\n if self.end_date and self.start_date:\n if self.end_date < self.start_date:\n raise ValidationError({\n 'end_date': _('End date can not be smaller'\n 'than the start date.')\n })\n super().clean()\n\n @property\n def view(self):\n return content[self.type].view\n\n def has_feature(self, feature, model):\n return content[self.type].has_feature(feature, model)\n", "path": "euth/phases/models.py"}]}
| 865 | 93 |
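The one-line fix above leans on Django's default model ordering: a `Meta.ordering` declared on the model applies to every queryset built from its default manager, including related lookups such as `module.phase_set.all()`, so the timeline template receives phases in ascending `type` order without changing any call site. A stripped-down illustration (not the project's actual model):

```python
from django.db import models


class Phase(models.Model):
    name = models.CharField(max_length=80)
    type = models.CharField(max_length=128)

    class Meta:
        ordering = ['type']  # ascending by default; ['-type'] would reverse the direction


# Phase.objects.all() and related lookups now come back sorted by "type",
# which is what puts the timeline phases in the intended order.
```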
gh_patches_debug_29172
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-4876
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fixed light/dark theme setting
Hello, is it possible to pin the theme to either dark or light? With the OS auto-detection I otherwise have problems with the different logos I use in my tenant settings: light logos are not visible to users on light platforms, and dark logos are not visible to users on dark platforms.
Thanks in advance
pupazze
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/tenants/api.py`
Content:
```
1 """Serializer for tenant models"""
2 from typing import Any
3
4 from drf_spectacular.utils import extend_schema
5 from rest_framework.decorators import action
6 from rest_framework.exceptions import ValidationError
7 from rest_framework.fields import CharField, ListField
8 from rest_framework.filters import OrderingFilter, SearchFilter
9 from rest_framework.permissions import AllowAny
10 from rest_framework.request import Request
11 from rest_framework.response import Response
12 from rest_framework.serializers import ModelSerializer
13 from rest_framework.viewsets import ModelViewSet
14
15 from authentik.api.authorization import SecretKeyFilter
16 from authentik.core.api.used_by import UsedByMixin
17 from authentik.core.api.utils import PassiveSerializer
18 from authentik.lib.config import CONFIG
19 from authentik.tenants.models import Tenant
20
21
22 class FooterLinkSerializer(PassiveSerializer):
23 """Links returned in Config API"""
24
25 href = CharField(read_only=True)
26 name = CharField(read_only=True)
27
28
29 class TenantSerializer(ModelSerializer):
30 """Tenant Serializer"""
31
32 def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
33 if attrs.get("default", False):
34 tenants = Tenant.objects.filter(default=True)
35 if self.instance:
36 tenants = tenants.exclude(pk=self.instance.pk)
37 if tenants.exists():
38 raise ValidationError("Only a single Tenant can be set as default.")
39 return super().validate(attrs)
40
41 class Meta:
42 model = Tenant
43 fields = [
44 "tenant_uuid",
45 "domain",
46 "default",
47 "branding_title",
48 "branding_logo",
49 "branding_favicon",
50 "flow_authentication",
51 "flow_invalidation",
52 "flow_recovery",
53 "flow_unenrollment",
54 "flow_user_settings",
55 "flow_device_code",
56 "event_retention",
57 "web_certificate",
58 "attributes",
59 ]
60
61
62 class CurrentTenantSerializer(PassiveSerializer):
63 """Partial tenant information for styling"""
64
65 matched_domain = CharField(source="domain")
66 branding_title = CharField()
67 branding_logo = CharField()
68 branding_favicon = CharField()
69 ui_footer_links = ListField(
70 child=FooterLinkSerializer(),
71 read_only=True,
72 default=CONFIG.y("footer_links", []),
73 )
74
75 flow_authentication = CharField(source="flow_authentication.slug", required=False)
76 flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
77 flow_recovery = CharField(source="flow_recovery.slug", required=False)
78 flow_unenrollment = CharField(source="flow_unenrollment.slug", required=False)
79 flow_user_settings = CharField(source="flow_user_settings.slug", required=False)
80 flow_device_code = CharField(source="flow_device_code.slug", required=False)
81
82 default_locale = CharField(read_only=True)
83
84
85 class TenantViewSet(UsedByMixin, ModelViewSet):
86 """Tenant Viewset"""
87
88 queryset = Tenant.objects.all()
89 serializer_class = TenantSerializer
90 search_fields = [
91 "domain",
92 "branding_title",
93 "web_certificate__name",
94 ]
95 filterset_fields = [
96 "tenant_uuid",
97 "domain",
98 "default",
99 "branding_title",
100 "branding_logo",
101 "branding_favicon",
102 "flow_authentication",
103 "flow_invalidation",
104 "flow_recovery",
105 "flow_unenrollment",
106 "flow_user_settings",
107 "flow_device_code",
108 "event_retention",
109 "web_certificate",
110 ]
111 ordering = ["domain"]
112
113 filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]
114
115 @extend_schema(
116 responses=CurrentTenantSerializer(many=False),
117 )
118 @action(methods=["GET"], detail=False, permission_classes=[AllowAny])
119 def current(self, request: Request) -> Response:
120 """Get current tenant"""
121 tenant: Tenant = request._request.tenant
122 return Response(CurrentTenantSerializer(tenant).data)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py
--- a/authentik/tenants/api.py
+++ b/authentik/tenants/api.py
@@ -1,10 +1,11 @@
"""Serializer for tenant models"""
from typing import Any
+from django.db import models
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
-from rest_framework.fields import CharField, ListField
+from rest_framework.fields import CharField, ChoiceField, ListField
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.permissions import AllowAny
from rest_framework.request import Request
@@ -59,6 +60,14 @@
]
+class Themes(models.TextChoices):
+ """Themes"""
+
+ AUTOMATIC = "automatic"
+ LIGHT = "light"
+ DARK = "dark"
+
+
class CurrentTenantSerializer(PassiveSerializer):
"""Partial tenant information for styling"""
@@ -71,6 +80,12 @@
read_only=True,
default=CONFIG.y("footer_links", []),
)
+ ui_theme = ChoiceField(
+ choices=Themes.choices,
+ source="attributes.settings.theme.base",
+ default=Themes.AUTOMATIC,
+ read_only=True,
+ )
flow_authentication = CharField(source="flow_authentication.slug", required=False)
flow_invalidation = CharField(source="flow_invalidation.slug", required=False)
|
{"golden_diff": "diff --git a/authentik/tenants/api.py b/authentik/tenants/api.py\n--- a/authentik/tenants/api.py\n+++ b/authentik/tenants/api.py\n@@ -1,10 +1,11 @@\n \"\"\"Serializer for tenant models\"\"\"\n from typing import Any\n \n+from django.db import models\n from drf_spectacular.utils import extend_schema\n from rest_framework.decorators import action\n from rest_framework.exceptions import ValidationError\n-from rest_framework.fields import CharField, ListField\n+from rest_framework.fields import CharField, ChoiceField, ListField\n from rest_framework.filters import OrderingFilter, SearchFilter\n from rest_framework.permissions import AllowAny\n from rest_framework.request import Request\n@@ -59,6 +60,14 @@\n ]\n \n \n+class Themes(models.TextChoices):\n+ \"\"\"Themes\"\"\"\n+\n+ AUTOMATIC = \"automatic\"\n+ LIGHT = \"light\"\n+ DARK = \"dark\"\n+\n+\n class CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n \n@@ -71,6 +80,12 @@\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n+ ui_theme = ChoiceField(\n+ choices=Themes.choices,\n+ source=\"attributes.settings.theme.base\",\n+ default=Themes.AUTOMATIC,\n+ read_only=True,\n+ )\n \n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n", "issue": "Fixed light/dark theme setting\nHello, is it possible to set the dark and light theme fixed? With the OS autodetection I have otherwise problems with my different logos that I use in my Tenants settings. Light logos are not visible to users on light platforms, dark logos are not visible to users on dark platforms.\r\nThanks in advanced\r\npupazze\n", "before_files": [{"content": "\"\"\"Serializer for tenant models\"\"\"\nfrom typing import Any\n\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import CharField, ListField\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.api.authorization import SecretKeyFilter\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.config import CONFIG\nfrom authentik.tenants.models import Tenant\n\n\nclass FooterLinkSerializer(PassiveSerializer):\n \"\"\"Links returned in Config API\"\"\"\n\n href = CharField(read_only=True)\n name = CharField(read_only=True)\n\n\nclass TenantSerializer(ModelSerializer):\n \"\"\"Tenant Serializer\"\"\"\n\n def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:\n if attrs.get(\"default\", False):\n tenants = Tenant.objects.filter(default=True)\n if self.instance:\n tenants = tenants.exclude(pk=self.instance.pk)\n if tenants.exists():\n raise ValidationError(\"Only a single Tenant can be set as default.\")\n return super().validate(attrs)\n\n class Meta:\n model = Tenant\n fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n 
\"web_certificate\",\n \"attributes\",\n ]\n\n\nclass CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n\n matched_domain = CharField(source=\"domain\")\n branding_title = CharField()\n branding_logo = CharField()\n branding_favicon = CharField()\n ui_footer_links = ListField(\n child=FooterLinkSerializer(),\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n\n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n flow_recovery = CharField(source=\"flow_recovery.slug\", required=False)\n flow_unenrollment = CharField(source=\"flow_unenrollment.slug\", required=False)\n flow_user_settings = CharField(source=\"flow_user_settings.slug\", required=False)\n flow_device_code = CharField(source=\"flow_device_code.slug\", required=False)\n\n default_locale = CharField(read_only=True)\n\n\nclass TenantViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Tenant Viewset\"\"\"\n\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n search_fields = [\n \"domain\",\n \"branding_title\",\n \"web_certificate__name\",\n ]\n filterset_fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n ]\n ordering = [\"domain\"]\n\n filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]\n\n @extend_schema(\n responses=CurrentTenantSerializer(many=False),\n )\n @action(methods=[\"GET\"], detail=False, permission_classes=[AllowAny])\n def current(self, request: Request) -> Response:\n \"\"\"Get current tenant\"\"\"\n tenant: Tenant = request._request.tenant\n return Response(CurrentTenantSerializer(tenant).data)\n", "path": "authentik/tenants/api.py"}], "after_files": [{"content": "\"\"\"Serializer for tenant models\"\"\"\nfrom typing import Any\n\nfrom django.db import models\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import CharField, ChoiceField, ListField\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.api.authorization import SecretKeyFilter\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.config import CONFIG\nfrom authentik.tenants.models import Tenant\n\n\nclass FooterLinkSerializer(PassiveSerializer):\n \"\"\"Links returned in Config API\"\"\"\n\n href = CharField(read_only=True)\n name = CharField(read_only=True)\n\n\nclass TenantSerializer(ModelSerializer):\n \"\"\"Tenant Serializer\"\"\"\n\n def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:\n if attrs.get(\"default\", False):\n tenants = Tenant.objects.filter(default=True)\n if self.instance:\n tenants = tenants.exclude(pk=self.instance.pk)\n if tenants.exists():\n raise ValidationError(\"Only a single Tenant can be set as default.\")\n return super().validate(attrs)\n\n class Meta:\n model = Tenant\n 
fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n \"attributes\",\n ]\n\n\nclass Themes(models.TextChoices):\n \"\"\"Themes\"\"\"\n\n AUTOMATIC = \"automatic\"\n LIGHT = \"light\"\n DARK = \"dark\"\n\n\nclass CurrentTenantSerializer(PassiveSerializer):\n \"\"\"Partial tenant information for styling\"\"\"\n\n matched_domain = CharField(source=\"domain\")\n branding_title = CharField()\n branding_logo = CharField()\n branding_favicon = CharField()\n ui_footer_links = ListField(\n child=FooterLinkSerializer(),\n read_only=True,\n default=CONFIG.y(\"footer_links\", []),\n )\n ui_theme = ChoiceField(\n choices=Themes.choices,\n source=\"attributes.settings.theme.base\",\n default=Themes.AUTOMATIC,\n read_only=True,\n )\n\n flow_authentication = CharField(source=\"flow_authentication.slug\", required=False)\n flow_invalidation = CharField(source=\"flow_invalidation.slug\", required=False)\n flow_recovery = CharField(source=\"flow_recovery.slug\", required=False)\n flow_unenrollment = CharField(source=\"flow_unenrollment.slug\", required=False)\n flow_user_settings = CharField(source=\"flow_user_settings.slug\", required=False)\n flow_device_code = CharField(source=\"flow_device_code.slug\", required=False)\n\n default_locale = CharField(read_only=True)\n\n\nclass TenantViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Tenant Viewset\"\"\"\n\n queryset = Tenant.objects.all()\n serializer_class = TenantSerializer\n search_fields = [\n \"domain\",\n \"branding_title\",\n \"web_certificate__name\",\n ]\n filterset_fields = [\n \"tenant_uuid\",\n \"domain\",\n \"default\",\n \"branding_title\",\n \"branding_logo\",\n \"branding_favicon\",\n \"flow_authentication\",\n \"flow_invalidation\",\n \"flow_recovery\",\n \"flow_unenrollment\",\n \"flow_user_settings\",\n \"flow_device_code\",\n \"event_retention\",\n \"web_certificate\",\n ]\n ordering = [\"domain\"]\n\n filter_backends = [SecretKeyFilter, OrderingFilter, SearchFilter]\n\n @extend_schema(\n responses=CurrentTenantSerializer(many=False),\n )\n @action(methods=[\"GET\"], detail=False, permission_classes=[AllowAny])\n def current(self, request: Request) -> Response:\n \"\"\"Get current tenant\"\"\"\n tenant: Tenant = request._request.tenant\n return Response(CurrentTenantSerializer(tenant).data)\n", "path": "authentik/tenants/api.py"}]}
| 1,418 | 327 |
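What the patch above enables in practice: the new read-only `ui_theme` field is sourced from the tenant's `attributes` under `settings.theme.base` and defaults to `automatic`, so pinning a tenant to one theme is just a matter of setting that attribute. A hedged sketch follows; the domain used in the lookup is hypothetical and the admin workflow is assumed, not shown in the record:

```python
from authentik.tenants.models import Tenant

tenant = Tenant.objects.get(domain="authentik.example.com")    # hypothetical tenant domain
tenant.attributes = {"settings": {"theme": {"base": "dark"}}}  # "light" / "automatic" are the other choices
tenant.save()

# CurrentTenantSerializer(tenant).data["ui_theme"] would then report "dark",
# so the frontend no longer falls back to OS auto-detection for this tenant.
```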
gh_patches_debug_10131
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nodeenv try to download non existing tar.gz prebuilt under Cygwin
Hi,
Strange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.
Bug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0
`pre-commit` execute the following command under the hood, a command that also fails if I execute it manually:
```
nodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default
```
The error is the following:
```
urllib2.HTTPError: HTTP Error 404: Not Found
```
The `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503
I'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint at what the root cause could be here ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/node.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import os
5 import sys
6
7 from pre_commit.envcontext import envcontext
8 from pre_commit.envcontext import Var
9 from pre_commit.languages import helpers
10 from pre_commit.util import clean_path_on_failure
11 from pre_commit.xargs import xargs
12
13
14 ENVIRONMENT_DIR = 'node_env'
15 get_default_version = helpers.basic_get_default_version
16 healthy = helpers.basic_healthy
17
18
19 def get_env_patch(venv): # pragma: windows no cover
20 return (
21 ('NODE_VIRTUAL_ENV', venv),
22 ('NPM_CONFIG_PREFIX', venv),
23 ('npm_config_prefix', venv),
24 ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
25 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
26 )
27
28
29 @contextlib.contextmanager
30 def in_env(repo_cmd_runner, language_version): # pragma: windows no cover
31 envdir = repo_cmd_runner.path(
32 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
33 )
34 with envcontext(get_env_patch(envdir)):
35 yield
36
37
38 def install_environment(
39 repo_cmd_runner, version, additional_dependencies,
40 ): # pragma: windows no cover
41 additional_dependencies = tuple(additional_dependencies)
42 assert repo_cmd_runner.exists('package.json')
43 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
44
45 env_dir = repo_cmd_runner.path(directory)
46 with clean_path_on_failure(env_dir):
47 cmd = [
48 sys.executable, '-m', 'nodeenv', '--prebuilt',
49 '{{prefix}}{}'.format(directory),
50 ]
51
52 if version != 'default':
53 cmd.extend(['-n', version])
54
55 repo_cmd_runner.run(cmd)
56
57 with in_env(repo_cmd_runner, version):
58 helpers.run_setup_cmd(
59 repo_cmd_runner,
60 ('npm', 'install', '-g', '.') + additional_dependencies,
61 )
62
63
64 def run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover
65 with in_env(repo_cmd_runner, hook['language_version']):
66 return xargs(helpers.to_cmd(hook), file_args)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -17,10 +17,11 @@
def get_env_patch(venv): # pragma: windows no cover
+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv
return (
('NODE_VIRTUAL_ENV', venv),
- ('NPM_CONFIG_PREFIX', venv),
- ('npm_config_prefix', venv),
+ ('NPM_CONFIG_PREFIX', config),
+ ('npm_config_prefix', config),
('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),
('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),
)
|
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -17,10 +17,11 @@\n \n \n def get_env_patch(venv): # pragma: windows no cover\n+ config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n- ('NPM_CONFIG_PREFIX', venv),\n- ('npm_config_prefix', venv),\n+ ('NPM_CONFIG_PREFIX', config),\n+ ('npm_config_prefix', config),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n", "issue": "nodeenv try to download non existing tar.gz prebuilt under Cygwin\nHi,\r\n\r\nStrange issue: I suspect a recent change broke this as it used to work last week, on another Windows computer with Cygwin.\r\n\r\nBug reproduction: `pre-commit run` using e.g. https://github.com/Lucas-C/pre-commit-hooks-html v1.1.0\r\n\r\n`pre-commit` execute the following command under the hood, a command that also fails if I execute it manually:\r\n```\r\nnodeenv --prebuilt /cygdrive/c/Users/admin/.pre-commit/repoYHJ85q/node_env-default\r\n```\r\nThe error is the following:\r\n```\r\nurllib2.HTTPError: HTTP Error 404: Not Found\r\n```\r\nThe `tar.gz` it tries to install is https://nodejs.org/dist/v7.2.1/node-v7.2.1-cygwin_nt-6.1-x64.tar.gz, which does not exist. My guess is that `nodeenv` should use the Windows prebuilts instead: https://nodejs.org/dist/v7.2.1/node-v7.2.1-win-x64.zip This is because `platform.system()` is used: https://github.com/ekalinin/nodeenv/blob/master/nodeenv.py#L503\r\n\r\nI'm going to ask for help on the https://github.com/ekalinin/nodeenv project, but do you have any hint at what the root cause could be here ?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): # pragma: windows no cover\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', venv),\n ('npm_config_prefix', venv),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version): # pragma: windows no cover\n envdir = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n repo_cmd_runner, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert repo_cmd_runner.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = repo_cmd_runner.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [\n sys.executable, '-m', 'nodeenv', '--prebuilt',\n '{{prefix}}{}'.format(directory),\n ]\n\n if version != 'default':\n cmd.extend(['-n', version])\n\n repo_cmd_runner.run(cmd)\n\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n 
with in_env(repo_cmd_runner, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv): # pragma: windows no cover\n config = os.path.join(venv, 'bin') if sys.platform == 'cygwin' else venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', config),\n ('npm_config_prefix', config),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner, language_version): # pragma: windows no cover\n envdir = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n repo_cmd_runner, version, additional_dependencies,\n): # pragma: windows no cover\n additional_dependencies = tuple(additional_dependencies)\n assert repo_cmd_runner.exists('package.json')\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n env_dir = repo_cmd_runner.path(directory)\n with clean_path_on_failure(env_dir):\n cmd = [\n sys.executable, '-m', 'nodeenv', '--prebuilt',\n '{{prefix}}{}'.format(directory),\n ]\n\n if version != 'default':\n cmd.extend(['-n', version])\n\n repo_cmd_runner.run(cmd)\n\n with in_env(repo_cmd_runner, version):\n helpers.run_setup_cmd(\n repo_cmd_runner,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n with in_env(repo_cmd_runner, hook['language_version']):\n return xargs(helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}]}
| 1,163 | 191 |
gh_patches_debug_1913
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1946
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate falcon.api_helpers
See https://github.com/falconry/falcon/issues/1902.
Starting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/api_helpers.py`
Content:
```
1 from .app_helpers import * # NOQA
2
3 # TODO deprecate
4 # import warnings
5 # from .util.deprecation import DeprecatedWarning
6
7 # warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py
--- a/falcon/api_helpers.py
+++ b/falcon/api_helpers.py
@@ -1,7 +1,6 @@
-from .app_helpers import * # NOQA
+import warnings
-# TODO deprecate
-# import warnings
-# from .util.deprecation import DeprecatedWarning
+from .app_helpers import * # NOQA
+from .util.deprecation import DeprecatedWarning
-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)
|
{"golden_diff": "diff --git a/falcon/api_helpers.py b/falcon/api_helpers.py\n--- a/falcon/api_helpers.py\n+++ b/falcon/api_helpers.py\n@@ -1,7 +1,6 @@\n-from .app_helpers import * # NOQA\n+import warnings\n \n-# TODO deprecate\n-# import warnings\n-# from .util.deprecation import DeprecatedWarning\n+from .app_helpers import * # NOQA\n+from .util.deprecation import DeprecatedWarning\n \n-# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n+warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "issue": "Deprecate falcon.api_helpers\nSee https://github.com/falconry/falcon/issues/1902.\r\n\r\nStarting with 3.1, mark `falcon.api_helpers` as deprecated. We could employ module-level `__getattr__` or redecorate re-imported functions.\n", "before_files": [{"content": "from .app_helpers import * # NOQA\n\n# TODO deprecate\n# import warnings\n# from .util.deprecation import DeprecatedWarning\n\n# warnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "path": "falcon/api_helpers.py"}], "after_files": [{"content": "import warnings\n\nfrom .app_helpers import * # NOQA\nfrom .util.deprecation import DeprecatedWarning\n\nwarnings.warn('The api_helpers module was renamed to app_helpers.', DeprecatedWarning)\n", "path": "falcon/api_helpers.py"}]}
| 378 | 137 |
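A note on the falcon `api_helpers` record above: the issue suggests a module-level `__getattr__` (PEP 562) as one way to deprecate the module, but the golden diff goes with a single import-time warning instead. The sketch below shows what the `__getattr__` route could look like; `app_helpers` and `DeprecatedWarning` come straight from the record, while the lazy-lookup structure and the error message are assumptions rather than anything falcon is known to have shipped.

```python
# Hypothetical falcon/api_helpers.py built on PEP 562 module-level __getattr__.
# Sketch only; the accepted fix simply warns once at import time.
import warnings

from . import app_helpers as _app_helpers
from .util.deprecation import DeprecatedWarning


def __getattr__(name):
    # Warn lazily, only when a deprecated attribute is actually looked up.
    if hasattr(_app_helpers, name):
        warnings.warn(
            'The api_helpers module was renamed to app_helpers.',
            DeprecatedWarning,
            stacklevel=2,
        )
        return getattr(_app_helpers, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
```

One caveat: star imports do not consult `__getattr__` unless `__all__` is also defined, so `from falcon.api_helpers import *` would need extra handling under this sketch.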
gh_patches_debug_29346
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-13789
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with AWX 7.0.0
Hello.
I'm testing integration between **AWX 7.0.0** (Ansible Tower) by sending notifications in **Zulip 2.0.4**.
During testing, I encounter an error from Ansible :

And I immediately receive an email warning from Zulip with the following content:
```Logger root, from module zerver.middleware line 291:
Error generated by Ansible (user42@zulip.******.**) on zulip.******.** deployment
Traceback (most recent call last):
File "/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/core/handlers/base.py", line 185, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/views/decorators/csrf.py", line 58, in wrapped_view
return view_func(*args, **kwargs)
File "./zerver/lib/request.py", line 289, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/decorator.py", line 375, in _wrapped_func_arguments
raise err
File "./zerver/decorator.py", line 361, in _wrapped_func_arguments
return view_func(request, user_profile, *args, **kwargs)
File "./zerver/lib/request.py", line 289, in _wrapped_view_func
return view_func(request, *args, **kwargs)
File "./zerver/webhooks/ansibletower/view.py", line 27, in api_ansibletower_webhook
body = get_body(payload)
File "./zerver/webhooks/ansibletower/view.py", line 34, in get_body
if (payload['friendly_name'] == 'Job'):
KeyError: 'friendly_name'
Deployed code:
- git: 2.0.0-2546-ga1fa0b011
- ZULIP_VERSION: 2.0.4+git
Request info:
- path: /api/v1/external/ansibletower
- POST: {}
- REMOTE_ADDR: "['10.10.36.6']"
- QUERY_STRING: "['api_key=******&topic=******&stream=******&topic=******"
- SERVER_NAME: "['']"
```
I have already disable the "Disable SSL checking" but it seems also that the new version of AWX (the 7.0.0) contains new options for webhook like "HTTP Headers" and "HTTP Method".

Note that I have already notifications from GitLab so the notification service works in my self-hosted Zulip configuration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/webhooks/ansibletower/view.py`
Content:
```
1 import operator
2 from typing import Any, Dict, List
3
4 from django.http import HttpRequest, HttpResponse
5
6 from zerver.decorator import REQ, api_key_only_webhook_view, \
7 has_request_variables
8 from zerver.lib.response import json_success
9 from zerver.lib.webhooks.common import check_send_webhook_message
10 from zerver.models import UserProfile
11
12 ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = "{friendly_name}: [#{id} {name}]({url}) {status}."
13
14
15 ANSIBLETOWER_JOB_MESSAGE_TEMPLATE = """
16 {friendly_name}: [#{id} {name}]({url}) {status}:
17 {hosts_final_data}
18 """.strip()
19
20 ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\n'
21
22 @api_key_only_webhook_view('Ansibletower')
23 @has_request_variables
24 def api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,
25 payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
26
27 body = get_body(payload)
28 subject = payload['name']
29
30 check_send_webhook_message(request, user_profile, subject, body)
31 return json_success()
32
33 def get_body(payload: Dict[str, Any]) -> str:
34 if (payload['friendly_name'] == 'Job'):
35 hosts_list_data = payload['hosts']
36 hosts_data = []
37 for host in payload['hosts']:
38 if (hosts_list_data[host].get('failed') is True):
39 hoststatus = 'Failed'
40 elif (hosts_list_data[host].get('failed') is False):
41 hoststatus = 'Success'
42 hosts_data.append({
43 'hostname': host,
44 'status': hoststatus
45 })
46
47 if (payload['status'] == "successful"):
48 status = 'was successful'
49 else:
50 status = 'failed'
51
52 return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(
53 name=payload['name'],
54 friendly_name=payload['friendly_name'],
55 id=payload['id'],
56 url=payload['url'],
57 status=status,
58 hosts_final_data=get_hosts_content(hosts_data)
59 )
60
61 else:
62
63 if (payload['status'] == "successful"):
64 status = 'was successful'
65 else:
66 status = 'failed'
67
68 data = {
69 "name": payload['name'],
70 "friendly_name": payload['friendly_name'],
71 "id": payload['id'],
72 "url": payload['url'],
73 "status": status
74 }
75
76 return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)
77
78 def get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:
79 hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))
80 hosts_content = ''
81 for host in hosts_data:
82 hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(
83 hostname=host.get('hostname'),
84 status=host.get('status')
85 )
86 return hosts_content
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/webhooks/ansibletower/view.py b/zerver/webhooks/ansibletower/view.py
--- a/zerver/webhooks/ansibletower/view.py
+++ b/zerver/webhooks/ansibletower/view.py
@@ -30,8 +30,19 @@
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
+def extract_friendly_name(payload: Dict[str, Any]) -> str:
+ tentative_job_name = payload.get("friendly_name", "")
+ if not tentative_job_name:
+ url = payload["url"]
+ segments = url.split("/")
+ tentative_job_name = segments[-3]
+ if tentative_job_name == "jobs":
+ tentative_job_name = "Job"
+ return tentative_job_name
+
def get_body(payload: Dict[str, Any]) -> str:
- if (payload['friendly_name'] == 'Job'):
+ friendly_name = extract_friendly_name(payload)
+ if (friendly_name == 'Job'):
hosts_list_data = payload['hosts']
hosts_data = []
for host in payload['hosts']:
@@ -51,7 +62,7 @@
return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(
name=payload['name'],
- friendly_name=payload['friendly_name'],
+ friendly_name=friendly_name,
id=payload['id'],
url=payload['url'],
status=status,
@@ -67,7 +78,7 @@
data = {
"name": payload['name'],
- "friendly_name": payload['friendly_name'],
+ "friendly_name": friendly_name,
"id": payload['id'],
"url": payload['url'],
"status": status
|
{"golden_diff": "diff --git a/zerver/webhooks/ansibletower/view.py b/zerver/webhooks/ansibletower/view.py\n--- a/zerver/webhooks/ansibletower/view.py\n+++ b/zerver/webhooks/ansibletower/view.py\n@@ -30,8 +30,19 @@\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n \n+def extract_friendly_name(payload: Dict[str, Any]) -> str:\n+ tentative_job_name = payload.get(\"friendly_name\", \"\")\n+ if not tentative_job_name:\n+ url = payload[\"url\"]\n+ segments = url.split(\"/\")\n+ tentative_job_name = segments[-3]\n+ if tentative_job_name == \"jobs\":\n+ tentative_job_name = \"Job\"\n+ return tentative_job_name\n+\n def get_body(payload: Dict[str, Any]) -> str:\n- if (payload['friendly_name'] == 'Job'):\n+ friendly_name = extract_friendly_name(payload)\n+ if (friendly_name == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n@@ -51,7 +62,7 @@\n \n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n- friendly_name=payload['friendly_name'],\n+ friendly_name=friendly_name,\n id=payload['id'],\n url=payload['url'],\n status=status,\n@@ -67,7 +78,7 @@\n \n data = {\n \"name\": payload['name'],\n- \"friendly_name\": payload['friendly_name'],\n+ \"friendly_name\": friendly_name,\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n", "issue": "Error with AWX 7.0.0\nHello.\r\nI'm testing integration between **AWX 7.0.0** (Ansible Tower) by sending notifications in **Zulip 2.0.4**.\r\nDuring testing, I encounter an error from Ansible :\r\n\r\n\r\n\r\nAnd I immediatly receive an email warning from Zulip with the following content :\r\n\r\n```Logger root, from module zerver.middleware line 291:\r\nError generated by Ansible (user42@zulip.******.**) on zulip.******.** deployment\r\n\r\nTraceback (most recent call last):\r\n File \"/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/core/handlers/base.py\", line 185, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/srv/zulip-venv-cache/ebe617662f96425113e5a75344bbe5a0593f634a/zulip-py3-venv/lib/python3.7/site-packages/django/views/decorators/csrf.py\", line 58, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"./zerver/lib/request.py\", line 289, in _wrapped_view_func\r\n return view_func(request, *args, **kwargs)\r\n File \"./zerver/decorator.py\", line 375, in _wrapped_func_arguments\r\n raise err\r\n File \"./zerver/decorator.py\", line 361, in _wrapped_func_arguments\r\n return view_func(request, user_profile, *args, **kwargs)\r\n File \"./zerver/lib/request.py\", line 289, in _wrapped_view_func\r\n return view_func(request, *args, **kwargs)\r\n File \"./zerver/webhooks/ansibletower/view.py\", line 27, in api_ansibletower_webhook\r\n body = get_body(payload)\r\n File \"./zerver/webhooks/ansibletower/view.py\", line 34, in get_body\r\n if (payload['friendly_name'] == 'Job'):\r\nKeyError: 'friendly_name'\r\n\r\n\r\nDeployed code:\r\n- git: 2.0.0-2546-ga1fa0b011\r\n- ZULIP_VERSION: 2.0.4+git\r\n\r\n\r\nRequest info:\r\n- path: /api/v1/external/ansibletower\r\n- POST: {}\r\n- REMOTE_ADDR: \"['10.10.36.6']\"\r\n- QUERY_STRING: \"['api_key=******&topic=******&stream=******&topic=******\"\r\n- SERVER_NAME: \"['']\"\r\n```\r\n\r\nI have already disable the \"Disable SSL checking\" but it seems also that the new version of AWX (the 7.0.0) contains new options for webhook like \"HTTP Headers\" and \"HTTP 
Method\".\r\n\r\n\r\n\r\nNote that I have already notifications from GitLab so the notification service works in my self-hosted Zulip configuration.\n", "before_files": [{"content": "import operator\nfrom typing import Any, Dict, List\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, api_key_only_webhook_view, \\\n has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = \"{friendly_name}: [#{id} {name}]({url}) {status}.\"\n\n\nANSIBLETOWER_JOB_MESSAGE_TEMPLATE = \"\"\"\n{friendly_name}: [#{id} {name}]({url}) {status}:\n{hosts_final_data}\n\"\"\".strip()\n\nANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\\n'\n\n@api_key_only_webhook_view('Ansibletower')\n@has_request_variables\ndef api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:\n\n body = get_body(payload)\n subject = payload['name']\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\ndef get_body(payload: Dict[str, Any]) -> str:\n if (payload['friendly_name'] == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n if (hosts_list_data[host].get('failed') is True):\n hoststatus = 'Failed'\n elif (hosts_list_data[host].get('failed') is False):\n hoststatus = 'Success'\n hosts_data.append({\n 'hostname': host,\n 'status': hoststatus\n })\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n friendly_name=payload['friendly_name'],\n id=payload['id'],\n url=payload['url'],\n status=status,\n hosts_final_data=get_hosts_content(hosts_data)\n )\n\n else:\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n data = {\n \"name\": payload['name'],\n \"friendly_name\": payload['friendly_name'],\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n }\n\n return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)\n\ndef get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:\n hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))\n hosts_content = ''\n for host in hosts_data:\n hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(\n hostname=host.get('hostname'),\n status=host.get('status')\n )\n return hosts_content\n", "path": "zerver/webhooks/ansibletower/view.py"}], "after_files": [{"content": "import operator\nfrom typing import Any, Dict, List\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, api_key_only_webhook_view, \\\n has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE = \"{friendly_name}: [#{id} {name}]({url}) {status}.\"\n\n\nANSIBLETOWER_JOB_MESSAGE_TEMPLATE = \"\"\"\n{friendly_name}: [#{id} {name}]({url}) {status}:\n{hosts_final_data}\n\"\"\".strip()\n\nANSIBLETOWER_JOB_HOST_ROW_TEMPLATE = '* {hostname}: {status}\\n'\n\n@api_key_only_webhook_view('Ansibletower')\n@has_request_variables\ndef api_ansibletower_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) -> 
HttpResponse:\n\n body = get_body(payload)\n subject = payload['name']\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\ndef extract_friendly_name(payload: Dict[str, Any]) -> str:\n tentative_job_name = payload.get(\"friendly_name\", \"\")\n if not tentative_job_name:\n url = payload[\"url\"]\n segments = url.split(\"/\")\n tentative_job_name = segments[-3]\n if tentative_job_name == \"jobs\":\n tentative_job_name = \"Job\"\n return tentative_job_name\n\ndef get_body(payload: Dict[str, Any]) -> str:\n friendly_name = extract_friendly_name(payload)\n if (friendly_name == 'Job'):\n hosts_list_data = payload['hosts']\n hosts_data = []\n for host in payload['hosts']:\n if (hosts_list_data[host].get('failed') is True):\n hoststatus = 'Failed'\n elif (hosts_list_data[host].get('failed') is False):\n hoststatus = 'Success'\n hosts_data.append({\n 'hostname': host,\n 'status': hoststatus\n })\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n return ANSIBLETOWER_JOB_MESSAGE_TEMPLATE.format(\n name=payload['name'],\n friendly_name=friendly_name,\n id=payload['id'],\n url=payload['url'],\n status=status,\n hosts_final_data=get_hosts_content(hosts_data)\n )\n\n else:\n\n if (payload['status'] == \"successful\"):\n status = 'was successful'\n else:\n status = 'failed'\n\n data = {\n \"name\": payload['name'],\n \"friendly_name\": friendly_name,\n \"id\": payload['id'],\n \"url\": payload['url'],\n \"status\": status\n }\n\n return ANSIBLETOWER_DEFAULT_MESSAGE_TEMPLATE.format(**data)\n\ndef get_hosts_content(hosts_data: List[Dict[str, Any]]) -> str:\n hosts_data = sorted(hosts_data, key=operator.itemgetter('hostname'))\n hosts_content = ''\n for host in hosts_data:\n hosts_content += ANSIBLETOWER_JOB_HOST_ROW_TEMPLATE.format(\n hostname=host.get('hostname'),\n status=host.get('status')\n )\n return hosts_content\n", "path": "zerver/webhooks/ansibletower/view.py"}]}
| 1,898 | 383 |
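For the AWX / Ansible Tower record above, the heart of the fix is that newer AWX payloads may omit `friendly_name`, so the patched helper falls back to the third-from-last segment of the job URL. The snippet below only exercises that indexing on a made-up payload; the URL is an assumption for illustration, not a captured AWX 7 payload.

```python
# Demonstrates the segments[-3] fallback used by extract_friendly_name() above.
# The URL is hypothetical; only the slicing behaviour is the point.
url = "https://awx.example.com/#/jobs/playbook/209"
segments = url.split("/")
# ['https:', '', 'awx.example.com', '#', 'jobs', 'playbook', '209']
print(segments[-3])  # 'jobs', which the patch then normalises to 'Job'
```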
gh_patches_debug_29441
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-3254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 error for kubernetes depoyment
**Describe the bug**
/if/flow/initial-setup/ endpoint in the browser gives a 404 not found error
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'https://goauthentik.io/docs/installation/kubernetes'
2. Create Values.yaml
3. run helm commands
4. all pods are up and running
5. go to browser http://<ing-url>/if/flow/initial-setup/
**Expected behavior**
Page should load letting me setup ak-admin password
**Screenshots**
404 not found
**Logs**
{"event": "/api/v3/outposts/instances/", "host": "localhost:8000", "level": "info", "logger": "authentik.asgi", "method": "GET", "pid": 24, "remote": "127.0.0.1", "request_id": "454efe5b57f34713bf837681449b91a6", "runtime": 35, "scheme": "http", "status": 403, "timestamp": "2022-07-11T10:39:00.436171", "user": "", "user_agent": "goauthentik.io/outpost/2022.7.2"}
{"event": "Forbidden: /api/v3/outposts/instances/", "level": "warning", "logger": "django.request", "timestamp": 1657535940.437195}
{"error":"403 Forbidden","event":"Failed to fetch outpost configuration, retrying in 3 seconds","level":"error","logger":"authentik.outpost.ak-api-controller","timestamp":"2022-07-11T10:39:00Z"}
**Version and Deployment (please complete the following information):**
- authentik version: authentik-2022.7.2
- Deployment: [kubectl 1.21, helm v3.1.0]
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lifecycle/migrate.py`
Content:
```
1 #!/usr/bin/env python
2 """System Migration handler"""
3 import os
4 from importlib.util import module_from_spec, spec_from_file_location
5 from inspect import getmembers, isclass
6 from pathlib import Path
7 from typing import Any
8
9 from psycopg2 import connect
10 from structlog.stdlib import get_logger
11
12 from authentik.lib.config import CONFIG
13
14 LOGGER = get_logger()
15 ADV_LOCK_UID = 1000
16 LOCKED = False
17
18
19 class BaseMigration:
20 """Base System Migration"""
21
22 cur: Any
23 con: Any
24
25 def __init__(self, cur: Any, con: Any):
26 self.cur = cur
27 self.con = con
28
29 def needs_migration(self) -> bool:
30 """Return true if Migration needs to be run"""
31 return False
32
33 def run(self):
34 """Run the actual migration"""
35
36
37 def wait_for_lock():
38 """lock an advisory lock to prevent multiple instances from migrating at once"""
39 LOGGER.info("waiting to acquire database lock")
40 curr.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
41 # pylint: disable=global-statement
42 global LOCKED
43 LOCKED = True
44
45
46 def release_lock():
47 """Release database lock"""
48 if not LOCKED:
49 return
50 curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
51
52
53 if __name__ == "__main__":
54
55 conn = connect(
56 dbname=CONFIG.y("postgresql.name"),
57 user=CONFIG.y("postgresql.user"),
58 password=CONFIG.y("postgresql.password"),
59 host=CONFIG.y("postgresql.host"),
60 port=int(CONFIG.y("postgresql.port")),
61 )
62 curr = conn.cursor()
63 try:
64 for migration in Path(__file__).parent.absolute().glob("system_migrations/*.py"):
65 spec = spec_from_file_location("lifecycle.system_migrations", migration)
66 mod = module_from_spec(spec)
67 # pyright: reportGeneralTypeIssues=false
68 spec.loader.exec_module(mod)
69
70 for name, sub in getmembers(mod, isclass):
71 if name != "Migration":
72 continue
73 migration = sub(curr, conn)
74 if migration.needs_migration():
75 wait_for_lock()
76 LOGGER.info("Migration needs to be applied", migration=sub)
77 migration.run()
78 LOGGER.info("Migration finished applying", migration=sub)
79 release_lock()
80 LOGGER.info("applying django migrations")
81 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
82 wait_for_lock()
83 try:
84 from django.core.management import execute_from_command_line
85 except ImportError as exc:
86 raise ImportError(
87 "Couldn't import Django. Are you sure it's installed and "
88 "available on your PYTHONPATH environment variable? Did you "
89 "forget to activate a virtual environment?"
90 ) from exc
91 execute_from_command_line(["", "migrate"])
92 finally:
93 release_lock()
94
```
Path: `authentik/managed/tasks.py`
Content:
```
1 """managed tasks"""
2 from django.db import DatabaseError
3
4 from authentik.core.tasks import CELERY_APP
5 from authentik.events.monitored_tasks import (
6 MonitoredTask,
7 TaskResult,
8 TaskResultStatus,
9 prefill_task,
10 )
11 from authentik.managed.manager import ObjectManager
12
13
14 @CELERY_APP.task(bind=True, base=MonitoredTask)
15 @prefill_task
16 def managed_reconcile(self: MonitoredTask):
17 """Run ObjectManager to ensure objects are up-to-date"""
18 try:
19 ObjectManager().run()
20 self.set_status(
21 TaskResult(TaskResultStatus.SUCCESSFUL, ["Successfully updated managed models."])
22 )
23 except DatabaseError as exc: # pragma: no cover
24 self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/managed/tasks.py b/authentik/managed/tasks.py
--- a/authentik/managed/tasks.py
+++ b/authentik/managed/tasks.py
@@ -11,7 +11,11 @@
from authentik.managed.manager import ObjectManager
-@CELERY_APP.task(bind=True, base=MonitoredTask)
+@CELERY_APP.task(
+ bind=True,
+ base=MonitoredTask,
+ retry_backoff=True,
+)
@prefill_task
def managed_reconcile(self: MonitoredTask):
"""Run ObjectManager to ensure objects are up-to-date"""
@@ -22,3 +26,4 @@
)
except DatabaseError as exc: # pragma: no cover
self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))
+ self.retry()
diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py
--- a/lifecycle/migrate.py
+++ b/lifecycle/migrate.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
"""System Migration handler"""
import os
+import sys
from importlib.util import module_from_spec, spec_from_file_location
from inspect import getmembers, isclass
from pathlib import Path
@@ -50,7 +51,16 @@
curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
+def is_locked():
+ """Check if lock is currently active (used by worker to wait for migrations)"""
+ curr.executor("SELECT count(*) FROM pg_locks WHERE objid = %s", (ADV_LOCK_UID,))
+ return curr.rowcount
+
+
if __name__ == "__main__":
+ if len(sys.argv) > 1:
+ if sys.argv[1] == "check_lock":
+ sys.exit(is_locked())
conn = connect(
dbname=CONFIG.y("postgresql.name"),
|
{"golden_diff": "diff --git a/authentik/managed/tasks.py b/authentik/managed/tasks.py\n--- a/authentik/managed/tasks.py\n+++ b/authentik/managed/tasks.py\n@@ -11,7 +11,11 @@\n from authentik.managed.manager import ObjectManager\n \n \n-@CELERY_APP.task(bind=True, base=MonitoredTask)\n+@CELERY_APP.task(\n+ bind=True,\n+ base=MonitoredTask,\n+ retry_backoff=True,\n+)\n @prefill_task\n def managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n@@ -22,3 +26,4 @@\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n+ self.retry()\ndiff --git a/lifecycle/migrate.py b/lifecycle/migrate.py\n--- a/lifecycle/migrate.py\n+++ b/lifecycle/migrate.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python\n \"\"\"System Migration handler\"\"\"\n import os\n+import sys\n from importlib.util import module_from_spec, spec_from_file_location\n from inspect import getmembers, isclass\n from pathlib import Path\n@@ -50,7 +51,16 @@\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n \n \n+def is_locked():\n+ \"\"\"Check if lock is currently active (used by worker to wait for migrations)\"\"\"\n+ curr.executor(\"SELECT count(*) FROM pg_locks WHERE objid = %s\", (ADV_LOCK_UID,))\n+ return curr.rowcount\n+\n+\n if __name__ == \"__main__\":\n+ if len(sys.argv) > 1:\n+ if sys.argv[1] == \"check_lock\":\n+ sys.exit(is_locked())\n \n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n", "issue": "404 error for kubernetes depoyment\n**Describe the bug**\r\n/if/flow/initial-setup/ endpoint in the browser gives a 404 not found error\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'https://goauthentik.io/docs/installation/kubernetes'\r\n2. Create Values.yaml\r\n3. run helm commands\r\n4. all pods are up and running\r\n5. 
go to browser http://<ing-url>/if/flow/initial-setup/\r\n\r\n**Expected behavior**\r\nPage should load letting me setup ak-admin password\r\n\r\n**Screenshots**\r\n404 not found\r\n\r\n**Logs**\r\n{\"event\": \"/api/v3/outposts/instances/\", \"host\": \"localhost:8000\", \"level\": \"info\", \"logger\": \"authentik.asgi\", \"method\": \"GET\", \"pid\": 24, \"remote\": \"127.0.0.1\", \"request_id\": \"454efe5b57f34713bf837681449b91a6\", \"runtime\": 35, \"scheme\": \"http\", \"status\": 403, \"timestamp\": \"2022-07-11T10:39:00.436171\", \"user\": \"\", \"user_agent\": \"goauthentik.io/outpost/2022.7.2\"}\r\n{\"event\": \"Forbidden: /api/v3/outposts/instances/\", \"level\": \"warning\", \"logger\": \"django.request\", \"timestamp\": 1657535940.437195}\r\n{\"error\":\"403 Forbidden\",\"event\":\"Failed to fetch outpost configuration, retrying in 3 seconds\",\"level\":\"error\",\"logger\":\"authentik.outpost.ak-api-controller\",\"timestamp\":\"2022-07-11T10:39:00Z\"}\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: authentik-2022.7.2 \r\n - Deployment: [kubectl 1.21, helm v3.1.0]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nimport os\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg2 import connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Any\n con: Any\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock():\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n curr.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n # pylint: disable=global-statement\n global LOCKED\n LOCKED = True\n\n\ndef release_lock():\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\nif __name__ == \"__main__\":\n\n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n user=CONFIG.y(\"postgresql.user\"),\n password=CONFIG.y(\"postgresql.password\"),\n host=CONFIG.y(\"postgresql.host\"),\n port=int(CONFIG.y(\"postgresql.port\")),\n )\n curr = conn.cursor()\n try:\n for migration in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration)\n mod = module_from_spec(spec)\n # pyright: reportGeneralTypeIssues=false\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != \"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock()\n LOGGER.info(\"Migration needs to be applied\", migration=sub)\n migration.run()\n LOGGER.info(\"Migration finished applying\", migration=sub)\n release_lock()\n LOGGER.info(\"applying django migrations\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock()\n try:\n from django.core.management import 
execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate\"])\n finally:\n release_lock()\n", "path": "lifecycle/migrate.py"}, {"content": "\"\"\"managed tasks\"\"\"\nfrom django.db import DatabaseError\n\nfrom authentik.core.tasks import CELERY_APP\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.managed.manager import ObjectManager\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n try:\n ObjectManager().run()\n self.set_status(\n TaskResult(TaskResultStatus.SUCCESSFUL, [\"Successfully updated managed models.\"])\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n", "path": "authentik/managed/tasks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nimport os\nimport sys\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg2 import connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Any\n con: Any\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock():\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n curr.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n # pylint: disable=global-statement\n global LOCKED\n LOCKED = True\n\n\ndef release_lock():\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n curr.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\ndef is_locked():\n \"\"\"Check if lock is currently active (used by worker to wait for migrations)\"\"\"\n curr.executor(\"SELECT count(*) FROM pg_locks WHERE objid = %s\", (ADV_LOCK_UID,))\n return curr.rowcount\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n if sys.argv[1] == \"check_lock\":\n sys.exit(is_locked())\n\n conn = connect(\n dbname=CONFIG.y(\"postgresql.name\"),\n user=CONFIG.y(\"postgresql.user\"),\n password=CONFIG.y(\"postgresql.password\"),\n host=CONFIG.y(\"postgresql.host\"),\n port=int(CONFIG.y(\"postgresql.port\")),\n )\n curr = conn.cursor()\n try:\n for migration in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration)\n mod = module_from_spec(spec)\n # pyright: reportGeneralTypeIssues=false\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != \"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock()\n LOGGER.info(\"Migration needs to be applied\", migration=sub)\n migration.run()\n LOGGER.info(\"Migration finished applying\", 
migration=sub)\n release_lock()\n LOGGER.info(\"applying django migrations\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock()\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate\"])\n finally:\n release_lock()\n", "path": "lifecycle/migrate.py"}, {"content": "\"\"\"managed tasks\"\"\"\nfrom django.db import DatabaseError\n\nfrom authentik.core.tasks import CELERY_APP\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.managed.manager import ObjectManager\n\n\n@CELERY_APP.task(\n bind=True,\n base=MonitoredTask,\n retry_backoff=True,\n)\n@prefill_task\ndef managed_reconcile(self: MonitoredTask):\n \"\"\"Run ObjectManager to ensure objects are up-to-date\"\"\"\n try:\n ObjectManager().run()\n self.set_status(\n TaskResult(TaskResultStatus.SUCCESSFUL, [\"Successfully updated managed models.\"])\n )\n except DatabaseError as exc: # pragma: no cover\n self.set_status(TaskResult(TaskResultStatus.WARNING, [str(exc)]))\n self.retry()\n", "path": "authentik/managed/tasks.py"}]}
| 1,773 | 418 |
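Two asides on the authentik record above. First, the `is_locked()` helper in the golden diff calls `curr.executor(...)` and returns `curr.rowcount`; psycopg2 cursors expose `execute()`, not `executor`, and `rowcount` after a `SELECT count(*)` is the number of returned rows rather than the count itself, so the reference patch as recorded may not behave as intended (it is left untouched here, since it is the dataset's answer). Second, a standalone sketch of the underlying idea, probing whether the migration advisory lock is held, is below; the `locktype` filter and the helper name are assumptions, while `ADV_LOCK_UID` comes from the record.

```python
# Minimal sketch: ask PostgreSQL whether the migration advisory lock is held.
# Assumes an open psycopg2 connection; this is not the code from the golden diff.
ADV_LOCK_UID = 1000  # same key the migrate script passes to pg_advisory_lock()


def migration_lock_held(conn) -> bool:
    with conn.cursor() as cur:
        cur.execute(
            "SELECT count(*) FROM pg_locks"
            " WHERE locktype = 'advisory' AND objid = %s",
            (ADV_LOCK_UID,),
        )
        (count,) = cur.fetchone()
    return count > 0


# Possible use in a worker's wait loop (connection setup omitted):
#   sys.exit(1 if migration_lock_held(conn) else 0)
```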
gh_patches_debug_22932
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2568
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type annontations
please add type annotations here
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from logging import getLogger
17 from threading import Lock
18 from typing import TYPE_CHECKING, Iterable
19
20 from opentelemetry.sdk._metrics.aggregation import (
21 _convert_aggregation_temporality,
22 )
23 from opentelemetry.sdk._metrics.measurement import Measurement
24 from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
25 from opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration
26 from opentelemetry.sdk._metrics.view import View
27
28 if TYPE_CHECKING:
29 from opentelemetry.sdk._metrics.instrument import _Instrument
30
31 _logger = getLogger(__name__)
32
33
34 class _ViewInstrumentMatch:
35 def __init__(
36 self,
37 view: View,
38 instrument: "_Instrument",
39 sdk_config: SdkConfiguration,
40 ):
41 self._view = view
42 self._instrument = instrument
43 self._sdk_config = sdk_config
44 self._attributes_aggregation = {}
45 self._attributes_previous_point = {}
46 self._lock = Lock()
47
48 # pylint: disable=protected-access
49 def consume_measurement(self, measurement: Measurement) -> None:
50
51 if self._view._attribute_keys is not None:
52
53 attributes = {}
54
55 for key, value in (measurement.attributes or {}).items():
56 if key in self._view._attribute_keys:
57 attributes[key] = value
58 elif measurement.attributes is not None:
59 attributes = measurement.attributes
60 else:
61 attributes = {}
62
63 attributes = frozenset(attributes.items())
64
65 if attributes not in self._attributes_aggregation:
66 with self._lock:
67 if attributes not in self._attributes_aggregation:
68 if self._view._aggregation:
69 aggregation = (
70 self._view._aggregation._create_aggregation(
71 self._instrument
72 )
73 )
74 else:
75 aggregation = self._instrument._default_aggregation
76 self._attributes_aggregation[attributes] = aggregation
77
78 self._attributes_aggregation[attributes].aggregate(measurement)
79
80 def collect(self, temporality: int) -> Iterable[Metric]:
81
82 with self._lock:
83 for (
84 attributes,
85 aggregation,
86 ) in self._attributes_aggregation.items():
87
88 previous_point = self._attributes_previous_point.get(
89 attributes
90 )
91
92 current_point = aggregation.collect()
93
94 # pylint: disable=assignment-from-none
95 self._attributes_previous_point[
96 attributes
97 ] = _convert_aggregation_temporality(
98 previous_point,
99 current_point,
100 AggregationTemporality.CUMULATIVE,
101 )
102
103 if current_point is not None:
104
105 yield Metric(
106 attributes=dict(attributes),
107 description=(
108 self._view._description
109 or self._instrument.description
110 ),
111 instrumentation_info=self._instrument.instrumentation_info,
112 name=self._view._name or self._instrument.name,
113 resource=self._sdk_config.resource,
114 unit=self._instrument.unit,
115 point=_convert_aggregation_temporality(
116 previous_point,
117 current_point,
118 temporality,
119 ),
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py
@@ -15,10 +15,12 @@
from logging import getLogger
from threading import Lock
-from typing import TYPE_CHECKING, Iterable
+from typing import TYPE_CHECKING, Dict, Iterable
from opentelemetry.sdk._metrics.aggregation import (
+ _Aggregation,
_convert_aggregation_temporality,
+ _PointVarT,
)
from opentelemetry.sdk._metrics.measurement import Measurement
from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
@@ -41,8 +43,8 @@
self._view = view
self._instrument = instrument
self._sdk_config = sdk_config
- self._attributes_aggregation = {}
- self._attributes_previous_point = {}
+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}
self._lock = Lock()
# pylint: disable=protected-access
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py\n@@ -15,10 +15,12 @@\n \n from logging import getLogger\n from threading import Lock\n-from typing import TYPE_CHECKING, Iterable\n+from typing import TYPE_CHECKING, Dict, Iterable\n \n from opentelemetry.sdk._metrics.aggregation import (\n+ _Aggregation,\n _convert_aggregation_temporality,\n+ _PointVarT,\n )\n from opentelemetry.sdk._metrics.measurement import Measurement\n from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\n@@ -41,8 +43,8 @@\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n- self._attributes_aggregation = {}\n- self._attributes_previous_point = {}\n+ self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n+ self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n \n # pylint: disable=protected-access\n", "issue": "Add type annontations\nplease add type annotations here\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python/pull/2400#discussion_r809406486_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _convert_aggregation_temporality,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation = {}\n self._attributes_previous_point = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if self._view._aggregation:\n aggregation = (\n 
self._view._aggregation._create_aggregation(\n self._instrument\n )\n )\n else:\n aggregation = self._instrument._default_aggregation\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import TYPE_CHECKING, Dict, Iterable\n\nfrom opentelemetry.sdk._metrics.aggregation import (\n _Aggregation,\n _convert_aggregation_temporality,\n _PointVarT,\n)\nfrom opentelemetry.sdk._metrics.measurement import Measurement\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.sdk._metrics.sdk_configuration import SdkConfiguration\nfrom opentelemetry.sdk._metrics.view import View\n\nif TYPE_CHECKING:\n from opentelemetry.sdk._metrics.instrument import _Instrument\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: \"_Instrument\",\n sdk_config: SdkConfiguration,\n ):\n self._view = view\n self._instrument = instrument\n self._sdk_config = sdk_config\n self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n self._attributes_previous_point: Dict[frozenset, _PointVarT] = {}\n self._lock = Lock()\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n attributes = frozenset(attributes.items())\n\n if attributes not in self._attributes_aggregation:\n with self._lock:\n if attributes not in self._attributes_aggregation:\n if self._view._aggregation:\n aggregation = (\n 
self._view._aggregation._create_aggregation(\n self._instrument\n )\n )\n else:\n aggregation = self._instrument._default_aggregation\n self._attributes_aggregation[attributes] = aggregation\n\n self._attributes_aggregation[attributes].aggregate(measurement)\n\n def collect(self, temporality: int) -> Iterable[Metric]:\n\n with self._lock:\n for (\n attributes,\n aggregation,\n ) in self._attributes_aggregation.items():\n\n previous_point = self._attributes_previous_point.get(\n attributes\n )\n\n current_point = aggregation.collect()\n\n # pylint: disable=assignment-from-none\n self._attributes_previous_point[\n attributes\n ] = _convert_aggregation_temporality(\n previous_point,\n current_point,\n AggregationTemporality.CUMULATIVE,\n )\n\n if current_point is not None:\n\n yield Metric(\n attributes=dict(attributes),\n description=(\n self._view._description\n or self._instrument.description\n ),\n instrumentation_info=self._instrument.instrumentation_info,\n name=self._view._name or self._instrument.name,\n resource=self._sdk_config.resource,\n unit=self._instrument.unit,\n point=_convert_aggregation_temporality(\n previous_point,\n current_point,\n temporality,\n ),\n )\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/_view_instrument_match.py"}]}
| 1,341 | 300 |
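On the OpenTelemetry record above: the accepted annotations use `Dict[frozenset, _Aggregation]` and `Dict[frozenset, _PointVarT]`. Since the keys are always built with `frozenset(attributes.items())`, a tighter key type is possible; the alias below is only an illustration of that option and is not something the SDK is known to have adopted.

```python
# Optional, stricter key alias for the aggregation maps (illustrative only).
from typing import Any, FrozenSet, Tuple

AttributeKey = FrozenSet[Tuple[str, Any]]  # shape of frozenset(attributes.items())

# e.g. inside _ViewInstrumentMatch.__init__ it could read:
#   self._attributes_aggregation: Dict[AttributeKey, _Aggregation] = {}
#   self._attributes_previous_point: Dict[AttributeKey, _PointVarT] = {}
```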
gh_patches_debug_3773
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-860
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3.8 to CI
# Description
The branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504) this won't be able to happen until around December 2019.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=3.2.0', # for utils
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -87,6 +87,7 @@
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
],
package_dir={'': 'src'},
packages=find_packages(where='src'),
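For illustration only, a minimal standalone sketch (the check itself, its wording, and running it from the repository root are assumptions, not part of the patch) of how a CI step could warn when the interpreter running the job is missing from the declared trove classifiers:
```python
# Hypothetical helper: warn if the interpreter running CI is not listed
# among the "Programming Language :: Python :: X.Y" classifiers in setup.py.
import re
import sys

with open("setup.py", encoding="utf-8") as handle:
    source = handle.read()

declared = set(re.findall(r"Programming Language :: Python :: (\d\.\d+)", source))
running = f"{sys.version_info.major}.{sys.version_info.minor}"
if running not in declared:
    print(f"warning: Python {running} is not declared in the setup.py classifiers")
```
Run under Python 3.8 against the original file above, this sketch would have flagged the missing classifier that the patch adds.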
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,6 +87,7 @@\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n", "issue": "Add Python 3.8 to CI\n# Description\r\n\r\nThe branch [`ci/add-Python-3.8-to-CI`](https://github.com/diana-hep/pyhf/compare/ci/add-Python-3.8-to-CI) adds Python 3.8 to the CI. However, as [PyTorch won't have a Python 3.8 wheel until the next release](https://github.com/pytorch/pytorch/issues/21741#issuecomment-541242504) this won't be able to happen until around December 2019.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: 
{'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,446 | 97 |
gh_patches_debug_34144
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-3017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doesn't catch invalid `rate(1 hours)`
*cfn-lint version: (`cfn-lint --version`)*
0.44.7
*Description of issue.*
cfn-lint doesn't recognize that this ScheduledExpression is invalid (should be `rate(1 hour)`)
```yaml
ExampleRule:
Type: AWS::Events::Rule
Properties:
Description: desc
Name: name
ScheduleExpression: rate(1 hours)
State: ENABLED
```
But when building the cloudformation, I get the following error:
```
Parameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)
```
I saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/events/RuleScheduleExpression.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule, RuleMatch
6
7
8 class RuleScheduleExpression(CloudFormationLintRule):
9 """Validate AWS Events Schedule expression format"""
10
11 id = "E3027"
12 shortdesc = "Validate AWS Event ScheduleExpression format"
13 description = "Validate the formation of the AWS::Event ScheduleExpression"
14 source_url = "https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html"
15 tags = ["resources", "events"]
16
17 def initialize(self, cfn):
18 """Initialize the rule"""
19 self.resource_property_types = ["AWS::Events::Rule"]
20
21 def check_rate(self, value, path):
22 """Check Rate configuration"""
23 matches = []
24 # Extract the expression from rate(XXX)
25 rate_expression = value[value.find("(") + 1 : value.find(")")]
26
27 if not rate_expression:
28 matches.append(
29 RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
30 )
31 else:
32 # Rate format: rate(Value Unit)
33 items = rate_expression.split(" ")
34
35 if len(items) != 2:
36 message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
37 matches.append(RuleMatch(path, message.format(len(items))))
38 else:
39 # Check the Value
40 if not items[0].isdigit():
41 message = "Rate Value ({}) should be of type Integer."
42 extra_args = {
43 "actual_type": type(items[0]).__name__,
44 "expected_type": int.__name__,
45 }
46 matches.append(
47 RuleMatch(path, message.format(items[0]), **extra_args)
48 )
49
50 return matches
51
52 def check_cron(self, value, path):
53 """Check Cron configuration"""
54 matches = []
55 # Extract the expression from cron(XXX)
56 cron_expression = value[value.find("(") + 1 : value.find(")")]
57
58 if not cron_expression:
59 matches.append(
60 RuleMatch(path, "Cron value of ScheduleExpression cannot be empty")
61 )
62 else:
63 # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)
64 items = cron_expression.split(" ")
65
66 if len(items) != 6:
67 message = "Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements"
68 matches.append(RuleMatch(path, message.format(len(items))))
69 return matches
70
71 _, _, day_of_month, _, day_of_week, _ = cron_expression.split(" ")
72 if day_of_month != "?" and day_of_week != "?":
73 matches.append(
74 RuleMatch(
75 path,
76 "Don't specify the Day-of-month and Day-of-week fields in the same cron expression",
77 )
78 )
79
80 return matches
81
82 def check_value(self, value, path):
83 """Count ScheduledExpression value"""
84 matches = []
85
86 # Value is either "cron()" or "rate()"
87 if value.startswith("rate(") and value.endswith(")"):
88 matches.extend(self.check_rate(value, path))
89 elif value.startswith("cron(") and value.endswith(")"):
90 matches.extend(self.check_cron(value, path))
91 else:
92 message = "Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()"
93 matches.append(RuleMatch(path, message.format(value)))
94
95 return matches
96
97 def match_resource_properties(self, properties, _, path, cfn):
98 """Check CloudFormation Properties"""
99 matches = []
100
101 matches.extend(
102 cfn.check_value(
103 obj=properties,
104 key="ScheduleExpression",
105 path=path[:],
106 check_value=self.check_value,
107 )
108 )
109
110 return matches
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py
@@ -25,29 +25,43 @@
rate_expression = value[value.find("(") + 1 : value.find(")")]
if not rate_expression:
- matches.append(
- RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")
- )
- else:
- # Rate format: rate(Value Unit)
- items = rate_expression.split(" ")
-
- if len(items) != 2:
- message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
- matches.append(RuleMatch(path, message.format(len(items))))
- else:
- # Check the Value
- if not items[0].isdigit():
- message = "Rate Value ({}) should be of type Integer."
- extra_args = {
- "actual_type": type(items[0]).__name__,
- "expected_type": int.__name__,
- }
- matches.append(
- RuleMatch(path, message.format(items[0]), **extra_args)
- )
+ return [RuleMatch(path, "Rate value of ScheduleExpression cannot be empty")]
+
+ # Rate format: rate(Value Unit)
+ items = rate_expression.split(" ")
+
+ if len(items) != 2:
+ message = "Rate expression must contain 2 elements (Value Unit), rate contains {} elements"
+ matches.append(RuleMatch(path, message.format(len(items))))
+ return [RuleMatch(path, message.format(len(items)))]
+
+ # Check the Value
+ if not items[0].isdigit():
+ message = "Rate Value ({}) should be of type Integer."
+ extra_args = {
+ "actual_type": type(items[0]).__name__,
+ "expected_type": int.__name__,
+ }
+ return [RuleMatch(path, message.format(items[0]), **extra_args)]
+
+ if float(items[0]) <= 0:
+ return [
+ RuleMatch(path, f"Rate Value {items[0]!r} should be greater than 0.")
+ ]
+
+ if float(items[0]) <= 1:
+ valid_periods = ["minute", "hour", "day"]
+ elif float(items[0]) > 1:
+ valid_periods = ["minutes", "hours", "days"]
+ # Check the Unit
+ if items[1] not in valid_periods:
+ return [
+ RuleMatch(
+ path, f"Rate Unit {items[1]!r} should be one of {valid_periods!r}."
+ )
+ ]
- return matches
+ return []
def check_cron(self, value, path):
"""Check Cron configuration"""
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n--- a/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n+++ b/src/cfnlint/rules/resources/events/RuleScheduleExpression.py\n@@ -25,29 +25,43 @@\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n \n if not rate_expression:\n- matches.append(\n- RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n- )\n- else:\n- # Rate format: rate(Value Unit)\n- items = rate_expression.split(\" \")\n-\n- if len(items) != 2:\n- message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n- matches.append(RuleMatch(path, message.format(len(items))))\n- else:\n- # Check the Value\n- if not items[0].isdigit():\n- message = \"Rate Value ({}) should be of type Integer.\"\n- extra_args = {\n- \"actual_type\": type(items[0]).__name__,\n- \"expected_type\": int.__name__,\n- }\n- matches.append(\n- RuleMatch(path, message.format(items[0]), **extra_args)\n- )\n+ return [RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")]\n+\n+ # Rate format: rate(Value Unit)\n+ items = rate_expression.split(\" \")\n+\n+ if len(items) != 2:\n+ message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n+ matches.append(RuleMatch(path, message.format(len(items))))\n+ return [RuleMatch(path, message.format(len(items)))]\n+\n+ # Check the Value\n+ if not items[0].isdigit():\n+ message = \"Rate Value ({}) should be of type Integer.\"\n+ extra_args = {\n+ \"actual_type\": type(items[0]).__name__,\n+ \"expected_type\": int.__name__,\n+ }\n+ return [RuleMatch(path, message.format(items[0]), **extra_args)]\n+\n+ if float(items[0]) <= 0:\n+ return [\n+ RuleMatch(path, f\"Rate Value {items[0]!r} should be greater than 0.\")\n+ ]\n+\n+ if float(items[0]) <= 1:\n+ valid_periods = [\"minute\", \"hour\", \"day\"]\n+ elif float(items[0]) > 1:\n+ valid_periods = [\"minutes\", \"hours\", \"days\"]\n+ # Check the Unit\n+ if items[1] not in valid_periods:\n+ return [\n+ RuleMatch(\n+ path, f\"Rate Unit {items[1]!r} should be one of {valid_periods!r}.\"\n+ )\n+ ]\n \n- return matches\n+ return []\n \n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n", "issue": "Doesn't catch invalid `rate(1 hours)`\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\n0.44.7\r\n\r\n*Description of issue.*\r\n\r\ncfn-lint doesn't recognize that this ScheduledExpression is invalid (should be `rate(1 hour)`)\r\n```yaml\r\n ExampleRule:\r\n Type: AWS::Events::Rule\r\n Properties:\r\n Description: desc\r\n Name: name\r\n ScheduleExpression: rate(1 hours)\r\n State: ENABLED\r\n```\r\n\r\nBut when building the cloudformation, I get the following error:\r\n\r\n```\r\nParameter ScheduleExpression is not valid. (Service: AmazonCloudWatchEvents; Status Code: 400; Error Code: ValidationException; Request ID: ...; Proxy: null)\r\n```\r\n\r\nI saw #816, but since this is a `rate` issue, not a `cron` issue, I thought I should open a new ticket\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n\n id = \"E3027\"\n shortdesc = \"Validate AWS Event ScheduleExpression format\"\n description = \"Validate the formation of the AWS::Event ScheduleExpression\"\n source_url = \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\"\n tags = [\"resources\", \"events\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = [\"AWS::Events::Rule\"]\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not rate_expression:\n matches.append(\n RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: rate(Value Unit)\n items = rate_expression.split(\" \")\n\n if len(items) != 2:\n message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the Value\n if not items[0].isdigit():\n message = \"Rate Value ({}) should be of type Integer.\"\n extra_args = {\n \"actual_type\": type(items[0]).__name__,\n \"expected_type\": int.__name__,\n }\n matches.append(\n RuleMatch(path, message.format(items[0]), **extra_args)\n )\n\n return matches\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not cron_expression:\n matches.append(\n RuleMatch(path, \"Cron value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(\" \")\n\n if len(items) != 6:\n message = \"Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return matches\n\n _, _, day_of_month, _, day_of_week, _ = cron_expression.split(\" \")\n if day_of_month != \"?\" and day_of_week != \"?\":\n matches.append(\n RuleMatch(\n path,\n \"Don't specify the Day-of-month and Day-of-week fields in the same cron expression\",\n )\n )\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith(\"rate(\") and value.endswith(\")\"):\n matches.extend(self.check_rate(value, path))\n elif value.startswith(\"cron(\") and value.endswith(\")\"):\n matches.extend(self.check_cron(value, path))\n else:\n message = \"Invalid ScheduledExpression specified ({}). Value has to be either cron() or rate()\"\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"ScheduleExpression\",\n path=path[:],\n check_value=self.check_value,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass RuleScheduleExpression(CloudFormationLintRule):\n \"\"\"Validate AWS Events Schedule expression format\"\"\"\n\n id = \"E3027\"\n shortdesc = \"Validate AWS Event ScheduleExpression format\"\n description = \"Validate the formation of the AWS::Event ScheduleExpression\"\n source_url = \"https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html\"\n tags = [\"resources\", \"events\"]\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n self.resource_property_types = [\"AWS::Events::Rule\"]\n\n def check_rate(self, value, path):\n \"\"\"Check Rate configuration\"\"\"\n matches = []\n # Extract the expression from rate(XXX)\n rate_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not rate_expression:\n return [RuleMatch(path, \"Rate value of ScheduleExpression cannot be empty\")]\n\n # Rate format: rate(Value Unit)\n items = rate_expression.split(\" \")\n\n if len(items) != 2:\n message = \"Rate expression must contain 2 elements (Value Unit), rate contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return [RuleMatch(path, message.format(len(items)))]\n\n # Check the Value\n if not items[0].isdigit():\n message = \"Rate Value ({}) should be of type Integer.\"\n extra_args = {\n \"actual_type\": type(items[0]).__name__,\n \"expected_type\": int.__name__,\n }\n return [RuleMatch(path, message.format(items[0]), **extra_args)]\n\n if float(items[0]) <= 0:\n return [\n RuleMatch(path, f\"Rate Value {items[0]!r} should be greater than 0.\")\n ]\n\n if float(items[0]) <= 1:\n valid_periods = [\"minute\", \"hour\", \"day\"]\n elif float(items[0]) > 1:\n valid_periods = [\"minutes\", \"hours\", \"days\"]\n # Check the Unit\n if items[1] not in valid_periods:\n return [\n RuleMatch(\n path, f\"Rate Unit {items[1]!r} should be one of {valid_periods!r}.\"\n )\n ]\n\n return []\n\n def check_cron(self, value, path):\n \"\"\"Check Cron configuration\"\"\"\n matches = []\n # Extract the expression from cron(XXX)\n cron_expression = value[value.find(\"(\") + 1 : value.find(\")\")]\n\n if not cron_expression:\n matches.append(\n RuleMatch(path, \"Cron value of ScheduleExpression cannot be empty\")\n )\n else:\n # Rate format: cron(Minutes Hours Day-of-month Month Day-of-week Year)\n items = cron_expression.split(\" \")\n\n if len(items) != 6:\n message = \"Cron expression must contain 6 elements (Minutes Hours Day-of-month Month Day-of-week Year), cron contains {} elements\"\n matches.append(RuleMatch(path, message.format(len(items))))\n return matches\n\n _, _, day_of_month, _, day_of_week, _ = cron_expression.split(\" \")\n if day_of_month != \"?\" and day_of_week != \"?\":\n matches.append(\n RuleMatch(\n path,\n \"Don't specify the Day-of-month and Day-of-week fields in the same cron expression\",\n )\n )\n\n return matches\n\n def check_value(self, value, path):\n \"\"\"Count ScheduledExpression value\"\"\"\n matches = []\n\n # Value is either \"cron()\" or \"rate()\"\n if value.startswith(\"rate(\") and value.endswith(\")\"):\n matches.extend(self.check_rate(value, path))\n elif value.startswith(\"cron(\") and value.endswith(\")\"):\n matches.extend(self.check_cron(value, path))\n else:\n message = \"Invalid ScheduledExpression specified ({}). 
Value has to be either cron() or rate()\"\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"ScheduleExpression\",\n path=path[:],\n check_value=self.check_value,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleScheduleExpression.py"}]}
| 1,505 | 654 |
gh_patches_debug_10525
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-easyblocks-1924
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py
In 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/generic/versionindependentpythonpackage.py`
Content:
```
1 ##
2 # Copyright 2013-2020 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.
27
28 Python installs libraries by defailt in site-packages/python-xxx/
29 But packages that are not dependend on the python version can be installed in a different prefix, e.g. lib
30 as long as we add this folder to the pythonpath.
31
32 @author: Kenneth Hoste, Jens Timmerman (Ghent University)
33 """
34 import os
35 import re
36
37 import easybuild.tools.environment as env
38 from easybuild.easyblocks.generic.pythonpackage import PythonPackage
39 from easybuild.tools.build_log import EasyBuildError
40 from easybuild.tools.run import run_cmd
41
42
43 class VersionIndependentPythonPackage(PythonPackage):
44 """Support for building/installing python packages without requiring a specific python package."""
45
46 def build_step(self):
47 """No build procedure."""
48 pass
49
50 def prepare_step(self, *args, **kwargs):
51 """Set pylibdir"""
52 self.pylibdir = 'lib'
53 super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
54
55 def install_step(self):
56 """Custom install procedure to skip selection of python package versions."""
57 full_pylibdir = os.path.join(self.installdir, self.pylibdir)
58
59 env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))
60
61 try:
62 os.mkdir(full_pylibdir)
63 except OSError as err:
64 # this will raise an error and not return
65 raise EasyBuildError("Failed to install: %s", err)
66
67 if self.use_setup_py:
68 extra_installopts = [
69 '--install-lib=%s' % full_pylibdir,
70 '--single-version-externally-managed',
71 '--record %s' % os.path.join(self.builddir, 'record'),
72 '--no-compile',
73 ]
74 self.cfg.update('installopts', ' '.join(extra_installopts))
75 else:
76 # using easy_install or pip always results in installation that is specific to Python version
77 eb_name = self.__class__.__name__
78 raise EasyBuildError("%s easyblock is not compatible with using easy_install or pip", eb_name)
79
80 cmd = self.compose_install_command(self.installdir)
81 run_cmd(cmd, log_all=True, simple=True, log_output=True)
82
83 # setuptools stubbornly replaces the shebang line in scripts with
84 # the full path to the Python interpreter used to install;
85 # we change it (back) to '#!/usr/bin/env python' here
86 shebang_re = re.compile("^#!/.*python")
87 bindir = os.path.join(self.installdir, 'bin')
88 if os.path.exists(bindir):
89 for script in os.listdir(bindir):
90 script = os.path.join(bindir, script)
91 if os.path.isfile(script):
92 try:
93 txt = open(script, 'r').read()
94 if shebang_re.search(txt):
95 new_shebang = "#!/usr/bin/env python"
96 self.log.debug("Patching shebang header line in %s to '%s'" % (script, new_shebang))
97 txt = shebang_re.sub(new_shebang, txt)
98 open(script, 'w').write(txt)
99 except IOError as err:
100 raise EasyBuildError("Failed to patch shebang header line in %s: %s", script, err)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py
+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py
@@ -47,10 +47,11 @@
"""No build procedure."""
pass
- def prepare_step(self, *args, **kwargs):
- """Set pylibdir"""
+ def set_pylibdirs(self):
+ """Set pylibdir."""
+ super(VersionIndependentPythonPackage, self).set_pylibdirs()
self.pylibdir = 'lib'
- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)
+ self.all_pylibdirs = ['lib']
def install_step(self):
"""Custom install procedure to skip selection of python package versions."""
|
{"golden_diff": "diff --git a/easybuild/easyblocks/generic/versionindependentpythonpackage.py b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n--- a/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n+++ b/easybuild/easyblocks/generic/versionindependentpythonpackage.py\n@@ -47,10 +47,11 @@\n \"\"\"No build procedure.\"\"\"\n pass\n \n- def prepare_step(self, *args, **kwargs):\n- \"\"\"Set pylibdir\"\"\"\n+ def set_pylibdirs(self):\n+ \"\"\"Set pylibdir.\"\"\"\n+ super(VersionIndependentPythonPackage, self).set_pylibdirs()\n self.pylibdir = 'lib'\n- super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n+ self.all_pylibdirs = ['lib']\n \n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n", "issue": "pylibdir of versionindependentpythonpackage.py is overwritten by pythonpackage.py\nIn 599869d `set_pylibdirs` of [pythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/pythonpackage.py) was changed to always set `self.pylibdir` (not only if its value is `UNKNOWN`). This seems to break the `prepare_step` of [versionindependentpythonpackage.py](https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/generic/versionindependentpythonpackage.py) because pylibdir is now overwritten. This forces the `install_step` to crash afterwards due to missing subdirs within the `os.mkdir(full_pylibdir)` command.\n", "before_files": [{"content": "##\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.\n\nPython installs libraries by defailt in site-packages/python-xxx/\nBut packages that are not dependend on the python version can be installed in a different prefix, e.g. 
lib\nas long as we add this folder to the pythonpath.\n\n@author: Kenneth Hoste, Jens Timmerman (Ghent University)\n\"\"\"\nimport os\nimport re\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.run import run_cmd\n\n\nclass VersionIndependentPythonPackage(PythonPackage):\n \"\"\"Support for building/installing python packages without requiring a specific python package.\"\"\"\n\n def build_step(self):\n \"\"\"No build procedure.\"\"\"\n pass\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Set pylibdir\"\"\"\n self.pylibdir = 'lib'\n super(VersionIndependentPythonPackage, self).prepare_step(*args, **kwargs)\n\n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n full_pylibdir = os.path.join(self.installdir, self.pylibdir)\n\n env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))\n\n try:\n os.mkdir(full_pylibdir)\n except OSError as err:\n # this will raise an error and not return\n raise EasyBuildError(\"Failed to install: %s\", err)\n\n if self.use_setup_py:\n extra_installopts = [\n '--install-lib=%s' % full_pylibdir,\n '--single-version-externally-managed',\n '--record %s' % os.path.join(self.builddir, 'record'),\n '--no-compile',\n ]\n self.cfg.update('installopts', ' '.join(extra_installopts))\n else:\n # using easy_install or pip always results in installation that is specific to Python version\n eb_name = self.__class__.__name__\n raise EasyBuildError(\"%s easyblock is not compatible with using easy_install or pip\", eb_name)\n\n cmd = self.compose_install_command(self.installdir)\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n # setuptools stubbornly replaces the shebang line in scripts with\n # the full path to the Python interpreter used to install;\n # we change it (back) to '#!/usr/bin/env python' here\n shebang_re = re.compile(\"^#!/.*python\")\n bindir = os.path.join(self.installdir, 'bin')\n if os.path.exists(bindir):\n for script in os.listdir(bindir):\n script = os.path.join(bindir, script)\n if os.path.isfile(script):\n try:\n txt = open(script, 'r').read()\n if shebang_re.search(txt):\n new_shebang = \"#!/usr/bin/env python\"\n self.log.debug(\"Patching shebang header line in %s to '%s'\" % (script, new_shebang))\n txt = shebang_re.sub(new_shebang, txt)\n open(script, 'w').write(txt)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch shebang header line in %s: %s\", script, err)\n", "path": "easybuild/easyblocks/generic/versionindependentpythonpackage.py"}], "after_files": [{"content": "##\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR 
A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing a Pythonpackage independend of a python version as an easyblock.\n\nPython installs libraries by defailt in site-packages/python-xxx/\nBut packages that are not dependend on the python version can be installed in a different prefix, e.g. lib\nas long as we add this folder to the pythonpath.\n\n@author: Kenneth Hoste, Jens Timmerman (Ghent University)\n\"\"\"\nimport os\nimport re\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.run import run_cmd\n\n\nclass VersionIndependentPythonPackage(PythonPackage):\n \"\"\"Support for building/installing python packages without requiring a specific python package.\"\"\"\n\n def build_step(self):\n \"\"\"No build procedure.\"\"\"\n pass\n\n def set_pylibdirs(self):\n \"\"\"Set pylibdir.\"\"\"\n super(VersionIndependentPythonPackage, self).set_pylibdirs()\n self.pylibdir = 'lib'\n self.all_pylibdirs = ['lib']\n\n def install_step(self):\n \"\"\"Custom install procedure to skip selection of python package versions.\"\"\"\n full_pylibdir = os.path.join(self.installdir, self.pylibdir)\n\n env.setvar('PYTHONPATH', '%s:%s' % (full_pylibdir, os.getenv('PYTHONPATH')))\n\n try:\n os.mkdir(full_pylibdir)\n except OSError as err:\n # this will raise an error and not return\n raise EasyBuildError(\"Failed to install: %s\", err)\n\n if self.use_setup_py:\n extra_installopts = [\n '--install-lib=%s' % full_pylibdir,\n '--single-version-externally-managed',\n '--record %s' % os.path.join(self.builddir, 'record'),\n '--no-compile',\n ]\n self.cfg.update('installopts', ' '.join(extra_installopts))\n else:\n # using easy_install or pip always results in installation that is specific to Python version\n eb_name = self.__class__.__name__\n raise EasyBuildError(\"%s easyblock is not compatible with using easy_install or pip\", eb_name)\n\n cmd = self.compose_install_command(self.installdir)\n run_cmd(cmd, log_all=True, simple=True, log_output=True)\n\n # setuptools stubbornly replaces the shebang line in scripts with\n # the full path to the Python interpreter used to install;\n # we change it (back) to '#!/usr/bin/env python' here\n shebang_re = re.compile(\"^#!/.*python\")\n bindir = os.path.join(self.installdir, 'bin')\n if os.path.exists(bindir):\n for script in os.listdir(bindir):\n script = os.path.join(bindir, script)\n if os.path.isfile(script):\n try:\n txt = open(script, 'r').read()\n if shebang_re.search(txt):\n new_shebang = \"#!/usr/bin/env python\"\n self.log.debug(\"Patching shebang header line in %s to '%s'\" % (script, new_shebang))\n txt = shebang_re.sub(new_shebang, txt)\n open(script, 'w').write(txt)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch shebang header line in %s: %s\", script, err)\n", "path": "easybuild/easyblocks/generic/versionindependentpythonpackage.py"}]}
| 1,631 | 204 |
gh_patches_debug_22476
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-714
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tokenize person lookup to work with "name last_name"
`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input – because it only looks up `personal` or `family` or `email`…
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/lookups.py`
Content:
```
1 from django.contrib.auth.models import Group
2 from django.db.models import Q
3
4 from selectable.base import ModelLookup
5 from selectable.registry import registry
6 from selectable.decorators import login_required
7
8 from workshops import models
9
10
11 @login_required
12 class EventLookup(ModelLookup):
13 model = models.Event
14 search_fields = ('slug__icontains', )
15
16
17 @login_required
18 class HostLookup(ModelLookup):
19 model = models.Host
20 search_fields = (
21 'domain__icontains',
22 'fullname__icontains'
23 )
24
25
26 @login_required
27 class PersonLookup(ModelLookup):
28 model = models.Person
29 search_fields = (
30 'personal__icontains',
31 'family__icontains',
32 'email__icontains',
33 'username__icontains'
34 )
35
36
37 @login_required
38 class AdminLookup(ModelLookup):
39 """The same as PersonLookup, but allows only to select administrators.
40
41 Administrator is anyone with superuser power or in "administrators" group.
42 """
43 model = models.Person
44 search_fields = (
45 'personal__icontains',
46 'family__icontains',
47 'email__icontains',
48 'username__icontains'
49 )
50
51 def get_query(self, request, term):
52 results = super().get_query(request, term)
53 admin_group = Group.objects.get(name='administrators')
54 results = results.filter(
55 Q(is_superuser=True) | Q(groups__in=[admin_group])
56 )
57 return results
58
59
60 @login_required
61 class AirportLookup(ModelLookup):
62 model = models.Airport
63 search_fields = (
64 'iata__icontains',
65 'fullname__icontains'
66 )
67
68
69 registry.register(EventLookup)
70 registry.register(HostLookup)
71 registry.register(PersonLookup)
72 registry.register(AdminLookup)
73 registry.register(AirportLookup)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/workshops/lookups.py b/workshops/lookups.py
--- a/workshops/lookups.py
+++ b/workshops/lookups.py
@@ -1,3 +1,7 @@
+from functools import reduce
+import operator
+import re
+
from django.contrib.auth.models import Group
from django.db.models import Q
@@ -33,6 +37,32 @@
'username__icontains'
)
+ def get_query(self, request, term):
+ """Override this method to allow for additional lookup method: """
+ # original code from selectable.base.ModelLookup.get_query:
+ qs = self.get_queryset()
+ if term:
+ search_filters = []
+ if self.search_fields:
+ for field in self.search_fields:
+ search_filters.append(Q(**{field: term}))
+
+ # tokenizing part
+ tokens = re.split('\s+', term)
+ if len(tokens) == 2:
+ name1, name2 = tokens
+ complex_q = (
+ Q(personal__icontains=name1) & Q(family__icontains=name2)
+ ) | (
+ Q(personal__icontains=name2) & Q(family__icontains=name1)
+ )
+ search_filters.append(complex_q)
+
+ # this is brilliant: it applies OR to all search filters
+ qs = qs.filter(reduce(operator.or_, search_filters))
+
+ return qs
+
@login_required
class AdminLookup(ModelLookup):
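For a concrete feel of the tokenizing branch (standalone snippet: it only builds the `Q` object, assumes Django is installed, and needs no database or settings), the two-token term from the issue produces:
```python
# Illustrative construction of the filter for the search term "Piotr Ban".
import re
from django.db.models import Q

term = "Piotr Ban"
name1, name2 = re.split(r"\s+", term)
complex_q = (
    Q(personal__icontains=name1) & Q(family__icontains=name2)
) | (
    Q(personal__icontains=name2) & Q(family__icontains=name1)
)
print(complex_q)
# roughly: (OR: (AND: personal icontains 'Piotr', family icontains 'Ban'),
#               (AND: personal icontains 'Ban', family icontains 'Piotr'))
```
Either ordering of the two tokens therefore matches "Piotr Banaszkiewicz".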
|
{"golden_diff": "diff --git a/workshops/lookups.py b/workshops/lookups.py\n--- a/workshops/lookups.py\n+++ b/workshops/lookups.py\n@@ -1,3 +1,7 @@\n+from functools import reduce\n+import operator\n+import re\n+\n from django.contrib.auth.models import Group\n from django.db.models import Q\n \n@@ -33,6 +37,32 @@\n 'username__icontains'\n )\n \n+ def get_query(self, request, term):\n+ \"\"\"Override this method to allow for additional lookup method: \"\"\"\n+ # original code from selectable.base.ModelLookup.get_query:\n+ qs = self.get_queryset()\n+ if term:\n+ search_filters = []\n+ if self.search_fields:\n+ for field in self.search_fields:\n+ search_filters.append(Q(**{field: term}))\n+\n+ # tokenizing part\n+ tokens = re.split('\\s+', term)\n+ if len(tokens) == 2:\n+ name1, name2 = tokens\n+ complex_q = (\n+ Q(personal__icontains=name1) & Q(family__icontains=name2)\n+ ) | (\n+ Q(personal__icontains=name2) & Q(family__icontains=name1)\n+ )\n+ search_filters.append(complex_q)\n+\n+ # this is brilliant: it applies OR to all search filters\n+ qs = qs.filter(reduce(operator.or_, search_filters))\n+\n+ return qs\n+\n \n @login_required\n class AdminLookup(ModelLookup):\n", "issue": "Tokenize person lookup to work with \"name last_name\"\n`lookups.PersonLookup` won't show 'Piotr Banaszkiewicz' for 'Piotr Ban' input \u2013 because it only looks up `personal` or `family` or `email`\u2026\n\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', )\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py"}], "after_files": [{"content": "from functools import reduce\nimport operator\nimport re\n\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Q\n\nfrom selectable.base import ModelLookup\nfrom selectable.registry import registry\nfrom selectable.decorators import login_required\n\nfrom workshops import models\n\n\n@login_required\nclass EventLookup(ModelLookup):\n model = models.Event\n search_fields = ('slug__icontains', 
)\n\n\n@login_required\nclass HostLookup(ModelLookup):\n model = models.Host\n search_fields = (\n 'domain__icontains',\n 'fullname__icontains'\n )\n\n\n@login_required\nclass PersonLookup(ModelLookup):\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n \"\"\"Override this method to allow for additional lookup method: \"\"\"\n # original code from selectable.base.ModelLookup.get_query:\n qs = self.get_queryset()\n if term:\n search_filters = []\n if self.search_fields:\n for field in self.search_fields:\n search_filters.append(Q(**{field: term}))\n\n # tokenizing part\n tokens = re.split('\\s+', term)\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n Q(personal__icontains=name1) & Q(family__icontains=name2)\n ) | (\n Q(personal__icontains=name2) & Q(family__icontains=name1)\n )\n search_filters.append(complex_q)\n\n # this is brilliant: it applies OR to all search filters\n qs = qs.filter(reduce(operator.or_, search_filters))\n\n return qs\n\n\n@login_required\nclass AdminLookup(ModelLookup):\n \"\"\"The same as PersonLookup, but allows only to select administrators.\n\n Administrator is anyone with superuser power or in \"administrators\" group.\n \"\"\"\n model = models.Person\n search_fields = (\n 'personal__icontains',\n 'family__icontains',\n 'email__icontains',\n 'username__icontains'\n )\n\n def get_query(self, request, term):\n results = super().get_query(request, term)\n admin_group = Group.objects.get(name='administrators')\n results = results.filter(\n Q(is_superuser=True) | Q(groups__in=[admin_group])\n )\n return results\n\n\n@login_required\nclass AirportLookup(ModelLookup):\n model = models.Airport\n search_fields = (\n 'iata__icontains',\n 'fullname__icontains'\n )\n\n\nregistry.register(EventLookup)\nregistry.register(HostLookup)\nregistry.register(PersonLookup)\nregistry.register(AdminLookup)\nregistry.register(AirportLookup)\n", "path": "workshops/lookups.py"}]}
| 827 | 331 |
gh_patches_debug_8564
|
rasdani/github-patches
|
git_diff
|
comfyanonymous__ComfyUI-2859
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Perp-Neg implementation is wrong, parallel component not ignored
https://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40
The Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).
When positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).
Positive: "forest"
Negative: ""

Positive: "forest"
Negative: "forest"

I'll submit a PR in a bit.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `comfy_extras/nodes_perpneg.py`
Content:
```
1 import torch
2 import comfy.model_management
3 import comfy.sample
4 import comfy.samplers
5 import comfy.utils
6
7
8 class PerpNeg:
9 @classmethod
10 def INPUT_TYPES(s):
11 return {"required": {"model": ("MODEL", ),
12 "empty_conditioning": ("CONDITIONING", ),
13 "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
14 }}
15 RETURN_TYPES = ("MODEL",)
16 FUNCTION = "patch"
17
18 CATEGORY = "_for_testing"
19
20 def patch(self, model, empty_conditioning, neg_scale):
21 m = model.clone()
22 nocond = comfy.sample.convert_cond(empty_conditioning)
23
24 def cfg_function(args):
25 model = args["model"]
26 noise_pred_pos = args["cond_denoised"]
27 noise_pred_neg = args["uncond_denoised"]
28 cond_scale = args["cond_scale"]
29 x = args["input"]
30 sigma = args["sigma"]
31 model_options = args["model_options"]
32 nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative")
33
34 (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)
35
36 pos = noise_pred_pos - noise_pred_nocond
37 neg = noise_pred_neg - noise_pred_nocond
38 perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
39 perp_neg = perp * neg_scale
40 cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
41 cfg_result = x - cfg_result
42 return cfg_result
43
44 m.set_model_sampler_cfg_function(cfg_function)
45
46 return (m, )
47
48
49 NODE_CLASS_MAPPINGS = {
50 "PerpNeg": PerpNeg,
51 }
52
53 NODE_DISPLAY_NAME_MAPPINGS = {
54 "PerpNeg": "Perp-Neg",
55 }
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -35,7 +35,7 @@
pos = noise_pred_pos - noise_pred_nocond
neg = noise_pred_neg - noise_pred_nocond
- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg
+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos
perp_neg = perp * neg_scale
cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)
cfg_result = x - cfg_result
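A standalone sanity check of the corrected projection (plain tensors only; this is a sketch, not the node's actual call path): removing from `neg` its component parallel to `pos` means identical prompts contribute nothing, which is exactly the behaviour the issue asks for.
```python
# Minimal reproduction of the fixed formula with made-up tensors.
import torch

pos = torch.randn(8)
neg = pos.clone()  # negative prompt identical to the positive prompt

perp = neg - ((torch.mul(neg, pos).sum()) / (torch.norm(pos) ** 2)) * pos
print(torch.allclose(perp, torch.zeros_like(perp), atol=1e-6))  # True: parallel part removed
```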
|
{"golden_diff": "diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py\n--- a/comfy_extras/nodes_perpneg.py\n+++ b/comfy_extras/nodes_perpneg.py\n@@ -35,7 +35,7 @@\n \n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n- perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n+ perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n", "issue": "Perp-Neg implementation is wrong, parallel component not ignored\nhttps://github.com/comfyanonymous/ComfyUI/blob/18c151b3e3f6838fab4028e7a8ba526e30e610d3/comfy_extras/nodes_perpneg.py#L38-L40\r\n\r\nThe Perp-Neg node does not match the [paper](https://arxiv.org/pdf/2304.04968.pdf) (pytorch code in Appendix A.1).\r\nWhen positive and negative prompt are the same, the result should be the same as an empty negative prompt because the prompts are completely parallel (i.e. there is no perpendicular component).\r\n\r\nPositive: \"forest\"\r\nNegative: \"\"\r\n\r\n\r\nPositive: \"forest\"\r\nNegative: \"forest\"\r\n\r\n\r\nI'll submit a PR in a bit.\n", "before_files": [{"content": "import torch\nimport comfy.model_management\nimport comfy.sample\nimport comfy.samplers\nimport comfy.utils\n\n\nclass PerpNeg:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": {\"model\": (\"MODEL\", ),\n \"empty_conditioning\": (\"CONDITIONING\", ),\n \"neg_scale\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 100.0}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION = \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, empty_conditioning, neg_scale):\n m = model.clone()\n nocond = comfy.sample.convert_cond(empty_conditioning)\n\n def cfg_function(args):\n model = args[\"model\"]\n noise_pred_pos = args[\"cond_denoised\"]\n noise_pred_neg = args[\"uncond_denoised\"]\n cond_scale = args[\"cond_scale\"]\n x = args[\"input\"]\n sigma = args[\"sigma\"]\n model_options = args[\"model_options\"]\n nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, \"negative\")\n\n (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)\n\n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n return cfg_result\n\n m.set_model_sampler_cfg_function(cfg_function)\n\n return (m, )\n\n\nNODE_CLASS_MAPPINGS = {\n \"PerpNeg\": PerpNeg,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n \"PerpNeg\": \"Perp-Neg\",\n}\n", "path": "comfy_extras/nodes_perpneg.py"}], "after_files": [{"content": "import torch\nimport comfy.model_management\nimport comfy.sample\nimport comfy.samplers\nimport comfy.utils\n\n\nclass PerpNeg:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": {\"model\": (\"MODEL\", ),\n \"empty_conditioning\": (\"CONDITIONING\", ),\n \"neg_scale\": (\"FLOAT\", {\"default\": 1.0, \"min\": 0.0, \"max\": 100.0}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION = \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, empty_conditioning, neg_scale):\n m = model.clone()\n nocond = comfy.sample.convert_cond(empty_conditioning)\n\n def cfg_function(args):\n model = args[\"model\"]\n noise_pred_pos = args[\"cond_denoised\"]\n noise_pred_neg = 
args[\"uncond_denoised\"]\n cond_scale = args[\"cond_scale\"]\n x = args[\"input\"]\n sigma = args[\"sigma\"]\n model_options = args[\"model_options\"]\n nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, \"negative\")\n\n (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options)\n\n pos = noise_pred_pos - noise_pred_nocond\n neg = noise_pred_neg - noise_pred_nocond\n perp = neg - ((torch.mul(neg, pos).sum())/(torch.norm(pos)**2)) * pos\n perp_neg = perp * neg_scale\n cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg)\n cfg_result = x - cfg_result\n return cfg_result\n\n m.set_model_sampler_cfg_function(cfg_function)\n\n return (m, )\n\n\nNODE_CLASS_MAPPINGS = {\n \"PerpNeg\": PerpNeg,\n}\n\nNODE_DISPLAY_NAME_MAPPINGS = {\n \"PerpNeg\": \"Perp-Neg\",\n}\n", "path": "comfy_extras/nodes_perpneg.py"}]}
| 1,127 | 179 |
gh_patches_debug_15326
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1192
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Page tree broken after searching and going back
### Describe the Bug
When using the back button of the browser after searching, the page tree is expanded. After that, the collapsing function is broken.
Can we tell the browser to reload the full page after using the back button? However, it would be the best solution to actually keep the status of the tree after using the back button.
~~This could be related to #1131~~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/pages/page_tree_view.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.shortcuts import render, redirect
5 from django.utils.decorators import method_decorator
6 from django.utils.translation import ugettext as _
7 from django.views.generic import TemplateView
8
9 from ...constants import translation_status
10 from ...decorators import permission_required
11 from ...forms import PageFilterForm
12 from ...models import Language
13 from .page_context_mixin import PageContextMixin
14
15 logger = logging.getLogger(__name__)
16
17
18 @method_decorator(permission_required("cms.view_page"), name="dispatch")
19 class PageTreeView(TemplateView, PageContextMixin):
20 """
21 View for showing the page tree
22 """
23
24 #: Template for list of non-archived pages
25 template = "pages/page_tree.html"
26 #: Template for list of archived pages
27 template_archived = "pages/page_tree_archived.html"
28 #: Whether or not to show archived pages
29 archived = False
30
31 @property
32 def template_name(self):
33 """
34 Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag
35 (see :class:`~django.views.generic.base.TemplateResponseMixin`)
36
37 :return: Path to HTML template
38 :rtype: str
39 """
40
41 return self.template_archived if self.archived else self.template
42
43 def get(self, request, *args, **kwargs):
44 r"""
45 Render page tree
46
47 :param request: The current request
48 :type request: ~django.http.HttpResponse
49
50 :param \*args: The supplied arguments
51 :type \*args: list
52
53 :param \**kwargs: The supplied keyword arguments
54 :type \**kwargs: dict
55
56 :return: The rendered template response
57 :rtype: ~django.template.response.TemplateResponse
58 """
59
60 # current region
61 region_slug = kwargs.get("region_slug")
62 region = request.region
63
64 # current language
65 language_slug = kwargs.get("language_slug")
66 if language_slug:
67 language = Language.objects.get(slug=language_slug)
68 elif region.default_language:
69 return redirect(
70 "pages",
71 **{
72 "region_slug": region_slug,
73 "language_slug": region.default_language.slug,
74 }
75 )
76 else:
77 messages.error(
78 request,
79 _("Please create at least one language node before creating pages."),
80 )
81 return redirect(
82 "language_tree",
83 **{
84 "region_slug": region_slug,
85 }
86 )
87
88 if not request.user.has_perm("cms.change_page"):
89 messages.warning(
90 request, _("You don't have the permission to edit or create pages.")
91 )
92
93 # Filter pages according to given filters, if any
94 filter_data = kwargs.get("filter_data")
95
96 if filter_data or self.archived:
97 page_queryset = region.pages.all()
98 else:
99 page_queryset = region.pages.filter(lft=1)
100 pages = page_queryset.cache_tree(archived=self.archived)[0]
101
102 if filter_data:
103 # Set data for filter form rendering
104 filter_form = PageFilterForm(data=filter_data)
105 pages = self.filter_pages(pages, language_slug, filter_form)
106 else:
107 filter_form = PageFilterForm()
108 filter_form.changed_data.clear()
109
110 return render(
111 request,
112 self.template_name,
113 {
114 **self.get_context_data(**kwargs),
115 "pages": pages,
116 "language": language,
117 "languages": region.active_languages,
118 "filter_form": filter_form,
119 },
120 )
121
122 def post(self, request, *args, **kwargs):
123 r"""
124 Apply page filters and render page tree
125
126 :param request: The current request
127 :type request: ~django.http.HttpResponse
128
129 :param \*args: The supplied arguments
130 :type \*args: list
131
132 :param \**kwargs: The supplied keyword arguments
133 :type \**kwargs: dict
134
135 :return: The rendered template response
136 :rtype: ~django.template.response.TemplateResponse
137 """
138 return self.get(request, *args, **kwargs, filter_data=request.POST)
139
140 @staticmethod
141 def filter_pages(pages, language_slug, filter_form):
142 """
143 Filter the pages list according to the given filter data
144
145 :param pages: The list of pages
146 :type pages: list
147
148 :param language_slug: The slug of the current language
149 :type language_slug: str
150
151 :param filter_form: The filter form
152 :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm
153
154 :return: The filtered page list
155 :rtype: list
156 """
157 if filter_form.is_valid():
158 query = filter_form.cleaned_data["query"]
159 if query:
160 # Buffer variable because the pages list should not be modified during iteration
161 filtered_pages = []
162 for page in pages:
163 translation = page.get_translation(language_slug)
164 if translation and (
165 query.lower() in translation.slug
166 or query.lower() in translation.title.lower()
167 ):
168 filtered_pages.append(page)
169 pages = filtered_pages
170
171 selected_status = filter_form.cleaned_data["translation_status"]
172 # Only filter if at least one checkbox but not all are checked
173 if 0 < len(selected_status) < len(translation_status.CHOICES):
174 # Buffer variable because the pages list should not be modified during iteration
175 filtered_pages = []
176 for page in pages:
177 translation_state = page.translation_states.get(language_slug)
178 if translation_state and translation_state[1] in selected_status:
179 filtered_pages.append(page)
180 pages = filtered_pages
181 return pages
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/integreat_cms/cms/views/pages/page_tree_view.py b/integreat_cms/cms/views/pages/page_tree_view.py
--- a/integreat_cms/cms/views/pages/page_tree_view.py
+++ b/integreat_cms/cms/views/pages/page_tree_view.py
@@ -107,7 +107,7 @@
filter_form = PageFilterForm()
filter_form.changed_data.clear()
- return render(
+ response = render(
request,
self.template_name,
{
@@ -118,6 +118,9 @@
"filter_form": filter_form,
},
)
+ # Disable browser cache of page tree to prevent subpages from being expanded after using "back"-button
+ response["Cache-Control"] = "no-store, must-revalidate"
+ return response
def post(self, request, *args, **kwargs):
r"""
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/pages/page_tree_view.py b/integreat_cms/cms/views/pages/page_tree_view.py\n--- a/integreat_cms/cms/views/pages/page_tree_view.py\n+++ b/integreat_cms/cms/views/pages/page_tree_view.py\n@@ -107,7 +107,7 @@\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n \n- return render(\n+ response = render(\n request,\n self.template_name,\n {\n@@ -118,6 +118,9 @@\n \"filter_form\": filter_form,\n },\n )\n+ # Disable browser cache of page tree to prevent subpages from being expanded after using \"back\"-button\n+ response[\"Cache-Control\"] = \"no-store, must-revalidate\"\n+ return response\n \n def post(self, request, *args, **kwargs):\n r\"\"\"\n", "issue": "Page tree broken after searching and going back\n### Describe the Bug\r\nWhen using the back button of the browser after searching, the page tree is expanded. After that, the collapsing function is broken.\r\n\r\nCan we tell the browser to reload the full page after using the back button? However, it would be the best solution to actually keep the status of the tree after using the back button.\r\n\r\n~~This could be related to #1131~~\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Language\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, PageContextMixin):\n \"\"\"\n View for showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return self.template_archived if self.archived else self.template\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = request.region\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not 
request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n\n if filter_data or self.archived:\n page_queryset = region.pages.all()\n else:\n page_queryset = region.pages.filter(lft=1)\n pages = page_queryset.cache_tree(archived=self.archived)[0]\n\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n pages = self.filter_pages(pages, language_slug, filter_form)\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n return render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"pages\": pages,\n \"language\": language,\n \"languages\": region.active_languages,\n \"filter_form\": filter_form,\n },\n )\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n\n @staticmethod\n def filter_pages(pages, language_slug, filter_form):\n \"\"\"\n Filter the pages list according to the given filter data\n\n :param pages: The list of pages\n :type pages: list\n\n :param language_slug: The slug of the current language\n :type language_slug: str\n\n :param filter_form: The filter form\n :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm\n\n :return: The filtered page list\n :rtype: list\n \"\"\"\n if filter_form.is_valid():\n query = filter_form.cleaned_data[\"query\"]\n if query:\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation = page.get_translation(language_slug)\n if translation and (\n query.lower() in translation.slug\n or query.lower() in translation.title.lower()\n ):\n filtered_pages.append(page)\n pages = filtered_pages\n\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # Only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation_state = page.translation_states.get(language_slug)\n if translation_state and translation_state[1] in selected_status:\n filtered_pages.append(page)\n pages = filtered_pages\n return pages\n", "path": "integreat_cms/cms/views/pages/page_tree_view.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import translation_status\nfrom ...decorators import permission_required\nfrom ...forms import PageFilterForm\nfrom ...models import Language\nfrom .page_context_mixin import PageContextMixin\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(permission_required(\"cms.view_page\"), name=\"dispatch\")\nclass PageTreeView(TemplateView, 
PageContextMixin):\n \"\"\"\n View for showing the page tree\n \"\"\"\n\n #: Template for list of non-archived pages\n template = \"pages/page_tree.html\"\n #: Template for list of archived pages\n template_archived = \"pages/page_tree_archived.html\"\n #: Whether or not to show archived pages\n archived = False\n\n @property\n def template_name(self):\n \"\"\"\n Select correct HTML template, depending on :attr:`~integreat_cms.cms.views.pages.page_tree_view.PageTreeView.archived` flag\n (see :class:`~django.views.generic.base.TemplateResponseMixin`)\n\n :return: Path to HTML template\n :rtype: str\n \"\"\"\n\n return self.template_archived if self.archived else self.template\n\n def get(self, request, *args, **kwargs):\n r\"\"\"\n Render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n\n # current region\n region_slug = kwargs.get(\"region_slug\")\n region = request.region\n\n # current language\n language_slug = kwargs.get(\"language_slug\")\n if language_slug:\n language = Language.objects.get(slug=language_slug)\n elif region.default_language:\n return redirect(\n \"pages\",\n **{\n \"region_slug\": region_slug,\n \"language_slug\": region.default_language.slug,\n }\n )\n else:\n messages.error(\n request,\n _(\"Please create at least one language node before creating pages.\"),\n )\n return redirect(\n \"language_tree\",\n **{\n \"region_slug\": region_slug,\n }\n )\n\n if not request.user.has_perm(\"cms.change_page\"):\n messages.warning(\n request, _(\"You don't have the permission to edit or create pages.\")\n )\n\n # Filter pages according to given filters, if any\n filter_data = kwargs.get(\"filter_data\")\n\n if filter_data or self.archived:\n page_queryset = region.pages.all()\n else:\n page_queryset = region.pages.filter(lft=1)\n pages = page_queryset.cache_tree(archived=self.archived)[0]\n\n if filter_data:\n # Set data for filter form rendering\n filter_form = PageFilterForm(data=filter_data)\n pages = self.filter_pages(pages, language_slug, filter_form)\n else:\n filter_form = PageFilterForm()\n filter_form.changed_data.clear()\n\n response = render(\n request,\n self.template_name,\n {\n **self.get_context_data(**kwargs),\n \"pages\": pages,\n \"language\": language,\n \"languages\": region.active_languages,\n \"filter_form\": filter_form,\n },\n )\n # Disable browser cache of page tree to prevent subpages from being expanded after using \"back\"-button\n response[\"Cache-Control\"] = \"no-store, must-revalidate\"\n return response\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply page filters and render page tree\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The rendered template response\n :rtype: ~django.template.response.TemplateResponse\n \"\"\"\n return self.get(request, *args, **kwargs, filter_data=request.POST)\n\n @staticmethod\n def filter_pages(pages, language_slug, filter_form):\n \"\"\"\n Filter the pages list according to the given filter data\n\n :param pages: The list of pages\n :type pages: list\n\n :param language_slug: The slug of the current language\n :type 
language_slug: str\n\n :param filter_form: The filter form\n :type filter_form: integreat_cms.cms.forms.pages.page_filter_form.PageFilterForm\n\n :return: The filtered page list\n :rtype: list\n \"\"\"\n if filter_form.is_valid():\n query = filter_form.cleaned_data[\"query\"]\n if query:\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation = page.get_translation(language_slug)\n if translation and (\n query.lower() in translation.slug\n or query.lower() in translation.title.lower()\n ):\n filtered_pages.append(page)\n pages = filtered_pages\n\n selected_status = filter_form.cleaned_data[\"translation_status\"]\n # Only filter if at least one checkbox but not all are checked\n if 0 < len(selected_status) < len(translation_status.CHOICES):\n # Buffer variable because the pages list should not be modified during iteration\n filtered_pages = []\n for page in pages:\n translation_state = page.translation_states.get(language_slug)\n if translation_state and translation_state[1] in selected_status:\n filtered_pages.append(page)\n pages = filtered_pages\n return pages\n", "path": "integreat_cms/cms/views/pages/page_tree_view.py"}]}
| 2,002 | 197 |
gh_patches_debug_6676
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1513
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fox.com.tr not work with Streamlink
## **Checklist**
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
## **Description**
i cant see anything at fox.com.tr
i have test it with this links but i became black screen
## **Reproduction steps / Explicit stream URLs to test**
https://www.fox.com.tr/canli-yayin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/foxtr.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class FoxTR(Plugin):
11 """
12 Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
13 """
14 url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
15 playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
16
17 @classmethod
18 def can_handle_url(cls, url):
19 return cls.url_re.match(url) is not None
20
21 def _get_streams(self):
22 res = http.get(self.url)
23 match = self.playervars_re.search(res.text)
24 if match:
25 stream_url = match.group(1)
26 return HLSStream.parse_variant_playlist(self.session, stream_url)
27
28
29 __plugin__ = FoxTR
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py
--- a/src/streamlink/plugins/foxtr.py
+++ b/src/streamlink/plugins/foxtr.py
@@ -12,7 +12,7 @@
Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin
"""
url_re = re.compile(r"https?://www.fox.com.tr/canli-yayin")
- playervars_re = re.compile(r"desktop\s*:\s*\[\s*\{\s*src\s*:\s*'(.*?)'", re.DOTALL)
+ playervars_re = re.compile(r"source\s*:\s*\[\s*\{\s*videoSrc\s*:\s*'(.*?)'", re.DOTALL)
@classmethod
def can_handle_url(cls, url):
|
{"golden_diff": "diff --git a/src/streamlink/plugins/foxtr.py b/src/streamlink/plugins/foxtr.py\n--- a/src/streamlink/plugins/foxtr.py\n+++ b/src/streamlink/plugins/foxtr.py\n@@ -12,7 +12,7 @@\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n- playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n+ playervars_re = re.compile(r\"source\\s*:\\s*\\[\\s*\\{\\s*videoSrc\\s*:\\s*'(.*?)'\", re.DOTALL)\n \n @classmethod\n def can_handle_url(cls, url):\n", "issue": "Fox.com.tr not work with Streamlink\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at fox.com.tr \r\n i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\nhttps://www.fox.com.tr/canli-yayin\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass FoxTR(Plugin):\n \"\"\"\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n playervars_re = re.compile(r\"desktop\\s*:\\s*\\[\\s*\\{\\s*src\\s*:\\s*'(.*?)'\", re.DOTALL)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n match = self.playervars_re.search(res.text)\n if match:\n stream_url = match.group(1)\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = FoxTR\n", "path": "src/streamlink/plugins/foxtr.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass FoxTR(Plugin):\n \"\"\"\n Support for Turkish Fox live stream: http://www.fox.com.tr/canli-yayin\n \"\"\"\n url_re = re.compile(r\"https?://www.fox.com.tr/canli-yayin\")\n playervars_re = re.compile(r\"source\\s*:\\s*\\[\\s*\\{\\s*videoSrc\\s*:\\s*'(.*?)'\", re.DOTALL)\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n match = self.playervars_re.search(res.text)\n if match:\n stream_url = match.group(1)\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = FoxTR\n", "path": "src/streamlink/plugins/foxtr.py"}]}
| 651 | 191 |
gh_patches_debug_2182
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-4769
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_join_sort.py in CI failed by timeout with new Dask release - 2022.2.0
Error: https://github.com/modin-project/modin/runs/5195622251?check_suite_focus=true
Dask release - https://github.com/dask/dask/releases/tag/2022.02.0
Fastest option here - pin `dask<2022.2.0`, but it also requires an investigation into the cause.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3 import sys
4
5 PANDAS_VERSION = "1.4.3" if sys.version_info >= (3, 8) else "1.1.5"
6
7 with open("README.md", "r", encoding="utf-8") as fh:
8 long_description = fh.read()
9
10 dask_deps = ["dask>=2.22.0,<2022.2.0", "distributed>=2.22.0,<2022.2.0"]
11 if sys.version_info < (3, 8):
12 dask_deps.append("pickle5")
13
14 ray_deps = [
15 "ray[default]>=1.4.0",
16 "pyarrow>=4.0.1",
17 "redis>=3.5.0,<4.0.0",
18 ]
19 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
20 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
21 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
22 all_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps
23
24 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
25 # This file provides the "import pandas before Ray init" feature if specific
26 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
27 cmdclass = versioneer.get_cmdclass()
28 extra_files = ["modin-autoimport-pandas.pth"]
29
30
31 class AddPthFileBuild(cmdclass["build_py"]):
32 def _get_data_files(self):
33 return (super()._get_data_files() or []) + [
34 (".", ".", self.build_lib, extra_files)
35 ]
36
37
38 class AddPthFileSDist(cmdclass["sdist"]):
39 def make_distribution(self):
40 self.filelist.extend(extra_files)
41 return super().make_distribution()
42
43
44 cmdclass["build_py"] = AddPthFileBuild
45 cmdclass["sdist"] = AddPthFileSDist
46
47 setup(
48 name="modin",
49 version=versioneer.get_version(),
50 cmdclass=cmdclass,
51 description="Modin: Make your pandas code run faster by changing one line of code.",
52 packages=find_packages(exclude=["scripts", "scripts.*"]),
53 include_package_data=True,
54 license="Apache 2",
55 url="https://github.com/modin-project/modin",
56 long_description=long_description,
57 long_description_content_type="text/markdown",
58 install_requires=[f"pandas=={PANDAS_VERSION}", "packaging", "numpy>=1.18.5", "fsspec", "psutil"],
59 extras_require={
60 # can be installed by pip install modin[dask]
61 "dask": dask_deps,
62 "ray": ray_deps,
63 "remote": remote_deps,
64 "spreadsheet": spreadsheet_deps,
65 "sql": sql_deps,
66 "all": all_deps,
67 },
68 python_requires=">=3.6",
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
-dask_deps = ["dask>=2.22.0,<2022.2.0", "distributed>=2.22.0,<2022.2.0"]
+dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
if sys.version_info < (3, 8):
dask_deps.append("pickle5")
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n \n-dask_deps = [\"dask>=2.22.0,<2022.2.0\", \"distributed>=2.22.0,<2022.2.0\"]\n+dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n if sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n", "issue": "test_join_sort.py in CI failed by timeout with new Dask release - 2022.2.0\nError: https://github.com/modin-project/modin/runs/5195622251?check_suite_focus=true\r\n\r\nDask release - https://github.com/dask/dask/releases/tag/2022.02.0\r\n\r\nFastest option here - pin `dask<2022.2.0`, but it also requires an investigation into the cause.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport sys\n\nPANDAS_VERSION = \"1.4.3\" if sys.version_info >= (3, 8) else \"1.1.5\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0,<2022.2.0\", \"distributed>=2.22.0,<2022.2.0\"]\nif sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n\nray_deps = [\n \"ray[default]>=1.4.0\",\n \"pyarrow>=4.0.1\",\n \"redis>=3.5.0,<4.0.0\",\n]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[f\"pandas=={PANDAS_VERSION}\", \"packaging\", \"numpy>=1.18.5\", \"fsspec\", \"psutil\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\nimport sys\n\nPANDAS_VERSION = \"1.4.3\" if sys.version_info >= (3, 8) else \"1.1.5\"\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\nif sys.version_info < (3, 8):\n dask_deps.append(\"pickle5\")\n\nray_deps = [\n \"ray[default]>=1.4.0\",\n \"pyarrow>=4.0.1\",\n 
\"redis>=3.5.0,<4.0.0\",\n]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[f\"pandas=={PANDAS_VERSION}\", \"packaging\", \"numpy>=1.18.5\", \"fsspec\", \"psutil\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}]}
| 1,164 | 144 |
gh_patches_debug_33208
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-3135
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transition to GA: costmanagement
Command module `costmanagement` has been released for a long time and is using stable sdk version `2019-11-01`.
Please check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/costmanagement/setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11
12 # TODO: Confirm this is the right version number you want and it matches your
13 # HISTORY.rst entry.
14 VERSION = '0.1.0'
15
16 # The full list of classifiers is available at
17 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
18 CLASSIFIERS = [
19 'Development Status :: 4 - Beta',
20 'Intended Audience :: Developers',
21 'Intended Audience :: System Administrators',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.6',
25 'Programming Language :: Python :: 3.7',
26 'Programming Language :: Python :: 3.8',
27 'License :: OSI Approved :: MIT License',
28 ]
29
30 # TODO: Add any additional SDK dependencies here
31 DEPENDENCIES = []
32
33 with open('README.md', 'r', encoding='utf-8') as f:
34 README = f.read()
35 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
36 HISTORY = f.read()
37
38 setup(
39 name='costmanagement',
40 version=VERSION,
41 description='Microsoft Azure Command-Line Tools CostManagementClient Extension',
42 # TODO: Update author and email, if applicable
43 author='Microsoft Corporation',
44 author_email='[email protected]',
45 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/costmanagement',
46 long_description=README + '\n\n' + HISTORY,
47 license='MIT',
48 classifiers=CLASSIFIERS,
49 packages=find_packages(),
50 install_requires=DEPENDENCIES,
51 package_data={'azext_costmanagement': ['azext_metadata.json']},
52 )
53
```
Path: `src/costmanagement/azext_costmanagement/manual/commands.py`
Content:
```
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from azure.cli.core.commands import CliCommandType
7
8
9 def load_command_table(self, _):
10
11 from azext_costmanagement.generated._client_factory import cf_query
12 costmanagement_query = CliCommandType(
13 operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'
14 '_query_operations#QueryOperations.{}',
15 client_factory=cf_query)
16 with self.command_group('costmanagement', costmanagement_query,
17 client_factory=cf_query, is_experimental=True) as g:
18 g.custom_command('query', 'costmanagement_query')
19
20 from azext_costmanagement.generated._client_factory import cf_export
21 costmanagement_export = CliCommandType(
22 operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'
23 'ns.{}',
24 client_factory=cf_export)
25 with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,
26 is_experimental=True) as g:
27 g.custom_command('list', 'costmanagement_export_list')
28 g.custom_show_command('show', 'costmanagement_export_show')
29 g.custom_command('create', 'costmanagement_export_create')
30 g.custom_command('update', 'costmanagement_export_update')
31 g.custom_command('delete', 'costmanagement_export_delete', confirmation=True)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/costmanagement/azext_costmanagement/manual/commands.py b/src/costmanagement/azext_costmanagement/manual/commands.py
--- a/src/costmanagement/azext_costmanagement/manual/commands.py
+++ b/src/costmanagement/azext_costmanagement/manual/commands.py
@@ -13,8 +13,7 @@
operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'
'_query_operations#QueryOperations.{}',
client_factory=cf_query)
- with self.command_group('costmanagement', costmanagement_query,
- client_factory=cf_query, is_experimental=True) as g:
+ with self.command_group('costmanagement', costmanagement_query, client_factory=cf_query) as g:
g.custom_command('query', 'costmanagement_query')
from azext_costmanagement.generated._client_factory import cf_export
@@ -22,8 +21,7 @@
operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'
'ns.{}',
client_factory=cf_export)
- with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,
- is_experimental=True) as g:
+ with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export) as g:
g.custom_command('list', 'costmanagement_export_list')
g.custom_show_command('show', 'costmanagement_export_show')
g.custom_command('create', 'costmanagement_export_create')
diff --git a/src/costmanagement/setup.py b/src/costmanagement/setup.py
--- a/src/costmanagement/setup.py
+++ b/src/costmanagement/setup.py
@@ -11,7 +11,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.0'
+VERSION = '0.1.1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
{"golden_diff": "diff --git a/src/costmanagement/azext_costmanagement/manual/commands.py b/src/costmanagement/azext_costmanagement/manual/commands.py\n--- a/src/costmanagement/azext_costmanagement/manual/commands.py\n+++ b/src/costmanagement/azext_costmanagement/manual/commands.py\n@@ -13,8 +13,7 @@\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'\n '_query_operations#QueryOperations.{}',\n client_factory=cf_query)\n- with self.command_group('costmanagement', costmanagement_query,\n- client_factory=cf_query, is_experimental=True) as g:\n+ with self.command_group('costmanagement', costmanagement_query, client_factory=cf_query) as g:\n g.custom_command('query', 'costmanagement_query')\n \n from azext_costmanagement.generated._client_factory import cf_export\n@@ -22,8 +21,7 @@\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'\n 'ns.{}',\n client_factory=cf_export)\n- with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,\n- is_experimental=True) as g:\n+ with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export) as g:\n g.custom_command('list', 'costmanagement_export_list')\n g.custom_show_command('show', 'costmanagement_export_show')\n g.custom_command('create', 'costmanagement_export_create')\ndiff --git a/src/costmanagement/setup.py b/src/costmanagement/setup.py\n--- a/src/costmanagement/setup.py\n+++ b/src/costmanagement/setup.py\n@@ -11,7 +11,7 @@\n \r\n # TODO: Confirm this is the right version number you want and it matches your\r\n # HISTORY.rst entry.\r\n-VERSION = '0.1.0'\r\n+VERSION = '0.1.1'\r\n \r\n # The full list of classifiers is available at\r\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: costmanagement\nCommand module `costmanagement` has been released for a long time and is using stable sdk version `2019-11-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\r\n\r\n# --------------------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\r\n# --------------------------------------------------------------------------------------------\r\n\r\n\r\nfrom codecs import open\r\nfrom setuptools import setup, find_packages\r\n\r\n# TODO: Confirm this is the right version number you want and it matches your\r\n# HISTORY.rst entry.\r\nVERSION = '0.1.0'\r\n\r\n# The full list of classifiers is available at\r\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\r\nCLASSIFIERS = [\r\n 'Development Status :: 4 - Beta',\r\n 'Intended Audience :: Developers',\r\n 'Intended Audience :: System Administrators',\r\n 'Programming Language :: Python',\r\n 'Programming Language :: Python :: 3',\r\n 'Programming Language :: Python :: 3.6',\r\n 'Programming Language :: Python :: 3.7',\r\n 'Programming Language :: Python :: 3.8',\r\n 'License :: OSI Approved :: MIT License',\r\n]\r\n\r\n# TODO: Add any additional SDK dependencies here\r\nDEPENDENCIES = []\r\n\r\nwith open('README.md', 'r', encoding='utf-8') as f:\r\n README = f.read()\r\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\r\n HISTORY = f.read()\r\n\r\nsetup(\r\n name='costmanagement',\r\n version=VERSION,\r\n description='Microsoft Azure Command-Line Tools CostManagementClient Extension',\r\n # TODO: Update author and email, if applicable\r\n author='Microsoft Corporation',\r\n author_email='[email protected]',\r\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/costmanagement',\r\n long_description=README + '\\n\\n' + HISTORY,\r\n license='MIT',\r\n classifiers=CLASSIFIERS,\r\n packages=find_packages(),\r\n install_requires=DEPENDENCIES,\r\n package_data={'azext_costmanagement': ['azext_metadata.json']},\r\n)\r\n", "path": "src/costmanagement/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_costmanagement.generated._client_factory import cf_query\n costmanagement_query = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'\n '_query_operations#QueryOperations.{}',\n client_factory=cf_query)\n with self.command_group('costmanagement', costmanagement_query,\n client_factory=cf_query, is_experimental=True) as g:\n g.custom_command('query', 'costmanagement_query')\n\n from azext_costmanagement.generated._client_factory import cf_export\n costmanagement_export = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'\n 'ns.{}',\n client_factory=cf_export)\n with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export,\n is_experimental=True) as g:\n g.custom_command('list', 'costmanagement_export_list')\n g.custom_show_command('show', 'costmanagement_export_show')\n g.custom_command('create', 'costmanagement_export_create')\n g.custom_command('update', 'costmanagement_export_update')\n g.custom_command('delete', 'costmanagement_export_delete', confirmation=True)\n", "path": "src/costmanagement/azext_costmanagement/manual/commands.py"}], "after_files": [{"content": "#!/usr/bin/env python\r\n\r\n# --------------------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. See License.txt in the project root for license information.\r\n# --------------------------------------------------------------------------------------------\r\n\r\n\r\nfrom codecs import open\r\nfrom setuptools import setup, find_packages\r\n\r\n# TODO: Confirm this is the right version number you want and it matches your\r\n# HISTORY.rst entry.\r\nVERSION = '0.1.1'\r\n\r\n# The full list of classifiers is available at\r\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\r\nCLASSIFIERS = [\r\n 'Development Status :: 4 - Beta',\r\n 'Intended Audience :: Developers',\r\n 'Intended Audience :: System Administrators',\r\n 'Programming Language :: Python',\r\n 'Programming Language :: Python :: 3',\r\n 'Programming Language :: Python :: 3.6',\r\n 'Programming Language :: Python :: 3.7',\r\n 'Programming Language :: Python :: 3.8',\r\n 'License :: OSI Approved :: MIT License',\r\n]\r\n\r\n# TODO: Add any additional SDK dependencies here\r\nDEPENDENCIES = []\r\n\r\nwith open('README.md', 'r', encoding='utf-8') as f:\r\n README = f.read()\r\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\r\n HISTORY = f.read()\r\n\r\nsetup(\r\n name='costmanagement',\r\n version=VERSION,\r\n description='Microsoft Azure Command-Line Tools CostManagementClient Extension',\r\n # TODO: Update author and email, if applicable\r\n author='Microsoft Corporation',\r\n author_email='[email protected]',\r\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/costmanagement',\r\n long_description=README + '\\n\\n' + HISTORY,\r\n license='MIT',\r\n classifiers=CLASSIFIERS,\r\n packages=find_packages(),\r\n install_requires=DEPENDENCIES,\r\n package_data={'azext_costmanagement': ['azext_metadata.json']},\r\n)\r\n", "path": "src/costmanagement/setup.py"}, {"content": "# 
--------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_costmanagement.generated._client_factory import cf_query\n costmanagement_query = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations.'\n '_query_operations#QueryOperations.{}',\n client_factory=cf_query)\n with self.command_group('costmanagement', costmanagement_query, client_factory=cf_query) as g:\n g.custom_command('query', 'costmanagement_query')\n\n from azext_costmanagement.generated._client_factory import cf_export\n costmanagement_export = CliCommandType(\n operations_tmpl='azext_costmanagement.vendored_sdks.costmanagement.operations._export_operations#ExportOperatio'\n 'ns.{}',\n client_factory=cf_export)\n with self.command_group('costmanagement export', costmanagement_export, client_factory=cf_export) as g:\n g.custom_command('list', 'costmanagement_export_list')\n g.custom_show_command('show', 'costmanagement_export_show')\n g.custom_command('create', 'costmanagement_export_create')\n g.custom_command('update', 'costmanagement_export_update')\n g.custom_command('delete', 'costmanagement_export_delete', confirmation=True)\n", "path": "src/costmanagement/azext_costmanagement/manual/commands.py"}]}
| 1,253 | 457 |
gh_patches_debug_38816
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3454
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider valero is broken
During the global build at 2021-07-14-14-42-22, spider **valero** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/valero.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/valero.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6
7 class ValeroSpider(scrapy.Spider):
8 name = "valero"
9 item_attributes = {'brand': "Valero", 'brand_wikidata': 'Q1283291'}
10 allowed_domains = ["valeromaps.valero.com"]
11
12 def start_requests(self):
13 yield scrapy.FormRequest(
14 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',
15 method='POST',
16 headers={
17 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
18 },
19 formdata={
20 'NEBound_Lat': '90',
21 'NEBound_Long': '180',
22 'SWBound_Lat': '-90',
23 'SWBound_Long': '-180',
24 'center_Lat': '0',
25 'center_Long': '0'
26 }
27 )
28
29 def parse(self, response):
30 result = json.loads(response.body_as_unicode())
31 for store in result['StoreList']:
32 details = ', '.join([d['DetailName'] for d in store['Details']])
33 yield GeojsonPointItem(
34 lon=store['Longitude'],
35 lat=store['Latitude'],
36 ref=store['UniqueID'],
37 name=store['StationName'],
38 addr_full=store['Address'],
39 phone=store['Phone'],
40 opening_hours='24/7' if '24 Hours' in details else None,
41 extras={
42 'amenity:fuel': True,
43 'amenity:toilets': 'Restroom' in details or None,
44 'atm': 'ATM' in details,
45 'car_wash': 'Car Wash' in details,
46 'fuel:diesel': 'Diesel' in details or None,
47 'fuel:e85': 'E-85' in details or None,
48 }
49 )
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/valero.py b/locations/spiders/valero.py
--- a/locations/spiders/valero.py
+++ b/locations/spiders/valero.py
@@ -1,49 +1,39 @@
# -*- coding: utf-8 -*-
import scrapy
import json
+
from locations.items import GeojsonPointItem
class ValeroSpider(scrapy.Spider):
name = "valero"
- item_attributes = {'brand': "Valero", 'brand_wikidata': 'Q1283291'}
- allowed_domains = ["valeromaps.valero.com"]
-
- def start_requests(self):
- yield scrapy.FormRequest(
- 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',
- method='POST',
- headers={
- 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
- },
- formdata={
- 'NEBound_Lat': '90',
- 'NEBound_Long': '180',
- 'SWBound_Lat': '-90',
- 'SWBound_Long': '-180',
- 'center_Lat': '0',
- 'center_Long': '0'
- }
- )
+ item_attributes = {"brand": "Valero", "brand_wikidata": "Q1283291"}
+ allowed_domains = ["valero.com"]
+ start_urls = ["https://locations.valero.com/sitemap.xml"]
def parse(self, response):
- result = json.loads(response.body_as_unicode())
- for store in result['StoreList']:
- details = ', '.join([d['DetailName'] for d in store['Details']])
- yield GeojsonPointItem(
- lon=store['Longitude'],
- lat=store['Latitude'],
- ref=store['UniqueID'],
- name=store['StationName'],
- addr_full=store['Address'],
- phone=store['Phone'],
- opening_hours='24/7' if '24 Hours' in details else None,
- extras={
- 'amenity:fuel': True,
- 'amenity:toilets': 'Restroom' in details or None,
- 'atm': 'ATM' in details,
- 'car_wash': 'Car Wash' in details,
- 'fuel:diesel': 'Diesel' in details or None,
- 'fuel:e85': 'E-85' in details or None,
- }
- )
+ response.selector.remove_namespaces()
+ for url in response.xpath("//loc/text()").extract():
+ yield scrapy.Request(url, callback=self.parse_store)
+
+ def parse_store(self, response):
+ amenities = [s.strip() for s in response.xpath('//div[@class="amenityIconLabel"]/text()').extract()]
+ properties = {
+ "lat": response.xpath('//meta[@property="place:location:latitude"]/@content').get(),
+ "lon": response.xpath('//meta[@property="place:location:longitude"]/@content').get(),
+ "ref": response.url.rsplit("/", 1)[-1],
+ "website": response.url,
+ "name": response.xpath('normalize-space(//*[@id="pageTitleStoreName"])').get(),
+ "addr_full": response.xpath('normalize-space(//div[@class="locationDetailsContactRow"][1]//br/..)').get(),
+ "phone": response.xpath('//a[contains(@href,"tel:")]/text()').get(),
+ "opening_hours": "24/7" if "24 Hour" in amenities else None,
+ "extras": {
+ "atm": "ATM" in amenities,
+ "amenity:fuel": True,
+ "amenity:toilets": "Public Restroom" in amenities or None,
+ "car_wash": "Car Wash" in amenities,
+ "fuel:diesel": "Diesel" in amenities or None,
+ "fuel:e85": "E-85" in amenities or None,
+ },
+ }
+ yield GeojsonPointItem(**properties)
|
{"golden_diff": "diff --git a/locations/spiders/valero.py b/locations/spiders/valero.py\n--- a/locations/spiders/valero.py\n+++ b/locations/spiders/valero.py\n@@ -1,49 +1,39 @@\n # -*- coding: utf-8 -*-\n import scrapy\n import json\n+\n from locations.items import GeojsonPointItem\n \n \n class ValeroSpider(scrapy.Spider):\n name = \"valero\"\n- item_attributes = {'brand': \"Valero\", 'brand_wikidata': 'Q1283291'}\n- allowed_domains = [\"valeromaps.valero.com\"]\n-\n- def start_requests(self):\n- yield scrapy.FormRequest(\n- 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',\n- method='POST',\n- headers={\n- 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'\n- },\n- formdata={\n- 'NEBound_Lat': '90',\n- 'NEBound_Long': '180',\n- 'SWBound_Lat': '-90',\n- 'SWBound_Long': '-180',\n- 'center_Lat': '0',\n- 'center_Long': '0'\n- }\n- )\n+ item_attributes = {\"brand\": \"Valero\", \"brand_wikidata\": \"Q1283291\"}\n+ allowed_domains = [\"valero.com\"]\n+ start_urls = [\"https://locations.valero.com/sitemap.xml\"]\n \n def parse(self, response):\n- result = json.loads(response.body_as_unicode())\n- for store in result['StoreList']:\n- details = ', '.join([d['DetailName'] for d in store['Details']])\n- yield GeojsonPointItem(\n- lon=store['Longitude'],\n- lat=store['Latitude'],\n- ref=store['UniqueID'],\n- name=store['StationName'],\n- addr_full=store['Address'],\n- phone=store['Phone'],\n- opening_hours='24/7' if '24 Hours' in details else None,\n- extras={\n- 'amenity:fuel': True,\n- 'amenity:toilets': 'Restroom' in details or None,\n- 'atm': 'ATM' in details,\n- 'car_wash': 'Car Wash' in details,\n- 'fuel:diesel': 'Diesel' in details or None,\n- 'fuel:e85': 'E-85' in details or None,\n- }\n- )\n+ response.selector.remove_namespaces()\n+ for url in response.xpath(\"//loc/text()\").extract():\n+ yield scrapy.Request(url, callback=self.parse_store)\n+\n+ def parse_store(self, response):\n+ amenities = [s.strip() for s in response.xpath('//div[@class=\"amenityIconLabel\"]/text()').extract()]\n+ properties = {\n+ \"lat\": response.xpath('//meta[@property=\"place:location:latitude\"]/@content').get(),\n+ \"lon\": response.xpath('//meta[@property=\"place:location:longitude\"]/@content').get(),\n+ \"ref\": response.url.rsplit(\"/\", 1)[-1],\n+ \"website\": response.url,\n+ \"name\": response.xpath('normalize-space(//*[@id=\"pageTitleStoreName\"])').get(),\n+ \"addr_full\": response.xpath('normalize-space(//div[@class=\"locationDetailsContactRow\"][1]//br/..)').get(),\n+ \"phone\": response.xpath('//a[contains(@href,\"tel:\")]/text()').get(),\n+ \"opening_hours\": \"24/7\" if \"24 Hour\" in amenities else None,\n+ \"extras\": {\n+ \"atm\": \"ATM\" in amenities,\n+ \"amenity:fuel\": True,\n+ \"amenity:toilets\": \"Public Restroom\" in amenities or None,\n+ \"car_wash\": \"Car Wash\" in amenities,\n+ \"fuel:diesel\": \"Diesel\" in amenities or None,\n+ \"fuel:e85\": \"E-85\" in amenities or None,\n+ },\n+ }\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider valero is broken\nDuring the global build at 2021-07-14-14-42-22, spider **valero** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/valero.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/valero.geojson))\n", "before_files": [{"content": "# -*- 
coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass ValeroSpider(scrapy.Spider):\n name = \"valero\"\n item_attributes = {'brand': \"Valero\", 'brand_wikidata': 'Q1283291'}\n allowed_domains = [\"valeromaps.valero.com\"]\n\n def start_requests(self):\n yield scrapy.FormRequest(\n 'https://valeromaps.valero.com/Home/Search?SPHostUrl=https:%2F%2Fwww.valero.com%2Fen-us',\n method='POST',\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'\n },\n formdata={\n 'NEBound_Lat': '90',\n 'NEBound_Long': '180',\n 'SWBound_Lat': '-90',\n 'SWBound_Long': '-180',\n 'center_Lat': '0',\n 'center_Long': '0'\n }\n )\n\n def parse(self, response):\n result = json.loads(response.body_as_unicode())\n for store in result['StoreList']:\n details = ', '.join([d['DetailName'] for d in store['Details']])\n yield GeojsonPointItem(\n lon=store['Longitude'],\n lat=store['Latitude'],\n ref=store['UniqueID'],\n name=store['StationName'],\n addr_full=store['Address'],\n phone=store['Phone'],\n opening_hours='24/7' if '24 Hours' in details else None,\n extras={\n 'amenity:fuel': True,\n 'amenity:toilets': 'Restroom' in details or None,\n 'atm': 'ATM' in details,\n 'car_wash': 'Car Wash' in details,\n 'fuel:diesel': 'Diesel' in details or None,\n 'fuel:e85': 'E-85' in details or None,\n }\n )\n", "path": "locations/spiders/valero.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass ValeroSpider(scrapy.Spider):\n name = \"valero\"\n item_attributes = {\"brand\": \"Valero\", \"brand_wikidata\": \"Q1283291\"}\n allowed_domains = [\"valero.com\"]\n start_urls = [\"https://locations.valero.com/sitemap.xml\"]\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for url in response.xpath(\"//loc/text()\").extract():\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n amenities = [s.strip() for s in response.xpath('//div[@class=\"amenityIconLabel\"]/text()').extract()]\n properties = {\n \"lat\": response.xpath('//meta[@property=\"place:location:latitude\"]/@content').get(),\n \"lon\": response.xpath('//meta[@property=\"place:location:longitude\"]/@content').get(),\n \"ref\": response.url.rsplit(\"/\", 1)[-1],\n \"website\": response.url,\n \"name\": response.xpath('normalize-space(//*[@id=\"pageTitleStoreName\"])').get(),\n \"addr_full\": response.xpath('normalize-space(//div[@class=\"locationDetailsContactRow\"][1]//br/..)').get(),\n \"phone\": response.xpath('//a[contains(@href,\"tel:\")]/text()').get(),\n \"opening_hours\": \"24/7\" if \"24 Hour\" in amenities else None,\n \"extras\": {\n \"atm\": \"ATM\" in amenities,\n \"amenity:fuel\": True,\n \"amenity:toilets\": \"Public Restroom\" in amenities or None,\n \"car_wash\": \"Car Wash\" in amenities,\n \"fuel:diesel\": \"Diesel\" in amenities or None,\n \"fuel:e85\": \"E-85\" in amenities or None,\n },\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/valero.py"}]}
| 969 | 938 |
gh_patches_debug_14564
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Distinguish between staging & prod when sending data refresh slack updates
## Problem
<!-- Describe a problem solved by this feature; or delete the section entirely. -->
It's not easy to tell based on the slack messages produced by the ingestion server whether a given ingestion is being run in production or staging.
## Description
<!-- Describe the feature and how it solves the problem. -->
We should pre-pend the messages (or change the bot's name) based on which environment the data refresh is running in.
## Alternatives
<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->
We _could_ turn off messages altogether for staging (how it is currently) but that would make it harder for us to know when a staging data refresh is complete.
## Additional context
<!-- Add any other context about the feature here; or delete the section entirely. -->
## Implementation
<!-- Replace the [ ] with [x] to check the box. -->
- [x] 🙋 I would be interested in implementing this feature.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ingestion_server/ingestion_server/slack.py`
Content:
```
1 import logging
2 import os
3
4 import requests
5
6
7 log = logging.getLogger(__name__)
8 SLACK_WEBHOOK = "SLACK_WEBHOOK"
9
10
11 def message(text: str, summary: str = None) -> None:
12 """
13 Send a Slack message to a channel specified by a Slack webhook variable.
14
15 A message is only sent if the SLACK_WEBHOOK environment variable is undefined.
16 """
17 if not (webhook := os.getenv(SLACK_WEBHOOK)):
18 log.debug(
19 f"{SLACK_WEBHOOK} variable not defined, skipping slack message: {text}"
20 )
21 return
22 if not summary:
23 if "\n" in text:
24 summary = "Ingestion server message"
25 else:
26 summary = text
27
28 data = {
29 "blocks": [{"text": {"text": text, "type": "mrkdwn"}, "type": "section"}],
30 "text": summary,
31 "username": "Data Refresh Notification",
32 "icon_emoji": "arrows_counterclockwise",
33 }
34 try:
35 requests.post(webhook, json=data)
36 except Exception as err:
37 log.exception(f"Unable to issue slack message: {err}")
38 pass
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ingestion_server/ingestion_server/slack.py b/ingestion_server/ingestion_server/slack.py
--- a/ingestion_server/ingestion_server/slack.py
+++ b/ingestion_server/ingestion_server/slack.py
@@ -2,6 +2,7 @@
import os
import requests
+from decouple import config
log = logging.getLogger(__name__)
@@ -25,10 +26,12 @@
else:
summary = text
+ environment = config("ENVIRONMENT", default="local")
+
data = {
"blocks": [{"text": {"text": text, "type": "mrkdwn"}, "type": "section"}],
"text": summary,
- "username": "Data Refresh Notification",
+ "username": f"Data Refresh Notification | {environment.upper()}",
"icon_emoji": "arrows_counterclockwise",
}
try:
|
{"golden_diff": "diff --git a/ingestion_server/ingestion_server/slack.py b/ingestion_server/ingestion_server/slack.py\n--- a/ingestion_server/ingestion_server/slack.py\n+++ b/ingestion_server/ingestion_server/slack.py\n@@ -2,6 +2,7 @@\n import os\n \n import requests\n+from decouple import config\n \n \n log = logging.getLogger(__name__)\n@@ -25,10 +26,12 @@\n else:\n summary = text\n \n+ environment = config(\"ENVIRONMENT\", default=\"local\")\n+\n data = {\n \"blocks\": [{\"text\": {\"text\": text, \"type\": \"mrkdwn\"}, \"type\": \"section\"}],\n \"text\": summary,\n- \"username\": \"Data Refresh Notification\",\n+ \"username\": f\"Data Refresh Notification | {environment.upper()}\",\n \"icon_emoji\": \"arrows_counterclockwise\",\n }\n try:\n", "issue": "Distinguish between staging & prod when sending data refresh slack updates\n## Problem\r\n<!-- Describe a problem solved by this feature; or delete the section entirely. -->\r\nIt's not easy to tell based on the slack messages produced by the ingestion server whether a given ingestion is being run in production or staging.\r\n\r\n## Description\r\n<!-- Describe the feature and how it solves the problem. -->\r\nWe should pre-pend the messages (or change the bot's name) based on which environment the data refresh is running in. \r\n\r\n## Alternatives\r\n<!-- Describe any alternative solutions or features you have considered. How is this feature better? -->\r\nWe _could_ turn off messages altogether for staging (how it is currently) but that would make it harder for us to know when a staging data refresh is complete.\r\n\r\n## Additional context\r\n<!-- Add any other context about the feature here; or delete the section entirely. -->\r\n\r\n## Implementation\r\n<!-- Replace the [ ] with [x] to check the box. 
-->\r\n- [x] \ud83d\ude4b I would be interested in implementing this feature.\r\n\n", "before_files": [{"content": "import logging\nimport os\n\nimport requests\n\n\nlog = logging.getLogger(__name__)\nSLACK_WEBHOOK = \"SLACK_WEBHOOK\"\n\n\ndef message(text: str, summary: str = None) -> None:\n \"\"\"\n Send a Slack message to a channel specified by a Slack webhook variable.\n\n A message is only sent if the SLACK_WEBHOOK environment variable is undefined.\n \"\"\"\n if not (webhook := os.getenv(SLACK_WEBHOOK)):\n log.debug(\n f\"{SLACK_WEBHOOK} variable not defined, skipping slack message: {text}\"\n )\n return\n if not summary:\n if \"\\n\" in text:\n summary = \"Ingestion server message\"\n else:\n summary = text\n\n data = {\n \"blocks\": [{\"text\": {\"text\": text, \"type\": \"mrkdwn\"}, \"type\": \"section\"}],\n \"text\": summary,\n \"username\": \"Data Refresh Notification\",\n \"icon_emoji\": \"arrows_counterclockwise\",\n }\n try:\n requests.post(webhook, json=data)\n except Exception as err:\n log.exception(f\"Unable to issue slack message: {err}\")\n pass\n", "path": "ingestion_server/ingestion_server/slack.py"}], "after_files": [{"content": "import logging\nimport os\n\nimport requests\nfrom decouple import config\n\n\nlog = logging.getLogger(__name__)\nSLACK_WEBHOOK = \"SLACK_WEBHOOK\"\n\n\ndef message(text: str, summary: str = None) -> None:\n \"\"\"\n Send a Slack message to a channel specified by a Slack webhook variable.\n\n A message is only sent if the SLACK_WEBHOOK environment variable is undefined.\n \"\"\"\n if not (webhook := os.getenv(SLACK_WEBHOOK)):\n log.debug(\n f\"{SLACK_WEBHOOK} variable not defined, skipping slack message: {text}\"\n )\n return\n if not summary:\n if \"\\n\" in text:\n summary = \"Ingestion server message\"\n else:\n summary = text\n\n environment = config(\"ENVIRONMENT\", default=\"local\")\n\n data = {\n \"blocks\": [{\"text\": {\"text\": text, \"type\": \"mrkdwn\"}, \"type\": \"section\"}],\n \"text\": summary,\n \"username\": f\"Data Refresh Notification | {environment.upper()}\",\n \"icon_emoji\": \"arrows_counterclockwise\",\n }\n try:\n requests.post(webhook, json=data)\n except Exception as err:\n log.exception(f\"Unable to issue slack message: {err}\")\n pass\n", "path": "ingestion_server/ingestion_server/slack.py"}]}
| 804 | 205 |
gh_patches_debug_26399
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-1503
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `cirq.control` documentation talking about `__control__` instead of `controlled_by`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/protocols/control.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, TYPE_CHECKING, TypeVar, Union, Sequence
16
17 import collections
18
19 from cirq.ops import op_tree
20
21 if TYPE_CHECKING:
22 # pylint: disable=unused-import
23 import cirq
24
25 # This is a special indicator value used by the control method to determine
26 # whether or not the caller provided a 'default' argument.
27 RaiseTypeErrorIfNotProvided = ([],) # type: Any
28
29
30 TDefault = TypeVar('TDefault')
31
32 def control(controllee: Union['cirq.Gate', op_tree.OP_TREE],
33 control_qubits: Sequence['cirq.Qid'] = None,
34 default: Any = RaiseTypeErrorIfNotProvided) -> Any:
35 """Returns a Controlled version of the given value, if defined.
36
37 Controllees define how to be controlled by defining a method
38 __control__(self, control_qubits). Note that the method may return
39 NotImplemented to indicate a particular controlling can't be done.
40
41 Args:
42 controllee: The gate, operation or iterable of operations to control.
43 control_qubits: A list of Qids that would control this controllee.
44 default: Determines the fallback behavior when `controllee` doesn't
45 have a controlling defined. If `default` is not set and the
46 fallback occurs, a TypeError is raised instead.
47
48 Returns:
49 If `controllee` has a __control__ method that returns something besides
50 NotImplemented, that result is returned. For an OP_TREE, transformation
51 is applied at the leaf. Otherwise, if a default value was specified,
52 the default value is returned.
53
54 Raises:
55 TypeError: `controllee` doesn't have a __control__ method (or that
56 method returned NotImplemented) and no `default` was specified.
57 """
58 if control_qubits is None:
59 control_qubits = []
60 controller = getattr(controllee, 'controlled_by', None)
61 result = NotImplemented if controller is None else controller(
62 *control_qubits)
63 if result is not NotImplemented:
64 return result
65
66 if isinstance(controllee, collections.Iterable):
67 return op_tree.transform_op_tree(controllee, op_transformation=
68 lambda op: control(op, control_qubits))
69
70 if default is not RaiseTypeErrorIfNotProvided:
71 return default
72
73 if controller is None:
74 raise TypeError("object of type '{}' has no controlled_by "
75 "method.".format(type(controllee)))
76 raise TypeError("object of type '{}' does have a controlled_by method, "
77 "but it returned NotImplemented.".format(type(controllee)))
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq/protocols/control.py b/cirq/protocols/control.py
--- a/cirq/protocols/control.py
+++ b/cirq/protocols/control.py
@@ -35,7 +35,7 @@
"""Returns a Controlled version of the given value, if defined.
Controllees define how to be controlled by defining a method
- __control__(self, control_qubits). Note that the method may return
+ controlled_by(self, control_qubits). Note that the method may return
NotImplemented to indicate a particular controlling can't be done.
Args:
@@ -46,13 +46,13 @@
fallback occurs, a TypeError is raised instead.
Returns:
- If `controllee` has a __control__ method that returns something besides
- NotImplemented, that result is returned. For an OP_TREE, transformation
- is applied at the leaf. Otherwise, if a default value was specified,
- the default value is returned.
+ If `controllee` has a controlled_by method that returns something
+ besides NotImplemented, that result is returned. For an OP_TREE,
+ transformation is applied at the leaf. Otherwise, if a default value
+ was specified, the default value is returned.
Raises:
- TypeError: `controllee` doesn't have a __control__ method (or that
+ TypeError: `controllee` doesn't have a controlled_by method (or that
method returned NotImplemented) and no `default` was specified.
"""
if control_qubits is None:
|
{"golden_diff": "diff --git a/cirq/protocols/control.py b/cirq/protocols/control.py\n--- a/cirq/protocols/control.py\n+++ b/cirq/protocols/control.py\n@@ -35,7 +35,7 @@\n \"\"\"Returns a Controlled version of the given value, if defined.\n \n Controllees define how to be controlled by defining a method\n- __control__(self, control_qubits). Note that the method may return\n+ controlled_by(self, control_qubits). Note that the method may return\n NotImplemented to indicate a particular controlling can't be done.\n \n Args:\n@@ -46,13 +46,13 @@\n fallback occurs, a TypeError is raised instead.\n \n Returns:\n- If `controllee` has a __control__ method that returns something besides\n- NotImplemented, that result is returned. For an OP_TREE, transformation\n- is applied at the leaf. Otherwise, if a default value was specified,\n- the default value is returned.\n+ If `controllee` has a controlled_by method that returns something\n+ besides NotImplemented, that result is returned. For an OP_TREE,\n+ transformation is applied at the leaf. Otherwise, if a default value\n+ was specified, the default value is returned.\n \n Raises:\n- TypeError: `controllee` doesn't have a __control__ method (or that\n+ TypeError: `controllee` doesn't have a controlled_by method (or that\n method returned NotImplemented) and no `default` was specified.\n \"\"\"\n if control_qubits is None:\n", "issue": "Fix `cirq.control` documentation talking about `__control__` instead of `controlled_by`\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, TYPE_CHECKING, TypeVar, Union, Sequence\n\nimport collections\n\nfrom cirq.ops import op_tree\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\n# This is a special indicator value used by the control method to determine\n# whether or not the caller provided a 'default' argument.\nRaiseTypeErrorIfNotProvided = ([],) # type: Any\n\n\nTDefault = TypeVar('TDefault')\n\ndef control(controllee: Union['cirq.Gate', op_tree.OP_TREE],\n control_qubits: Sequence['cirq.Qid'] = None,\n default: Any = RaiseTypeErrorIfNotProvided) -> Any:\n \"\"\"Returns a Controlled version of the given value, if defined.\n\n Controllees define how to be controlled by defining a method\n __control__(self, control_qubits). Note that the method may return\n NotImplemented to indicate a particular controlling can't be done.\n\n Args:\n controllee: The gate, operation or iterable of operations to control.\n control_qubits: A list of Qids that would control this controllee.\n default: Determines the fallback behavior when `controllee` doesn't\n have a controlling defined. If `default` is not set and the\n fallback occurs, a TypeError is raised instead.\n\n Returns:\n If `controllee` has a __control__ method that returns something besides\n NotImplemented, that result is returned. For an OP_TREE, transformation\n is applied at the leaf. 
Otherwise, if a default value was specified,\n the default value is returned.\n\n Raises:\n TypeError: `controllee` doesn't have a __control__ method (or that\n method returned NotImplemented) and no `default` was specified.\n \"\"\"\n if control_qubits is None:\n control_qubits = []\n controller = getattr(controllee, 'controlled_by', None)\n result = NotImplemented if controller is None else controller(\n *control_qubits)\n if result is not NotImplemented:\n return result\n\n if isinstance(controllee, collections.Iterable):\n return op_tree.transform_op_tree(controllee, op_transformation=\n lambda op: control(op, control_qubits))\n\n if default is not RaiseTypeErrorIfNotProvided:\n return default\n\n if controller is None:\n raise TypeError(\"object of type '{}' has no controlled_by \"\n \"method.\".format(type(controllee)))\n raise TypeError(\"object of type '{}' does have a controlled_by method, \"\n \"but it returned NotImplemented.\".format(type(controllee)))\n", "path": "cirq/protocols/control.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, TYPE_CHECKING, TypeVar, Union, Sequence\n\nimport collections\n\nfrom cirq.ops import op_tree\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\n# This is a special indicator value used by the control method to determine\n# whether or not the caller provided a 'default' argument.\nRaiseTypeErrorIfNotProvided = ([],) # type: Any\n\n\nTDefault = TypeVar('TDefault')\n\ndef control(controllee: Union['cirq.Gate', op_tree.OP_TREE],\n control_qubits: Sequence['cirq.Qid'] = None,\n default: Any = RaiseTypeErrorIfNotProvided) -> Any:\n \"\"\"Returns a Controlled version of the given value, if defined.\n\n Controllees define how to be controlled by defining a method\n controlled_by(self, control_qubits). Note that the method may return\n NotImplemented to indicate a particular controlling can't be done.\n\n Args:\n controllee: The gate, operation or iterable of operations to control.\n control_qubits: A list of Qids that would control this controllee.\n default: Determines the fallback behavior when `controllee` doesn't\n have a controlling defined. If `default` is not set and the\n fallback occurs, a TypeError is raised instead.\n\n Returns:\n If `controllee` has a controlled_by method that returns something\n besides NotImplemented, that result is returned. For an OP_TREE,\n transformation is applied at the leaf. 
Otherwise, if a default value\n was specified, the default value is returned.\n\n Raises:\n TypeError: `controllee` doesn't have a controlled_by method (or that\n method returned NotImplemented) and no `default` was specified.\n \"\"\"\n if control_qubits is None:\n control_qubits = []\n controller = getattr(controllee, 'controlled_by', None)\n result = NotImplemented if controller is None else controller(\n *control_qubits)\n if result is not NotImplemented:\n return result\n\n if isinstance(controllee, collections.Iterable):\n return op_tree.transform_op_tree(controllee, op_transformation=\n lambda op: control(op, control_qubits))\n\n if default is not RaiseTypeErrorIfNotProvided:\n return default\n\n if controller is None:\n raise TypeError(\"object of type '{}' has no controlled_by \"\n \"method.\".format(type(controllee)))\n raise TypeError(\"object of type '{}' does have a controlled_by method, \"\n \"but it returned NotImplemented.\".format(type(controllee)))\n", "path": "cirq/protocols/control.py"}]}
| 1,111 | 339 |
gh_patches_debug_20936
|
rasdani/github-patches
|
git_diff
|
Zeroto521__my-data-toolkit-706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: New geoaccessor to generate great circle distances matrix
<!--
Thanks for contributing a pull request!
Please follow these standard acronyms to start the commit message:
- ENH: enhancement
- BUG: bug fix
- DOC: documentation
- TYP: type annotations
- TST: addition or modification of tests
- MAINT: maintenance commit (refactoring, typos, etc.)
- BLD: change related to building
- REL: related to releasing
- API: an (incompatible) API change
- DEP: deprecate something, or remove a deprecated object
- DEV: development tool or utility
- REV: revert an earlier commit
- PERF: performance improvement
- BOT: always commit via a bot
- CI: related to CI or CD
- CLN: Code cleanup
-->
- [x] closes #699
- [ ] whatsnew entry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dtoolkit/geoaccessor/geoseries/geodistance_matrix.py`
Content:
```
1 from __future__ import annotations
2
3 import geopandas as gpd
4 import numpy as np
5 import pandas as pd
6
7 from dtoolkit.geoaccessor.register import register_geoseries_method
8
9
10 @register_geoseries_method
11 def geodistance_matrix(
12 s: gpd.GeoSeries,
13 /,
14 other: gpd.GeoSeries | gpd.GeoDataFrame | None = None,
15 radius: float = 6371008.7714150598,
16 ) -> pd.DataFrame:
17 """
18 Returns a ``DataFrame`` containing the `great-circle`__ distances matrix between in
19 ``s`` and ``other`` via haversine formula.
20
21 __ https://en.wikipedia.org/wiki/Great-circle_distance
22
23 .. math::
24
25 D(x, y) = 2 \\arcsin [
26 \\sqrt{
27 \\sin^2 ((x_1 - y_1) / 2)
28 + \\cos(x_1) \\cos(y_1) \\sin^2 ((x_2 - y_2) / 2)
29 }
30 ]
31
32 Parameters
33 ----------
34 other : GeoSeries, or GeoDataFrame, default None
35 If None, uses ``other=s``.
36
37 radius : float, default 6371008.7714150598
38 Great-circle distance uses a spherical model of the earth, using the mean earth
39 radius as defined by the International Union of Geodesy and Geophysics,
40 (2\\ *a* + *b*)/3 = 6371008.7714150598 meters for WGS-84.
41
42 Returns
43 -------
44 DataFrame
45 - The index and columns are the same as the index of ``s`` and ``other``.
46 - The values are the great-circle distances and its unit is meters.
47
48 Raises
49 ------
50 ModuleNotFoundError
51 If don't have module named 'sklearn'.
52
53 ValueError
54 If the CRS is not ``ESGP:4326``.
55
56 See Also
57 --------
58 sklearn.metrics.pairwise.haversine_distances
59 dtoolkit.geoaccessor.geoseries.geodistance
60 dtoolkit.geoaccessor.geoseries.geodistance_matrix
61 dtoolkit.geoaccessor.geodataframe.geodistance
62 dtoolkit.geoaccessor.geodataframe.geodistance_matrix
63
64 Notes
65 -----
66 - Currently, only supports Point geometry.
67 - The great-circle distance is the angular distance between two points on the
68 surface of a sphere. As the Earth is nearly spherical, the haversine formula
69 provides a good approximation of the distance between two points of the Earth
70 surface, with a less than 1% error on average.
71
72 Examples
73 --------
74 >>> import dtoolkit.geoaccessor
75 >>> df = pd.DataFrame(
76 ... {
77 ... "x": [120, 122, 100],
78 ... "y":[30, 55, 1],
79 ... },
80 ... ).from_xy("x", "y", crs=4326)
81 >>> df
82 x y geometry
83 0 120 30 POINT (120.00000 30.00000)
84 1 122 55 POINT (122.00000 55.00000)
85 2 100 1 POINT (100.00000 1.00000)
86 >>> other = pd.DataFrame(
87 ... {
88 ... "x": [120, 110],
89 ... "y":[30, 40],
90 ... },
91 ... ).from_xy("x", "y", crs=4326)
92 >>> other
93 x y geometry
94 0 120 30 POINT (120.00000 30.00000)
95 1 110 40 POINT (110.00000 40.00000)
96 >>> df.geodistance_matrix(other)
97 0 1
98 0 0.000000e+00 1.203540e+06
99 1 1.439971e+06 1.511958e+06
100 2 2.418544e+06 1.522752e+06
101 """
102 from sklearn.metrics.pairwise import haversine_distances
103
104 if s.crs != 4326:
105 raise ValueError(f"Only support 'EPSG:4326' CRS, but got {s.crs!r}.")
106
107 if isinstance(other, gpd.base.GeoPandasBase):
108 if other.crs != 4326:
109 raise ValueError(f"Only support 'EPSG:4326' CRS, but got {other.crs!r}.")
110
111 # Force convert to GeoSeries
112 other = other.geometry
113
114 X = np.radians(np.stack((s.x, s.y), axis=1))
115 Y = np.radians(np.stack((other.x, other.y), axis=1)) if other is not None else other
116 return pd.DataFrame(
117 radius * haversine_distances(X, Y),
118 index=s.index,
119 columns=other.index,
120 )
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py b/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py
--- a/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py
+++ b/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py
@@ -95,9 +95,9 @@
1 110 40 POINT (110.00000 40.00000)
>>> df.geodistance_matrix(other)
0 1
- 0 0.000000e+00 1.203540e+06
- 1 1.439971e+06 1.511958e+06
- 2 2.418544e+06 1.522752e+06
+ 0 0.000000e+00 1.435335e+06
+ 1 2.784435e+06 1.889892e+06
+ 2 3.855604e+06 4.453100e+06
"""
from sklearn.metrics.pairwise import haversine_distances
@@ -111,8 +111,8 @@
# Force convert to GeoSeries
other = other.geometry
- X = np.radians(np.stack((s.x, s.y), axis=1))
- Y = np.radians(np.stack((other.x, other.y), axis=1)) if other is not None else other
+ X = np.radians(np.stack((s.y, s.x), axis=1))
+ Y = np.radians(np.stack((other.y, other.x), axis=1)) if other is not None else other
return pd.DataFrame(
radius * haversine_distances(X, Y),
index=s.index,
|
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py b/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py\n--- a/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py\n+++ b/dtoolkit/geoaccessor/geoseries/geodistance_matrix.py\n@@ -95,9 +95,9 @@\n 1 110 40 POINT (110.00000 40.00000)\n >>> df.geodistance_matrix(other)\n 0 1\n- 0 0.000000e+00 1.203540e+06\n- 1 1.439971e+06 1.511958e+06\n- 2 2.418544e+06 1.522752e+06\n+ 0 0.000000e+00 1.435335e+06\n+ 1 2.784435e+06 1.889892e+06\n+ 2 3.855604e+06 4.453100e+06\n \"\"\"\n from sklearn.metrics.pairwise import haversine_distances\n \n@@ -111,8 +111,8 @@\n # Force convert to GeoSeries\n other = other.geometry\n \n- X = np.radians(np.stack((s.x, s.y), axis=1))\n- Y = np.radians(np.stack((other.x, other.y), axis=1)) if other is not None else other\n+ X = np.radians(np.stack((s.y, s.x), axis=1))\n+ Y = np.radians(np.stack((other.y, other.x), axis=1)) if other is not None else other\n return pd.DataFrame(\n radius * haversine_distances(X, Y),\n index=s.index,\n", "issue": "ENH: New geoaccessor to generate great circle distances matrix\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #699\r\n- [ ] whatsnew entry\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\n\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geodistance_matrix(\n s: gpd.GeoSeries,\n /,\n other: gpd.GeoSeries | gpd.GeoDataFrame | None = None,\n radius: float = 6371008.7714150598,\n) -> pd.DataFrame:\n \"\"\"\n Returns a ``DataFrame`` containing the `great-circle`__ distances matrix between in\n ``s`` and ``other`` via haversine formula.\n\n __ https://en.wikipedia.org/wiki/Great-circle_distance\n\n .. 
math::\n\n D(x, y) = 2 \\\\arcsin [\n \\\\sqrt{\n \\\\sin^2 ((x_1 - y_1) / 2)\n + \\\\cos(x_1) \\\\cos(y_1) \\\\sin^2 ((x_2 - y_2) / 2)\n }\n ]\n\n Parameters\n ----------\n other : GeoSeries, or GeoDataFrame, default None\n If None, uses ``other=s``.\n\n radius : float, default 6371008.7714150598\n Great-circle distance uses a spherical model of the earth, using the mean earth\n radius as defined by the International Union of Geodesy and Geophysics,\n (2\\\\ *a* + *b*)/3 = 6371008.7714150598 meters for WGS-84.\n\n Returns\n -------\n DataFrame\n - The index and columns are the same as the index of ``s`` and ``other``.\n - The values are the great-circle distances and its unit is meters.\n\n Raises\n ------\n ModuleNotFoundError\n If don't have module named 'sklearn'.\n\n ValueError\n If the CRS is not ``ESGP:4326``.\n\n See Also\n --------\n sklearn.metrics.pairwise.haversine_distances\n dtoolkit.geoaccessor.geoseries.geodistance\n dtoolkit.geoaccessor.geoseries.geodistance_matrix\n dtoolkit.geoaccessor.geodataframe.geodistance\n dtoolkit.geoaccessor.geodataframe.geodistance_matrix\n\n Notes\n -----\n - Currently, only supports Point geometry.\n - The great-circle distance is the angular distance between two points on the\n surface of a sphere. As the Earth is nearly spherical, the haversine formula\n provides a good approximation of the distance between two points of the Earth\n surface, with a less than 1% error on average.\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> df = pd.DataFrame(\n ... {\n ... \"x\": [120, 122, 100],\n ... \"y\":[30, 55, 1],\n ... },\n ... ).from_xy(\"x\", \"y\", crs=4326)\n >>> df\n x y geometry\n 0 120 30 POINT (120.00000 30.00000)\n 1 122 55 POINT (122.00000 55.00000)\n 2 100 1 POINT (100.00000 1.00000)\n >>> other = pd.DataFrame(\n ... {\n ... \"x\": [120, 110],\n ... \"y\":[30, 40],\n ... },\n ... ).from_xy(\"x\", \"y\", crs=4326)\n >>> other\n x y geometry\n 0 120 30 POINT (120.00000 30.00000)\n 1 110 40 POINT (110.00000 40.00000)\n >>> df.geodistance_matrix(other)\n 0 1\n 0 0.000000e+00 1.203540e+06\n 1 1.439971e+06 1.511958e+06\n 2 2.418544e+06 1.522752e+06\n \"\"\"\n from sklearn.metrics.pairwise import haversine_distances\n\n if s.crs != 4326:\n raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {s.crs!r}.\")\n\n if isinstance(other, gpd.base.GeoPandasBase):\n if other.crs != 4326:\n raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {other.crs!r}.\")\n\n # Force convert to GeoSeries\n other = other.geometry\n\n X = np.radians(np.stack((s.x, s.y), axis=1))\n Y = np.radians(np.stack((other.x, other.y), axis=1)) if other is not None else other\n return pd.DataFrame(\n radius * haversine_distances(X, Y),\n index=s.index,\n columns=other.index,\n )\n", "path": "dtoolkit/geoaccessor/geoseries/geodistance_matrix.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport geopandas as gpd\nimport numpy as np\nimport pandas as pd\n\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geodistance_matrix(\n s: gpd.GeoSeries,\n /,\n other: gpd.GeoSeries | gpd.GeoDataFrame | None = None,\n radius: float = 6371008.7714150598,\n) -> pd.DataFrame:\n \"\"\"\n Returns a ``DataFrame`` containing the `great-circle`__ distances matrix between in\n ``s`` and ``other`` via haversine formula.\n\n __ https://en.wikipedia.org/wiki/Great-circle_distance\n\n .. 
math::\n\n D(x, y) = 2 \\\\arcsin [\n \\\\sqrt{\n \\\\sin^2 ((x_1 - y_1) / 2)\n + \\\\cos(x_1) \\\\cos(y_1) \\\\sin^2 ((x_2 - y_2) / 2)\n }\n ]\n\n Parameters\n ----------\n other : GeoSeries, or GeoDataFrame, default None\n If None, uses ``other=s``.\n\n radius : float, default 6371008.7714150598\n Great-circle distance uses a spherical model of the earth, using the mean earth\n radius as defined by the International Union of Geodesy and Geophysics,\n (2\\\\ *a* + *b*)/3 = 6371008.7714150598 meters for WGS-84.\n\n Returns\n -------\n DataFrame\n - The index and columns are the same as the index of ``s`` and ``other``.\n - The values are the great-circle distances and its unit is meters.\n\n Raises\n ------\n ModuleNotFoundError\n If don't have module named 'sklearn'.\n\n ValueError\n If the CRS is not ``ESGP:4326``.\n\n See Also\n --------\n sklearn.metrics.pairwise.haversine_distances\n dtoolkit.geoaccessor.geoseries.geodistance\n dtoolkit.geoaccessor.geoseries.geodistance_matrix\n dtoolkit.geoaccessor.geodataframe.geodistance\n dtoolkit.geoaccessor.geodataframe.geodistance_matrix\n\n Notes\n -----\n - Currently, only supports Point geometry.\n - The great-circle distance is the angular distance between two points on the\n surface of a sphere. As the Earth is nearly spherical, the haversine formula\n provides a good approximation of the distance between two points of the Earth\n surface, with a less than 1% error on average.\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> df = pd.DataFrame(\n ... {\n ... \"x\": [120, 122, 100],\n ... \"y\":[30, 55, 1],\n ... },\n ... ).from_xy(\"x\", \"y\", crs=4326)\n >>> df\n x y geometry\n 0 120 30 POINT (120.00000 30.00000)\n 1 122 55 POINT (122.00000 55.00000)\n 2 100 1 POINT (100.00000 1.00000)\n >>> other = pd.DataFrame(\n ... {\n ... \"x\": [120, 110],\n ... \"y\":[30, 40],\n ... },\n ... ).from_xy(\"x\", \"y\", crs=4326)\n >>> other\n x y geometry\n 0 120 30 POINT (120.00000 30.00000)\n 1 110 40 POINT (110.00000 40.00000)\n >>> df.geodistance_matrix(other)\n 0 1\n 0 0.000000e+00 1.435335e+06\n 1 2.784435e+06 1.889892e+06\n 2 3.855604e+06 4.453100e+06\n \"\"\"\n from sklearn.metrics.pairwise import haversine_distances\n\n if s.crs != 4326:\n raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {s.crs!r}.\")\n\n if isinstance(other, gpd.base.GeoPandasBase):\n if other.crs != 4326:\n raise ValueError(f\"Only support 'EPSG:4326' CRS, but got {other.crs!r}.\")\n\n # Force convert to GeoSeries\n other = other.geometry\n\n X = np.radians(np.stack((s.y, s.x), axis=1))\n Y = np.radians(np.stack((other.y, other.x), axis=1)) if other is not None else other\n return pd.DataFrame(\n radius * haversine_distances(X, Y),\n index=s.index,\n columns=other.index,\n )\n", "path": "dtoolkit/geoaccessor/geoseries/geodistance_matrix.py"}]}
| 1,986 | 487 |
gh_patches_debug_13663
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-493
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New CSV imports use autogenerated column names.
## Description
<!-- A clear and concise description of what the bug is. -->
According to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
The first row of the CSV should be used as header names by default.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Upload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/serializers.py`
Content:
```
1 from django.urls import reverse
2 from rest_framework import serializers
3
4 from mathesar.models import Table, Schema, DataFile, Database
5
6
7 class NestedTableSerializer(serializers.HyperlinkedModelSerializer):
8 url = serializers.SerializerMethodField()
9
10 class Meta:
11 model = Table
12 fields = ['id', 'name', 'url']
13
14 def get_url(self, obj):
15 request = self.context['request']
16 return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))
17
18
19 class ModelNameField(serializers.CharField):
20 """
21 De-serializes the request field as a string, but serializes the response field as
22 `model.name`. Required to support passing and returing a model name from the
23 endpoint, while also storing the model as a related field.
24 """
25 def to_representation(self, value):
26 return value.name
27
28
29 class SchemaSerializer(serializers.HyperlinkedModelSerializer):
30 tables = NestedTableSerializer(many=True, read_only=True)
31 name = serializers.CharField()
32 database = ModelNameField(max_length=128)
33
34 class Meta:
35 model = Schema
36 fields = ['id', 'name', 'tables', 'database', 'has_dependencies']
37
38
39 class SimpleColumnSerializer(serializers.Serializer):
40 name = serializers.CharField()
41 type = serializers.CharField()
42
43
44 class ColumnSerializer(SimpleColumnSerializer):
45 index = serializers.IntegerField(source='column_index', read_only=True)
46 nullable = serializers.BooleanField(default=True)
47 primary_key = serializers.BooleanField(default=False)
48 valid_target_types = serializers.ListField(read_only=True)
49
50
51 class TableSerializer(serializers.ModelSerializer):
52 columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')
53 records = serializers.SerializerMethodField()
54 name = serializers.CharField()
55 data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())
56
57 class Meta:
58 model = Table
59 fields = ['id', 'name', 'schema', 'created_at', 'updated_at',
60 'columns', 'records', 'data_files', 'has_dependencies']
61
62 def get_records(self, obj):
63 if isinstance(obj, Table):
64 # Only get records if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70
71 class RecordSerializer(serializers.BaseSerializer):
72 def to_representation(self, instance):
73 return instance._asdict()
74
75
76 class RecordListParameterSerializer(serializers.Serializer):
77 filters = serializers.JSONField(required=False, default=[])
78 order_by = serializers.JSONField(required=False, default=[])
79 group_count_by = serializers.JSONField(required=False, default=[])
80
81
82 class DatabaseSerializer(serializers.ModelSerializer):
83 supported_types = serializers.ListField(child=serializers.CharField())
84
85 class Meta:
86 model = Database
87 fields = ['id', 'name', 'deleted', 'supported_types']
88 read_only_fields = ['id', 'name', 'deleted', 'supported_types']
89
90
91 class DataFileSerializer(serializers.ModelSerializer):
92 user = serializers.PrimaryKeyRelatedField(
93 default=serializers.CurrentUserDefault(), read_only=True
94 )
95
96 class Meta:
97 model = DataFile
98 fields = [
99 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'
100 ]
101 extra_kwargs = {'delimiter': {'trim_whitespace': False},
102 'escapechar': {'trim_whitespace': False},
103 'quotechar': {'trim_whitespace': False}}
104 # We only currently support importing to a new table, so setting a table via API is invalid.
105 # User should be set automatically, not submitted via the API.
106 read_only_fields = ['table_imported_to']
107
108 def save(self, **kwargs):
109 """
110 Set user to current user while saving the data file.
111 """
112 current_user = self.fields['user'].get_default()
113 if current_user.is_authenticated:
114 kwargs['user'] = current_user
115 return super().save(**kwargs)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/serializers.py b/mathesar/serializers.py
--- a/mathesar/serializers.py
+++ b/mathesar/serializers.py
@@ -100,7 +100,8 @@
]
extra_kwargs = {'delimiter': {'trim_whitespace': False},
'escapechar': {'trim_whitespace': False},
- 'quotechar': {'trim_whitespace': False}}
+ 'quotechar': {'trim_whitespace': False},
+ 'header': {'required': True}}
# We only currently support importing to a new table, so setting a table via API is invalid.
# User should be set automatically, not submitted via the API.
read_only_fields = ['table_imported_to']
|
{"golden_diff": "diff --git a/mathesar/serializers.py b/mathesar/serializers.py\n--- a/mathesar/serializers.py\n+++ b/mathesar/serializers.py\n@@ -100,7 +100,8 @@\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n- 'quotechar': {'trim_whitespace': False}}\n+ 'quotechar': {'trim_whitespace': False},\n+ 'header': {'required': True}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n", "issue": "New CSV imports use autogenerated column names.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAccording to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe first row of the CSV should be used as header names by default.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\nUpload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.\n", "before_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. 
Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n 
url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False},\n 'header': {'required': True}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data 
file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}]}
| 1,517 | 156 |
gh_patches_debug_4145
|
rasdani/github-patches
|
git_diff
|
benoitc__gunicorn-1414
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gunicorn.pidfile.validate crashes gunicorn when PID exists but is from a different user
```
Traceback (most recent call last):
File "/opt/python2.7/bin/gunicorn", line 11, in <module>
sys.exit(run())
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/wsgiapp.py", line 74, in run
WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py", line 189, in run
super(Application, self).run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py", line 72, in run
Arbiter(self).run()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py", line 171, in run
self.start()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py", line 125, in start
self.pidfile.create(self.pid)
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py", line 23, in create
oldpid = self.validate()
File "/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py", line 75, in validate
os.kill(wpid, 0)
OSError: [Errno 1] Operation not permitted
```
This happens because the process identified by the pid-file exists, but belongs to a different user than the one starting gunicorn.
(This is with gunicorn 19.3.0)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn/pidfile.py`
Content:
```
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 import errno
7 import os
8 import tempfile
9
10
11 class Pidfile(object):
12 """\
13 Manage a PID file. If a specific name is provided
14 it and '"%s.oldpid" % name' will be used. Otherwise
15 we create a temp file using os.mkstemp.
16 """
17
18 def __init__(self, fname):
19 self.fname = fname
20 self.pid = None
21
22 def create(self, pid):
23 oldpid = self.validate()
24 if oldpid:
25 if oldpid == os.getpid():
26 return
27 msg = "Already running on PID %s (or pid file '%s' is stale)"
28 raise RuntimeError(msg % (oldpid, self.fname))
29
30 self.pid = pid
31
32 # Write pidfile
33 fdir = os.path.dirname(self.fname)
34 if fdir and not os.path.isdir(fdir):
35 raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
36 fd, fname = tempfile.mkstemp(dir=fdir)
37 os.write(fd, ("%s\n" % self.pid).encode('utf-8'))
38 if self.fname:
39 os.rename(fname, self.fname)
40 else:
41 self.fname = fname
42 os.close(fd)
43
44 # set permissions to -rw-r--r--
45 os.chmod(self.fname, 420)
46
47 def rename(self, path):
48 self.unlink()
49 self.fname = path
50 self.create(self.pid)
51
52 def unlink(self):
53 """ delete pidfile"""
54 try:
55 with open(self.fname, "r") as f:
56 pid1 = int(f.read() or 0)
57
58 if pid1 == self.pid:
59 os.unlink(self.fname)
60 except:
61 pass
62
63 def validate(self):
64 """ Validate pidfile and make it stale if needed"""
65 if not self.fname:
66 return
67 try:
68 with open(self.fname, "r") as f:
69 try:
70 wpid = int(f.read())
71 except ValueError:
72 return
73
74 try:
75 os.kill(wpid, 0)
76 return wpid
77 except OSError as e:
78 if e.args[0] == errno.ESRCH:
79 return
80 raise
81 except IOError as e:
82 if e.args[0] == errno.ENOENT:
83 return
84 raise
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gunicorn/pidfile.py b/gunicorn/pidfile.py
--- a/gunicorn/pidfile.py
+++ b/gunicorn/pidfile.py
@@ -75,6 +75,8 @@
os.kill(wpid, 0)
return wpid
except OSError as e:
+ if e.args[0] == errno.EPERM:
+ return wpid
if e.args[0] == errno.ESRCH:
return
raise
|
{"golden_diff": "diff --git a/gunicorn/pidfile.py b/gunicorn/pidfile.py\n--- a/gunicorn/pidfile.py\n+++ b/gunicorn/pidfile.py\n@@ -75,6 +75,8 @@\n os.kill(wpid, 0)\n return wpid\n except OSError as e:\n+ if e.args[0] == errno.EPERM:\n+ return wpid\n if e.args[0] == errno.ESRCH:\n return\n raise\n", "issue": "gunicorn.pidfile.validate crashes gunicorn when PID exists but is from a different user\n```\nTraceback (most recent call last):\n File \"/opt/python2.7/bin/gunicorn\", line 11, in <module>\n sys.exit(run())\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/wsgiapp.py\", line 74, in run\n WSGIApplication(\"%(prog)s [OPTIONS] [APP_MODULE]\").run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py\", line 189, in run\n super(Application, self).run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/app/base.py\", line 72, in run\n Arbiter(self).run()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py\", line 171, in run\n self.start()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/arbiter.py\", line 125, in start\n self.pidfile.create(self.pid)\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py\", line 23, in create\n oldpid = self.validate()\n File \"/opt/python2.7/lib/python2.7/site-packages/gunicorn/pidfile.py\", line 75, in validate\n os.kill(wpid, 0)\nOSError: [Errno 1] Operation not permitted\n```\n\nThis happens because the process identified by the pid-file exists, but belongs to a different user than the one starting gunicorn.\n\n(This is with gunicorn 19.3.0)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport errno\nimport os\nimport tempfile\n\n\nclass Pidfile(object):\n \"\"\"\\\n Manage a PID file. If a specific name is provided\n it and '\"%s.oldpid\" % name' will be used. Otherwise\n we create a temp file using os.mkstemp.\n \"\"\"\n\n def __init__(self, fname):\n self.fname = fname\n self.pid = None\n\n def create(self, pid):\n oldpid = self.validate()\n if oldpid:\n if oldpid == os.getpid():\n return\n msg = \"Already running on PID %s (or pid file '%s' is stale)\"\n raise RuntimeError(msg % (oldpid, self.fname))\n\n self.pid = pid\n\n # Write pidfile\n fdir = os.path.dirname(self.fname)\n if fdir and not os.path.isdir(fdir):\n raise RuntimeError(\"%s doesn't exist. 
Can't create pidfile.\" % fdir)\n fd, fname = tempfile.mkstemp(dir=fdir)\n os.write(fd, (\"%s\\n\" % self.pid).encode('utf-8'))\n if self.fname:\n os.rename(fname, self.fname)\n else:\n self.fname = fname\n os.close(fd)\n\n # set permissions to -rw-r--r--\n os.chmod(self.fname, 420)\n\n def rename(self, path):\n self.unlink()\n self.fname = path\n self.create(self.pid)\n\n def unlink(self):\n \"\"\" delete pidfile\"\"\"\n try:\n with open(self.fname, \"r\") as f:\n pid1 = int(f.read() or 0)\n\n if pid1 == self.pid:\n os.unlink(self.fname)\n except:\n pass\n\n def validate(self):\n \"\"\" Validate pidfile and make it stale if needed\"\"\"\n if not self.fname:\n return\n try:\n with open(self.fname, \"r\") as f:\n try:\n wpid = int(f.read())\n except ValueError:\n return\n\n try:\n os.kill(wpid, 0)\n return wpid\n except OSError as e:\n if e.args[0] == errno.ESRCH:\n return\n raise\n except IOError as e:\n if e.args[0] == errno.ENOENT:\n return\n raise\n", "path": "gunicorn/pidfile.py"}], "after_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport errno\nimport os\nimport tempfile\n\n\nclass Pidfile(object):\n \"\"\"\\\n Manage a PID file. If a specific name is provided\n it and '\"%s.oldpid\" % name' will be used. Otherwise\n we create a temp file using os.mkstemp.\n \"\"\"\n\n def __init__(self, fname):\n self.fname = fname\n self.pid = None\n\n def create(self, pid):\n oldpid = self.validate()\n if oldpid:\n if oldpid == os.getpid():\n return\n msg = \"Already running on PID %s (or pid file '%s' is stale)\"\n raise RuntimeError(msg % (oldpid, self.fname))\n\n self.pid = pid\n\n # Write pidfile\n fdir = os.path.dirname(self.fname)\n if fdir and not os.path.isdir(fdir):\n raise RuntimeError(\"%s doesn't exist. Can't create pidfile.\" % fdir)\n fd, fname = tempfile.mkstemp(dir=fdir)\n os.write(fd, (\"%s\\n\" % self.pid).encode('utf-8'))\n if self.fname:\n os.rename(fname, self.fname)\n else:\n self.fname = fname\n os.close(fd)\n\n # set permissions to -rw-r--r--\n os.chmod(self.fname, 420)\n\n def rename(self, path):\n self.unlink()\n self.fname = path\n self.create(self.pid)\n\n def unlink(self):\n \"\"\" delete pidfile\"\"\"\n try:\n with open(self.fname, \"r\") as f:\n pid1 = int(f.read() or 0)\n\n if pid1 == self.pid:\n os.unlink(self.fname)\n except:\n pass\n\n def validate(self):\n \"\"\" Validate pidfile and make it stale if needed\"\"\"\n if not self.fname:\n return\n try:\n with open(self.fname, \"r\") as f:\n try:\n wpid = int(f.read())\n except ValueError:\n return\n\n try:\n os.kill(wpid, 0)\n return wpid\n except OSError as e:\n if e.args[0] == errno.EPERM:\n return wpid\n if e.args[0] == errno.ESRCH:\n return\n raise\n except IOError as e:\n if e.args[0] == errno.ENOENT:\n return\n raise\n", "path": "gunicorn/pidfile.py"}]}
| 1,335 | 108 |
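The gunicorn patch above hinges on how `os.kill(pid, 0)` reports liveness: `ESRCH` means no such process (the pidfile is stale), while `EPERM` means the process exists but is owned by another user, so the pidfile must be treated as live rather than crashing the arbiter. A standalone sketch of that probe, independent of gunicorn (the helper name is illustrative, not part of gunicorn's API):

```python
import errno
import os


def pid_is_running(pid: int) -> bool:
    """Probe a PID with signal 0, mirroring the semantics used in the patch."""
    try:
        os.kill(pid, 0)  # signal 0 performs existence/permission checks only
    except OSError as e:
        if e.errno == errno.ESRCH:
            return False  # no such process: the pidfile is stale
        if e.errno == errno.EPERM:
            return True   # process exists but belongs to another user
        raise             # anything else is unexpected
    return True


if __name__ == "__main__":
    print(pid_is_running(os.getpid()))  # True for our own PID
    print(pid_is_running(1))            # typically True; raises EPERM internally for non-root
```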
gh_patches_debug_64391
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-5886
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
name=Bankomat should not be added for amenity=atm
It is like `name=ATM`
The same goes for `name=Wpłatomat` (for ATM accepting cash)
https://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/santander_pl.py`
Content:
```
1 import chompjs
2 from scrapy import Spider
3
4 from locations.categories import Categories, apply_category, apply_yes_no
5 from locations.dict_parser import DictParser
6 from locations.hours import DAYS, OpeningHours
7 from locations.items import Feature
8
9
10 class SantanderPLSpider(Spider):
11 name = "santander_pl"
12 item_attributes = {"brand": "Santander", "brand_wikidata": "Q806653"}
13 # The "20000000000000" needs to be a valid date time, but it seems it's just there to stop the page being cached by
14 # the CDN. We always get the same data.
15 start_urls = ["https://www.santander.pl/_js_places/time20000000000000/places.js"]
16
17 def parse(self, response, **kwargs):
18 data = chompjs.parse_js_object(response.text)
19 for ref, branch in data["atm"].items():
20 yield self.parse_item(ref, branch, Categories.ATM)
21 for ref, branch in data["branch"].items():
22 yield self.parse_item(ref, branch, Categories.BANK)
23 for ref, branch in data["cashin"].items():
24 item = self.parse_item(ref, branch, Categories.ATM)
25 apply_yes_no("cash_in", item, True)
26 yield item
27
28 @staticmethod
29 def parse_item(ref: str, data: dict, category) -> Feature:
30 data["basicParameters"]["street_address"] = data["basicParameters"].pop("street")
31 item = DictParser.parse(data["basicParameters"])
32 item["ref"] = ref
33
34 if data["open_24h"]:
35 item["opening_hours"] = "24/7"
36 else:
37 item["opening_hours"] = OpeningHours()
38 for day, hours in data["basicParameters"]["opening_hours"].items():
39 start_time, end_time = hours.split("-")
40 item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
41
42 apply_category(category, item)
43
44 return item
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py
--- a/locations/spiders/santander_pl.py
+++ b/locations/spiders/santander_pl.py
@@ -39,6 +39,9 @@
start_time, end_time = hours.split("-")
item["opening_hours"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())
+ if category == Categories.ATM:
+ item["name"] = None
+
apply_category(category, item)
return item
|
{"golden_diff": "diff --git a/locations/spiders/santander_pl.py b/locations/spiders/santander_pl.py\n--- a/locations/spiders/santander_pl.py\n+++ b/locations/spiders/santander_pl.py\n@@ -39,6 +39,9 @@\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n \n+ if category == Categories.ATM:\n+ item[\"name\"] = None\n+\n apply_category(category, item)\n \n return item\n", "issue": "name=Bankomat should not be added for amenity=atm\nIt is like `name=ATM`\r\n\r\nThe same goes for `name=Wp\u0142atomat` (for ATM accepting cash)\r\n\r\nhttps://www.alltheplaces.xyz/map/#16.82/50.072257/20.036549\r\n\r\n\r\n\n", "before_files": [{"content": "import chompjs\nfrom scrapy import Spider\n\nfrom locations.categories import Categories, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\nfrom locations.items import Feature\n\n\nclass SantanderPLSpider(Spider):\n name = \"santander_pl\"\n item_attributes = {\"brand\": \"Santander\", \"brand_wikidata\": \"Q806653\"}\n # The \"20000000000000\" needs to be a valid date time, but it seems it's just there to stop the page being cached by\n # the CDN. We always get the same data.\n start_urls = [\"https://www.santander.pl/_js_places/time20000000000000/places.js\"]\n\n def parse(self, response, **kwargs):\n data = chompjs.parse_js_object(response.text)\n for ref, branch in data[\"atm\"].items():\n yield self.parse_item(ref, branch, Categories.ATM)\n for ref, branch in data[\"branch\"].items():\n yield self.parse_item(ref, branch, Categories.BANK)\n for ref, branch in data[\"cashin\"].items():\n item = self.parse_item(ref, branch, Categories.ATM)\n apply_yes_no(\"cash_in\", item, True)\n yield item\n\n @staticmethod\n def parse_item(ref: str, data: dict, category) -> Feature:\n data[\"basicParameters\"][\"street_address\"] = data[\"basicParameters\"].pop(\"street\")\n item = DictParser.parse(data[\"basicParameters\"])\n item[\"ref\"] = ref\n\n if data[\"open_24h\"]:\n item[\"opening_hours\"] = \"24/7\"\n else:\n item[\"opening_hours\"] = OpeningHours()\n for day, hours in data[\"basicParameters\"][\"opening_hours\"].items():\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n\n apply_category(category, item)\n\n return item\n", "path": "locations/spiders/santander_pl.py"}], "after_files": [{"content": "import chompjs\nfrom scrapy import Spider\n\nfrom locations.categories import Categories, apply_category, apply_yes_no\nfrom locations.dict_parser import DictParser\nfrom locations.hours import DAYS, OpeningHours\nfrom locations.items import Feature\n\n\nclass SantanderPLSpider(Spider):\n name = \"santander_pl\"\n item_attributes = {\"brand\": \"Santander\", \"brand_wikidata\": \"Q806653\"}\n # The \"20000000000000\" needs to be a valid date time, but it seems it's just there to stop the page being cached by\n # the CDN. 
We always get the same data.\n start_urls = [\"https://www.santander.pl/_js_places/time20000000000000/places.js\"]\n\n def parse(self, response, **kwargs):\n data = chompjs.parse_js_object(response.text)\n for ref, branch in data[\"atm\"].items():\n yield self.parse_item(ref, branch, Categories.ATM)\n for ref, branch in data[\"branch\"].items():\n yield self.parse_item(ref, branch, Categories.BANK)\n for ref, branch in data[\"cashin\"].items():\n item = self.parse_item(ref, branch, Categories.ATM)\n apply_yes_no(\"cash_in\", item, True)\n yield item\n\n @staticmethod\n def parse_item(ref: str, data: dict, category) -> Feature:\n data[\"basicParameters\"][\"street_address\"] = data[\"basicParameters\"].pop(\"street\")\n item = DictParser.parse(data[\"basicParameters\"])\n item[\"ref\"] = ref\n\n if data[\"open_24h\"]:\n item[\"opening_hours\"] = \"24/7\"\n else:\n item[\"opening_hours\"] = OpeningHours()\n for day, hours in data[\"basicParameters\"][\"opening_hours\"].items():\n start_time, end_time = hours.split(\"-\")\n item[\"opening_hours\"].add_range(DAYS[int(day) - 2], start_time.strip(), end_time.strip())\n\n if category == Categories.ATM:\n item[\"name\"] = None\n\n apply_category(category, item)\n\n return item\n", "path": "locations/spiders/santander_pl.py"}]}
| 943 | 129 |
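The santander_pl fix drops the inherited name only when the parsed feature is an ATM, so generic labels like `name=Bankomat` are no longer emitted and the brand tags still identify the machine. A minimal sketch of that post-processing rule on a plain dict, assuming an `item` shaped like the spider's output rather than the real `Feature`/`Categories` objects:

```python
def strip_generic_atm_name(item: dict, category_tags: dict) -> dict:
    """Clear the name for ATM features so generic labels such as 'Bankomat' are not kept."""
    if category_tags.get("amenity") == "atm":
        item["name"] = None  # brand/operator tags still identify the machine
    return item


if __name__ == "__main__":
    atm = {"name": "Bankomat", "brand": "Santander"}
    branch = {"name": "Santander Oddzial 1", "brand": "Santander"}
    print(strip_generic_atm_name(atm, {"amenity": "atm"}))      # name becomes None
    print(strip_generic_atm_name(branch, {"amenity": "bank"}))  # name preserved
```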
gh_patches_debug_20505
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-499
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suite output text - fix first part
1. Add explanation for the “!” (warning) 2. add link to docs where written (or remove wording), where it says:"Suites, checks and conditions can all be modified (see tutorial [link])."
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/base/display_suite.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Handle display of suite result."""
12 from typing import List, Union
13
14 # pylint: disable=protected-access
15 import sys
16 import tqdm
17 import pandas as pd
18 from IPython.core.display import display_html
19 from IPython import get_ipython
20
21 from deepchecks import errors
22 from deepchecks.utils.ipython import is_widgets_enabled
23 from deepchecks.utils.strings import get_random_string
24 from deepchecks.base.check import CheckResult, CheckFailure
25 from deepchecks.base.display_pandas import dataframe_to_html, display_conditions_table
26
27
28 __all__ = ['display_suite_result', 'ProgressBar']
29
30
31 class ProgressBar:
32 """Progress bar for display while running suite."""
33
34 def __init__(self, name, length):
35 """Initialize progress bar."""
36 shared_args = {'total': length, 'desc': name, 'unit': ' Check', 'leave': False, 'file': sys.stdout}
37 if is_widgets_enabled():
38 self.pbar = tqdm.tqdm_notebook(**shared_args, colour='#9d60fb')
39 else:
40 # Normal tqdm with colour in notebooks produce bug that the cleanup doesn't remove all characters. so
41 # until bug fixed, doesn't add the colour to regular tqdm
42 self.pbar = tqdm.tqdm(**shared_args, bar_format=f'{{l_bar}}{{bar:{length}}}{{r_bar}}')
43
44 def set_text(self, text):
45 """Set current running check."""
46 self.pbar.set_postfix(Check=text)
47
48 def close(self):
49 """Close the progress bar."""
50 self.pbar.close()
51
52 def inc_progress(self):
53 """Increase progress bar value by 1."""
54 self.pbar.update(1)
55
56
57 def get_display_exists_icon(exists: bool):
58 if exists:
59 return '<div style="text-align: center">Yes</div>'
60 return '<div style="text-align: center">No</div>'
61
62
63 def display_suite_result(suite_name: str, results: List[Union[CheckResult, CheckFailure]]):
64 """Display results of suite in IPython."""
65 if len(results) == 0:
66 display_html(f"""<h1>{suite_name}</h1><p>Suite is empty.</p>""", raw=True)
67 return
68 if 'google.colab' in str(get_ipython()):
69 unique_id = ''
70 else:
71 unique_id = get_random_string()
72 checks_with_conditions = []
73 display_table: List[CheckResult] = []
74 others_table = []
75
76 for result in results:
77 if isinstance(result, CheckResult):
78 if result.have_conditions():
79 checks_with_conditions.append(result)
80 if result.have_display():
81 display_table.append(result)
82 else:
83 others_table.append([result.get_header(), 'Nothing found', 2])
84 elif isinstance(result, CheckFailure):
85 msg = result.exception.__class__.__name__ + ': ' + str(result.exception)
86 name = result.check.name()
87 others_table.append([name, msg, 1])
88 else:
89 # Should never reach here!
90 raise errors.DeepchecksValueError(
91 f"Expecting list of 'CheckResult'|'CheckFailure', but got {type(result)}."
92 )
93
94 display_table = sorted(display_table, key=lambda it: it.priority)
95
96 light_hr = '<hr style="background-color: #eee;border: 0 none;color: #eee;height: 1px;">'
97 bold_hr = '<hr style="background-color: black;border: 0 none;color: black;height: 1px;">'
98
99 icons = """
100 <span style="color: green;display:inline-block">\U00002713</span> /
101 <span style="color: red;display:inline-block">\U00002716</span> /
102 <span style="color: orange;font-weight:bold;display:inline-block">\U00000021</span>
103 """
104
105 check_names = list(set(it.check.name() for it in results))
106 prologue = (
107 f"The suite is composed of various checks such as: {', '.join(check_names[:3])}, etc..."
108 if len(check_names) > 3
109 else f"The suite is composed of the following checks: {', '.join(check_names)}."
110 )
111
112 display_html(
113 f"""
114 <h1 id="summary_{unique_id}">{suite_name}</h1>
115 <p>{prologue}<br>
116 Each check may contain conditions (which results in {icons}),
117 as well as other outputs such as plots or tables.<br>
118 Suites, checks and conditions can all be modified (see tutorial [link]).</p>
119 {bold_hr}<h2>Conditions Summary</h2>
120 """,
121 raw=True
122 )
123
124 if checks_with_conditions:
125 display_conditions_table(checks_with_conditions, unique_id)
126 else:
127 display_html('<p>No conditions defined on checks in the suite.</p>', raw=True)
128
129 display_html(f'{bold_hr}<h2>Additional Outputs</h2>', raw=True)
130 if display_table:
131 for i, r in enumerate(display_table):
132 r.show(show_conditions=False, unique_id=unique_id)
133 if i < len(display_table) - 1:
134 display_html(light_hr, raw=True)
135 else:
136 display_html('<p>No outputs to show.</p>', raw=True)
137
138 if others_table:
139 others_table = pd.DataFrame(data=others_table, columns=['Check', 'Reason', 'sort'])
140 others_table.sort_values(by=['sort'], inplace=True)
141 others_table.drop('sort', axis=1, inplace=True)
142 html = f"""{bold_hr}
143 <h2>Other Checks That Weren't Displayed</h2>
144 {dataframe_to_html(others_table.style.hide_index())}
145 """
146 display_html(html, raw=True)
147
148 display_html(f'<br><a href="#summary_{unique_id}" style="font-size: 14px">Go to top</a>', raw=True)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/base/display_suite.py b/deepchecks/base/display_suite.py
--- a/deepchecks/base/display_suite.py
+++ b/deepchecks/base/display_suite.py
@@ -109,14 +109,20 @@
else f"The suite is composed of the following checks: {', '.join(check_names)}."
)
+ suite_creation_example_link = 'https://docs.deepchecks.com/en/stable/examples/guides/create_a_custom_suite.html'
+
display_html(
f"""
<h1 id="summary_{unique_id}">{suite_name}</h1>
- <p>{prologue}<br>
- Each check may contain conditions (which results in {icons}),
- as well as other outputs such as plots or tables.<br>
- Suites, checks and conditions can all be modified (see tutorial [link]).</p>
- {bold_hr}<h2>Conditions Summary</h2>
+ <p>
+ {prologue}<br>
+ Each check may contain conditions (which will result in pass / fail / warning, represented by {icons})
+ as well as other outputs such as plots or tables.<br>
+ Suites, checks and conditions can all be modified (see the
+ <a href={suite_creation_example_link}>Create a Custom Suite</a> tutorial).
+ </p>
+ {bold_hr}
+ <h2>Conditions Summary</h2>
""",
raw=True
)
|
{"golden_diff": "diff --git a/deepchecks/base/display_suite.py b/deepchecks/base/display_suite.py\n--- a/deepchecks/base/display_suite.py\n+++ b/deepchecks/base/display_suite.py\n@@ -109,14 +109,20 @@\n else f\"The suite is composed of the following checks: {', '.join(check_names)}.\"\n )\n \n+ suite_creation_example_link = 'https://docs.deepchecks.com/en/stable/examples/guides/create_a_custom_suite.html'\n+\n display_html(\n f\"\"\"\n <h1 id=\"summary_{unique_id}\">{suite_name}</h1>\n- <p>{prologue}<br>\n- Each check may contain conditions (which results in {icons}),\n- as well as other outputs such as plots or tables.<br>\n- Suites, checks and conditions can all be modified (see tutorial [link]).</p>\n- {bold_hr}<h2>Conditions Summary</h2>\n+ <p>\n+ {prologue}<br>\n+ Each check may contain conditions (which will result in pass / fail / warning, represented by {icons})\n+ as well as other outputs such as plots or tables.<br>\n+ Suites, checks and conditions can all be modified (see the\n+ <a href={suite_creation_example_link}>Create a Custom Suite</a> tutorial).\n+ </p>\n+ {bold_hr}\n+ <h2>Conditions Summary</h2>\n \"\"\",\n raw=True\n )\n", "issue": "Suite output text - fix first part\n1. Add explanation for the \u201c!\u201d (warning) 2. add link to docs where written (or remove wording), where it says:\"Suites, checks and conditions can all be modified (see tutorial [link]).\"\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Handle display of suite result.\"\"\"\nfrom typing import List, Union\n\n# pylint: disable=protected-access\nimport sys\nimport tqdm\nimport pandas as pd\nfrom IPython.core.display import display_html\nfrom IPython import get_ipython\n\nfrom deepchecks import errors\nfrom deepchecks.utils.ipython import is_widgets_enabled\nfrom deepchecks.utils.strings import get_random_string\nfrom deepchecks.base.check import CheckResult, CheckFailure\nfrom deepchecks.base.display_pandas import dataframe_to_html, display_conditions_table\n\n\n__all__ = ['display_suite_result', 'ProgressBar']\n\n\nclass ProgressBar:\n \"\"\"Progress bar for display while running suite.\"\"\"\n\n def __init__(self, name, length):\n \"\"\"Initialize progress bar.\"\"\"\n shared_args = {'total': length, 'desc': name, 'unit': ' Check', 'leave': False, 'file': sys.stdout}\n if is_widgets_enabled():\n self.pbar = tqdm.tqdm_notebook(**shared_args, colour='#9d60fb')\n else:\n # Normal tqdm with colour in notebooks produce bug that the cleanup doesn't remove all characters. 
so\n # until bug fixed, doesn't add the colour to regular tqdm\n self.pbar = tqdm.tqdm(**shared_args, bar_format=f'{{l_bar}}{{bar:{length}}}{{r_bar}}')\n\n def set_text(self, text):\n \"\"\"Set current running check.\"\"\"\n self.pbar.set_postfix(Check=text)\n\n def close(self):\n \"\"\"Close the progress bar.\"\"\"\n self.pbar.close()\n\n def inc_progress(self):\n \"\"\"Increase progress bar value by 1.\"\"\"\n self.pbar.update(1)\n\n\ndef get_display_exists_icon(exists: bool):\n if exists:\n return '<div style=\"text-align: center\">Yes</div>'\n return '<div style=\"text-align: center\">No</div>'\n\n\ndef display_suite_result(suite_name: str, results: List[Union[CheckResult, CheckFailure]]):\n \"\"\"Display results of suite in IPython.\"\"\"\n if len(results) == 0:\n display_html(f\"\"\"<h1>{suite_name}</h1><p>Suite is empty.</p>\"\"\", raw=True)\n return\n if 'google.colab' in str(get_ipython()):\n unique_id = ''\n else:\n unique_id = get_random_string()\n checks_with_conditions = []\n display_table: List[CheckResult] = []\n others_table = []\n\n for result in results:\n if isinstance(result, CheckResult):\n if result.have_conditions():\n checks_with_conditions.append(result)\n if result.have_display():\n display_table.append(result)\n else:\n others_table.append([result.get_header(), 'Nothing found', 2])\n elif isinstance(result, CheckFailure):\n msg = result.exception.__class__.__name__ + ': ' + str(result.exception)\n name = result.check.name()\n others_table.append([name, msg, 1])\n else:\n # Should never reach here!\n raise errors.DeepchecksValueError(\n f\"Expecting list of 'CheckResult'|'CheckFailure', but got {type(result)}.\"\n )\n\n display_table = sorted(display_table, key=lambda it: it.priority)\n\n light_hr = '<hr style=\"background-color: #eee;border: 0 none;color: #eee;height: 1px;\">'\n bold_hr = '<hr style=\"background-color: black;border: 0 none;color: black;height: 1px;\">'\n\n icons = \"\"\"\n <span style=\"color: green;display:inline-block\">\\U00002713</span> /\n <span style=\"color: red;display:inline-block\">\\U00002716</span> /\n <span style=\"color: orange;font-weight:bold;display:inline-block\">\\U00000021</span>\n \"\"\"\n\n check_names = list(set(it.check.name() for it in results))\n prologue = (\n f\"The suite is composed of various checks such as: {', '.join(check_names[:3])}, etc...\"\n if len(check_names) > 3\n else f\"The suite is composed of the following checks: {', '.join(check_names)}.\"\n )\n\n display_html(\n f\"\"\"\n <h1 id=\"summary_{unique_id}\">{suite_name}</h1>\n <p>{prologue}<br>\n Each check may contain conditions (which results in {icons}),\n as well as other outputs such as plots or tables.<br>\n Suites, checks and conditions can all be modified (see tutorial [link]).</p>\n {bold_hr}<h2>Conditions Summary</h2>\n \"\"\",\n raw=True\n )\n\n if checks_with_conditions:\n display_conditions_table(checks_with_conditions, unique_id)\n else:\n display_html('<p>No conditions defined on checks in the suite.</p>', raw=True)\n\n display_html(f'{bold_hr}<h2>Additional Outputs</h2>', raw=True)\n if display_table:\n for i, r in enumerate(display_table):\n r.show(show_conditions=False, unique_id=unique_id)\n if i < len(display_table) - 1:\n display_html(light_hr, raw=True)\n else:\n display_html('<p>No outputs to show.</p>', raw=True)\n\n if others_table:\n others_table = pd.DataFrame(data=others_table, columns=['Check', 'Reason', 'sort'])\n others_table.sort_values(by=['sort'], inplace=True)\n others_table.drop('sort', axis=1, inplace=True)\n html = 
f\"\"\"{bold_hr}\n <h2>Other Checks That Weren't Displayed</h2>\n {dataframe_to_html(others_table.style.hide_index())}\n \"\"\"\n display_html(html, raw=True)\n\n display_html(f'<br><a href=\"#summary_{unique_id}\" style=\"font-size: 14px\">Go to top</a>', raw=True)\n", "path": "deepchecks/base/display_suite.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Handle display of suite result.\"\"\"\nfrom typing import List, Union\n\n# pylint: disable=protected-access\nimport sys\nimport tqdm\nimport pandas as pd\nfrom IPython.core.display import display_html\nfrom IPython import get_ipython\n\nfrom deepchecks import errors\nfrom deepchecks.utils.ipython import is_widgets_enabled\nfrom deepchecks.utils.strings import get_random_string\nfrom deepchecks.base.check import CheckResult, CheckFailure\nfrom deepchecks.base.display_pandas import dataframe_to_html, display_conditions_table\n\n\n__all__ = ['display_suite_result', 'ProgressBar']\n\n\nclass ProgressBar:\n \"\"\"Progress bar for display while running suite.\"\"\"\n\n def __init__(self, name, length):\n \"\"\"Initialize progress bar.\"\"\"\n shared_args = {'total': length, 'desc': name, 'unit': ' Check', 'leave': False, 'file': sys.stdout}\n if is_widgets_enabled():\n self.pbar = tqdm.tqdm_notebook(**shared_args, colour='#9d60fb')\n else:\n # Normal tqdm with colour in notebooks produce bug that the cleanup doesn't remove all characters. 
so\n # until bug fixed, doesn't add the colour to regular tqdm\n self.pbar = tqdm.tqdm(**shared_args, bar_format=f'{{l_bar}}{{bar:{length}}}{{r_bar}}')\n\n def set_text(self, text):\n \"\"\"Set current running check.\"\"\"\n self.pbar.set_postfix(Check=text)\n\n def close(self):\n \"\"\"Close the progress bar.\"\"\"\n self.pbar.close()\n\n def inc_progress(self):\n \"\"\"Increase progress bar value by 1.\"\"\"\n self.pbar.update(1)\n\n\ndef get_display_exists_icon(exists: bool):\n if exists:\n return '<div style=\"text-align: center\">Yes</div>'\n return '<div style=\"text-align: center\">No</div>'\n\n\ndef display_suite_result(suite_name: str, results: List[Union[CheckResult, CheckFailure]]):\n \"\"\"Display results of suite in IPython.\"\"\"\n if len(results) == 0:\n display_html(f\"\"\"<h1>{suite_name}</h1><p>Suite is empty.</p>\"\"\", raw=True)\n return\n if 'google.colab' in str(get_ipython()):\n unique_id = ''\n else:\n unique_id = get_random_string()\n checks_with_conditions = []\n display_table: List[CheckResult] = []\n others_table = []\n\n for result in results:\n if isinstance(result, CheckResult):\n if result.have_conditions():\n checks_with_conditions.append(result)\n if result.have_display():\n display_table.append(result)\n else:\n others_table.append([result.get_header(), 'Nothing found', 2])\n elif isinstance(result, CheckFailure):\n msg = result.exception.__class__.__name__ + ': ' + str(result.exception)\n name = result.check.name()\n others_table.append([name, msg, 1])\n else:\n # Should never reach here!\n raise errors.DeepchecksValueError(\n f\"Expecting list of 'CheckResult'|'CheckFailure', but got {type(result)}.\"\n )\n\n display_table = sorted(display_table, key=lambda it: it.priority)\n\n light_hr = '<hr style=\"background-color: #eee;border: 0 none;color: #eee;height: 1px;\">'\n bold_hr = '<hr style=\"background-color: black;border: 0 none;color: black;height: 1px;\">'\n\n icons = \"\"\"\n <span style=\"color: green;display:inline-block\">\\U00002713</span> /\n <span style=\"color: red;display:inline-block\">\\U00002716</span> /\n <span style=\"color: orange;font-weight:bold;display:inline-block\">\\U00000021</span>\n \"\"\"\n\n check_names = list(set(it.check.name() for it in results))\n prologue = (\n f\"The suite is composed of various checks such as: {', '.join(check_names[:3])}, etc...\"\n if len(check_names) > 3\n else f\"The suite is composed of the following checks: {', '.join(check_names)}.\"\n )\n\n suite_creation_example_link = 'https://docs.deepchecks.com/en/stable/examples/guides/create_a_custom_suite.html'\n\n display_html(\n f\"\"\"\n <h1 id=\"summary_{unique_id}\">{suite_name}</h1>\n <p>\n {prologue}<br>\n Each check may contain conditions (which will result in pass / fail / warning, represented by {icons})\n as well as other outputs such as plots or tables.<br>\n Suites, checks and conditions can all be modified (see the\n <a href={suite_creation_example_link}>Create a Custom Suite</a> tutorial).\n </p>\n {bold_hr}\n <h2>Conditions Summary</h2>\n \"\"\",\n raw=True\n )\n\n if checks_with_conditions:\n display_conditions_table(checks_with_conditions, unique_id)\n else:\n display_html('<p>No conditions defined on checks in the suite.</p>', raw=True)\n\n display_html(f'{bold_hr}<h2>Additional Outputs</h2>', raw=True)\n if display_table:\n for i, r in enumerate(display_table):\n r.show(show_conditions=False, unique_id=unique_id)\n if i < len(display_table) - 1:\n display_html(light_hr, raw=True)\n else:\n display_html('<p>No outputs to show.</p>', 
raw=True)\n\n if others_table:\n others_table = pd.DataFrame(data=others_table, columns=['Check', 'Reason', 'sort'])\n others_table.sort_values(by=['sort'], inplace=True)\n others_table.drop('sort', axis=1, inplace=True)\n html = f\"\"\"{bold_hr}\n <h2>Other Checks That Weren't Displayed</h2>\n {dataframe_to_html(others_table.style.hide_index())}\n \"\"\"\n display_html(html, raw=True)\n\n display_html(f'<br><a href=\"#summary_{unique_id}\" style=\"font-size: 14px\">Go to top</a>', raw=True)\n", "path": "deepchecks/base/display_suite.py"}]}
| 2,023 | 324 |
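The deepchecks patch is mostly a wording change in the generated HTML: it spells out that conditions result in pass / fail / warning (the check / cross / exclamation icons) and replaces the dangling "[link]" with a concrete docs URL. A reduced sketch of assembling that prologue paragraph, assuming only the pieces visible in the diff (the URL is the one the patch introduces; the real code uses styled `<span>` icons rather than plain characters):

```python
SUITE_CREATION_EXAMPLE_LINK = (
    "https://docs.deepchecks.com/en/stable/examples/guides/create_a_custom_suite.html"
)

ICONS = "\u2713 / \u2716 / !"  # pass / fail / warning markers


def build_prologue_html(suite_name: str, prologue: str) -> str:
    """Compose the summary header paragraph the way the patched report does."""
    return (
        f"<h1>{suite_name}</h1>"
        f"<p>{prologue}<br>"
        f"Each check may contain conditions (which will result in pass / fail / warning, "
        f"represented by {ICONS}) as well as other outputs such as plots or tables.<br>"
        f"Suites, checks and conditions can all be modified (see the "
        f'<a href="{SUITE_CREATION_EXAMPLE_LINK}">Create a Custom Suite</a> tutorial).</p>'
    )


if __name__ == "__main__":
    print(build_prologue_html("Full Suite", "The suite is composed of 12 checks."))
```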
gh_patches_debug_24114
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-5188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Program PDF report returns an XLS report
# What were you doing?
Follow the steps in test "Download PDF Program overview".
# What should've happened?
A PDF report should've been received in the email, but an XLS report was delivered.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/views/py_reports/email_report.py`
Content:
```
1 import logging
2
3 from datetime import timedelta
4 from django.db.models import Q
5 from django.utils.timezone import now
6 from akvo.rsr.models import EmailReportJob
7
8 from . import (
9 program_overview_pdf_report,
10 program_overview_excel_report,
11 program_period_labels_overview_pdf_report,
12 results_indicators_with_map_pdf_reports,
13 nuffic_country_level_map_report,
14 )
15
16 TIMEOUT = timedelta(minutes=30)
17 MAX_ATTEMPTS = 3
18 HANDLER = {
19 program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
20 program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
21 program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,
22 results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,
23 nuffic_country_level_map_report.REPORT_NAME: nuffic_country_level_map_report.handle_email_report,
24 }
25
26 logger = logging.getLogger(__name__)
27
28
29 def run_job():
30 pending_jobs = _get_pending_jobs()
31 if not pending_jobs.exists():
32 return
33 job = pending_jobs.first()
34 job.mark_started()
35 try:
36 handler = HANDLER.get(job.report, None)
37 if handler:
38 handler(job.payload, job.recipient)
39 job.mark_finished()
40 except Exception:
41 logger.exception(f'Failed to genereate report {job.report} for {job.recipient}')
42
43
44 def _get_pending_jobs():
45 started_timeout = now() - TIMEOUT
46 return EmailReportJob.objects\
47 .order_by('created_at')\
48 .filter(finished_at__isnull=True)\
49 .exclude(Q(attempts__gte=MAX_ATTEMPTS) | Q(started_at__gte=started_timeout))
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/views/py_reports/email_report.py b/akvo/rsr/views/py_reports/email_report.py
--- a/akvo/rsr/views/py_reports/email_report.py
+++ b/akvo/rsr/views/py_reports/email_report.py
@@ -16,7 +16,7 @@
TIMEOUT = timedelta(minutes=30)
MAX_ATTEMPTS = 3
HANDLER = {
- program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
+ program_overview_pdf_report.REPORT_NAME: program_overview_pdf_report.handle_email_report,
program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,
program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,
results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,
@@ -35,6 +35,7 @@
try:
handler = HANDLER.get(job.report, None)
if handler:
+ logger.info("Handling job %s for report %s with %s", job.id, job.report)
handler(job.payload, job.recipient)
job.mark_finished()
except Exception:
|
{"golden_diff": "diff --git a/akvo/rsr/views/py_reports/email_report.py b/akvo/rsr/views/py_reports/email_report.py\n--- a/akvo/rsr/views/py_reports/email_report.py\n+++ b/akvo/rsr/views/py_reports/email_report.py\n@@ -16,7 +16,7 @@\n TIMEOUT = timedelta(minutes=30)\n MAX_ATTEMPTS = 3\n HANDLER = {\n- program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n+ program_overview_pdf_report.REPORT_NAME: program_overview_pdf_report.handle_email_report,\n program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,\n results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,\n@@ -35,6 +35,7 @@\n try:\n handler = HANDLER.get(job.report, None)\n if handler:\n+ logger.info(\"Handling job %s for report %s with %s\", job.id, job.report)\n handler(job.payload, job.recipient)\n job.mark_finished()\n except Exception:\n", "issue": "Program PDF report returns an XLS report\n# What were you doing?\n\nFollow the steps in test \"Download PDF Program overview\".\n\n# What should've happened?\n\nA PDF report should've been received in the email, but an XLS report was delivered.\n", "before_files": [{"content": "import logging\n\nfrom datetime import timedelta\nfrom django.db.models import Q\nfrom django.utils.timezone import now\nfrom akvo.rsr.models import EmailReportJob\n\nfrom . import (\n program_overview_pdf_report,\n program_overview_excel_report,\n program_period_labels_overview_pdf_report,\n results_indicators_with_map_pdf_reports,\n nuffic_country_level_map_report,\n)\n\nTIMEOUT = timedelta(minutes=30)\nMAX_ATTEMPTS = 3\nHANDLER = {\n program_overview_pdf_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,\n results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,\n nuffic_country_level_map_report.REPORT_NAME: nuffic_country_level_map_report.handle_email_report,\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_job():\n pending_jobs = _get_pending_jobs()\n if not pending_jobs.exists():\n return\n job = pending_jobs.first()\n job.mark_started()\n try:\n handler = HANDLER.get(job.report, None)\n if handler:\n handler(job.payload, job.recipient)\n job.mark_finished()\n except Exception:\n logger.exception(f'Failed to genereate report {job.report} for {job.recipient}')\n\n\ndef _get_pending_jobs():\n started_timeout = now() - TIMEOUT\n return EmailReportJob.objects\\\n .order_by('created_at')\\\n .filter(finished_at__isnull=True)\\\n .exclude(Q(attempts__gte=MAX_ATTEMPTS) | Q(started_at__gte=started_timeout))\n", "path": "akvo/rsr/views/py_reports/email_report.py"}], "after_files": [{"content": "import logging\n\nfrom datetime import timedelta\nfrom django.db.models import Q\nfrom django.utils.timezone import now\nfrom akvo.rsr.models import EmailReportJob\n\nfrom . 
import (\n program_overview_pdf_report,\n program_overview_excel_report,\n program_period_labels_overview_pdf_report,\n results_indicators_with_map_pdf_reports,\n nuffic_country_level_map_report,\n)\n\nTIMEOUT = timedelta(minutes=30)\nMAX_ATTEMPTS = 3\nHANDLER = {\n program_overview_pdf_report.REPORT_NAME: program_overview_pdf_report.handle_email_report,\n program_overview_excel_report.REPORT_NAME: program_overview_excel_report.handle_email_report,\n program_period_labels_overview_pdf_report.REPORT_NAME: program_period_labels_overview_pdf_report.handle_email_report,\n results_indicators_with_map_pdf_reports.ORG_PROJECTS_REPORT_NAME: results_indicators_with_map_pdf_reports.handle_org_projects_email_report,\n nuffic_country_level_map_report.REPORT_NAME: nuffic_country_level_map_report.handle_email_report,\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef run_job():\n pending_jobs = _get_pending_jobs()\n if not pending_jobs.exists():\n return\n job = pending_jobs.first()\n job.mark_started()\n try:\n handler = HANDLER.get(job.report, None)\n if handler:\n logger.info(\"Handling job %s for report %s with %s\", job.id, job.report)\n handler(job.payload, job.recipient)\n job.mark_finished()\n except Exception:\n logger.exception(f'Failed to genereate report {job.report} for {job.recipient}')\n\n\ndef _get_pending_jobs():\n started_timeout = now() - TIMEOUT\n return EmailReportJob.objects\\\n .order_by('created_at')\\\n .filter(finished_at__isnull=True)\\\n .exclude(Q(attempts__gte=MAX_ATTEMPTS) | Q(started_at__gte=started_timeout))\n", "path": "akvo/rsr/views/py_reports/email_report.py"}]}
| 792 | 269 |
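The root cause in the akvo entry is a copy-paste slip in the `HANDLER` dict: the PDF report name was mapped to the Excel handler, so dispatch silently produced the wrong file type. A small sketch of that dispatch pattern with a cheap guard against this class of mistake; the names are illustrative stand-ins, not the real akvo py_reports modules:

```python
# Hypothetical report handlers standing in for the py_reports modules.
def handle_pdf(payload, recipient):
    return f"PDF report for {recipient}"


def handle_excel(payload, recipient):
    return f"XLS report for {recipient}"


HANDLER = {
    "program_overview_pdf_report": handle_pdf,      # the bug mapped this key to handle_excel
    "program_overview_excel_report": handle_excel,
}


def run_job(report_name, payload, recipient):
    handler = HANDLER.get(report_name)
    if handler is None:
        raise KeyError(f"no handler registered for {report_name}")
    return handler(payload, recipient)


if __name__ == "__main__":
    # Cheap sanity check: here every report name is expected to resolve to a distinct handler.
    assert len(set(HANDLER.values())) == len(HANDLER)
    print(run_job("program_overview_pdf_report", {}, "[email protected]"))
```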
gh_patches_debug_5510
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-3215
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HKDF key-length inconsistency
For too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:
Program:
```python
#!/usr/bin/env python3.5
import cryptography
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
print("cryptography.io:{}".format(cryptography.__version__))
hkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b"salt",
info=b"some-test", backend=default_backend())
key = hkdf.derive(b"my secret passphrase")
print("Derived key: {}".format(key))
```
Output:
```
cryptography.io:1.5.2
Derived key: b''
```
Suggested fix:
I am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.
```python
# class HKDFExpand
def _expand(self, key_material):
output = [b""]
counter = 1
while (self._algorithm.digest_size // 8) * len(output) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
h.update(six.int2byte(counter))
output.append(h.finalize())
counter += 1
return b"".join(output)[:self._length]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/kdf/hkdf.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.exceptions import (
11 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
12 )
13 from cryptography.hazmat.backends.interfaces import HMACBackend
14 from cryptography.hazmat.primitives import constant_time, hmac
15 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
16
17
18 @utils.register_interface(KeyDerivationFunction)
19 class HKDF(object):
20 def __init__(self, algorithm, length, salt, info, backend):
21 if not isinstance(backend, HMACBackend):
22 raise UnsupportedAlgorithm(
23 "Backend object does not implement HMACBackend.",
24 _Reasons.BACKEND_MISSING_INTERFACE
25 )
26
27 self._algorithm = algorithm
28
29 if not (salt is None or isinstance(salt, bytes)):
30 raise TypeError("salt must be bytes.")
31
32 if salt is None:
33 salt = b"\x00" * (self._algorithm.digest_size // 8)
34
35 self._salt = salt
36
37 self._backend = backend
38
39 self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)
40
41 def _extract(self, key_material):
42 h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)
43 h.update(key_material)
44 return h.finalize()
45
46 def derive(self, key_material):
47 if not isinstance(key_material, bytes):
48 raise TypeError("key_material must be bytes.")
49
50 return self._hkdf_expand.derive(self._extract(key_material))
51
52 def verify(self, key_material, expected_key):
53 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
54 raise InvalidKey
55
56
57 @utils.register_interface(KeyDerivationFunction)
58 class HKDFExpand(object):
59 def __init__(self, algorithm, length, info, backend):
60 if not isinstance(backend, HMACBackend):
61 raise UnsupportedAlgorithm(
62 "Backend object does not implement HMACBackend.",
63 _Reasons.BACKEND_MISSING_INTERFACE
64 )
65
66 self._algorithm = algorithm
67
68 self._backend = backend
69
70 max_length = 255 * (algorithm.digest_size // 8)
71
72 if length > max_length:
73 raise ValueError(
74 "Can not derive keys larger than {0} octets.".format(
75 max_length
76 ))
77
78 self._length = length
79
80 if not (info is None or isinstance(info, bytes)):
81 raise TypeError("info must be bytes.")
82
83 if info is None:
84 info = b""
85
86 self._info = info
87
88 self._used = False
89
90 def _expand(self, key_material):
91 output = [b""]
92 counter = 1
93
94 while (self._algorithm.digest_size // 8) * len(output) < self._length:
95 h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
96 h.update(output[-1])
97 h.update(self._info)
98 h.update(six.int2byte(counter))
99 output.append(h.finalize())
100 counter += 1
101
102 return b"".join(output)[:self._length]
103
104 def derive(self, key_material):
105 if not isinstance(key_material, bytes):
106 raise TypeError("key_material must be bytes.")
107
108 if self._used:
109 raise AlreadyFinalized
110
111 self._used = True
112 return self._expand(key_material)
113
114 def verify(self, key_material, expected_key):
115 if not constant_time.bytes_eq(self.derive(key_material), expected_key):
116 raise InvalidKey
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py
--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py
+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py
@@ -91,7 +91,7 @@
output = [b""]
counter = 1
- while (self._algorithm.digest_size // 8) * len(output) < self._length:
+ while self._algorithm.digest_size * (len(output) - 1) < self._length:
h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)
h.update(output[-1])
h.update(self._info)
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/hkdf.py b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n--- a/src/cryptography/hazmat/primitives/kdf/hkdf.py\n+++ b/src/cryptography/hazmat/primitives/kdf/hkdf.py\n@@ -91,7 +91,7 @@\n output = [b\"\"]\n counter = 1\n \n- while (self._algorithm.digest_size // 8) * len(output) < self._length:\n+ while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n", "issue": "HKDF key-length inconsistency\nFor too small key sizes, `HKDF.derive()` outputs an empty array instead of a small key:\r\n\r\nProgram:\r\n```python\r\n#!/usr/bin/env python3.5\r\nimport cryptography\r\nfrom cryptography.hazmat.primitives import hashes\r\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\r\nfrom cryptography.hazmat.backends import default_backend\r\n\r\nprint(\"cryptography.io:{}\".format(cryptography.__version__))\r\n\r\nhkdf = HKDF(algorithm=hashes.SHA256(), length=4, salt=b\"salt\",\r\n info=b\"some-test\", backend=default_backend())\r\n\r\nkey = hkdf.derive(b\"my secret passphrase\")\r\nprint(\"Derived key: {}\".format(key))\r\n```\r\n\r\nOutput:\r\n```\r\ncryptography.io:1.5.2\r\nDerived key: b''\r\n```\r\n\r\nSuggested fix:\r\n\r\nI am not quite sure why the division by 8 in the snippet below was added. The cumulative size of the output array is always `self._algorithm.digest_size * len(output)` and thus we can stop after `self._algorithm.digest_size * len(output) >= self._length`. At first I thought this might be a clever trick taken from the paper, but I didn't find it there. I guess there was a mixup between bits and bytes at some point.\r\n\r\n```python\r\n# class HKDFExpand\r\ndef _expand(self, key_material):\r\n output = [b\"\"]\r\n counter = 1\r\n\r\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\r\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\r\n h.update(output[-1])\r\n h.update(self._info)\r\n h.update(six.int2byte(counter))\r\n output.append(h.finalize())\r\n counter += 1\r\n\r\n return b\"\".join(output)[:self._length]\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * (self._algorithm.digest_size // 8)\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while (self._algorithm.digest_size // 8) * len(output) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HMACBackend\nfrom cryptography.hazmat.primitives import constant_time, hmac\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDF(object):\n def __init__(self, algorithm, length, salt, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n if not (salt is None or isinstance(salt, bytes)):\n raise TypeError(\"salt must be bytes.\")\n\n if salt is None:\n salt = b\"\\x00\" * (self._algorithm.digest_size // 8)\n\n self._salt = salt\n\n self._backend = backend\n\n self._hkdf_expand = HKDFExpand(self._algorithm, length, info, backend)\n\n def _extract(self, key_material):\n h = hmac.HMAC(self._salt, self._algorithm, backend=self._backend)\n h.update(key_material)\n return h.finalize()\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n return self._hkdf_expand.derive(self._extract(key_material))\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n\n\[email protected]_interface(KeyDerivationFunction)\nclass HKDFExpand(object):\n def __init__(self, algorithm, length, info, backend):\n if not isinstance(backend, HMACBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HMACBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._algorithm = algorithm\n\n self._backend = backend\n\n max_length = 255 * (algorithm.digest_size // 8)\n\n if length > max_length:\n raise ValueError(\n \"Can not derive keys larger than {0} octets.\".format(\n max_length\n ))\n\n self._length = length\n\n if not (info is None or isinstance(info, bytes)):\n raise TypeError(\"info must be bytes.\")\n\n if info is None:\n info = b\"\"\n\n self._info = info\n\n self._used = False\n\n def _expand(self, key_material):\n output = [b\"\"]\n counter = 1\n\n while self._algorithm.digest_size * (len(output) - 1) < self._length:\n h = hmac.HMAC(key_material, self._algorithm, backend=self._backend)\n h.update(output[-1])\n h.update(self._info)\n h.update(six.int2byte(counter))\n output.append(h.finalize())\n counter += 1\n\n return b\"\".join(output)[:self._length]\n\n def derive(self, key_material):\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n\n if self._used:\n raise AlreadyFinalized\n\n self._used = True\n return self._expand(key_material)\n\n def verify(self, key_material, expected_key):\n if not constant_time.bytes_eq(self.derive(key_material), expected_key):\n raise InvalidKey\n", "path": "src/cryptography/hazmat/primitives/kdf/hkdf.py"}]}
| 1,713 | 169 |
gh_patches_debug_11975 | rasdani/github-patches | git_diff | Qiskit__qiskit-1394
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compile and execute use a progress bar even if a single circuit is passed.
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Informations
- **Qiskit Terra version**: master
- **Python version**: 3.7
- **Operating system**: OSX
### What is the current behavior?
The `compile` and `execute` functions make use of a progress bar for a single circuit.
### Steps to reproduce the problem
```
from qiskit import *
from qiskit.tools.jupyter import TextProgressBar
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.h(q[0])
qc.cx(q[0], q[1])
qc.measure(q, c)
backend = Aer.get_backend('qasm_simulator')
TextProgressBar()
qobj = compile([qc], backend)
```
### What is the expected behavior?
A progress bar should not be used for a single circuit.
### Suggested solutions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/transpiler/_parallel.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2018, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 # This file is part of QuTiP: Quantum Toolbox in Python.
9 #
10 # Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
11 # All rights reserved.
12 #
13 # Redistribution and use in source and binary forms, with or without
14 # modification, are permitted provided that the following conditions are
15 # met:
16 #
17 # 1. Redistributions of source code must retain the above copyright notice,
18 # this list of conditions and the following disclaimer.
19 #
20 # 2. Redistributions in binary form must reproduce the above copyright
21 # notice, this list of conditions and the following disclaimer in the
22 # documentation and/or other materials provided with the distribution.
23 #
24 # 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
25 # of its contributors may be used to endorse or promote products derived
26 # from this software without specific prior written permission.
27 #
28 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
31 # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 ###############################################################################
40
41 """
42 Routines for running Python functions in parallel using process pools
43 from the multiprocessing library.
44 """
45
46 import os
47 import platform
48 from multiprocessing import Pool
49 from qiskit._qiskiterror import QISKitError
50 from qiskit._util import local_hardware_info
51 from qiskit._pubsub import Publisher
52
53 # Number of local physical cpus
54 CPU_COUNT = local_hardware_info()['cpus']
55
56
57 def parallel_map(task, values, task_args=tuple(), task_kwargs={}, # pylint: disable=W0102
58 num_processes=CPU_COUNT):
59 """
60 Parallel execution of a mapping of `values` to the function `task`. This
61 is functionally equivalent to::
62
63 result = [task(value, *task_args, **task_kwargs) for value in values]
64
65 On Windows this function defaults to a serial implementation to avoid the
66 overhead from spawning processes in Windows.
67
68 Args:
69 task (func): Function that is to be called for each value in ``task_vec``.
70 values (array_like): List or array of values for which the ``task``
71 function is to be evaluated.
72 task_args (list): Optional additional arguments to the ``task`` function.
73 task_kwargs (dict): Optional additional keyword argument to the ``task`` function.
74 num_processes (int): Number of processes to spawn.
75
76 Returns:
77 result: The result list contains the value of
78 ``task(value, *task_args, **task_kwargs)`` for
79 each value in ``values``.
80
81 Raises:
82 QISKitError: If user interrupts via keyboard.
83
84 Events:
85 terra.transpiler.parallel.start: The collection of parallel tasks are about to start.
86 terra.transpiler.parallel.update: One of the parallel task has finished.
87 terra.transpiler.parallel.finish: All the parallel tasks have finished.
88 """
89 Publisher().publish("terra.transpiler.parallel.start", len(values))
90 if len(values) == 1:
91 Publisher().publish("terra.transpiler.parallel.finish")
92 return [task(values[0], *task_args, **task_kwargs)]
93
94 nfinished = [0]
95
96 def _callback(_):
97 nfinished[0] += 1
98 Publisher().publish("terra.transpiler.parallel.done", nfinished[0])
99
100 # Run in parallel if not Win and not in parallel already
101 if platform.system() != 'Windows' and num_processes > 1 \
102 and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':
103 os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'
104 try:
105 pool = Pool(processes=num_processes)
106
107 async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,
108 _callback) for value in values]
109
110 while not all([item.ready() for item in async_res]):
111 for item in async_res:
112 item.wait(timeout=0.1)
113
114 pool.terminate()
115 pool.join()
116
117 except KeyboardInterrupt:
118 pool.terminate()
119 pool.join()
120 Publisher().publish("terra.parallel.parallel.finish")
121 raise QISKitError('Keyboard interrupt in parallel_map.')
122
123 Publisher().publish("terra.transpiler.parallel.finish")
124 os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'
125 return [ar.get() for ar in async_res]
126
127 # Cannot do parallel on Windows , if another parallel_map is running in parallel,
128 # or len(values) == 1.
129 results = []
130 for _, value in enumerate(values):
131 result = task(value, *task_args, **task_kwargs)
132 results.append(result)
133 _callback(0)
134 Publisher().publish("terra.transpiler.parallel.finish")
135 return results
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/transpiler/_parallel.py b/qiskit/transpiler/_parallel.py
--- a/qiskit/transpiler/_parallel.py
+++ b/qiskit/transpiler/_parallel.py
@@ -86,11 +86,10 @@
terra.transpiler.parallel.update: One of the parallel task has finished.
terra.transpiler.parallel.finish: All the parallel tasks have finished.
"""
- Publisher().publish("terra.transpiler.parallel.start", len(values))
if len(values) == 1:
- Publisher().publish("terra.transpiler.parallel.finish")
return [task(values[0], *task_args, **task_kwargs)]
+ Publisher().publish("terra.transpiler.parallel.start", len(values))
nfinished = [0]
def _callback(_):
|
{"golden_diff": "diff --git a/qiskit/transpiler/_parallel.py b/qiskit/transpiler/_parallel.py\n--- a/qiskit/transpiler/_parallel.py\n+++ b/qiskit/transpiler/_parallel.py\n@@ -86,11 +86,10 @@\n terra.transpiler.parallel.update: One of the parallel task has finished.\n terra.transpiler.parallel.finish: All the parallel tasks have finished.\n \"\"\"\n- Publisher().publish(\"terra.transpiler.parallel.start\", len(values))\n if len(values) == 1:\n- Publisher().publish(\"terra.transpiler.parallel.finish\")\n return [task(values[0], *task_args, **task_kwargs)]\n \n+ Publisher().publish(\"terra.transpiler.parallel.start\", len(values))\n nfinished = [0]\n \n def _callback(_):\n", "issue": "Compile and execute use a progress bar even if a single circuit is passed.\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Informations\r\n\r\n- **Qiskit Terra version**: master\r\n- **Python version**: 3.7\r\n- **Operating system**: OSX\r\n\r\n### What is the current behavior?\r\nThe `compile` and `execute` functions make use of a progress bar for a single circuit.\r\n\r\n### Steps to reproduce the problem\r\n```\r\nfrom qiskit import *\r\nfrom qiskit.tools.jupyter import TextProgressBar\r\n\r\nq = QuantumRegister(2)\r\nc = ClassicalRegister(2)\r\nqc = QuantumCircuit(q, c)\r\n\r\nqc.h(q[0])\r\nqc.cx(q[0], q[1])\r\nqc.measure(q, c)\r\n\r\nbackend = Aer.get_backend('qasm_simulator')\r\nTextProgressBar()\r\nqobj = compile([qc], backend)\r\n```\r\n\r\n\r\n### What is the expected behavior?\r\nA progress bar should not be used for a single circuit.\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n\"\"\"\nRoutines for running Python functions in parallel using process pools\nfrom the multiprocessing library.\n\"\"\"\n\nimport os\nimport platform\nfrom multiprocessing import Pool\nfrom qiskit._qiskiterror import QISKitError\nfrom qiskit._util import local_hardware_info\nfrom qiskit._pubsub import Publisher\n\n# Number of local physical cpus\nCPU_COUNT = local_hardware_info()['cpus']\n\n\ndef parallel_map(task, values, task_args=tuple(), task_kwargs={}, # pylint: disable=W0102\n num_processes=CPU_COUNT):\n \"\"\"\n Parallel execution of a mapping of `values` to the function `task`. This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n On Windows this function defaults to a serial implementation to avoid the\n overhead from spawning processes in Windows.\n\n Args:\n task (func): Function that is to be called for each value in ``task_vec``.\n values (array_like): List or array of values for which the ``task``\n function is to be evaluated.\n task_args (list): Optional additional arguments to the ``task`` function.\n task_kwargs (dict): Optional additional keyword argument to the ``task`` function.\n num_processes (int): Number of processes to spawn.\n\n Returns:\n result: The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for\n each value in ``values``.\n\n Raises:\n QISKitError: If user interrupts via keyboard.\n\n Events:\n terra.transpiler.parallel.start: The collection of parallel tasks are about to start.\n terra.transpiler.parallel.update: One of the parallel task has finished.\n terra.transpiler.parallel.finish: All the parallel tasks have finished.\n \"\"\"\n Publisher().publish(\"terra.transpiler.parallel.start\", len(values))\n if len(values) == 1:\n Publisher().publish(\"terra.transpiler.parallel.finish\")\n return [task(values[0], *task_args, **task_kwargs)]\n\n nfinished = [0]\n\n def _callback(_):\n nfinished[0] += 1\n Publisher().publish(\"terra.transpiler.parallel.done\", nfinished[0])\n\n # Run in parallel if not Win and not in parallel already\n if platform.system() != 'Windows' and num_processes > 1 \\\n and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':\n os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'\n try:\n pool = Pool(processes=num_processes)\n\n async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,\n _callback) for value in values]\n\n while not all([item.ready() for item in async_res]):\n for item in async_res:\n item.wait(timeout=0.1)\n\n pool.terminate()\n pool.join()\n\n except KeyboardInterrupt:\n pool.terminate()\n pool.join()\n Publisher().publish(\"terra.parallel.parallel.finish\")\n raise QISKitError('Keyboard interrupt in parallel_map.')\n\n Publisher().publish(\"terra.transpiler.parallel.finish\")\n os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'\n return [ar.get() for ar in async_res]\n\n # Cannot do parallel on Windows , if another parallel_map 
is running in parallel,\n # or len(values) == 1.\n results = []\n for _, value in enumerate(values):\n result = task(value, *task_args, **task_kwargs)\n results.append(result)\n _callback(0)\n Publisher().publish(\"terra.transpiler.parallel.finish\")\n return results\n", "path": "qiskit/transpiler/_parallel.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n\"\"\"\nRoutines for running Python functions in parallel using process pools\nfrom the multiprocessing library.\n\"\"\"\n\nimport os\nimport platform\nfrom multiprocessing import Pool\nfrom qiskit._qiskiterror import QISKitError\nfrom qiskit._util import local_hardware_info\nfrom qiskit._pubsub import Publisher\n\n# Number of local physical cpus\nCPU_COUNT = local_hardware_info()['cpus']\n\n\ndef parallel_map(task, values, task_args=tuple(), task_kwargs={}, # pylint: disable=W0102\n num_processes=CPU_COUNT):\n \"\"\"\n Parallel execution of a mapping of `values` to the function `task`. 
This\n is functionally equivalent to::\n\n result = [task(value, *task_args, **task_kwargs) for value in values]\n\n On Windows this function defaults to a serial implementation to avoid the\n overhead from spawning processes in Windows.\n\n Args:\n task (func): Function that is to be called for each value in ``task_vec``.\n values (array_like): List or array of values for which the ``task``\n function is to be evaluated.\n task_args (list): Optional additional arguments to the ``task`` function.\n task_kwargs (dict): Optional additional keyword argument to the ``task`` function.\n num_processes (int): Number of processes to spawn.\n\n Returns:\n result: The result list contains the value of\n ``task(value, *task_args, **task_kwargs)`` for\n each value in ``values``.\n\n Raises:\n QISKitError: If user interrupts via keyboard.\n\n Events:\n terra.transpiler.parallel.start: The collection of parallel tasks are about to start.\n terra.transpiler.parallel.update: One of the parallel task has finished.\n terra.transpiler.parallel.finish: All the parallel tasks have finished.\n \"\"\"\n if len(values) == 1:\n return [task(values[0], *task_args, **task_kwargs)]\n\n Publisher().publish(\"terra.transpiler.parallel.start\", len(values))\n nfinished = [0]\n\n def _callback(_):\n nfinished[0] += 1\n Publisher().publish(\"terra.transpiler.parallel.done\", nfinished[0])\n\n # Run in parallel if not Win and not in parallel already\n if platform.system() != 'Windows' and num_processes > 1 \\\n and os.getenv('QISKIT_IN_PARALLEL') == 'FALSE':\n os.environ['QISKIT_IN_PARALLEL'] = 'TRUE'\n try:\n pool = Pool(processes=num_processes)\n\n async_res = [pool.apply_async(task, (value,) + task_args, task_kwargs,\n _callback) for value in values]\n\n while not all([item.ready() for item in async_res]):\n for item in async_res:\n item.wait(timeout=0.1)\n\n pool.terminate()\n pool.join()\n\n except KeyboardInterrupt:\n pool.terminate()\n pool.join()\n Publisher().publish(\"terra.parallel.parallel.finish\")\n raise QISKitError('Keyboard interrupt in parallel_map.')\n\n Publisher().publish(\"terra.transpiler.parallel.finish\")\n os.environ['QISKIT_IN_PARALLEL'] = 'FALSE'\n return [ar.get() for ar in async_res]\n\n # Cannot do parallel on Windows , if another parallel_map is running in parallel,\n # or len(values) == 1.\n results = []\n for _, value in enumerate(values):\n result = task(value, *task_args, **task_kwargs)\n results.append(result)\n _callback(0)\n Publisher().publish(\"terra.transpiler.parallel.finish\")\n return results\n", "path": "qiskit/transpiler/_parallel.py"}]}
| 2,005 | 171 |
gh_patches_debug_14774 | rasdani/github-patches | git_diff | speechbrain__speechbrain-124
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replicability Issue
Every time I run the same experiment (e.,g minimal_examples/neural_networks/autoencoder) on the same machine I got slightly different results. Since we set up the seed, this shouldn't happen.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/TIMIT/ASR_CTC/experiment.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 import sys
4 import torch
5 import speechbrain as sb
6 import speechbrain.data_io.wer as wer_io
7 import speechbrain.utils.edit_distance as edit_distance
8 from speechbrain.data_io.data_io import convert_index_to_lab
9 from speechbrain.decoders.ctc import ctc_greedy_decode
10 from speechbrain.decoders.decoders import undo_padding
11 from speechbrain.utils.checkpoints import ckpt_recency
12 from speechbrain.utils.train_logger import summarize_error_rate
13
14 # This hack needed to import data preparation script from ..
15 current_dir = os.path.dirname(os.path.abspath(__file__))
16 sys.path.append(os.path.dirname(current_dir))
17 from timit_prepare import TIMITPreparer # noqa E402
18
19 # Load hyperparameters file with command-line overrides
20 params_file, overrides = sb.core.parse_arguments(sys.argv[1:])
21 if "seed" in overrides:
22 torch.manual_seed(overrides["seed"])
23 with open(params_file) as fin:
24 params = sb.yaml.load_extended_yaml(fin, overrides)
25
26 # Create experiment directory
27 sb.core.create_experiment_directory(
28 experiment_directory=params.output_folder,
29 params_to_save=params_file,
30 overrides=overrides,
31 )
32
33
34 # Define training procedure
35 class ASR(sb.core.Brain):
36 def compute_forward(self, x, train_mode=True, init_params=False):
37 ids, wavs, wav_lens = x
38 wavs, wav_lens = wavs.to(params.device), wav_lens.to(params.device)
39 if hasattr(params, "augmentation"):
40 wavs = params.augmentation(wavs, wav_lens, init_params)
41 feats = params.compute_features(wavs, init_params)
42 feats = params.normalize(feats, wav_lens)
43 out = params.model(feats, init_params)
44 out = params.output(out, init_params)
45 pout = params.log_softmax(out)
46 return pout, wav_lens
47
48 def compute_objectives(self, predictions, targets, train_mode=True):
49 pout, pout_lens = predictions
50 ids, phns, phn_lens = targets
51 phns, phn_lens = phns.to(params.device), phn_lens.to(params.device)
52 loss = params.compute_cost(pout, phns, [pout_lens, phn_lens])
53
54 if not train_mode:
55 ind2lab = params.train_loader.label_dict["phn"]["index2lab"]
56 sequence = ctc_greedy_decode(pout, pout_lens, blank_id=-1)
57 sequence = convert_index_to_lab(sequence, ind2lab)
58 phns = undo_padding(phns, phn_lens)
59 phns = convert_index_to_lab(phns, ind2lab)
60 stats = edit_distance.wer_details_for_batch(
61 ids, phns, sequence, compute_alignments=True
62 )
63 stats = {"PER": stats}
64 return loss, stats
65
66 return loss
67
68 def on_epoch_end(self, epoch, train_stats, valid_stats=None):
69 per = summarize_error_rate(valid_stats["PER"])
70 old_lr, new_lr = params.lr_annealing([params.optimizer], epoch, per)
71 epoch_stats = {"epoch": epoch, "lr": old_lr}
72 params.train_logger.log_stats(epoch_stats, train_stats, valid_stats)
73
74 params.checkpointer.save_and_keep_only(
75 meta={"PER": per},
76 importance_keys=[ckpt_recency, lambda c: -c.meta["PER"]],
77 )
78
79
80 # Prepare data
81 prepare = TIMITPreparer(
82 data_folder=params.data_folder,
83 splits=["train", "dev", "test"],
84 save_folder=params.data_folder,
85 )
86 prepare()
87 train_set = params.train_loader()
88 valid_set = params.valid_loader()
89 first_x, first_y = next(zip(*train_set))
90
91 # Modules are passed to optimizer and have train/eval called on them
92 modules = [params.model, params.output]
93 if hasattr(params, "augmentation"):
94 modules.append(params.augmentation)
95
96 # Create brain object for training
97 asr_brain = ASR(
98 modules=modules, optimizer=params.optimizer, first_inputs=[first_x],
99 )
100
101 # Load latest checkpoint to resume training
102 params.checkpointer.recover_if_possible()
103 asr_brain.fit(params.epoch_counter, train_set, valid_set)
104
105 # Load best checkpoint for evaluation
106 params.checkpointer.recover_if_possible(lambda c: -c.meta["PER"])
107 test_stats = asr_brain.evaluate(params.test_loader())
108 params.train_logger.log_stats(
109 stats_meta={"Epoch loaded": params.epoch_counter.current},
110 test_stats=test_stats,
111 )
112
113 # Write alignments to file
114 per_summary = edit_distance.wer_summary(test_stats["PER"])
115 with open(params.wer_file, "w") as fo:
116 wer_io.print_wer_summary(per_summary, fo)
117 wer_io.print_alignments(test_stats["PER"], fo)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/TIMIT/ASR_CTC/experiment.py b/recipes/TIMIT/ASR_CTC/experiment.py
--- a/recipes/TIMIT/ASR_CTC/experiment.py
+++ b/recipes/TIMIT/ASR_CTC/experiment.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import os
import sys
-import torch
import speechbrain as sb
import speechbrain.data_io.wer as wer_io
import speechbrain.utils.edit_distance as edit_distance
@@ -18,8 +17,6 @@
# Load hyperparameters file with command-line overrides
params_file, overrides = sb.core.parse_arguments(sys.argv[1:])
-if "seed" in overrides:
- torch.manual_seed(overrides["seed"])
with open(params_file) as fin:
params = sb.yaml.load_extended_yaml(fin, overrides)
|
{"golden_diff": "diff --git a/recipes/TIMIT/ASR_CTC/experiment.py b/recipes/TIMIT/ASR_CTC/experiment.py\n--- a/recipes/TIMIT/ASR_CTC/experiment.py\n+++ b/recipes/TIMIT/ASR_CTC/experiment.py\n@@ -1,7 +1,6 @@\n #!/usr/bin/env python3\n import os\n import sys\n-import torch\n import speechbrain as sb\n import speechbrain.data_io.wer as wer_io\n import speechbrain.utils.edit_distance as edit_distance\n@@ -18,8 +17,6 @@\n \n # Load hyperparameters file with command-line overrides\n params_file, overrides = sb.core.parse_arguments(sys.argv[1:])\n-if \"seed\" in overrides:\n- torch.manual_seed(overrides[\"seed\"])\n with open(params_file) as fin:\n params = sb.yaml.load_extended_yaml(fin, overrides)\n", "issue": "Replicability Issue\nEvery time I run the same experiment (e.,g minimal_examples/neural_networks/autoencoder) on the same machine I got slightly different results. Since we set up the seed, this shouldn't happen. \n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport torch\nimport speechbrain as sb\nimport speechbrain.data_io.wer as wer_io\nimport speechbrain.utils.edit_distance as edit_distance\nfrom speechbrain.data_io.data_io import convert_index_to_lab\nfrom speechbrain.decoders.ctc import ctc_greedy_decode\nfrom speechbrain.decoders.decoders import undo_padding\nfrom speechbrain.utils.checkpoints import ckpt_recency\nfrom speechbrain.utils.train_logger import summarize_error_rate\n\n# This hack needed to import data preparation script from ..\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(current_dir))\nfrom timit_prepare import TIMITPreparer # noqa E402\n\n# Load hyperparameters file with command-line overrides\nparams_file, overrides = sb.core.parse_arguments(sys.argv[1:])\nif \"seed\" in overrides:\n torch.manual_seed(overrides[\"seed\"])\nwith open(params_file) as fin:\n params = sb.yaml.load_extended_yaml(fin, overrides)\n\n# Create experiment directory\nsb.core.create_experiment_directory(\n experiment_directory=params.output_folder,\n params_to_save=params_file,\n overrides=overrides,\n)\n\n\n# Define training procedure\nclass ASR(sb.core.Brain):\n def compute_forward(self, x, train_mode=True, init_params=False):\n ids, wavs, wav_lens = x\n wavs, wav_lens = wavs.to(params.device), wav_lens.to(params.device)\n if hasattr(params, \"augmentation\"):\n wavs = params.augmentation(wavs, wav_lens, init_params)\n feats = params.compute_features(wavs, init_params)\n feats = params.normalize(feats, wav_lens)\n out = params.model(feats, init_params)\n out = params.output(out, init_params)\n pout = params.log_softmax(out)\n return pout, wav_lens\n\n def compute_objectives(self, predictions, targets, train_mode=True):\n pout, pout_lens = predictions\n ids, phns, phn_lens = targets\n phns, phn_lens = phns.to(params.device), phn_lens.to(params.device)\n loss = params.compute_cost(pout, phns, [pout_lens, phn_lens])\n\n if not train_mode:\n ind2lab = params.train_loader.label_dict[\"phn\"][\"index2lab\"]\n sequence = ctc_greedy_decode(pout, pout_lens, blank_id=-1)\n sequence = convert_index_to_lab(sequence, ind2lab)\n phns = undo_padding(phns, phn_lens)\n phns = convert_index_to_lab(phns, ind2lab)\n stats = edit_distance.wer_details_for_batch(\n ids, phns, sequence, compute_alignments=True\n )\n stats = {\"PER\": stats}\n return loss, stats\n\n return loss\n\n def on_epoch_end(self, epoch, train_stats, valid_stats=None):\n per = summarize_error_rate(valid_stats[\"PER\"])\n old_lr, new_lr = 
params.lr_annealing([params.optimizer], epoch, per)\n epoch_stats = {\"epoch\": epoch, \"lr\": old_lr}\n params.train_logger.log_stats(epoch_stats, train_stats, valid_stats)\n\n params.checkpointer.save_and_keep_only(\n meta={\"PER\": per},\n importance_keys=[ckpt_recency, lambda c: -c.meta[\"PER\"]],\n )\n\n\n# Prepare data\nprepare = TIMITPreparer(\n data_folder=params.data_folder,\n splits=[\"train\", \"dev\", \"test\"],\n save_folder=params.data_folder,\n)\nprepare()\ntrain_set = params.train_loader()\nvalid_set = params.valid_loader()\nfirst_x, first_y = next(zip(*train_set))\n\n# Modules are passed to optimizer and have train/eval called on them\nmodules = [params.model, params.output]\nif hasattr(params, \"augmentation\"):\n modules.append(params.augmentation)\n\n# Create brain object for training\nasr_brain = ASR(\n modules=modules, optimizer=params.optimizer, first_inputs=[first_x],\n)\n\n# Load latest checkpoint to resume training\nparams.checkpointer.recover_if_possible()\nasr_brain.fit(params.epoch_counter, train_set, valid_set)\n\n# Load best checkpoint for evaluation\nparams.checkpointer.recover_if_possible(lambda c: -c.meta[\"PER\"])\ntest_stats = asr_brain.evaluate(params.test_loader())\nparams.train_logger.log_stats(\n stats_meta={\"Epoch loaded\": params.epoch_counter.current},\n test_stats=test_stats,\n)\n\n# Write alignments to file\nper_summary = edit_distance.wer_summary(test_stats[\"PER\"])\nwith open(params.wer_file, \"w\") as fo:\n wer_io.print_wer_summary(per_summary, fo)\n wer_io.print_alignments(test_stats[\"PER\"], fo)\n", "path": "recipes/TIMIT/ASR_CTC/experiment.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport speechbrain as sb\nimport speechbrain.data_io.wer as wer_io\nimport speechbrain.utils.edit_distance as edit_distance\nfrom speechbrain.data_io.data_io import convert_index_to_lab\nfrom speechbrain.decoders.ctc import ctc_greedy_decode\nfrom speechbrain.decoders.decoders import undo_padding\nfrom speechbrain.utils.checkpoints import ckpt_recency\nfrom speechbrain.utils.train_logger import summarize_error_rate\n\n# This hack needed to import data preparation script from ..\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(current_dir))\nfrom timit_prepare import TIMITPreparer # noqa E402\n\n# Load hyperparameters file with command-line overrides\nparams_file, overrides = sb.core.parse_arguments(sys.argv[1:])\nwith open(params_file) as fin:\n params = sb.yaml.load_extended_yaml(fin, overrides)\n\n# Create experiment directory\nsb.core.create_experiment_directory(\n experiment_directory=params.output_folder,\n params_to_save=params_file,\n overrides=overrides,\n)\n\n\n# Define training procedure\nclass ASR(sb.core.Brain):\n def compute_forward(self, x, train_mode=True, init_params=False):\n ids, wavs, wav_lens = x\n wavs, wav_lens = wavs.to(params.device), wav_lens.to(params.device)\n if hasattr(params, \"augmentation\"):\n wavs = params.augmentation(wavs, wav_lens, init_params)\n feats = params.compute_features(wavs, init_params)\n feats = params.normalize(feats, wav_lens)\n out = params.model(feats, init_params)\n out = params.output(out, init_params)\n pout = params.log_softmax(out)\n return pout, wav_lens\n\n def compute_objectives(self, predictions, targets, train_mode=True):\n pout, pout_lens = predictions\n ids, phns, phn_lens = targets\n phns, phn_lens = phns.to(params.device), phn_lens.to(params.device)\n loss = params.compute_cost(pout, phns, [pout_lens, 
phn_lens])\n\n if not train_mode:\n ind2lab = params.train_loader.label_dict[\"phn\"][\"index2lab\"]\n sequence = ctc_greedy_decode(pout, pout_lens, blank_id=-1)\n sequence = convert_index_to_lab(sequence, ind2lab)\n phns = undo_padding(phns, phn_lens)\n phns = convert_index_to_lab(phns, ind2lab)\n stats = edit_distance.wer_details_for_batch(\n ids, phns, sequence, compute_alignments=True\n )\n stats = {\"PER\": stats}\n return loss, stats\n\n return loss\n\n def on_epoch_end(self, epoch, train_stats, valid_stats=None):\n per = summarize_error_rate(valid_stats[\"PER\"])\n old_lr, new_lr = params.lr_annealing([params.optimizer], epoch, per)\n epoch_stats = {\"epoch\": epoch, \"lr\": old_lr}\n params.train_logger.log_stats(epoch_stats, train_stats, valid_stats)\n\n params.checkpointer.save_and_keep_only(\n meta={\"PER\": per},\n importance_keys=[ckpt_recency, lambda c: -c.meta[\"PER\"]],\n )\n\n\n# Prepare data\nprepare = TIMITPreparer(\n data_folder=params.data_folder,\n splits=[\"train\", \"dev\", \"test\"],\n save_folder=params.data_folder,\n)\nprepare()\ntrain_set = params.train_loader()\nvalid_set = params.valid_loader()\nfirst_x, first_y = next(zip(*train_set))\n\n# Modules are passed to optimizer and have train/eval called on them\nmodules = [params.model, params.output]\nif hasattr(params, \"augmentation\"):\n modules.append(params.augmentation)\n\n# Create brain object for training\nasr_brain = ASR(\n modules=modules, optimizer=params.optimizer, first_inputs=[first_x],\n)\n\n# Load latest checkpoint to resume training\nparams.checkpointer.recover_if_possible()\nasr_brain.fit(params.epoch_counter, train_set, valid_set)\n\n# Load best checkpoint for evaluation\nparams.checkpointer.recover_if_possible(lambda c: -c.meta[\"PER\"])\ntest_stats = asr_brain.evaluate(params.test_loader())\nparams.train_logger.log_stats(\n stats_meta={\"Epoch loaded\": params.epoch_counter.current},\n test_stats=test_stats,\n)\n\n# Write alignments to file\nper_summary = edit_distance.wer_summary(test_stats[\"PER\"])\nwith open(params.wer_file, \"w\") as fo:\n wer_io.print_wer_summary(per_summary, fo)\n wer_io.print_alignments(test_stats[\"PER\"], fo)\n", "path": "recipes/TIMIT/ASR_CTC/experiment.py"}]}
| 1,576 | 188 |
gh_patches_debug_5694 | rasdani/github-patches | git_diff | saulpw__visidata-1558
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[zo] Give feedback when a file or URL is not found in the cell value
New users (and me when I have a few z related `open-cell-XYZ` commands 🙃) may get confused at the purpose of `zo`, when it is usually `zENTER` they want.
I propose the change, whereby a status is given to the user to tell them that a `file` or `URL` wasn't found (and therefore nothing happens). Also maybe suggest they want `zENTER`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/_open.py`
Content:
```
1 from visidata import *
2
3
4 vd.option('filetype', '', 'specify file type', replay=True)
5
6
7 @VisiData.api
8 def inputFilename(vd, prompt, *args, **kwargs):
9 return vd.input(prompt, type="filename", *args, completer=_completeFilename, **kwargs).strip()
10
11
12 @VisiData.api
13 def inputPath(vd, *args, **kwargs):
14 return Path(vd.inputFilename(*args, **kwargs))
15
16
17 def _completeFilename(val, state):
18 i = val.rfind('/')
19 if i < 0: # no /
20 base = ''
21 partial = val
22 elif i == 0: # root /
23 base = '/'
24 partial = val[1:]
25 else:
26 base = val[:i]
27 partial = val[i+1:]
28
29 files = []
30 for f in os.listdir(Path(base or '.')):
31 if f.startswith(partial):
32 files.append(os.path.join(base, f))
33
34 files.sort()
35 return files[state%len(files)]
36
37
38 @VisiData.api
39 def openPath(vd, p, filetype=None, create=False):
40 '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. Return constructed but unloaded sheet of appropriate type.
41 If True, *create* will return a new, blank **Sheet** if file does not exist.'''
42 if p.scheme and not p.has_fp():
43 schemes = p.scheme.split('+')
44 openfuncname = 'openurl_' + schemes[-1]
45
46 openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)
47 if not openfunc:
48 vd.fail(f'no loader for url scheme: {p.scheme}')
49
50 return openfunc(p, filetype=filetype)
51
52 if not p.exists() and not create:
53 return None
54
55 if not filetype:
56 if p.is_dir():
57 filetype = 'dir'
58 else:
59 filetype = p.ext or options.filetype or 'txt'
60
61 filetype = filetype.lower()
62
63 if not p.exists():
64 if not create:
65 return None
66 newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))
67 if not newfunc:
68 vd.warning('%s does not exist, creating new sheet' % p)
69 return vd.newSheet(p.name, 1, source=p)
70
71 vd.status('creating blank %s' % (p.given))
72 return newfunc(p)
73
74 openfunc = getattr(vd, 'open_' + filetype, vd.getGlobals().get('open_' + filetype))
75 if not openfunc:
76 vd.warning('unknown "%s" filetype' % filetype)
77 filetype = 'txt'
78 openfunc = vd.open_txt
79
80 vd.status('opening %s as %s' % (p.given, filetype))
81
82 return openfunc(p)
83
84
85 @VisiData.api
86 def openSource(vd, p, filetype=None, create=False, **kwargs):
87 '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. *p* can be a Path or a string (filename, url, or "-" for stdin).
88 when true, *create* will return a blank sheet, if file does not exist.'''
89 if not filetype:
90 filetype = options.getonly('filetype', 'global', '')
91
92 vs = None
93 if isinstance(p, str):
94 if '://' in p:
95 vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse
96 elif p == '-':
97 vs = vd.openPath(vd.stdinSource, filetype=filetype)
98 else:
99 vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse
100 else:
101 vs = vd.openPath(p, filetype=filetype, create=create)
102
103 for optname, optval in kwargs.items():
104 vs.options[optname] = optval
105
106 return vs
107
108
109 #### enable external addons
110 @VisiData.api
111 def open_txt(vd, p):
112 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'
113 with p.open_text(encoding=vd.options.encoding) as fp:
114 try:
115 if options.delimiter in next(fp): # peek at the first line
116 return vd.open_tsv(p) # TSV often have .txt extension
117 except StopIteration:
118 return Sheet(p.name, columns=[SettableColumn()], source=p)
119 return TextSheet(p.name, source=p)
120
121
122 @VisiData.api
123 def loadInternalSheet(vd, cls, p, **kwargs):
124 'Load internal sheet of given class. Internal sheets are always tsv.'
125 vs = cls(p.name, source=p, **kwargs)
126 options._set('encoding', 'utf8', vs)
127 if p.exists():
128 vd.sheets.insert(0, vs)
129 vs.reload.__wrapped__(vs)
130 vd.sheets.pop(0)
131 return vs
132
133
134 BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
135 TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay))', 'Open file or URL from path in current cell')
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/visidata/_open.py b/visidata/_open.py
--- a/visidata/_open.py
+++ b/visidata/_open.py
@@ -132,4 +132,4 @@
BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename("open: "), create=True))', 'Open file or URL')
-TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay))', 'Open file or URL from path in current cell')
+TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f"file {cursorDisplay} does not exist"))', 'Open file or URL from path in current cell')
|
{"golden_diff": "diff --git a/visidata/_open.py b/visidata/_open.py\n--- a/visidata/_open.py\n+++ b/visidata/_open.py\n@@ -132,4 +132,4 @@\n \n \n BaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\n-TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay))', 'Open file or URL from path in current cell')\n+TableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n", "issue": "[zo] Give feedback when a file or URL is not found in the cell value\nNew users (and me when I have a few z related `open-cell-XYZ` commands \ud83d\ude43) may get confused at the purpose of `zo`, when it is usually `zENTER` they want.\r\n\r\nI propose the change, whereby a status is given to the user to tell them that a `file` or `URL` wasn't found (and therefore nothing happens). Also maybe suggest they want `zENTER`?\n", "before_files": [{"content": "from visidata import *\n\n\nvd.option('filetype', '', 'specify file type', replay=True)\n\n\[email protected]\ndef inputFilename(vd, prompt, *args, **kwargs):\n return vd.input(prompt, type=\"filename\", *args, completer=_completeFilename, **kwargs).strip()\n\n\[email protected]\ndef inputPath(vd, *args, **kwargs):\n return Path(vd.inputFilename(*args, **kwargs))\n\n\ndef _completeFilename(val, state):\n i = val.rfind('/')\n if i < 0: # no /\n base = ''\n partial = val\n elif i == 0: # root /\n base = '/'\n partial = val[1:]\n else:\n base = val[:i]\n partial = val[i+1:]\n\n files = []\n for f in os.listdir(Path(base or '.')):\n if f.startswith(partial):\n files.append(os.path.join(base, f))\n\n files.sort()\n return files[state%len(files)]\n\n\[email protected]\ndef openPath(vd, p, filetype=None, create=False):\n '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. Return constructed but unloaded sheet of appropriate type.\n If True, *create* will return a new, blank **Sheet** if file does not exist.'''\n if p.scheme and not p.has_fp():\n schemes = p.scheme.split('+')\n openfuncname = 'openurl_' + schemes[-1]\n\n openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)\n if not openfunc:\n vd.fail(f'no loader for url scheme: {p.scheme}')\n\n return openfunc(p, filetype=filetype)\n\n if not p.exists() and not create:\n return None\n\n if not filetype:\n if p.is_dir():\n filetype = 'dir'\n else:\n filetype = p.ext or options.filetype or 'txt'\n\n filetype = filetype.lower()\n\n if not p.exists():\n if not create:\n return None\n newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))\n if not newfunc:\n vd.warning('%s does not exist, creating new sheet' % p)\n return vd.newSheet(p.name, 1, source=p)\n\n vd.status('creating blank %s' % (p.given))\n return newfunc(p)\n\n openfunc = getattr(vd, 'open_' + filetype, vd.getGlobals().get('open_' + filetype))\n if not openfunc:\n vd.warning('unknown \"%s\" filetype' % filetype)\n filetype = 'txt'\n openfunc = vd.open_txt\n\n vd.status('opening %s as %s' % (p.given, filetype))\n\n return openfunc(p)\n\n\[email protected]\ndef openSource(vd, p, filetype=None, create=False, **kwargs):\n '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. 
*p* can be a Path or a string (filename, url, or \"-\" for stdin).\n when true, *create* will return a blank sheet, if file does not exist.'''\n if not filetype:\n filetype = options.getonly('filetype', 'global', '')\n\n vs = None\n if isinstance(p, str):\n if '://' in p:\n vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse\n elif p == '-':\n vs = vd.openPath(vd.stdinSource, filetype=filetype)\n else:\n vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse\n else:\n vs = vd.openPath(p, filetype=filetype, create=create)\n\n for optname, optval in kwargs.items():\n vs.options[optname] = optval\n\n return vs\n\n\n#### enable external addons\[email protected]\ndef open_txt(vd, p):\n 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'\n with p.open_text(encoding=vd.options.encoding) as fp:\n try:\n if options.delimiter in next(fp): # peek at the first line\n return vd.open_tsv(p) # TSV often have .txt extension\n except StopIteration:\n return Sheet(p.name, columns=[SettableColumn()], source=p)\n return TextSheet(p.name, source=p)\n\n\[email protected]\ndef loadInternalSheet(vd, cls, p, **kwargs):\n 'Load internal sheet of given class. Internal sheets are always tsv.'\n vs = cls(p.name, source=p, **kwargs)\n options._set('encoding', 'utf8', vs)\n if p.exists():\n vd.sheets.insert(0, vs)\n vs.reload.__wrapped__(vs)\n vd.sheets.pop(0)\n return vs\n\n\nBaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\nTableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay))', 'Open file or URL from path in current cell')\n", "path": "visidata/_open.py"}], "after_files": [{"content": "from visidata import *\n\n\nvd.option('filetype', '', 'specify file type', replay=True)\n\n\[email protected]\ndef inputFilename(vd, prompt, *args, **kwargs):\n return vd.input(prompt, type=\"filename\", *args, completer=_completeFilename, **kwargs).strip()\n\n\[email protected]\ndef inputPath(vd, *args, **kwargs):\n return Path(vd.inputFilename(*args, **kwargs))\n\n\ndef _completeFilename(val, state):\n i = val.rfind('/')\n if i < 0: # no /\n base = ''\n partial = val\n elif i == 0: # root /\n base = '/'\n partial = val[1:]\n else:\n base = val[:i]\n partial = val[i+1:]\n\n files = []\n for f in os.listdir(Path(base or '.')):\n if f.startswith(partial):\n files.append(os.path.join(base, f))\n\n files.sort()\n return files[state%len(files)]\n\n\[email protected]\ndef openPath(vd, p, filetype=None, create=False):\n '''Call ``open_<filetype>(p)`` or ``openurl_<p.scheme>(p, filetype)``. 
Return constructed but unloaded sheet of appropriate type.\n If True, *create* will return a new, blank **Sheet** if file does not exist.'''\n if p.scheme and not p.has_fp():\n schemes = p.scheme.split('+')\n openfuncname = 'openurl_' + schemes[-1]\n\n openfunc = getattr(vd, openfuncname, None) or vd.getGlobals().get(openfuncname, None)\n if not openfunc:\n vd.fail(f'no loader for url scheme: {p.scheme}')\n\n return openfunc(p, filetype=filetype)\n\n if not p.exists() and not create:\n return None\n\n if not filetype:\n if p.is_dir():\n filetype = 'dir'\n else:\n filetype = p.ext or options.filetype or 'txt'\n\n filetype = filetype.lower()\n\n if not p.exists():\n if not create:\n return None\n newfunc = getattr(vd, 'new_' + filetype, vd.getGlobals().get('new_' + filetype))\n if not newfunc:\n vd.warning('%s does not exist, creating new sheet' % p)\n return vd.newSheet(p.name, 1, source=p)\n\n vd.status('creating blank %s' % (p.given))\n return newfunc(p)\n\n openfunc = getattr(vd, 'open_' + filetype, vd.getGlobals().get('open_' + filetype))\n if not openfunc:\n vd.warning('unknown \"%s\" filetype' % filetype)\n filetype = 'txt'\n openfunc = vd.open_txt\n\n vd.status('opening %s as %s' % (p.given, filetype))\n\n return openfunc(p)\n\n\[email protected]\ndef openSource(vd, p, filetype=None, create=False, **kwargs):\n '''Return unloaded sheet object for *p* opened as the given *filetype* and with *kwargs* as option overrides. *p* can be a Path or a string (filename, url, or \"-\" for stdin).\n when true, *create* will return a blank sheet, if file does not exist.'''\n if not filetype:\n filetype = options.getonly('filetype', 'global', '')\n\n vs = None\n if isinstance(p, str):\n if '://' in p:\n vs = vd.openPath(Path(p), filetype=filetype) # convert to Path and recurse\n elif p == '-':\n vs = vd.openPath(vd.stdinSource, filetype=filetype)\n else:\n vs = vd.openPath(Path(p), filetype=filetype, create=create) # convert to Path and recurse\n else:\n vs = vd.openPath(p, filetype=filetype, create=create)\n\n for optname, optval in kwargs.items():\n vs.options[optname] = optval\n\n return vs\n\n\n#### enable external addons\[email protected]\ndef open_txt(vd, p):\n 'Create sheet from `.txt` file at Path `p`, checking whether it is TSV.'\n with p.open_text(encoding=vd.options.encoding) as fp:\n try:\n if options.delimiter in next(fp): # peek at the first line\n return vd.open_tsv(p) # TSV often have .txt extension\n except StopIteration:\n return Sheet(p.name, columns=[SettableColumn()], source=p)\n return TextSheet(p.name, source=p)\n\n\[email protected]\ndef loadInternalSheet(vd, cls, p, **kwargs):\n 'Load internal sheet of given class. Internal sheets are always tsv.'\n vs = cls(p.name, source=p, **kwargs)\n options._set('encoding', 'utf8', vs)\n if p.exists():\n vd.sheets.insert(0, vs)\n vs.reload.__wrapped__(vs)\n vd.sheets.pop(0)\n return vs\n\n\nBaseSheet.addCommand('o', 'open-file', 'vd.push(openSource(inputFilename(\"open: \"), create=True))', 'Open file or URL')\nTableSheet.addCommand('zo', 'open-cell-file', 'vd.push(openSource(cursorDisplay) or fail(f\"file {cursorDisplay} does not exist\"))', 'Open file or URL from path in current cell')\n", "path": "visidata/_open.py"}]}
| 1,835 | 162 |
gh_patches_debug_32284 | rasdani/github-patches | git_diff | getnikola__nikola-1011
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reST listings can’t number lines, while code-blocks do it incompatibly
1. One can’t do a `.. listing::` in Nikola with line numbers.
2. In `.. code-block::`s, one must use the reST `:number-lines:` option syntax. However, Sphinx wants `:linenos:` instead. (#770)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/rest/listing.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27
28 """ Define and register a listing directive using the existing CodeBlock """
29
30
31 from __future__ import unicode_literals
32 from codecs import open as codecs_open # for patching purposes
33 import os
34 try:
35 from urlparse import urlunsplit
36 except ImportError:
37 from urllib.parse import urlunsplit # NOQA
38
39 from docutils import core
40 from docutils import nodes
41 from docutils.parsers.rst import Directive, directives
42 from docutils.parsers.rst.directives.misc import Include
43 try:
44 from docutils.parsers.rst.directives.body import CodeBlock
45 except ImportError: # docutils < 0.9 (Debian Sid For The Loss)
46 class CodeBlock(Directive):
47 required_arguments = 1
48 has_content = True
49 CODE = '<pre>{0}</pre>'
50
51 def run(self):
52 """ Required by the Directive interface. Create docutils nodes """
53 return [nodes.raw('', self.CODE.format('\n'.join(self.content)), format='html')]
54 directives.register_directive('code', CodeBlock)
55
56
57 from nikola.plugin_categories import RestExtension
58
59
60 class Plugin(RestExtension):
61
62 name = "rest_listing"
63
64 def set_site(self, site):
65 self.site = site
66 # Even though listings don't use CodeBlock anymore, I am
67 # leaving these to make the code directive work with
68 # docutils < 0.9
69 directives.register_directive('code-block', CodeBlock)
70 directives.register_directive('sourcecode', CodeBlock)
71 directives.register_directive('listing', Listing)
72 return super(Plugin, self).set_site(site)
73
74
75 class Listing(Include):
76 """ listing directive: create a highlighted block of code from a file in listings/
77
78 Usage:
79
80 .. listing:: nikola.py python
81 :number-lines:
82
83 """
84 has_content = False
85 required_arguments = 1
86 optional_arguments = 1
87
88 def run(self):
89 fname = self.arguments.pop(0)
90 lang = self.arguments.pop(0)
91 fpath = os.path.join('listings', fname)
92 self.arguments.insert(0, fpath)
93 self.options['code'] = lang
94 with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
95 self.content = fileobject.read().splitlines()
96 self.state.document.settings.record_dependencies.add(fpath)
97 target = urlunsplit(("link", 'listing', fname, '', ''))
98 generated_nodes = (
99 [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])
100 generated_nodes += self.get_code_from_file(fileobject)
101 return generated_nodes
102
103 def get_code_from_file(self, data):
104 """ Create CodeBlock nodes from file object content """
105 return super(Listing, self).run()
106
107 def assert_has_content(self):
108 """ Listing has no content, override check from superclass """
109 pass
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py
--- a/nikola/plugins/compile/rest/listing.py
+++ b/nikola/plugins/compile/rest/listing.py
@@ -56,6 +56,18 @@
from nikola.plugin_categories import RestExtension
+# Add sphinx compatibility option
+CodeBlock.option_spec['linenos'] = directives.unchanged
+
+
+class FlexibleCodeBlock(CodeBlock):
+
+ def run(self):
+ if 'linenos' in self.options:
+ self.options['number-lines'] = self.options['linenos']
+ return super(FlexibleCodeBlock, self).run()
+CodeBlock = FlexibleCodeBlock
+
class Plugin(RestExtension):
@@ -71,6 +83,11 @@
directives.register_directive('listing', Listing)
return super(Plugin, self).set_site(site)
+# Add sphinx compatibility option
+listing_spec = Include.option_spec
+listing_spec['linenos'] = directives.unchanged
+print(listing_spec)
+
class Listing(Include):
""" listing directive: create a highlighted block of code from a file in listings/
@@ -84,6 +101,7 @@
has_content = False
required_arguments = 1
optional_arguments = 1
+ option_spec = listing_spec
def run(self):
fname = self.arguments.pop(0)
@@ -91,6 +109,8 @@
fpath = os.path.join('listings', fname)
self.arguments.insert(0, fpath)
self.options['code'] = lang
+ if 'linenos' in self.options:
+ self.options['number-lines'] = self.options['linenos']
with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
self.content = fileobject.read().splitlines()
self.state.document.settings.record_dependencies.add(fpath)
|
{"golden_diff": "diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py\n--- a/nikola/plugins/compile/rest/listing.py\n+++ b/nikola/plugins/compile/rest/listing.py\n@@ -56,6 +56,18 @@\n \n from nikola.plugin_categories import RestExtension\n \n+# Add sphinx compatibility option\n+CodeBlock.option_spec['linenos'] = directives.unchanged\n+\n+\n+class FlexibleCodeBlock(CodeBlock):\n+\n+ def run(self):\n+ if 'linenos' in self.options:\n+ self.options['number-lines'] = self.options['linenos']\n+ return super(FlexibleCodeBlock, self).run()\n+CodeBlock = FlexibleCodeBlock\n+\n \n class Plugin(RestExtension):\n \n@@ -71,6 +83,11 @@\n directives.register_directive('listing', Listing)\n return super(Plugin, self).set_site(site)\n \n+# Add sphinx compatibility option\n+listing_spec = Include.option_spec\n+listing_spec['linenos'] = directives.unchanged\n+print(listing_spec)\n+\n \n class Listing(Include):\n \"\"\" listing directive: create a highlighted block of code from a file in listings/\n@@ -84,6 +101,7 @@\n has_content = False\n required_arguments = 1\n optional_arguments = 1\n+ option_spec = listing_spec\n \n def run(self):\n fname = self.arguments.pop(0)\n@@ -91,6 +109,8 @@\n fpath = os.path.join('listings', fname)\n self.arguments.insert(0, fpath)\n self.options['code'] = lang\n+ if 'linenos' in self.options:\n+ self.options['number-lines'] = self.options['linenos']\n with codecs_open(fpath, 'rb+', 'utf8') as fileobject:\n self.content = fileobject.read().splitlines()\n self.state.document.settings.record_dependencies.add(fpath)\n", "issue": "reST listings can\u2019t number lines, while code-blocks do it incompatibly\n1. One can\u2019t do a `.. listing::` in Nikola with line numbers.\n2. In `.. code-block::`s, one must use the reST `:number-lines:` option syntax. However, Sphinx wants `:linenos:` instead. (#770)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\" Define and register a listing directive using the existing CodeBlock \"\"\"\n\n\nfrom __future__ import unicode_literals\nfrom codecs import open as codecs_open # for patching purposes\nimport os\ntry:\n from urlparse import urlunsplit\nexcept ImportError:\n from urllib.parse import urlunsplit # NOQA\n\nfrom docutils import core\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom docutils.parsers.rst.directives.misc import Include\ntry:\n from docutils.parsers.rst.directives.body import CodeBlock\nexcept ImportError: # docutils < 0.9 (Debian Sid For The Loss)\n class CodeBlock(Directive):\n required_arguments = 1\n has_content = True\n CODE = '<pre>{0}</pre>'\n\n def run(self):\n \"\"\" Required by the Directive interface. Create docutils nodes \"\"\"\n return [nodes.raw('', self.CODE.format('\\n'.join(self.content)), format='html')]\n directives.register_directive('code', CodeBlock)\n\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n\n name = \"rest_listing\"\n\n def set_site(self, site):\n self.site = site\n # Even though listings don't use CodeBlock anymore, I am\n # leaving these to make the code directive work with\n # docutils < 0.9\n directives.register_directive('code-block', CodeBlock)\n directives.register_directive('sourcecode', CodeBlock)\n directives.register_directive('listing', Listing)\n return super(Plugin, self).set_site(site)\n\n\nclass Listing(Include):\n \"\"\" listing directive: create a highlighted block of code from a file in listings/\n\n Usage:\n\n .. 
listing:: nikola.py python\n :number-lines:\n\n \"\"\"\n has_content = False\n required_arguments = 1\n optional_arguments = 1\n\n def run(self):\n fname = self.arguments.pop(0)\n lang = self.arguments.pop(0)\n fpath = os.path.join('listings', fname)\n self.arguments.insert(0, fpath)\n self.options['code'] = lang\n with codecs_open(fpath, 'rb+', 'utf8') as fileobject:\n self.content = fileobject.read().splitlines()\n self.state.document.settings.record_dependencies.add(fpath)\n target = urlunsplit((\"link\", 'listing', fname, '', ''))\n generated_nodes = (\n [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])\n generated_nodes += self.get_code_from_file(fileobject)\n return generated_nodes\n\n def get_code_from_file(self, data):\n \"\"\" Create CodeBlock nodes from file object content \"\"\"\n return super(Listing, self).run()\n\n def assert_has_content(self):\n \"\"\" Listing has no content, override check from superclass \"\"\"\n pass\n", "path": "nikola/plugins/compile/rest/listing.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\" Define and register a listing directive using the existing CodeBlock \"\"\"\n\n\nfrom __future__ import unicode_literals\nfrom codecs import open as codecs_open # for patching purposes\nimport os\ntry:\n from urlparse import urlunsplit\nexcept ImportError:\n from urllib.parse import urlunsplit # NOQA\n\nfrom docutils import core\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom docutils.parsers.rst.directives.misc import Include\ntry:\n from docutils.parsers.rst.directives.body import CodeBlock\nexcept ImportError: # docutils < 0.9 (Debian Sid For The Loss)\n class CodeBlock(Directive):\n required_arguments = 1\n has_content = True\n CODE = '<pre>{0}</pre>'\n\n def run(self):\n \"\"\" Required by the Directive interface. 
Create docutils nodes \"\"\"\n return [nodes.raw('', self.CODE.format('\\n'.join(self.content)), format='html')]\n directives.register_directive('code', CodeBlock)\n\n\nfrom nikola.plugin_categories import RestExtension\n\n# Add sphinx compatibility option\nCodeBlock.option_spec['linenos'] = directives.unchanged\n\n\nclass FlexibleCodeBlock(CodeBlock):\n\n def run(self):\n if 'linenos' in self.options:\n self.options['number-lines'] = self.options['linenos']\n return super(FlexibleCodeBlock, self).run()\nCodeBlock = FlexibleCodeBlock\n\n\nclass Plugin(RestExtension):\n\n name = \"rest_listing\"\n\n def set_site(self, site):\n self.site = site\n # Even though listings don't use CodeBlock anymore, I am\n # leaving these to make the code directive work with\n # docutils < 0.9\n directives.register_directive('code-block', CodeBlock)\n directives.register_directive('sourcecode', CodeBlock)\n directives.register_directive('listing', Listing)\n return super(Plugin, self).set_site(site)\n\n# Add sphinx compatibility option\nlisting_spec = Include.option_spec\nlisting_spec['linenos'] = directives.unchanged\nprint(listing_spec)\n\n\nclass Listing(Include):\n \"\"\" listing directive: create a highlighted block of code from a file in listings/\n\n Usage:\n\n .. listing:: nikola.py python\n :number-lines:\n\n \"\"\"\n has_content = False\n required_arguments = 1\n optional_arguments = 1\n option_spec = listing_spec\n\n def run(self):\n fname = self.arguments.pop(0)\n lang = self.arguments.pop(0)\n fpath = os.path.join('listings', fname)\n self.arguments.insert(0, fpath)\n self.options['code'] = lang\n if 'linenos' in self.options:\n self.options['number-lines'] = self.options['linenos']\n with codecs_open(fpath, 'rb+', 'utf8') as fileobject:\n self.content = fileobject.read().splitlines()\n self.state.document.settings.record_dependencies.add(fpath)\n target = urlunsplit((\"link\", 'listing', fname, '', ''))\n generated_nodes = (\n [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])\n generated_nodes += self.get_code_from_file(fileobject)\n return generated_nodes\n\n def get_code_from_file(self, data):\n \"\"\" Create CodeBlock nodes from file object content \"\"\"\n return super(Listing, self).run()\n\n def assert_has_content(self):\n \"\"\" Listing has no content, override check from superclass \"\"\"\n pass\n", "path": "nikola/plugins/compile/rest/listing.py"}]}
| 1,448 | 430 |
gh_patches_debug_4104
|
rasdani/github-patches
|
git_diff
|
google__TensorNetwork-250
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python version requirement
Our current setup file still accepts python>=3.5, even though we stopped testing against it and Jax appears to have discontinued support for it. Should we not be stricter in the setup, especially if we move to Jax as the default?
--- END ISSUE ---
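The stricter floor is a one-line change in `setup()`. The sketch below assumes 3.6 as the new minimum, which matches the patch further down rather than a decision made here, and pairs it with the kind of runtime guard some projects add alongside the metadata.

```python
# Illustrative only; the 3.6 floor mirrors the accompanying patch.
import sys

if sys.version_info < (3, 6):
    raise RuntimeError("tensornetwork requires Python >= 3.6")

# The equivalent declaration in setup.py:
#   setup(..., python_requires='>=3.6.0', ...)
```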
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # Copyright 2019 The TensorNetwork Developers
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # https://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from setuptools import find_packages, setup
17
18 # This reads the __version__ variable from tensornetwork/version.py
19 with open('tensornetwork/version.py') as f:
20 exec(f.read(), globals())
21
22 description = ('A high level tensor network API for tensorflow.')
23
24 # Reading long Description from README.md file.
25 with open("README.md", "r") as fh:
26 long_description = fh.read()
27
28 # Read in requirements
29 requirements = [
30 requirement.strip() for requirement in open('requirements.txt').readlines()
31 ]
32
33 setup(
34 name='tensornetwork',
35 version=__version__,
36 url='http://github.com/google/TensorNetwork',
37 author='The TensorNetwork Developers',
38 author_email='[email protected]',
39 python_requires=('>=3.5.0'),
40 install_requires=requirements,
41 license='Apache 2.0',
42 description=description,
43 long_description=long_description,
44 long_description_content_type="text/markdown",
45 packages=find_packages(),
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
url='http://github.com/google/TensorNetwork',
author='The TensorNetwork Developers',
author_email='[email protected]',
- python_requires=('>=3.5.0'),
+ python_requires=('>=3.6.0'),
install_requires=requirements,
license='Apache 2.0',
description=description,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork Developers',\n author_email='[email protected]',\n- python_requires=('>=3.5.0'),\n+ python_requires=('>=3.6.0'),\n install_requires=requirements,\n license='Apache 2.0',\n description=description,\n", "issue": "Python version requirement\nOur current setup file is still happy with python>=3.5, whereas we stopped testing for it and it seems that Jax also discontinued support. Should we not be stricter in the setup especially if we move to Jax as default?\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2019 The TensorNetwork Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from tensornetwork/version.py\nwith open('tensornetwork/version.py') as f:\n exec(f.read(), globals())\n\ndescription = ('A high level tensor network API for tensorflow.')\n\n# Reading long Description from README.md file.\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# Read in requirements\nrequirements = [\n requirement.strip() for requirement in open('requirements.txt').readlines()\n]\n\nsetup(\n name='tensornetwork',\n version=__version__,\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork Developers',\n author_email='[email protected]',\n python_requires=('>=3.5.0'),\n install_requires=requirements,\n license='Apache 2.0',\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright 2019 The TensorNetwork Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\n# This reads the __version__ variable from tensornetwork/version.py\nwith open('tensornetwork/version.py') as f:\n exec(f.read(), globals())\n\ndescription = ('A high level tensor network API for tensorflow.')\n\n# Reading long Description from README.md file.\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n# Read in requirements\nrequirements = [\n requirement.strip() for requirement in open('requirements.txt').readlines()\n]\n\nsetup(\n name='tensornetwork',\n version=__version__,\n url='http://github.com/google/TensorNetwork',\n author='The TensorNetwork 
Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n license='Apache 2.0',\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n)\n", "path": "setup.py"}]}
| 745 | 107 |
gh_patches_debug_1639
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-318
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Access to the Generator.random
It would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but one currently has no way of gathering what the seed/state is if using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail.
That is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable)
For now, the workaround appears to be to create a Faker object with your own Generator.
--- END ISSUE ---
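For illustration, once the generator exposes its `Random` instance (the patch further down does this through a `random` property), a test run can capture the state up front and log or restore it on failure without ever fixing the seed. The usage below is a sketch under that assumption, not faker's documented API at the time of the issue.

```python
from faker import Faker

fake = Faker()

state = fake.random.getstate()   # capture the RNG state before generating data
first = fake.name()

fake.random.setstate(state)      # rewind to the captured state
again = fake.name()

assert first == again            # identical state yields identical fake values
# On a test failure, the captured state (e.g. pickled) can be logged for replay.
```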
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/generator.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import re
6 import random
7
8
9 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
10 random = random.Random()
11
12
13 class Generator(object):
14
15 __config = {}
16
17 def __init__(self, **config):
18 self.providers = []
19 self.__config = dict(
20 list(self.__config.items()) + list(config.items()))
21
22 def add_provider(self, provider):
23
24 if type(provider) is type:
25 provider = provider(self)
26
27 self.providers.insert(0, provider)
28
29 for method_name in dir(provider):
30 # skip 'private' method
31 if method_name.startswith('_'):
32 continue
33
34 faker_function = getattr(provider, method_name)
35
36 if hasattr(faker_function, '__call__') or \
37 isinstance(faker_function, (classmethod, staticmethod)):
38 # add all faker method to generator
39 self.set_formatter(method_name, faker_function)
40
41 def provider(self, name):
42 try:
43 lst = [p for p in self.get_providers()
44 if p.__provider__ == name.lower()]
45 return lst[0]
46 except IndexError:
47 return None
48
49 def get_providers(self):
50 """Returns added providers."""
51 return self.providers
52
53 def seed(self, seed=None):
54 """Calls random.seed"""
55 random.seed(seed)
56
57 def format(self, formatter, *args, **kwargs):
58 """
59 This is a secure way to make a fake from another Provider.
60 """
61 # TODO: data export?
62 return self.get_formatter(formatter)(*args, **kwargs)
63
64 def get_formatter(self, formatter):
65 try:
66 return getattr(self, formatter)
67 except AttributeError:
68 raise AttributeError('Unknown formatter "{0}"'.format(formatter))
69
70 def set_formatter(self, name, method):
71 """
72 This method adds a provider method to generator.
73 Override this method to add some decoration or logging stuff.
74 """
75 setattr(self, name, method)
76
77 def parse(self, text):
78 """
79 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
80 with the result from the token method call.
81 """
82 return _re_token.sub(self.__format_token, text)
83
84 def __format_token(self, matches):
85 formatter = list(matches.groups())
86 formatter[1] = self.format(formatter[1])
87 return ''.join(formatter)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -50,6 +50,10 @@
"""Returns added providers."""
return self.providers
+ @property
+ def random(self):
+ return random
+
def seed(self, seed=None):
"""Calls random.seed"""
random.seed(seed)
|
{"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -50,6 +50,10 @@\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n \n+ @property\n+ def random(self):\n+ return random\n+\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n", "issue": "Access to the Generator.random\nIt would be nice if one could gain access to the Generator.random variable so that one could save/set the state. I realize I can pass in the seed, but one currently has no way of gathering what the seed/state is if using the automatically generated seed. I don't want to use a fixed seed, but I do want to log/print the seed used _if_ the tests fail.\n\nThat is, I'd like to be able to do something like: `faker.generator.getstate()` (which gets the random state w/o exposing random) or `faker.generator.random.getstate()` (which gives access to the random variable)\n\nFor now, the workaround appears to be to create a Faker object with your own Generator.\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n raise AttributeError('Unknown formatter \"{0}\"'.format(formatter))\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport re\nimport random\n\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random.Random()\n\n\nclass Generator(object):\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = 
[]\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n\n def add_provider(self, provider):\n\n if type(provider) is type:\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if hasattr(faker_function, '__call__') or \\\n isinstance(faker_function, (classmethod, staticmethod)):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return random\n\n def seed(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n raise AttributeError('Unknown formatter \"{0}\"'.format(formatter))\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = self.format(formatter[1])\n return ''.join(formatter)\n", "path": "faker/generator.py"}]}
| 1,102 | 92 |
gh_patches_debug_12588
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-9255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The SCIM JSON for User and Group schema attribute allows only one value
The SCIM JSON for the User and Group `schemas` attribute allows only one value. The check on the schemas attribute should only enforce a minimum number of entries, not a maximum.
For example, I would like to be able to support the following
"schemas": [
"urn:ietf:params:scim:schemas:extension:servicenow:2.0:User",
"urn:ietf:params:scim:schemas:core:2.0:User",
"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User"
]
To Reproduce
Steps to reproduce the behavior:
Create Property mapping, add
return {
"schemas" : [
"urn:ietf:params:scim:schemas:core:2.0:User",
"urn:ietf:params:scim:schemas:extention:enterprise:2.0:User"
],
"userName": request.user.username,
"name": {
"formatted": request.user.name,
"givenName": givenName,
"familyName": familyName,
},
Expected behavior
JSON containing schemas attribute
Instead I get
Stopping sync due to error: Error 1 validation error for User schemas Tuple should have at most 1 item after validation, not 2 [type=too_long, input_value=['urn:ietf:params:scim:sc...on:enterprise:2.0:User'], input_type=list] For further information visit https://errors.pydantic.dev/2.4/v/too_long
Version and Deployment (please complete the following information):
authentik version: [2023.10.2]
Deployment: [docker-compose]
--- END ISSUE ---
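The pydantic message points at the type annotation rather than the data: in pydantic v2 a field declared as `tuple[str]` means a tuple of exactly one string, while `list[str]` puts no upper bound on the length. A minimal sketch of the difference, with model names invented for the example and Python 3.9+ assumed:

```python
from pydantic import BaseModel, ValidationError


class StrictSchemas(BaseModel):
    schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)


class RelaxedSchemas(BaseModel):
    schemas: list[str] = ["urn:ietf:params:scim:schemas:core:2.0:User"]


two_schemas = [
    "urn:ietf:params:scim:schemas:core:2.0:User",
    "urn:ietf:params:scim:schemas:extension:enterprise:2.0:User",
]

try:
    StrictSchemas(schemas=two_schemas)
except ValidationError as exc:
    print(exc)  # "Tuple should have at most 1 item after validation, not 2"

print(RelaxedSchemas(schemas=two_schemas).schemas)  # both entries accepted
```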
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/providers/scim/clients/schema.py`
Content:
```
1 """Custom SCIM schemas"""
2
3 from pydanticscim.group import Group as BaseGroup
4 from pydanticscim.responses import PatchRequest as BasePatchRequest
5 from pydanticscim.responses import SCIMError as BaseSCIMError
6 from pydanticscim.service_provider import Bulk, ChangePassword, Filter, Patch, Sort
7 from pydanticscim.service_provider import (
8 ServiceProviderConfiguration as BaseServiceProviderConfiguration,
9 )
10 from pydanticscim.user import User as BaseUser
11
12
13 class User(BaseUser):
14 """Modified User schema with added externalId field"""
15
16 schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)
17 externalId: str | None = None
18
19
20 class Group(BaseGroup):
21 """Modified Group schema with added externalId field"""
22
23 schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:Group",)
24 externalId: str | None = None
25
26
27 class ServiceProviderConfiguration(BaseServiceProviderConfiguration):
28 """ServiceProviderConfig with fallback"""
29
30 _is_fallback: bool | None = False
31
32 @property
33 def is_fallback(self) -> bool:
34 """Check if this service provider config was retrieved from the API endpoint
35 or a fallback was used"""
36 return self._is_fallback
37
38 @staticmethod
39 def default() -> "ServiceProviderConfiguration":
40 """Get default configuration, which doesn't support any optional features as fallback"""
41 return ServiceProviderConfiguration(
42 patch=Patch(supported=False),
43 bulk=Bulk(supported=False),
44 filter=Filter(supported=False),
45 changePassword=ChangePassword(supported=False),
46 sort=Sort(supported=False),
47 authenticationSchemes=[],
48 _is_fallback=True,
49 )
50
51
52 class PatchRequest(BasePatchRequest):
53 """PatchRequest which correctly sets schemas"""
54
55 schemas: tuple[str] = ("urn:ietf:params:scim:api:messages:2.0:PatchOp",)
56
57
58 class SCIMError(BaseSCIMError):
59 """SCIM error with optional status code"""
60
61 status: int | None
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/providers/scim/clients/schema.py b/authentik/providers/scim/clients/schema.py
--- a/authentik/providers/scim/clients/schema.py
+++ b/authentik/providers/scim/clients/schema.py
@@ -13,14 +13,18 @@
class User(BaseUser):
"""Modified User schema with added externalId field"""
- schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:User",)
+ schemas: list[str] = [
+ "urn:ietf:params:scim:schemas:core:2.0:User",
+ ]
externalId: str | None = None
class Group(BaseGroup):
"""Modified Group schema with added externalId field"""
- schemas: tuple[str] = ("urn:ietf:params:scim:schemas:core:2.0:Group",)
+ schemas: list[str] = [
+ "urn:ietf:params:scim:schemas:core:2.0:Group",
+ ]
externalId: str | None = None
|
{"golden_diff": "diff --git a/authentik/providers/scim/clients/schema.py b/authentik/providers/scim/clients/schema.py\n--- a/authentik/providers/scim/clients/schema.py\n+++ b/authentik/providers/scim/clients/schema.py\n@@ -13,14 +13,18 @@\n class User(BaseUser):\n \"\"\"Modified User schema with added externalId field\"\"\"\n \n- schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:User\",)\n+ schemas: list[str] = [\n+ \"urn:ietf:params:scim:schemas:core:2.0:User\",\n+ ]\n externalId: str | None = None\n \n \n class Group(BaseGroup):\n \"\"\"Modified Group schema with added externalId field\"\"\"\n \n- schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:Group\",)\n+ schemas: list[str] = [\n+ \"urn:ietf:params:scim:schemas:core:2.0:Group\",\n+ ]\n externalId: str | None = None\n", "issue": "The SCIM JSON for User and Group schema attribute allows only one value\nThe SCIM JSON for User and Group schema attribute allows only one value. The schemas attribute check should only check for a minimum number of entries not for a maximum. \r\n\r\nFor example I would like to be able so support the following\r\n\r\n \"schemas\": [\r\n \"urn:ietf:params:scim:schemas:extension:servicenow:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:core:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\"\r\n ]\r\n\r\nTo Reproduce\r\nSteps to reproduce the behavior:\r\nCreate Property mapping, add\r\n\r\nreturn {\r\n \"schemas\" : [\r\n \"urn:ietf:params:scim:schemas:core:2.0:User\",\r\n \"urn:ietf:params:scim:schemas:extention:enterprise:2.0:User\"\r\n ],\r\n \"userName\": request.user.username,\r\n \"name\": {\r\n \"formatted\": request.user.name,\r\n \"givenName\": givenName,\r\n \"familyName\": familyName,\r\n },\r\n\r\nExpected behavior\r\nJSON containing schemas attribute \r\n\r\nInstead I get \r\nStopping sync due to error: Error 1 validation error for User schemas Tuple should have at most 1 item after validation, not 2 [type=too_long, input_value=['urn:ietf:params:scim:sc...on:enterprise:2.0:User'], input_type=list] For further information visit https://errors.pydantic.dev/2.4/v/too_long\r\n\r\nVersion and Deployment (please complete the following information):\r\n\r\nauthentik version: [2023.10.2]\r\nDeployment: [docker-compose]\n", "before_files": [{"content": "\"\"\"Custom SCIM schemas\"\"\"\n\nfrom pydanticscim.group import Group as BaseGroup\nfrom pydanticscim.responses import PatchRequest as BasePatchRequest\nfrom pydanticscim.responses import SCIMError as BaseSCIMError\nfrom pydanticscim.service_provider import Bulk, ChangePassword, Filter, Patch, Sort\nfrom pydanticscim.service_provider import (\n ServiceProviderConfiguration as BaseServiceProviderConfiguration,\n)\nfrom pydanticscim.user import User as BaseUser\n\n\nclass User(BaseUser):\n \"\"\"Modified User schema with added externalId field\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:User\",)\n externalId: str | None = None\n\n\nclass Group(BaseGroup):\n \"\"\"Modified Group schema with added externalId field\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:schemas:core:2.0:Group\",)\n externalId: str | None = None\n\n\nclass ServiceProviderConfiguration(BaseServiceProviderConfiguration):\n \"\"\"ServiceProviderConfig with fallback\"\"\"\n\n _is_fallback: bool | None = False\n\n @property\n def is_fallback(self) -> bool:\n \"\"\"Check if this service provider config was retrieved from the API endpoint\n or a fallback was used\"\"\"\n return self._is_fallback\n\n 
@staticmethod\n def default() -> \"ServiceProviderConfiguration\":\n \"\"\"Get default configuration, which doesn't support any optional features as fallback\"\"\"\n return ServiceProviderConfiguration(\n patch=Patch(supported=False),\n bulk=Bulk(supported=False),\n filter=Filter(supported=False),\n changePassword=ChangePassword(supported=False),\n sort=Sort(supported=False),\n authenticationSchemes=[],\n _is_fallback=True,\n )\n\n\nclass PatchRequest(BasePatchRequest):\n \"\"\"PatchRequest which correctly sets schemas\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:api:messages:2.0:PatchOp\",)\n\n\nclass SCIMError(BaseSCIMError):\n \"\"\"SCIM error with optional status code\"\"\"\n\n status: int | None\n", "path": "authentik/providers/scim/clients/schema.py"}], "after_files": [{"content": "\"\"\"Custom SCIM schemas\"\"\"\n\nfrom pydanticscim.group import Group as BaseGroup\nfrom pydanticscim.responses import PatchRequest as BasePatchRequest\nfrom pydanticscim.responses import SCIMError as BaseSCIMError\nfrom pydanticscim.service_provider import Bulk, ChangePassword, Filter, Patch, Sort\nfrom pydanticscim.service_provider import (\n ServiceProviderConfiguration as BaseServiceProviderConfiguration,\n)\nfrom pydanticscim.user import User as BaseUser\n\n\nclass User(BaseUser):\n \"\"\"Modified User schema with added externalId field\"\"\"\n\n schemas: list[str] = [\n \"urn:ietf:params:scim:schemas:core:2.0:User\",\n ]\n externalId: str | None = None\n\n\nclass Group(BaseGroup):\n \"\"\"Modified Group schema with added externalId field\"\"\"\n\n schemas: list[str] = [\n \"urn:ietf:params:scim:schemas:core:2.0:Group\",\n ]\n externalId: str | None = None\n\n\nclass ServiceProviderConfiguration(BaseServiceProviderConfiguration):\n \"\"\"ServiceProviderConfig with fallback\"\"\"\n\n _is_fallback: bool | None = False\n\n @property\n def is_fallback(self) -> bool:\n \"\"\"Check if this service provider config was retrieved from the API endpoint\n or a fallback was used\"\"\"\n return self._is_fallback\n\n @staticmethod\n def default() -> \"ServiceProviderConfiguration\":\n \"\"\"Get default configuration, which doesn't support any optional features as fallback\"\"\"\n return ServiceProviderConfiguration(\n patch=Patch(supported=False),\n bulk=Bulk(supported=False),\n filter=Filter(supported=False),\n changePassword=ChangePassword(supported=False),\n sort=Sort(supported=False),\n authenticationSchemes=[],\n _is_fallback=True,\n )\n\n\nclass PatchRequest(BasePatchRequest):\n \"\"\"PatchRequest which correctly sets schemas\"\"\"\n\n schemas: tuple[str] = (\"urn:ietf:params:scim:api:messages:2.0:PatchOp\",)\n\n\nclass SCIMError(BaseSCIMError):\n \"\"\"SCIM error with optional status code\"\"\"\n\n status: int | None\n", "path": "authentik/providers/scim/clients/schema.py"}]}
| 1,207 | 243 |
gh_patches_debug_7517
|
rasdani/github-patches
|
git_diff
|
ranaroussi__yfinance-1807
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImportError using yf.Ticker
Hello. Hope everyone is doing well. I am getting an error after upgrading yfinance today (11/28/23) with the following command: ```%pip install yfinance --upgrade --no-cache-dir --pre```.
I am using python version 3.10 and yfinance version 02.32
Error I am getting is as follows:
ImportError: cannot import name 'DO_NOT_CACHE' from 'requests_cache' (C:\Users\17147\anaconda3\envs\ib230729\Lib\site-packages\requests_cache\__init__.py)
Simple code that reproduces the problem:
session = requests_cache.CachedSession('yfinance.cache')
session.headers['User-agent'] = 'my-program/1.0'
msft = yf.Ticker('msft', session=session)
msft.actions
Traceback below:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
Cell In[2], line 3
1 session = requests_cache.CachedSession('yfinance.cache')
2 session.headers['User-agent'] = 'my-program/1.0'
----> 3 msft = yf.Ticker('msft', session=session)
File ~\anaconda3\envs\ib230729\Lib\site-packages\yfinance\ticker.py:34, in Ticker.__init__(self, ticker, session)
33 def __init__(self, ticker, session=None):
---> 34 super(Ticker, self).__init__(ticker, session=session)
35 self._expirations = {}
36 self._underlying = {}
File ~\anaconda3\envs\ib230729\Lib\site-packages\yfinance\base.py:71, in TickerBase.__init__(self, ticker, session)
68 if utils.is_isin(self.ticker):
69 self.ticker = utils.get_ticker_by_isin(self.ticker, None, session)
---> 71 self._data: YfData = YfData(session=session)
73 self._analysis = Analysis(self._data, ticker)
74 self._holders = Holders(self._data, ticker)
File ~\anaconda3\envs\ib230729\Lib\site-packages\yfinance\data.py:47, in SingletonMeta.__call__(cls, *args, **kwargs)
45 with cls._lock:
46 if cls not in cls._instances:
---> 47 instance = super().__call__(*args, **kwargs)
48 cls._instances[cls] = instance
49 else:
File ~\anaconda3\envs\ib230729\Lib\site-packages\yfinance\data.py:75, in YfData.__init__(self, session)
70 else:
71 # Is caching. This is annoying.
72 # Can't simply use a non-caching session to fetch cookie & crumb,
73 # because then the caching-session won't have cookie.
74 self._session_is_caching = True
---> 75 from requests_cache import DO_NOT_CACHE
76 self._expire_after = DO_NOT_CACHE
77 self._crumb = None
ImportError: cannot import name 'DO_NOT_CACHE' from 'requests_cache' (C:\Users\17147\anaconda3\envs\ib230729\Lib\site-packages\requests_cache\__init__.py)
--- END ISSUE ---
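The traceback comes down to the installed `requests_cache` predating the release that exports `DO_NOT_CACHE`, which the optional `nospam` extra was not forcing onto the environment. A quick diagnostic sketch, not yfinance's own code:

```python
import requests_cache

print(requests_cache.__version__)

try:
    from requests_cache import DO_NOT_CACHE  # noqa: F401
except ImportError:
    # Same failure as the traceback above: the installed release predates the
    # constant, so upgrading (pip install -U requests-cache) resolves it.
    print("requests_cache is too old for yfinance's caching-session handling")
else:
    print("requests_cache exports DO_NOT_CACHE; the cached session should work")
```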
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: UTF-8 -*-
3 #
4 # yfinance - market data downloader
5 # https://github.com/ranaroussi/yfinance
6
7 """yfinance - market data downloader"""
8
9 from setuptools import setup, find_packages
10 # from codecs import open
11 import io
12 from os import path
13
14 # --- get version ---
15 version = "unknown"
16 with open("yfinance/version.py") as f:
17 line = f.read().strip()
18 version = line.replace("version = ", "").replace('"', '')
19 # --- /get version ---
20
21
22 here = path.abspath(path.dirname(__file__))
23
24 # Get the long description from the README file
25 with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
26 long_description = f.read()
27
28 setup(
29 name='yfinance',
30 version=version,
31 description='Download market data from Yahoo! Finance API',
32 long_description=long_description,
33 long_description_content_type='text/markdown',
34 url='https://github.com/ranaroussi/yfinance',
35 author='Ran Aroussi',
36 author_email='[email protected]',
37 license='Apache',
38 classifiers=[
39 'License :: OSI Approved :: Apache Software License',
40 # 'Development Status :: 3 - Alpha',
41 'Development Status :: 4 - Beta',
42 # 'Development Status :: 5 - Production/Stable',
43
44
45 'Operating System :: OS Independent',
46 'Intended Audience :: Developers',
47 'Topic :: Office/Business :: Financial',
48 'Topic :: Office/Business :: Financial :: Investment',
49 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
50 'Topic :: Software Development :: Libraries',
51 'Topic :: Software Development :: Libraries :: Python Modules',
52
53 'Programming Language :: Python :: 3.6',
54 'Programming Language :: Python :: 3.7',
55 'Programming Language :: Python :: 3.8',
56 'Programming Language :: Python :: 3.9',
57 'Programming Language :: Python :: 3.10',
58 ],
59 platforms=['any'],
60 keywords='pandas, yahoo finance, pandas datareader',
61 packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),
62 install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',
63 'requests>=2.31', 'multitasking>=0.0.7',
64 'lxml>=4.9.1', 'appdirs>=1.4.4', 'pytz>=2022.5',
65 'frozendict>=2.3.4', 'peewee>=3.16.2',
66 'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],
67 extras_require={
68 'nospam': ['requests_cache>=1.1.1', 'requests_ratelimiter>=0.4.2'],
69 'repair': ['scipy>=1.6.3'],
70 },
71 # Note: Pandas.read_html() needs html5lib & beautifulsoup4
72 entry_points={
73 'console_scripts': [
74 'sample=sample:main',
75 ],
76 },
77 )
78
79 print("""
80 NOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.
81
82 You should refer to Yahoo!'s terms of use for details on your rights
83 to use the actual data downloaded.""")
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,7 +65,7 @@
'frozendict>=2.3.4', 'peewee>=3.16.2',
'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],
extras_require={
- 'nospam': ['requests_cache>=1.1.1', 'requests_ratelimiter>=0.4.2'],
+ 'nospam': ['requests_cache>=1.0', 'requests_ratelimiter>=0.3.1'],
'repair': ['scipy>=1.6.3'],
},
# Note: Pandas.read_html() needs html5lib & beautifulsoup4
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,7 @@\n 'frozendict>=2.3.4', 'peewee>=3.16.2',\n 'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],\n extras_require={\n- 'nospam': ['requests_cache>=1.1.1', 'requests_ratelimiter>=0.4.2'],\n+ 'nospam': ['requests_cache>=1.0', 'requests_ratelimiter>=0.3.1'],\n 'repair': ['scipy>=1.6.3'],\n },\n # Note: Pandas.read_html() needs html5lib & beautifulsoup4\n", "issue": "ImportError using yf.Ticker\nHello. Hope everyone is doing well. Getting an error after upgraded yfinance today (11/28/23) with the following code ```%pip install yfinance --upgrade --no-cache-dir --pre```. \r\n\r\nI am using python version 3.10 and yfinance version 02.32\r\n\r\nError I am getting is as follows:\r\n\r\nImportError: cannot import name 'DO_NOT_CACHE' from 'requests_cache' (C:\\Users\\17147\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\requests_cache\\__init__.py)\r\n\r\n\r\nSimple code that reproduces the problem:\r\nsession = requests_cache.CachedSession('yfinance.cache')\r\nsession.headers['User-agent'] = 'my-program/1.0'\r\nmsft = yf.Ticker('msft', session=session)\r\nmsft.actions\r\n\r\n\r\nTraceback below:\r\n---------------------------------------------------------------------------\r\nImportError Traceback (most recent call last)\r\nCell In[2], line 3\r\n 1 session = requests_cache.CachedSession('yfinance.cache')\r\n 2 session.headers['User-agent'] = 'my-program/1.0'\r\n----> 3 msft = yf.Ticker('msft', session=session)\r\n\r\nFile ~\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\yfinance\\ticker.py:34, in Ticker.__init__(self, ticker, session)\r\n 33 def __init__(self, ticker, session=None):\r\n---> 34 super(Ticker, self).__init__(ticker, session=session)\r\n 35 self._expirations = {}\r\n 36 self._underlying = {}\r\n\r\nFile ~\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\yfinance\\base.py:71, in TickerBase.__init__(self, ticker, session)\r\n 68 if utils.is_isin(self.ticker):\r\n 69 self.ticker = utils.get_ticker_by_isin(self.ticker, None, session)\r\n---> 71 self._data: YfData = YfData(session=session)\r\n 73 self._analysis = Analysis(self._data, ticker)\r\n 74 self._holders = Holders(self._data, ticker)\r\n\r\nFile ~\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\yfinance\\data.py:47, in SingletonMeta.__call__(cls, *args, **kwargs)\r\n 45 with cls._lock:\r\n 46 if cls not in cls._instances:\r\n---> 47 instance = super().__call__(*args, **kwargs)\r\n 48 cls._instances[cls] = instance\r\n 49 else:\r\n\r\nFile ~\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\yfinance\\data.py:75, in YfData.__init__(self, session)\r\n 70 else:\r\n 71 # Is caching. This is annoying. 
\r\n 72 # Can't simply use a non-caching session to fetch cookie & crumb, \r\n 73 # because then the caching-session won't have cookie.\r\n 74 self._session_is_caching = True\r\n---> 75 from requests_cache import DO_NOT_CACHE\r\n 76 self._expire_after = DO_NOT_CACHE\r\n 77 self._crumb = None\r\n\r\nImportError: cannot import name 'DO_NOT_CACHE' from 'requests_cache' (C:\\Users\\17147\\anaconda3\\envs\\ib230729\\Lib\\site-packages\\requests_cache\\__init__.py)\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n\n\"\"\"yfinance - market data downloader\"\"\"\n\nfrom setuptools import setup, find_packages\n# from codecs import open\nimport io\nfrom os import path\n\n# --- get version ---\nversion = \"unknown\"\nwith open(\"yfinance/version.py\") as f:\n line = f.read().strip()\n version = line.replace(\"version = \", \"\").replace('\"', '')\n# --- /get version ---\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith io.open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='yfinance',\n version=version,\n description='Download market data from Yahoo! Finance API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/ranaroussi/yfinance',\n author='Ran Aroussi',\n author_email='[email protected]',\n license='Apache',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n # 'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n # 'Development Status :: 5 - Production/Stable',\n\n\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n platforms=['any'],\n keywords='pandas, yahoo finance, pandas datareader',\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.31', 'multitasking>=0.0.7',\n 'lxml>=4.9.1', 'appdirs>=1.4.4', 'pytz>=2022.5',\n 'frozendict>=2.3.4', 'peewee>=3.16.2',\n 'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],\n extras_require={\n 'nospam': ['requests_cache>=1.1.1', 'requests_ratelimiter>=0.4.2'],\n 'repair': ['scipy>=1.6.3'],\n },\n # Note: Pandas.read_html() needs html5lib & beautifulsoup4\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n ],\n },\n)\n\nprint(\"\"\"\nNOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.\n\nYou should refer to Yahoo!'s terms of use for details on your rights\nto use the actual data downloaded.\"\"\")\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n\n\"\"\"yfinance - market data downloader\"\"\"\n\nfrom setuptools import setup, find_packages\n# from codecs import open\nimport io\nfrom os import path\n\n# --- get version ---\nversion = \"unknown\"\nwith 
open(\"yfinance/version.py\") as f:\n line = f.read().strip()\n version = line.replace(\"version = \", \"\").replace('\"', '')\n# --- /get version ---\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith io.open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='yfinance',\n version=version,\n description='Download market data from Yahoo! Finance API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/ranaroussi/yfinance',\n author='Ran Aroussi',\n author_email='[email protected]',\n license='Apache',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n # 'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n # 'Development Status :: 5 - Production/Stable',\n\n\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n platforms=['any'],\n keywords='pandas, yahoo finance, pandas datareader',\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.31', 'multitasking>=0.0.7',\n 'lxml>=4.9.1', 'appdirs>=1.4.4', 'pytz>=2022.5',\n 'frozendict>=2.3.4', 'peewee>=3.16.2',\n 'beautifulsoup4>=4.11.1', 'html5lib>=1.1'],\n extras_require={\n 'nospam': ['requests_cache>=1.0', 'requests_ratelimiter>=0.3.1'],\n 'repair': ['scipy>=1.6.3'],\n },\n # Note: Pandas.read_html() needs html5lib & beautifulsoup4\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n ],\n },\n)\n\nprint(\"\"\"\nNOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.\n\nYou should refer to Yahoo!'s terms of use for details on your rights\nto use the actual data downloaded.\"\"\")\n", "path": "setup.py"}]}
| 1,961 | 173 |
gh_patches_debug_6569
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-bolts-230
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CPCv2-Resnet18 pretrained not available
Hi,
When I try to load a pretrained CPCv2-resnet18 model, I get an `urllib.error.HTTPError: HTTP Error 403: Forbidden` error. The code I use to load the pretrained model is
```
model = CPCV2(encoder='resnet18', pretrained=True)
x = torch.rand(1, 3, 224, 224)
feats = model.encoder(x)
```
The following URL doesn't seem to open on my browser (Chrome) too.
https://github.com/PyTorchLightning/pytorch-lightning-bolts/blob/32fb560a429532dfb40a5935ca7674990dae1f66/pl_bolts/utils/pretrained_weights.py#L6
Any help is much appreciated.
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pl_bolts/utils/pretrained_weights.py`
Content:
```
1
2
3 vae_imagenet2012 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
4 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'
5
6 cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
7 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'
8 urls = {
9 'vae-imagenet2012': vae_imagenet2012,
10 'CPCV2-resnet18': cpcv2_resnet18
11 }
12
13
14 def load_pretrained(model, class_name=None): # pragma: no-cover
15 if class_name is None:
16 class_name = model.__class__.__name__
17 ckpt_url = urls[class_name]
18 weights_model = model.__class__.load_from_checkpoint(ckpt_url)
19 model.load_state_dict(weights_model.state_dict())
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pl_bolts/utils/pretrained_weights.py b/pl_bolts/utils/pretrained_weights.py
--- a/pl_bolts/utils/pretrained_weights.py
+++ b/pl_bolts/utils/pretrained_weights.py
@@ -4,7 +4,7 @@
'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'
cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \
- 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'
+ 'cpc/resnet18-v6/epoch%3D85.ckpt'
urls = {
'vae-imagenet2012': vae_imagenet2012,
'CPCV2-resnet18': cpcv2_resnet18
|
{"golden_diff": "diff --git a/pl_bolts/utils/pretrained_weights.py b/pl_bolts/utils/pretrained_weights.py\n--- a/pl_bolts/utils/pretrained_weights.py\n+++ b/pl_bolts/utils/pretrained_weights.py\n@@ -4,7 +4,7 @@\n 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'\n \n cpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n- 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'\n+ 'cpc/resnet18-v6/epoch%3D85.ckpt'\n urls = {\n 'vae-imagenet2012': vae_imagenet2012,\n 'CPCV2-resnet18': cpcv2_resnet18\n", "issue": "CPCv2-Resnet18 pretrained not available\nHi, \r\nWhen I try to load a pretrained CPCv2-resnet18 model, I get an `urllib.error.HTTPError: HTTP Error 403: Forbidden` error. The code I use to load the pretrained model is \r\n```\r\nmodel = CPCV2(encoder='resnet18', pretrained=True)\r\nx = torch.rand(1, 3, 224, 224)\r\nfeats = model.encoder(x)\r\n```\r\nThe following URL doesn't seem to open on my browser (Chrome) too. \r\nhttps://github.com/PyTorchLightning/pytorch-lightning-bolts/blob/32fb560a429532dfb40a5935ca7674990dae1f66/pl_bolts/utils/pretrained_weights.py#L6\r\n\r\nAny help is much appreciated.\r\nThanks! \r\n\n", "before_files": [{"content": "\n\nvae_imagenet2012 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'\n\ncpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'cpc/resnet18_version_6/checkpoints/epoch%3D85.ckpt'\nurls = {\n 'vae-imagenet2012': vae_imagenet2012,\n 'CPCV2-resnet18': cpcv2_resnet18\n}\n\n\ndef load_pretrained(model, class_name=None): # pragma: no-cover\n if class_name is None:\n class_name = model.__class__.__name__\n ckpt_url = urls[class_name]\n weights_model = model.__class__.load_from_checkpoint(ckpt_url)\n model.load_state_dict(weights_model.state_dict())\n", "path": "pl_bolts/utils/pretrained_weights.py"}], "after_files": [{"content": "\n\nvae_imagenet2012 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'vae/imagenet_06_22_2019/checkpoints/epoch%3D63.ckpt'\n\ncpcv2_resnet18 = 'https://pl-bolts-weights.s3.us-east-2.amazonaws.com/' \\\n 'cpc/resnet18-v6/epoch%3D85.ckpt'\nurls = {\n 'vae-imagenet2012': vae_imagenet2012,\n 'CPCV2-resnet18': cpcv2_resnet18\n}\n\n\ndef load_pretrained(model, class_name=None): # pragma: no-cover\n if class_name is None:\n class_name = model.__class__.__name__\n ckpt_url = urls[class_name]\n weights_model = model.__class__.load_from_checkpoint(ckpt_url)\n model.load_state_dict(weights_model.state_dict())\n", "path": "pl_bolts/utils/pretrained_weights.py"}]}
| 726 | 205 |
gh_patches_debug_20599
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-1650
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when serving images through the URL generator
I posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.
Hi guys, I think I'm having the same problem but when serving images using the URL generator. It does work if I'm logged-in in the site (cache not working) but doesn't when I'm not (cache full on).
Cheers,
Jordi
Internal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/
Traceback (most recent call last):
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 204, in get_response
response = middleware_method(request, response)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py", line 121, in process_response
self.cache.set(cache_key, response, timeout)
File "/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py", line 239, in set
result = self._set(key, pickle.dumps(value), timeout, client, _add_only)
File "/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py", line 70, in _reduce_ex
raise TypeError, "can't pickle %s objects" % base.__name__
TypeError: can't pickle instancemethod objects
Request repr():
<WSGIRequest
path:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,
GET:<QueryDict: {}>,
POST:<QueryDict: {}>,
COOKIES:{'_ga': 'GA1.3.1219121887.1434427204',
'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},
META:{'CONTENT_LENGTH': '',
'CONTENT_TYPE': '',
u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'DOCUMENT_ROOT': '/usr/share/nginx/html',
'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',
'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',
'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',
'HTTP_CACHE_CONTROL': 'max-age=0',
'HTTP_CONNECTION': 'keep-alive',
'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',
'HTTP_HOST': 'www.buildability.co.nz',
'HTTP_UPGRADE_INSECURE_REQUESTS': '1',
'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',
'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',
'QUERY_STRING': '',
'REMOTE_ADDR': '131.203.137.142',
'REMOTE_PORT': '51455',
'REQUEST_METHOD': 'GET',
'REQUEST_URI': '/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',
u'SCRIPT_NAME': u'',
'SERVER_NAME': 'www.buildability.co.nz',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'UWSGI_SCHEME': 'http',
'uwsgi.core': 7,
'uwsgi.node': 'avinton',
'uwsgi.version': '1.9.17.1-debian',
'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,
'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,
'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,
'wsgi.multiprocess': True,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': 'http',
'wsgi.version': (1, 0)}>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailimages/views/frontend.py`
Content:
```
1 from wsgiref.util import FileWrapper
2 import imghdr
3
4 from django.shortcuts import get_object_or_404
5 from django.http import HttpResponse
6 from django.core.exceptions import PermissionDenied
7
8 from wagtail.wagtailimages.models import get_image_model
9 from wagtail.wagtailimages.utils import verify_signature
10 from wagtail.wagtailimages.exceptions import InvalidFilterSpecError
11
12
13 def serve(request, signature, image_id, filter_spec):
14 image = get_object_or_404(get_image_model(), id=image_id)
15
16 if not verify_signature(signature.encode(), image_id, filter_spec):
17 raise PermissionDenied
18
19 try:
20 rendition = image.get_rendition(filter_spec)
21 rendition.file.open('rb')
22 image_format = imghdr.what(rendition.file)
23 return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
24 except InvalidFilterSpecError:
25 return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py
--- a/wagtail/wagtailimages/views/frontend.py
+++ b/wagtail/wagtailimages/views/frontend.py
@@ -2,7 +2,7 @@
import imghdr
from django.shortcuts import get_object_or_404
-from django.http import HttpResponse
+from django.http import HttpResponse, StreamingHttpResponse
from django.core.exceptions import PermissionDenied
from wagtail.wagtailimages.models import get_image_model
@@ -20,6 +20,6 @@
rendition = image.get_rendition(filter_spec)
rendition.file.open('rb')
image_format = imghdr.what(rendition.file)
- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
|
{"golden_diff": "diff --git a/wagtail/wagtailimages/views/frontend.py b/wagtail/wagtailimages/views/frontend.py\n--- a/wagtail/wagtailimages/views/frontend.py\n+++ b/wagtail/wagtailimages/views/frontend.py\n@@ -2,7 +2,7 @@\n import imghdr\n \n from django.shortcuts import get_object_or_404\n-from django.http import HttpResponse\n+from django.http import HttpResponse, StreamingHttpResponse\n from django.core.exceptions import PermissionDenied\n \n from wagtail.wagtailimages.models import get_image_model\n@@ -20,6 +20,6 @@\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n- return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n+ return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "issue": "Error when serving images through the URL generator\nI posted a comment on https://github.com/torchbox/wagtail/issues/983 but probably better to open a new issue. Looks like the same problem to me though.\n\nHi guys, I think I'm having the same problem but when serving images using the URL generator. It does work if I'm logged-in in the site (cache not working) but doesn't when I'm not (cache full on).\n\nCheers,\nJordi\n\nInternal Server Error: /images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/\nTraceback (most recent call last):\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/core/handlers/base.py\", line 204, in get_response\n response = middleware_method(request, response)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/django/middleware/cache.py\", line 121, in process_response\n self.cache.set(cache_key, response, timeout)\n File \"/var/www/buildability/venvs/buildability.co.nz/local/lib/python2.7/site-packages/redis_cache/cache.py\", line 239, in set\n result = self._set(key, pickle.dumps(value), timeout, client, _add_only)\n File \"/var/www/buildability/venvs/buildability.co.nz/lib/python2.7/copy_reg.py\", line 70, in _reduce_ex\n raise TypeError, \"can't pickle %s objects\" % base.__name__\nTypeError: can't pickle instancemethod objects\n\nRequest repr(): \n<WSGIRequest\npath:/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/,\nGET:<QueryDict: {}>,\nPOST:<QueryDict: {}>,\nCOOKIES:{'_ga': 'GA1.3.1219121887.1434427204',\n 'csrftoken': 'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs'},\nMETA:{'CONTENT_LENGTH': '',\n 'CONTENT_TYPE': '',\n u'CSRF_COOKIE': u'GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'DOCUMENT_ROOT': '/usr/share/nginx/html',\n 'HTTP_ACCEPT': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,_/_;q=0.8',\n 'HTTP_ACCEPT_ENCODING': 'gzip, deflate, sdch',\n 'HTTP_ACCEPT_LANGUAGE': 'en-US,en;q=0.8',\n 'HTTP_CACHE_CONTROL': 'max-age=0',\n 'HTTP_CONNECTION': 'keep-alive',\n 'HTTP_COOKIE': '_ga=GA1.3.1219121887.1434427204; csrftoken=GNhfTEGBu40y8wRAFPa15lQTV66F9WCs',\n 'HTTP_HOST': 'www.buildability.co.nz',\n 'HTTP_UPGRADE_INSECURE_REQUESTS': '1',\n 'HTTP_USER_AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',\n 'PATH_INFO': u'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw=/47/width-75/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '131.203.137.142',\n 'REMOTE_PORT': '51455',\n 'REQUEST_METHOD': 'GET',\n 'REQUEST_URI': 
'/images/2dMQIUOPwS5DlZuprp_E_WFdfhw%3D/47/width-75/',\n u'SCRIPT_NAME': u'',\n 'SERVER_NAME': 'www.buildability.co.nz',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'UWSGI_SCHEME': 'http',\n 'uwsgi.core': 7,\n 'uwsgi.node': 'avinton',\n 'uwsgi.version': '1.9.17.1-debian',\n 'wsgi.errors': <open file 'wsgi_errors', mode 'w' at 0x7f0548a548a0>,\n 'wsgi.file_wrapper': <built-in function uwsgi_sendfile>,\n 'wsgi.input': <uwsgi._Input object at 0x7f0548a20a08>,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': True,\n 'wsgi.run_once': False,\n 'wsgi.url_scheme': 'http',\n 'wsgi.version': (1, 0)}>\n\n", "before_files": [{"content": "from wsgiref.util import FileWrapper\nimport imghdr\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.core.exceptions import PermissionDenied\n\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtailimages.utils import verify_signature\nfrom wagtail.wagtailimages.exceptions import InvalidFilterSpecError\n\n\ndef serve(request, signature, image_id, filter_spec):\n image = get_object_or_404(get_image_model(), id=image_id)\n\n if not verify_signature(signature.encode(), image_id, filter_spec):\n raise PermissionDenied\n\n try:\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n return HttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "path": "wagtail/wagtailimages/views/frontend.py"}], "after_files": [{"content": "from wsgiref.util import FileWrapper\nimport imghdr\n\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, StreamingHttpResponse\nfrom django.core.exceptions import PermissionDenied\n\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtailimages.utils import verify_signature\nfrom wagtail.wagtailimages.exceptions import InvalidFilterSpecError\n\n\ndef serve(request, signature, image_id, filter_spec):\n image = get_object_or_404(get_image_model(), id=image_id)\n\n if not verify_signature(signature.encode(), image_id, filter_spec):\n raise PermissionDenied\n\n try:\n rendition = image.get_rendition(filter_spec)\n rendition.file.open('rb')\n image_format = imghdr.what(rendition.file)\n return StreamingHttpResponse(FileWrapper(rendition.file), content_type='image/' + image_format)\n except InvalidFilterSpecError:\n return HttpResponse(\"Invalid filter spec: \" + filter_spec, content_type='text/plain', status=400)\n", "path": "wagtail/wagtailimages/views/frontend.py"}]}
| 1,617 | 232 |
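The patch above resolves the error because Django's caching middleware pickles a complete response before writing it to the cache backend, but skips streaming responses altogether, so the unpicklable `FileWrapper` wrapped by the new `StreamingHttpResponse` is never handed to Redis. A minimal sketch of that guard (the `should_cache` helper is hypothetical and only mimics the middleware's check; it is not code from the record):

```python
# Sketch of the check Django's cache middleware applies before caching:
# streaming responses are skipped, so their file wrappers are never pickled.
from django.conf import settings

if not settings.configured:
    settings.configure()  # just enough setup to build responses outside a project

from django.http import HttpResponse, StreamingHttpResponse


def should_cache(response):
    # Only plain, successful responses are pickled into the cache.
    return not getattr(response, "streaming", False) and response.status_code in (200, 304)


plain = HttpResponse(b"png bytes", content_type="image/png")
streamed = StreamingHttpResponse(iter([b"png bytes"]), content_type="image/png")

assert should_cache(plain)         # cacheable -> would be pickled by the middleware
assert not should_cache(streamed)  # skipped -> no pickling, no TypeError
```

The trade-off is that a streamed rendition is re-read from storage on every request unless something in front of Django (for example a reverse proxy) caches it instead.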
gh_patches_debug_13497
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-8493
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import error
ray 0.8.5,when I try to use ray, it occurs Ray must be imported before pickle5 because Ray requires a specific version of pickle5 (which is packaged along with Ray.
I want to know it must import pickle5 before import ray, Right?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/__init__.py`
Content:
```
1 import os
2 import logging
3 from os.path import dirname
4 import sys
5
6 logger = logging.getLogger(__name__)
7
8 # MUST add pickle5 to the import path because it will be imported by some
9 # raylet modules.
10
11 if "pickle5" in sys.modules:
12 raise ImportError("Ray must be imported before pickle5 because Ray "
13 "requires a specific version of pickle5 (which is "
14 "packaged along with Ray).")
15
16 if "OMP_NUM_THREADS" not in os.environ:
17 logger.debug("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
18 "degradation with many workers (issue #6998). You can "
19 "override this by explicitly setting OMP_NUM_THREADS.")
20 os.environ["OMP_NUM_THREADS"] = "1"
21
22 # Add the directory containing pickle5 to the Python path so that we find the
23 # pickle5 version packaged with ray and not a pre-existing pickle5.
24 pickle5_path = os.path.join(
25 os.path.abspath(os.path.dirname(__file__)), "pickle5_files")
26 sys.path.insert(0, pickle5_path)
27
28 # Importing psutil & setproctitle. Must be before ray._raylet is initialized.
29 thirdparty_files = os.path.join(
30 os.path.abspath(os.path.dirname(__file__)), "thirdparty_files")
31 sys.path.insert(0, thirdparty_files)
32
33 if sys.platform == "win32":
34 import ray.compat # noqa: E402
35 ray.compat.patch_redis_empty_recv()
36
37 # Expose ray ABI symbols which may be dependent by other shared
38 # libraries such as _streaming.so. See BUILD.bazel:_raylet
39 python_shared_lib_suffix = ".so" if sys.platform != "win32" else ".pyd"
40 so_path = os.path.join(dirname(__file__), "_raylet" + python_shared_lib_suffix)
41 if os.path.exists(so_path):
42 import ctypes
43 from ctypes import CDLL
44 CDLL(so_path, ctypes.RTLD_GLOBAL)
45
46 import ray._raylet # noqa: E402
47
48 from ray._raylet import (
49 ActorCheckpointID,
50 ActorClassID,
51 ActorID,
52 ClientID,
53 Config as _Config,
54 JobID,
55 WorkerID,
56 FunctionID,
57 ObjectID,
58 TaskID,
59 UniqueID,
60 Language,
61 ) # noqa: E402
62
63 _config = _Config()
64
65 from ray.profiling import profile # noqa: E402
66 from ray.state import (jobs, nodes, actors, objects, timeline,
67 object_transfer_timeline, cluster_resources,
68 available_resources, errors) # noqa: E402
69 from ray.worker import (
70 LOCAL_MODE,
71 SCRIPT_MODE,
72 WORKER_MODE,
73 cancel,
74 connect,
75 disconnect,
76 get,
77 get_gpu_ids,
78 get_resource_ids,
79 get_webui_url,
80 init,
81 is_initialized,
82 put,
83 kill,
84 register_custom_serializer,
85 remote,
86 shutdown,
87 show_in_webui,
88 wait,
89 ) # noqa: E402
90 import ray.internal # noqa: E402
91 import ray.projects # noqa: E402
92 # We import ray.actor because some code is run in actor.py which initializes
93 # some functions in the worker.
94 import ray.actor # noqa: F401
95 from ray.actor import method # noqa: E402
96 from ray.cross_language import java_function, java_actor_class # noqa: E402
97 from ray import util # noqa: E402
98
99 # Replaced with the current commit when building the wheels.
100 __commit__ = "{{RAY_COMMIT_SHA}}"
101 __version__ = "0.9.0.dev0"
102
103 __all__ = [
104 "jobs",
105 "nodes",
106 "actors",
107 "objects",
108 "timeline",
109 "object_transfer_timeline",
110 "cluster_resources",
111 "available_resources",
112 "errors",
113 "LOCAL_MODE",
114 "PYTHON_MODE",
115 "SCRIPT_MODE",
116 "WORKER_MODE",
117 "__version__",
118 "_config",
119 "_get_runtime_context",
120 "actor",
121 "cancel",
122 "connect",
123 "disconnect",
124 "get",
125 "get_gpu_ids",
126 "get_resource_ids",
127 "get_webui_url",
128 "init",
129 "internal",
130 "is_initialized",
131 "method",
132 "profile",
133 "projects",
134 "put",
135 "kill",
136 "register_custom_serializer",
137 "remote",
138 "shutdown",
139 "show_in_webui",
140 "wait",
141 "Language",
142 "java_function",
143 "java_actor_class",
144 "util",
145 ]
146
147 # ID types
148 __all__ += [
149 "ActorCheckpointID",
150 "ActorClassID",
151 "ActorID",
152 "ClientID",
153 "JobID",
154 "WorkerID",
155 "FunctionID",
156 "ObjectID",
157 "TaskID",
158 "UniqueID",
159 ]
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -9,9 +9,13 @@
# raylet modules.
if "pickle5" in sys.modules:
- raise ImportError("Ray must be imported before pickle5 because Ray "
- "requires a specific version of pickle5 (which is "
- "packaged along with Ray).")
+ import pkg_resources
+ version_info = pkg_resources.require("pickle5")
+ version = tuple(int(n) for n in version_info[0].version.split("."))
+ if version < (0, 0, 10):
+ raise ImportError("You are using an old version of pickle5 that "
+ "leaks memory, please run 'pip install pickle5 -U' "
+ "to upgrade")
if "OMP_NUM_THREADS" not in os.environ:
logger.debug("[ray] Forcing OMP_NUM_THREADS=1 to avoid performance "
|
{"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -9,9 +9,13 @@\n # raylet modules.\n \n if \"pickle5\" in sys.modules:\n- raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n- \"requires a specific version of pickle5 (which is \"\n- \"packaged along with Ray).\")\n+ import pkg_resources\n+ version_info = pkg_resources.require(\"pickle5\")\n+ version = tuple(int(n) for n in version_info[0].version.split(\".\"))\n+ if version < (0, 0, 10):\n+ raise ImportError(\"You are using an old version of pickle5 that \"\n+ \"leaks memory, please run 'pip install pickle5 -U' \"\n+ \"to upgrade\")\n \n if \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n", "issue": "import error\nray 0.8.5,when I try to use ray\uff0c it occurs Ray must be imported before pickle5 because Ray requires a specific version of pickle5 (which is packaged along with Ray.\r\n\r\nI want to know it must import pickle5 before import ray, Right?\n", "before_files": [{"content": "import os\nimport logging\nfrom os.path import dirname\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n# MUST add pickle5 to the import path because it will be imported by some\n# raylet modules.\n\nif \"pickle5\" in sys.modules:\n raise ImportError(\"Ray must be imported before pickle5 because Ray \"\n \"requires a specific version of pickle5 (which is \"\n \"packaged along with Ray).\")\n\nif \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). You can \"\n \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Add the directory containing pickle5 to the Python path so that we find the\n# pickle5 version packaged with ray and not a pre-existing pickle5.\npickle5_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pickle5_files\")\nsys.path.insert(0, pickle5_path)\n\n# Importing psutil & setproctitle. Must be before ray._raylet is initialized.\nthirdparty_files = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"thirdparty_files\")\nsys.path.insert(0, thirdparty_files)\n\nif sys.platform == \"win32\":\n import ray.compat # noqa: E402\n ray.compat.patch_redis_empty_recv()\n\n# Expose ray ABI symbols which may be dependent by other shared\n# libraries such as _streaming.so. 
See BUILD.bazel:_raylet\npython_shared_lib_suffix = \".so\" if sys.platform != \"win32\" else \".pyd\"\nso_path = os.path.join(dirname(__file__), \"_raylet\" + python_shared_lib_suffix)\nif os.path.exists(so_path):\n import ctypes\n from ctypes import CDLL\n CDLL(so_path, ctypes.RTLD_GLOBAL)\n\nimport ray._raylet # noqa: E402\n\nfrom ray._raylet import (\n ActorCheckpointID,\n ActorClassID,\n ActorID,\n ClientID,\n Config as _Config,\n JobID,\n WorkerID,\n FunctionID,\n ObjectID,\n TaskID,\n UniqueID,\n Language,\n) # noqa: E402\n\n_config = _Config()\n\nfrom ray.profiling import profile # noqa: E402\nfrom ray.state import (jobs, nodes, actors, objects, timeline,\n object_transfer_timeline, cluster_resources,\n available_resources, errors) # noqa: E402\nfrom ray.worker import (\n LOCAL_MODE,\n SCRIPT_MODE,\n WORKER_MODE,\n cancel,\n connect,\n disconnect,\n get,\n get_gpu_ids,\n get_resource_ids,\n get_webui_url,\n init,\n is_initialized,\n put,\n kill,\n register_custom_serializer,\n remote,\n shutdown,\n show_in_webui,\n wait,\n) # noqa: E402\nimport ray.internal # noqa: E402\nimport ray.projects # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\nfrom ray.cross_language import java_function, java_actor_class # noqa: E402\nfrom ray import util # noqa: E402\n\n# Replaced with the current commit when building the wheels.\n__commit__ = \"{{RAY_COMMIT_SHA}}\"\n__version__ = \"0.9.0.dev0\"\n\n__all__ = [\n \"jobs\",\n \"nodes\",\n \"actors\",\n \"objects\",\n \"timeline\",\n \"object_transfer_timeline\",\n \"cluster_resources\",\n \"available_resources\",\n \"errors\",\n \"LOCAL_MODE\",\n \"PYTHON_MODE\",\n \"SCRIPT_MODE\",\n \"WORKER_MODE\",\n \"__version__\",\n \"_config\",\n \"_get_runtime_context\",\n \"actor\",\n \"cancel\",\n \"connect\",\n \"disconnect\",\n \"get\",\n \"get_gpu_ids\",\n \"get_resource_ids\",\n \"get_webui_url\",\n \"init\",\n \"internal\",\n \"is_initialized\",\n \"method\",\n \"profile\",\n \"projects\",\n \"put\",\n \"kill\",\n \"register_custom_serializer\",\n \"remote\",\n \"shutdown\",\n \"show_in_webui\",\n \"wait\",\n \"Language\",\n \"java_function\",\n \"java_actor_class\",\n \"util\",\n]\n\n# ID types\n__all__ += [\n \"ActorCheckpointID\",\n \"ActorClassID\",\n \"ActorID\",\n \"ClientID\",\n \"JobID\",\n \"WorkerID\",\n \"FunctionID\",\n \"ObjectID\",\n \"TaskID\",\n \"UniqueID\",\n]\n", "path": "python/ray/__init__.py"}], "after_files": [{"content": "import os\nimport logging\nfrom os.path import dirname\nimport sys\n\nlogger = logging.getLogger(__name__)\n\n# MUST add pickle5 to the import path because it will be imported by some\n# raylet modules.\n\nif \"pickle5\" in sys.modules:\n import pkg_resources\n version_info = pkg_resources.require(\"pickle5\")\n version = tuple(int(n) for n in version_info[0].version.split(\".\"))\n if version < (0, 0, 10):\n raise ImportError(\"You are using an old version of pickle5 that \"\n \"leaks memory, please run 'pip install pickle5 -U' \"\n \"to upgrade\")\n\nif \"OMP_NUM_THREADS\" not in os.environ:\n logger.debug(\"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). 
You can \"\n \"override this by explicitly setting OMP_NUM_THREADS.\")\n os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n# Add the directory containing pickle5 to the Python path so that we find the\n# pickle5 version packaged with ray and not a pre-existing pickle5.\npickle5_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pickle5_files\")\nsys.path.insert(0, pickle5_path)\n\n# Importing psutil & setproctitle. Must be before ray._raylet is initialized.\nthirdparty_files = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"thirdparty_files\")\nsys.path.insert(0, thirdparty_files)\n\nif sys.platform == \"win32\":\n import ray.compat # noqa: E402\n ray.compat.patch_redis_empty_recv()\n\n# Expose ray ABI symbols which may be dependent by other shared\n# libraries such as _streaming.so. See BUILD.bazel:_raylet\npython_shared_lib_suffix = \".so\" if sys.platform != \"win32\" else \".pyd\"\nso_path = os.path.join(dirname(__file__), \"_raylet\" + python_shared_lib_suffix)\nif os.path.exists(so_path):\n import ctypes\n from ctypes import CDLL\n CDLL(so_path, ctypes.RTLD_GLOBAL)\n\nimport ray._raylet # noqa: E402\n\nfrom ray._raylet import (\n ActorCheckpointID,\n ActorClassID,\n ActorID,\n ClientID,\n Config as _Config,\n JobID,\n WorkerID,\n FunctionID,\n ObjectID,\n TaskID,\n UniqueID,\n Language,\n) # noqa: E402\n\n_config = _Config()\n\nfrom ray.profiling import profile # noqa: E402\nfrom ray.state import (jobs, nodes, actors, objects, timeline,\n object_transfer_timeline, cluster_resources,\n available_resources, errors) # noqa: E402\nfrom ray.worker import (\n LOCAL_MODE,\n SCRIPT_MODE,\n WORKER_MODE,\n cancel,\n connect,\n disconnect,\n get,\n get_gpu_ids,\n get_resource_ids,\n get_webui_url,\n init,\n is_initialized,\n put,\n kill,\n register_custom_serializer,\n remote,\n shutdown,\n show_in_webui,\n wait,\n) # noqa: E402\nimport ray.internal # noqa: E402\nimport ray.projects # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\nfrom ray.cross_language import java_function, java_actor_class # noqa: E402\nfrom ray import util # noqa: E402\n\n# Replaced with the current commit when building the wheels.\n__commit__ = \"{{RAY_COMMIT_SHA}}\"\n__version__ = \"0.9.0.dev0\"\n\n__all__ = [\n \"jobs\",\n \"nodes\",\n \"actors\",\n \"objects\",\n \"timeline\",\n \"object_transfer_timeline\",\n \"cluster_resources\",\n \"available_resources\",\n \"errors\",\n \"LOCAL_MODE\",\n \"PYTHON_MODE\",\n \"SCRIPT_MODE\",\n \"WORKER_MODE\",\n \"__version__\",\n \"_config\",\n \"_get_runtime_context\",\n \"actor\",\n \"cancel\",\n \"connect\",\n \"disconnect\",\n \"get\",\n \"get_gpu_ids\",\n \"get_resource_ids\",\n \"get_webui_url\",\n \"init\",\n \"internal\",\n \"is_initialized\",\n \"method\",\n \"profile\",\n \"projects\",\n \"put\",\n \"kill\",\n \"register_custom_serializer\",\n \"remote\",\n \"shutdown\",\n \"show_in_webui\",\n \"wait\",\n \"Language\",\n \"java_function\",\n \"java_actor_class\",\n \"util\",\n]\n\n# ID types\n__all__ += [\n \"ActorCheckpointID\",\n \"ActorClassID\",\n \"ActorID\",\n \"ClientID\",\n \"JobID\",\n \"WorkerID\",\n \"FunctionID\",\n \"ObjectID\",\n \"TaskID\",\n \"UniqueID\",\n]\n", "path": "python/ray/__init__.py"}]}
| 1,754 | 228 |
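The accepted fix replaces the hard "import ray before pickle5" rule with a version gate: a pre-imported pickle5 is tolerated as long as it is at least 0.0.10, the first release without the memory leak. A rough re-implementation of that gate (the helper name is made up, and `importlib.metadata` is used here in place of the `pkg_resources` call in the patch):

```python
import sys

MINIMUM_PICKLE5 = (0, 0, 10)  # first release without the memory leak


def check_preimported_pickle5():
    """Allow a user-imported pickle5 only if it is new enough."""
    if "pickle5" not in sys.modules:
        return  # nothing pre-imported; the copy bundled with ray will be used
    from importlib.metadata import version  # the patch uses pkg_resources instead
    installed = tuple(int(part) for part in version("pickle5").split("."))
    if installed < MINIMUM_PICKLE5:
        raise ImportError(
            "pickle5 %s leaks memory; run 'pip install pickle5 -U' to upgrade"
            % ".".join(map(str, installed))
        )


check_preimported_pickle5()
```

If pickle5 was not imported first, the module still prepends its bundled `pickle5_files` directory to `sys.path`, so the answer to the reporter's question is: no, importing pickle5 before ray is not required — and with this patch it is merely allowed when the installed version is recent enough.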
gh_patches_debug_16437
|
rasdani/github-patches
|
git_diff
|
google__pytype-773
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatically added git ignore file to sub directory
This PR automatically creates a .gitignore file to the output directory of pytype/tools/analyze_project/main.py
Issue #759
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytype/tools/analyze_project/main.py`
Content:
```
1 """Analyze an entire project using pytype."""
2
3 import logging
4 import sys
5 import tempfile
6
7 import importlab.environment
8 import importlab.fs
9 import importlab.graph
10 import importlab.output
11
12 from pytype import io
13 from pytype.tools import environment
14 from pytype.tools import tool_utils
15 from pytype.tools.analyze_project import config
16 from pytype.tools.analyze_project import environment as analyze_project_env
17 from pytype.tools.analyze_project import parse_args
18 from pytype.tools.analyze_project import pytype_runner
19
20
21 def main():
22 parser = parse_args.make_parser()
23 args = parser.parse_args(sys.argv[1:])
24
25 if args.version:
26 print(io.get_pytype_version())
27 sys.exit(0)
28
29 tool_utils.setup_logging_or_die(args.verbosity)
30
31 if args.generate_config:
32 config.generate_sample_config_or_die(args.generate_config,
33 parser.pytype_single_args)
34 sys.exit(0)
35
36 conf = parser.config_from_defaults()
37 # File options overwrite defaults.
38 file_config = config.read_config_file_or_die(args.config)
39 parser.postprocess(file_config, from_strings=True)
40 conf.populate_from(file_config)
41 # Command line arguments overwrite file options.
42 conf.populate_from(args)
43 conf.inputs -= conf.exclude
44 if args.no_cache:
45 conf.output = tempfile.mkdtemp()
46 if not conf.pythonpath:
47 conf.pythonpath = environment.compute_pythonpath(conf.inputs)
48 logging.info('\n '.join(['Configuration:'] + str(conf).split('\n')))
49
50 if not conf.inputs:
51 parser.parser.error('Need an input.')
52
53 # Importlab needs the python exe, so we check it as early as possible.
54 environment.check_python_exe_or_die(conf.python_version)
55
56 typeshed = environment.initialize_typeshed_or_die()
57 env = analyze_project_env.create_importlab_environment(conf, typeshed)
58 print('Computing dependencies')
59 import_graph = importlab.graph.ImportGraph.create(env, conf.inputs, trim=True)
60
61 if args.tree:
62 print('Source tree:')
63 importlab.output.print_tree(import_graph)
64 sys.exit(0)
65
66 if args.unresolved:
67 print('Unresolved dependencies:')
68 for imp in sorted(import_graph.get_all_unresolved()):
69 print(' ', imp.name)
70 sys.exit(0)
71
72 # Main usage mode: analyze the project file by file in dependency order.
73
74 logging.info('Source tree:\n%s',
75 importlab.output.formatted_deps_list(import_graph))
76 tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')
77 deps = pytype_runner.deps_from_import_graph(import_graph)
78 runner = pytype_runner.PytypeRunner(conf, deps)
79 return runner.run()
80
81
82 if __name__ == '__main__':
83 sys.exit(main())
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytype/tools/analyze_project/main.py b/pytype/tools/analyze_project/main.py
--- a/pytype/tools/analyze_project/main.py
+++ b/pytype/tools/analyze_project/main.py
@@ -1,6 +1,7 @@
"""Analyze an entire project using pytype."""
import logging
+import os
import sys
import tempfile
@@ -74,6 +75,8 @@
logging.info('Source tree:\n%s',
importlab.output.formatted_deps_list(import_graph))
tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')
+ with open(os.path.join(conf.output, '.gitignore'), 'w') as f:
+ f.write('# Automatically created by pytype\n*')
deps = pytype_runner.deps_from_import_graph(import_graph)
runner = pytype_runner.PytypeRunner(conf, deps)
return runner.run()
|
{"golden_diff": "diff --git a/pytype/tools/analyze_project/main.py b/pytype/tools/analyze_project/main.py\n--- a/pytype/tools/analyze_project/main.py\n+++ b/pytype/tools/analyze_project/main.py\n@@ -1,6 +1,7 @@\n \"\"\"Analyze an entire project using pytype.\"\"\"\n \n import logging\n+import os\n import sys\n import tempfile\n \n@@ -74,6 +75,8 @@\n logging.info('Source tree:\\n%s',\n importlab.output.formatted_deps_list(import_graph))\n tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')\n+ with open(os.path.join(conf.output, '.gitignore'), 'w') as f:\n+ f.write('# Automatically created by pytype\\n*')\n deps = pytype_runner.deps_from_import_graph(import_graph)\n runner = pytype_runner.PytypeRunner(conf, deps)\n return runner.run()\n", "issue": "Automatically added git ignore file to sub directory\nThis PR automatically creates a .gitignore file to the output directory of pytype/tools/analyze_project/main.py\r\n\r\nIssue #759 \n", "before_files": [{"content": "\"\"\"Analyze an entire project using pytype.\"\"\"\n\nimport logging\nimport sys\nimport tempfile\n\nimport importlab.environment\nimport importlab.fs\nimport importlab.graph\nimport importlab.output\n\nfrom pytype import io\nfrom pytype.tools import environment\nfrom pytype.tools import tool_utils\nfrom pytype.tools.analyze_project import config\nfrom pytype.tools.analyze_project import environment as analyze_project_env\nfrom pytype.tools.analyze_project import parse_args\nfrom pytype.tools.analyze_project import pytype_runner\n\n\ndef main():\n parser = parse_args.make_parser()\n args = parser.parse_args(sys.argv[1:])\n\n if args.version:\n print(io.get_pytype_version())\n sys.exit(0)\n\n tool_utils.setup_logging_or_die(args.verbosity)\n\n if args.generate_config:\n config.generate_sample_config_or_die(args.generate_config,\n parser.pytype_single_args)\n sys.exit(0)\n\n conf = parser.config_from_defaults()\n # File options overwrite defaults.\n file_config = config.read_config_file_or_die(args.config)\n parser.postprocess(file_config, from_strings=True)\n conf.populate_from(file_config)\n # Command line arguments overwrite file options.\n conf.populate_from(args)\n conf.inputs -= conf.exclude\n if args.no_cache:\n conf.output = tempfile.mkdtemp()\n if not conf.pythonpath:\n conf.pythonpath = environment.compute_pythonpath(conf.inputs)\n logging.info('\\n '.join(['Configuration:'] + str(conf).split('\\n')))\n\n if not conf.inputs:\n parser.parser.error('Need an input.')\n\n # Importlab needs the python exe, so we check it as early as possible.\n environment.check_python_exe_or_die(conf.python_version)\n\n typeshed = environment.initialize_typeshed_or_die()\n env = analyze_project_env.create_importlab_environment(conf, typeshed)\n print('Computing dependencies')\n import_graph = importlab.graph.ImportGraph.create(env, conf.inputs, trim=True)\n\n if args.tree:\n print('Source tree:')\n importlab.output.print_tree(import_graph)\n sys.exit(0)\n\n if args.unresolved:\n print('Unresolved dependencies:')\n for imp in sorted(import_graph.get_all_unresolved()):\n print(' ', imp.name)\n sys.exit(0)\n\n # Main usage mode: analyze the project file by file in dependency order.\n\n logging.info('Source tree:\\n%s',\n importlab.output.formatted_deps_list(import_graph))\n tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')\n deps = pytype_runner.deps_from_import_graph(import_graph)\n runner = pytype_runner.PytypeRunner(conf, deps)\n return runner.run()\n\n\nif __name__ == '__main__':\n 
sys.exit(main())\n", "path": "pytype/tools/analyze_project/main.py"}], "after_files": [{"content": "\"\"\"Analyze an entire project using pytype.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport tempfile\n\nimport importlab.environment\nimport importlab.fs\nimport importlab.graph\nimport importlab.output\n\nfrom pytype import io\nfrom pytype.tools import environment\nfrom pytype.tools import tool_utils\nfrom pytype.tools.analyze_project import config\nfrom pytype.tools.analyze_project import environment as analyze_project_env\nfrom pytype.tools.analyze_project import parse_args\nfrom pytype.tools.analyze_project import pytype_runner\n\n\ndef main():\n parser = parse_args.make_parser()\n args = parser.parse_args(sys.argv[1:])\n\n if args.version:\n print(io.get_pytype_version())\n sys.exit(0)\n\n tool_utils.setup_logging_or_die(args.verbosity)\n\n if args.generate_config:\n config.generate_sample_config_or_die(args.generate_config,\n parser.pytype_single_args)\n sys.exit(0)\n\n conf = parser.config_from_defaults()\n # File options overwrite defaults.\n file_config = config.read_config_file_or_die(args.config)\n parser.postprocess(file_config, from_strings=True)\n conf.populate_from(file_config)\n # Command line arguments overwrite file options.\n conf.populate_from(args)\n conf.inputs -= conf.exclude\n if args.no_cache:\n conf.output = tempfile.mkdtemp()\n if not conf.pythonpath:\n conf.pythonpath = environment.compute_pythonpath(conf.inputs)\n logging.info('\\n '.join(['Configuration:'] + str(conf).split('\\n')))\n\n if not conf.inputs:\n parser.parser.error('Need an input.')\n\n # Importlab needs the python exe, so we check it as early as possible.\n environment.check_python_exe_or_die(conf.python_version)\n\n typeshed = environment.initialize_typeshed_or_die()\n env = analyze_project_env.create_importlab_environment(conf, typeshed)\n print('Computing dependencies')\n import_graph = importlab.graph.ImportGraph.create(env, conf.inputs, trim=True)\n\n if args.tree:\n print('Source tree:')\n importlab.output.print_tree(import_graph)\n sys.exit(0)\n\n if args.unresolved:\n print('Unresolved dependencies:')\n for imp in sorted(import_graph.get_all_unresolved()):\n print(' ', imp.name)\n sys.exit(0)\n\n # Main usage mode: analyze the project file by file in dependency order.\n\n logging.info('Source tree:\\n%s',\n importlab.output.formatted_deps_list(import_graph))\n tool_utils.makedirs_or_die(conf.output, 'Could not create output directory')\n with open(os.path.join(conf.output, '.gitignore'), 'w') as f:\n f.write('# Automatically created by pytype\\n*')\n deps = pytype_runner.deps_from_import_graph(import_graph)\n runner = pytype_runner.PytypeRunner(conf, deps)\n return runner.run()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "path": "pytype/tools/analyze_project/main.py"}]}
| 1,047 | 193 |
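The fix drops a catch-all `.gitignore` into the analysis output directory right after creating it, so the cache never shows up as an untracked path in `git status`. A standalone sketch of the same idea (the helper name and the `.pytype` argument are illustrative; the existence check is an extra safeguard, the patch itself writes the file unconditionally):

```python
import os


def write_self_ignoring_gitignore(output_dir):
    """Create output_dir and make git ignore everything inside it."""
    os.makedirs(output_dir, exist_ok=True)
    gitignore = os.path.join(output_dir, ".gitignore")
    if not os.path.exists(gitignore):  # don't clobber a file the user put there
        with open(gitignore, "w") as f:
            f.write("# Automatically created by pytype\n*")


write_self_ignoring_gitignore(".pytype")  # pytype's default output directory
```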
gh_patches_debug_15680
|
rasdani/github-patches
|
git_diff
|
liqd__a4-product-837
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
#2151 contact form field labels
In EN:
It should say „Your name“ instead of „your first and last name“
It should say „I want to receive a copy of my message“ instead of „
I want to receicve a copy of my message as email“
in DE:
It should say „Ihr Name” instead of „Ihr Vor- und Nachname“
It should say „Eine Kopie der Nachricht an mich senden“ instead of „Eine Kopie der Anfrage an mich senden“
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/cms/contacts/models.py`
Content:
```
1 import json
2
3 from django.contrib import messages
4 from django.core.serializers.json import DjangoJSONEncoder
5 from django.db import models
6 from django.shortcuts import redirect
7 from django.utils.translation import ugettext_lazy as _
8 from modelcluster.fields import ParentalKey
9 from wagtail.admin.edit_handlers import FieldPanel
10 from wagtail.admin.edit_handlers import FieldRowPanel
11 from wagtail.admin.edit_handlers import MultiFieldPanel
12 from wagtail.admin.edit_handlers import ObjectList
13 from wagtail.admin.edit_handlers import TabbedInterface
14 from wagtail.contrib.forms.models import AbstractEmailForm
15 from wagtail.contrib.forms.models import AbstractFormField
16 from wagtail.contrib.forms.models import AbstractFormSubmission
17 from wagtail.core.fields import RichTextField
18 from wagtail.images.edit_handlers import ImageChooserPanel
19
20 from apps.cms.emails import AnswerToContactFormEmail
21 from apps.contrib.translations import TranslatedField
22
23
24 class FormField(AbstractFormField):
25 page = ParentalKey('FormPage',
26 on_delete=models.CASCADE,
27 related_name='form_fields')
28
29
30 class CustomFormSubmission(AbstractFormSubmission):
31 email = models.EmailField()
32 message = models.TextField()
33 telephone_number = models.CharField(max_length=100, blank=True)
34 name = models.CharField(max_length=100, blank=True)
35
36 def get_data(self):
37 form_data = super().get_data()
38 form_data.update({
39 'email': self.email,
40 'message': self.message,
41 'telephone_number': self.telephone_number,
42 'name': self.name
43 })
44
45 return form_data
46
47
48 class FormPage(AbstractEmailForm):
49 header_de = models.CharField(
50 max_length=500, blank=True, verbose_name="Header")
51 header_en = models.CharField(
52 max_length=500, blank=True, verbose_name="Header")
53
54 intro_en = RichTextField(blank=True)
55 intro_de = RichTextField(blank=True)
56
57 thank_you_text_en = models.TextField(blank=True)
58 thank_you_text_de = models.TextField(blank=True)
59
60 contact_person_name = models.CharField(max_length=100, blank=True)
61 contact_person_image = models.ForeignKey(
62 'wagtailimages.Image',
63 null=True,
64 blank=True,
65 on_delete=models.SET_NULL,
66 related_name='+',
67 verbose_name="Image of contact person",
68 help_text="The Image will be shown "
69 "besides the name of the contact person"
70 )
71
72 header = TranslatedField(
73 'header_de',
74 'header_en'
75 )
76
77 intro = TranslatedField(
78 'intro_de',
79 'intro_en'
80 )
81
82 thank_you_text = TranslatedField(
83 'thank_you_text_de',
84 'thank_you_text_en'
85 )
86
87 def get_submission_class(self):
88 return CustomFormSubmission
89
90 def process_form_submission(self, form):
91 data = form.cleaned_data
92 submission = self.get_submission_class().objects.create(
93 form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),
94 page=self, email=data['email'], message=data['message'],
95 telephone_number=data['telephone_number'], name=data['name']
96 )
97 if self.to_address:
98 self.send_mail(form)
99 if form.cleaned_data['receive_copy']:
100 AnswerToContactFormEmail.send(submission)
101 return submission
102
103 def render_landing_page(
104 self, request, form_submission=None, *args, **kwargs):
105 if 'HTTP_REFERER' in request.META \
106 and request.META.get('HTTP_REFERER'):
107 messages.add_message(request, messages.SUCCESS,
108 self.thank_you_text)
109 return redirect(request.META['HTTP_REFERER'])
110 return super().render_landing_page(
111 request, form_submission, *args, **kwargs)
112
113 def get_form_fields(self):
114 fields = list(super().get_form_fields())
115 fields.insert(0, FormField(
116 label='receive_copy',
117 field_type='checkbox',
118 help_text=_('I want to receicve a copy of my message as email'),
119 required=False))
120
121 fields.insert(0, FormField(
122 label='message',
123 help_text=_('Your message'),
124 field_type='multiline',
125 required=True))
126
127 fields.insert(0, FormField(
128 label='email',
129 help_text=_('Your email address'),
130 field_type='email',
131 required=True))
132
133 fields.insert(0, FormField(
134 label='telephone_number',
135 help_text=_('Your telephone number'),
136 field_type='singleline',
137 required=False))
138
139 fields.insert(0, FormField(
140 label='name',
141 help_text=_('Your first and last name'),
142 field_type='singleline',
143 required=False))
144 return fields
145
146 en_content_panels = [
147 FieldPanel('header_en'),
148 FieldPanel('intro_en'),
149 FieldPanel('thank_you_text_en'),
150 ]
151
152 de_content_panels = [
153 FieldPanel('header_de'),
154 FieldPanel('intro_de'),
155 FieldPanel('thank_you_text_de'),
156 ]
157
158 common_panels = [
159 FieldPanel('title'),
160 FieldPanel('slug'),
161 MultiFieldPanel([
162 FieldRowPanel([
163 FieldPanel('from_address', classname="col6"),
164 FieldPanel('to_address', classname="col6"),
165 ]),
166 FieldPanel('subject'),
167 ], "Email"),
168 MultiFieldPanel([
169 FieldRowPanel([
170 FieldPanel('contact_person_name', classname="col6"),
171 ImageChooserPanel('contact_person_image', classname="col6"),
172 ]),
173 ], "Contact Person"),
174
175 ]
176
177 edit_handler = TabbedInterface([
178 ObjectList(common_panels, heading='Common'),
179 ObjectList(en_content_panels, heading='English'),
180 ObjectList(de_content_panels, heading='German')
181 ])
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/cms/contacts/models.py b/apps/cms/contacts/models.py
--- a/apps/cms/contacts/models.py
+++ b/apps/cms/contacts/models.py
@@ -115,7 +115,7 @@
fields.insert(0, FormField(
label='receive_copy',
field_type='checkbox',
- help_text=_('I want to receicve a copy of my message as email'),
+ help_text=_('I want to receive a copy of my message'),
required=False))
fields.insert(0, FormField(
@@ -138,7 +138,7 @@
fields.insert(0, FormField(
label='name',
- help_text=_('Your first and last name'),
+ help_text=_('Your name'),
field_type='singleline',
required=False))
return fields
|
{"golden_diff": "diff --git a/apps/cms/contacts/models.py b/apps/cms/contacts/models.py\n--- a/apps/cms/contacts/models.py\n+++ b/apps/cms/contacts/models.py\n@@ -115,7 +115,7 @@\n fields.insert(0, FormField(\n label='receive_copy',\n field_type='checkbox',\n- help_text=_('I want to receicve a copy of my message as email'),\n+ help_text=_('I want to receive a copy of my message'),\n required=False))\n \n fields.insert(0, FormField(\n@@ -138,7 +138,7 @@\n \n fields.insert(0, FormField(\n label='name',\n- help_text=_('Your first and last name'),\n+ help_text=_('Your name'),\n field_type='singleline',\n required=False))\n return fields\n", "issue": "#2151 contact form field labels\nIn EN: \r\n\r\nIt should say \u201eYour name\u201c instead of \u201eyour first and last name\u201c\r\nIt should say \u201eI want to receive a copy of my message\u201c instead of \u201e\r\nI want to receicve a copy of my message as email\u201c\r\n\r\nin DE:\r\n\r\nIt should say \u201eIhr Name\u201d instead of \u201eIhr Vor- und Nachname\u201c\r\n\r\nIt should say \u201eEine Kopie der Nachricht an mich senden\u201c instead of \u201eEine Kopie der Anfrage an mich senden\u201c\r\n\n", "before_files": [{"content": "import json\n\nfrom django.contrib import messages\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import models\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom modelcluster.fields import ParentalKey\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.admin.edit_handlers import FieldRowPanel\nfrom wagtail.admin.edit_handlers import MultiFieldPanel\nfrom wagtail.admin.edit_handlers import ObjectList\nfrom wagtail.admin.edit_handlers import TabbedInterface\nfrom wagtail.contrib.forms.models import AbstractEmailForm\nfrom wagtail.contrib.forms.models import AbstractFormField\nfrom wagtail.contrib.forms.models import AbstractFormSubmission\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\n\nfrom apps.cms.emails import AnswerToContactFormEmail\nfrom apps.contrib.translations import TranslatedField\n\n\nclass FormField(AbstractFormField):\n page = ParentalKey('FormPage',\n on_delete=models.CASCADE,\n related_name='form_fields')\n\n\nclass CustomFormSubmission(AbstractFormSubmission):\n email = models.EmailField()\n message = models.TextField()\n telephone_number = models.CharField(max_length=100, blank=True)\n name = models.CharField(max_length=100, blank=True)\n\n def get_data(self):\n form_data = super().get_data()\n form_data.update({\n 'email': self.email,\n 'message': self.message,\n 'telephone_number': self.telephone_number,\n 'name': self.name\n })\n\n return form_data\n\n\nclass FormPage(AbstractEmailForm):\n header_de = models.CharField(\n max_length=500, blank=True, verbose_name=\"Header\")\n header_en = models.CharField(\n max_length=500, blank=True, verbose_name=\"Header\")\n\n intro_en = RichTextField(blank=True)\n intro_de = RichTextField(blank=True)\n\n thank_you_text_en = models.TextField(blank=True)\n thank_you_text_de = models.TextField(blank=True)\n\n contact_person_name = models.CharField(max_length=100, blank=True)\n contact_person_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+',\n verbose_name=\"Image of contact person\",\n help_text=\"The Image will be shown \"\n \"besides the name of the contact person\"\n )\n\n header = TranslatedField(\n 'header_de',\n 
'header_en'\n )\n\n intro = TranslatedField(\n 'intro_de',\n 'intro_en'\n )\n\n thank_you_text = TranslatedField(\n 'thank_you_text_de',\n 'thank_you_text_en'\n )\n\n def get_submission_class(self):\n return CustomFormSubmission\n\n def process_form_submission(self, form):\n data = form.cleaned_data\n submission = self.get_submission_class().objects.create(\n form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),\n page=self, email=data['email'], message=data['message'],\n telephone_number=data['telephone_number'], name=data['name']\n )\n if self.to_address:\n self.send_mail(form)\n if form.cleaned_data['receive_copy']:\n AnswerToContactFormEmail.send(submission)\n return submission\n\n def render_landing_page(\n self, request, form_submission=None, *args, **kwargs):\n if 'HTTP_REFERER' in request.META \\\n and request.META.get('HTTP_REFERER'):\n messages.add_message(request, messages.SUCCESS,\n self.thank_you_text)\n return redirect(request.META['HTTP_REFERER'])\n return super().render_landing_page(\n request, form_submission, *args, **kwargs)\n\n def get_form_fields(self):\n fields = list(super().get_form_fields())\n fields.insert(0, FormField(\n label='receive_copy',\n field_type='checkbox',\n help_text=_('I want to receicve a copy of my message as email'),\n required=False))\n\n fields.insert(0, FormField(\n label='message',\n help_text=_('Your message'),\n field_type='multiline',\n required=True))\n\n fields.insert(0, FormField(\n label='email',\n help_text=_('Your email address'),\n field_type='email',\n required=True))\n\n fields.insert(0, FormField(\n label='telephone_number',\n help_text=_('Your telephone number'),\n field_type='singleline',\n required=False))\n\n fields.insert(0, FormField(\n label='name',\n help_text=_('Your first and last name'),\n field_type='singleline',\n required=False))\n return fields\n\n en_content_panels = [\n FieldPanel('header_en'),\n FieldPanel('intro_en'),\n FieldPanel('thank_you_text_en'),\n ]\n\n de_content_panels = [\n FieldPanel('header_de'),\n FieldPanel('intro_de'),\n FieldPanel('thank_you_text_de'),\n ]\n\n common_panels = [\n FieldPanel('title'),\n FieldPanel('slug'),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('from_address', classname=\"col6\"),\n FieldPanel('to_address', classname=\"col6\"),\n ]),\n FieldPanel('subject'),\n ], \"Email\"),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('contact_person_name', classname=\"col6\"),\n ImageChooserPanel('contact_person_image', classname=\"col6\"),\n ]),\n ], \"Contact Person\"),\n\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(common_panels, heading='Common'),\n ObjectList(en_content_panels, heading='English'),\n ObjectList(de_content_panels, heading='German')\n ])\n", "path": "apps/cms/contacts/models.py"}], "after_files": [{"content": "import json\n\nfrom django.contrib import messages\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db import models\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom modelcluster.fields import ParentalKey\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.admin.edit_handlers import FieldRowPanel\nfrom wagtail.admin.edit_handlers import MultiFieldPanel\nfrom wagtail.admin.edit_handlers import ObjectList\nfrom wagtail.admin.edit_handlers import TabbedInterface\nfrom wagtail.contrib.forms.models import AbstractEmailForm\nfrom wagtail.contrib.forms.models import AbstractFormField\nfrom wagtail.contrib.forms.models import 
AbstractFormSubmission\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\n\nfrom apps.cms.emails import AnswerToContactFormEmail\nfrom apps.contrib.translations import TranslatedField\n\n\nclass FormField(AbstractFormField):\n page = ParentalKey('FormPage',\n on_delete=models.CASCADE,\n related_name='form_fields')\n\n\nclass CustomFormSubmission(AbstractFormSubmission):\n email = models.EmailField()\n message = models.TextField()\n telephone_number = models.CharField(max_length=100, blank=True)\n name = models.CharField(max_length=100, blank=True)\n\n def get_data(self):\n form_data = super().get_data()\n form_data.update({\n 'email': self.email,\n 'message': self.message,\n 'telephone_number': self.telephone_number,\n 'name': self.name\n })\n\n return form_data\n\n\nclass FormPage(AbstractEmailForm):\n header_de = models.CharField(\n max_length=500, blank=True, verbose_name=\"Header\")\n header_en = models.CharField(\n max_length=500, blank=True, verbose_name=\"Header\")\n\n intro_en = RichTextField(blank=True)\n intro_de = RichTextField(blank=True)\n\n thank_you_text_en = models.TextField(blank=True)\n thank_you_text_de = models.TextField(blank=True)\n\n contact_person_name = models.CharField(max_length=100, blank=True)\n contact_person_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+',\n verbose_name=\"Image of contact person\",\n help_text=\"The Image will be shown \"\n \"besides the name of the contact person\"\n )\n\n header = TranslatedField(\n 'header_de',\n 'header_en'\n )\n\n intro = TranslatedField(\n 'intro_de',\n 'intro_en'\n )\n\n thank_you_text = TranslatedField(\n 'thank_you_text_de',\n 'thank_you_text_en'\n )\n\n def get_submission_class(self):\n return CustomFormSubmission\n\n def process_form_submission(self, form):\n data = form.cleaned_data\n submission = self.get_submission_class().objects.create(\n form_data=json.dumps(form.cleaned_data, cls=DjangoJSONEncoder),\n page=self, email=data['email'], message=data['message'],\n telephone_number=data['telephone_number'], name=data['name']\n )\n if self.to_address:\n self.send_mail(form)\n if form.cleaned_data['receive_copy']:\n AnswerToContactFormEmail.send(submission)\n return submission\n\n def render_landing_page(\n self, request, form_submission=None, *args, **kwargs):\n if 'HTTP_REFERER' in request.META \\\n and request.META.get('HTTP_REFERER'):\n messages.add_message(request, messages.SUCCESS,\n self.thank_you_text)\n return redirect(request.META['HTTP_REFERER'])\n return super().render_landing_page(\n request, form_submission, *args, **kwargs)\n\n def get_form_fields(self):\n fields = list(super().get_form_fields())\n fields.insert(0, FormField(\n label='receive_copy',\n field_type='checkbox',\n help_text=_('I want to receive a copy of my message'),\n required=False))\n\n fields.insert(0, FormField(\n label='message',\n help_text=_('Your message'),\n field_type='multiline',\n required=True))\n\n fields.insert(0, FormField(\n label='email',\n help_text=_('Your email address'),\n field_type='email',\n required=True))\n\n fields.insert(0, FormField(\n label='telephone_number',\n help_text=_('Your telephone number'),\n field_type='singleline',\n required=False))\n\n fields.insert(0, FormField(\n label='name',\n help_text=_('Your name'),\n field_type='singleline',\n required=False))\n return fields\n\n en_content_panels = [\n FieldPanel('header_en'),\n FieldPanel('intro_en'),\n 
FieldPanel('thank_you_text_en'),\n ]\n\n de_content_panels = [\n FieldPanel('header_de'),\n FieldPanel('intro_de'),\n FieldPanel('thank_you_text_de'),\n ]\n\n common_panels = [\n FieldPanel('title'),\n FieldPanel('slug'),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('from_address', classname=\"col6\"),\n FieldPanel('to_address', classname=\"col6\"),\n ]),\n FieldPanel('subject'),\n ], \"Email\"),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('contact_person_name', classname=\"col6\"),\n ImageChooserPanel('contact_person_image', classname=\"col6\"),\n ]),\n ], \"Contact Person\"),\n\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(common_panels, heading='Common'),\n ObjectList(en_content_panels, heading='English'),\n ObjectList(de_content_panels, heading='German')\n ])\n", "path": "apps/cms/contacts/models.py"}]}
| 2,028 | 182 |
gh_patches_debug_10989 | rasdani/github-patches | git_diff | getsentry__sentry-3323 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Culprit not shown for iOS
For some iOS events the culprit is not shown in the UI.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/lang/native/plugin.py`
Content:
```
1 from __future__ import absolute_import, print_function
2
3 import logging
4 import posixpath
5
6 from sentry.models import Project, EventError
7 from sentry.plugins import Plugin2
8 from sentry.lang.native.symbolizer import Symbolizer, have_symsynd
9 from sentry.models.dsymfile import SDK_MAPPING
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def append_error(data, err):
16 data.setdefault('errors', []).append(err)
17
18
19 def exception_from_apple_error_or_diagnosis(error, diagnosis=None):
20 error = error or {}
21
22 if error:
23 nsexception = error.get('nsexception')
24 if nsexception:
25 return {
26 'type': nsexception['name'],
27 'value': error['reason'],
28 }
29
30 if diagnosis:
31 return {
32 'type': 'Error',
33 'value': diagnosis
34 }
35
36
37 def inject_apple_backtrace(data, frames, diagnosis=None, error=None,
38 system=None):
39 # TODO:
40 # user report stacktraces from unity
41
42 app_uuid = None
43 if system:
44 app_uuid = system.get('app_uuid')
45 if app_uuid is not None:
46 app_uuid = app_uuid.lower()
47
48 converted_frames = []
49 longest_addr = 0
50 for frame in reversed(frames):
51 fn = frame.get('filename')
52 in_app = False
53
54 if app_uuid is not None:
55 frame_uuid = frame.get('uuid')
56 if frame_uuid == app_uuid:
57 in_app = True
58
59 # We only record the offset if we found a symbol but we did not
60 # find a line number. In that case it's the offset in bytes from
61 # the beginning of the symbol.
62 function = frame['symbol_name'] or '<unknown>'
63 lineno = frame.get('line')
64 offset = None
65 if not lineno:
66 offset = frame['instruction_addr'] - frame['symbol_addr']
67
68 cframe = {
69 'in_app': in_app,
70 'abs_path': fn,
71 'filename': fn and posixpath.basename(fn) or None,
72 # This can come back as `None` from the symbolizer, in which
73 # case we need to fill something else in or we will fail
74 # later fulfill the interface requirements which say that a
75 # function needs to be provided.
76 'function': function,
77 'package': frame['object_name'],
78 'symbol_addr': '%x' % frame['symbol_addr'],
79 'instruction_addr': '%x' % frame['instruction_addr'],
80 'instruction_offset': offset,
81 'lineno': lineno,
82 }
83 converted_frames.append(cframe)
84 longest_addr = max(longest_addr, len(cframe['symbol_addr']),
85 len(cframe['instruction_addr']))
86
87 # Pad out addresses to be of the same length and add prefix
88 for frame in converted_frames:
89 for key in 'symbol_addr', 'instruction_addr':
90 frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')
91
92 stacktrace = {'frames': converted_frames}
93
94 if error or diagnosis:
95 error = error or {}
96 exc = exception_from_apple_error_or_diagnosis(error, diagnosis)
97 if exc is not None:
98 exc['stacktrace'] = stacktrace
99 data['sentry.interfaces.Exception'] = exc
100 return
101
102 data['sentry.interfaces.Stacktrace'] = stacktrace
103
104
105 def inject_apple_device_data(data, system):
106 container = data.setdefault('device', {})
107 try:
108 container['name'] = SDK_MAPPING[system['system_name']]
109 except LookupError:
110 container['name'] = system.get('system_name') or 'Generic Apple'
111
112 if 'system_version' in system:
113 container['version'] = system['system_version']
114 if 'os_version' in system:
115 container['build'] = system['os_version']
116
117 extra = container.setdefault('data', {})
118 if 'cpu_arch' in system:
119 extra['cpu_arch'] = system['cpu_arch']
120 if 'model' in system:
121 extra['device_model_id'] = system['model']
122 if 'machine' in system:
123 extra['device_model'] = system['machine']
124 if 'kernel_version' in system:
125 extra['kernel_version'] = system['kernel_version']
126
127
128 def preprocess_apple_crash_event(data):
129 crash_report = data.get('sentry.interfaces.AppleCrashReport')
130 if crash_report is None:
131 return
132
133 project = Project.objects.get_from_cache(
134 id=data['project'],
135 )
136
137 crash = crash_report['crash']
138 crashed_thread = None
139 for thread in crash['threads']:
140 if thread['crashed']:
141 crashed_thread = thread
142 if crashed_thread is None:
143 append_error(data, {
144 'type': EventError.NATIVE_NO_CRASHED_THREAD,
145 })
146
147 else:
148 system = crash_report.get('system')
149 try:
150 sym = Symbolizer(project, crash_report['binary_images'],
151 threads=[crashed_thread])
152 with sym:
153 bt = sym.symbolize_backtrace(
154 crashed_thread['backtrace']['contents'], system)
155 inject_apple_backtrace(data, bt, crash.get('diagnosis'),
156 crash.get('error'), system)
157 except Exception as e:
158 logger.exception('Failed to symbolicate')
159 append_error(data, {
160 'type': EventError.NATIVE_INTERNAL_FAILURE,
161 'error': '%s: %s' % (e.__class__.__name__, str(e)),
162 })
163 return
164
165 if system:
166 inject_apple_device_data(data, system)
167
168 return data
169
170
171 class NativePlugin(Plugin2):
172 can_disable = False
173
174 def get_event_preprocessors(self, **kwargs):
175 if not have_symsynd:
176 return []
177 return [preprocess_apple_crash_event]
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py
--- a/src/sentry/lang/native/plugin.py
+++ b/src/sentry/lang/native/plugin.py
@@ -96,7 +96,11 @@
exc = exception_from_apple_error_or_diagnosis(error, diagnosis)
if exc is not None:
exc['stacktrace'] = stacktrace
- data['sentry.interfaces.Exception'] = exc
+ data['sentry.interfaces.Exception'] = {'values': [exc]}
+ # Since we inject the exception late we need to make sure that
+ # we set the event type to error as it would be set to
+ # 'default' otherwise.
+ data['type'] = 'error'
return
data['sentry.interfaces.Stacktrace'] = stacktrace
|
{"golden_diff": "diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py\n--- a/src/sentry/lang/native/plugin.py\n+++ b/src/sentry/lang/native/plugin.py\n@@ -96,7 +96,11 @@\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n- data['sentry.interfaces.Exception'] = exc\n+ data['sentry.interfaces.Exception'] = {'values': [exc]}\n+ # Since we inject the exception late we need to make sure that\n+ # we set the event type to error as it would be set to\n+ # 'default' otherwise.\n+ data['type'] = 'error'\n return\n \n data['sentry.interfaces.Stacktrace'] = stacktrace\n", "issue": "Culprit not shown for iOS\nFor some iOS events the culprit is not shown in the UI.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\nimport posixpath\n\nfrom sentry.models import Project, EventError\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, have_symsynd\nfrom sentry.models.dsymfile import SDK_MAPPING\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef append_error(data, err):\n data.setdefault('errors', []).append(err)\n\n\ndef exception_from_apple_error_or_diagnosis(error, diagnosis=None):\n error = error or {}\n\n if error:\n nsexception = error.get('nsexception')\n if nsexception:\n return {\n 'type': nsexception['name'],\n 'value': error['reason'],\n }\n\n if diagnosis:\n return {\n 'type': 'Error',\n 'value': diagnosis\n }\n\n\ndef inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n # user report stacktraces from unity\n\n app_uuid = None\n if system:\n app_uuid = system.get('app_uuid')\n if app_uuid is not None:\n app_uuid = app_uuid.lower()\n\n converted_frames = []\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n in_app = False\n\n if app_uuid is not None:\n frame_uuid = frame.get('uuid')\n if frame_uuid == app_uuid:\n in_app = True\n\n # We only record the offset if we found a symbol but we did not\n # find a line number. 
In that case it's the offset in bytes from\n # the beginning of the symbol.\n function = frame['symbol_name'] or '<unknown>'\n lineno = frame.get('line')\n offset = None\n if not lineno:\n offset = frame['instruction_addr'] - frame['symbol_addr']\n\n cframe = {\n 'in_app': in_app,\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\n # case we need to fill something else in or we will fail\n # later fulfill the interface requirements which say that a\n # function needs to be provided.\n 'function': function,\n 'package': frame['object_name'],\n 'symbol_addr': '%x' % frame['symbol_addr'],\n 'instruction_addr': '%x' % frame['instruction_addr'],\n 'instruction_offset': offset,\n 'lineno': lineno,\n }\n converted_frames.append(cframe)\n longest_addr = max(longest_addr, len(cframe['symbol_addr']),\n len(cframe['instruction_addr']))\n\n # Pad out addresses to be of the same length and add prefix\n for frame in converted_frames:\n for key in 'symbol_addr', 'instruction_addr':\n frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')\n\n stacktrace = {'frames': converted_frames}\n\n if error or diagnosis:\n error = error or {}\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n data['sentry.interfaces.Exception'] = exc\n return\n\n data['sentry.interfaces.Stacktrace'] = stacktrace\n\n\ndef inject_apple_device_data(data, system):\n container = data.setdefault('device', {})\n try:\n container['name'] = SDK_MAPPING[system['system_name']]\n except LookupError:\n container['name'] = system.get('system_name') or 'Generic Apple'\n\n if 'system_version' in system:\n container['version'] = system['system_version']\n if 'os_version' in system:\n container['build'] = system['os_version']\n\n extra = container.setdefault('data', {})\n if 'cpu_arch' in system:\n extra['cpu_arch'] = system['cpu_arch']\n if 'model' in system:\n extra['device_model_id'] = system['model']\n if 'machine' in system:\n extra['device_model'] = system['machine']\n if 'kernel_version' in system:\n extra['kernel_version'] = system['kernel_version']\n\n\ndef preprocess_apple_crash_event(data):\n crash_report = data.get('sentry.interfaces.AppleCrashReport')\n if crash_report is None:\n return\n\n project = Project.objects.get_from_cache(\n id=data['project'],\n )\n\n crash = crash_report['crash']\n crashed_thread = None\n for thread in crash['threads']:\n if thread['crashed']:\n crashed_thread = thread\n if crashed_thread is None:\n append_error(data, {\n 'type': EventError.NATIVE_NO_CRASHED_THREAD,\n })\n\n else:\n system = crash_report.get('system')\n try:\n sym = Symbolizer(project, crash_report['binary_images'],\n threads=[crashed_thread])\n with sym:\n bt = sym.symbolize_backtrace(\n crashed_thread['backtrace']['contents'], system)\n inject_apple_backtrace(data, bt, crash.get('diagnosis'),\n crash.get('error'), system)\n except Exception as e:\n logger.exception('Failed to symbolicate')\n append_error(data, {\n 'type': EventError.NATIVE_INTERNAL_FAILURE,\n 'error': '%s: %s' % (e.__class__.__name__, str(e)),\n })\n return\n\n if system:\n inject_apple_device_data(data, system)\n\n return data\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_event_preprocessors(self, **kwargs):\n if not have_symsynd:\n return []\n return [preprocess_apple_crash_event]\n", "path": "src/sentry/lang/native/plugin.py"}], "after_files": [{"content": "from __future__ import 
absolute_import, print_function\n\nimport logging\nimport posixpath\n\nfrom sentry.models import Project, EventError\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, have_symsynd\nfrom sentry.models.dsymfile import SDK_MAPPING\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef append_error(data, err):\n data.setdefault('errors', []).append(err)\n\n\ndef exception_from_apple_error_or_diagnosis(error, diagnosis=None):\n error = error or {}\n\n if error:\n nsexception = error.get('nsexception')\n if nsexception:\n return {\n 'type': nsexception['name'],\n 'value': error['reason'],\n }\n\n if diagnosis:\n return {\n 'type': 'Error',\n 'value': diagnosis\n }\n\n\ndef inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n # user report stacktraces from unity\n\n app_uuid = None\n if system:\n app_uuid = system.get('app_uuid')\n if app_uuid is not None:\n app_uuid = app_uuid.lower()\n\n converted_frames = []\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n in_app = False\n\n if app_uuid is not None:\n frame_uuid = frame.get('uuid')\n if frame_uuid == app_uuid:\n in_app = True\n\n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n # the beginning of the symbol.\n function = frame['symbol_name'] or '<unknown>'\n lineno = frame.get('line')\n offset = None\n if not lineno:\n offset = frame['instruction_addr'] - frame['symbol_addr']\n\n cframe = {\n 'in_app': in_app,\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\n # case we need to fill something else in or we will fail\n # later fulfill the interface requirements which say that a\n # function needs to be provided.\n 'function': function,\n 'package': frame['object_name'],\n 'symbol_addr': '%x' % frame['symbol_addr'],\n 'instruction_addr': '%x' % frame['instruction_addr'],\n 'instruction_offset': offset,\n 'lineno': lineno,\n }\n converted_frames.append(cframe)\n longest_addr = max(longest_addr, len(cframe['symbol_addr']),\n len(cframe['instruction_addr']))\n\n # Pad out addresses to be of the same length and add prefix\n for frame in converted_frames:\n for key in 'symbol_addr', 'instruction_addr':\n frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')\n\n stacktrace = {'frames': converted_frames}\n\n if error or diagnosis:\n error = error or {}\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n data['sentry.interfaces.Exception'] = {'values': [exc]}\n # Since we inject the exception late we need to make sure that\n # we set the event type to error as it would be set to\n # 'default' otherwise.\n data['type'] = 'error'\n return\n\n data['sentry.interfaces.Stacktrace'] = stacktrace\n\n\ndef inject_apple_device_data(data, system):\n container = data.setdefault('device', {})\n try:\n container['name'] = SDK_MAPPING[system['system_name']]\n except LookupError:\n container['name'] = system.get('system_name') or 'Generic Apple'\n\n if 'system_version' in system:\n container['version'] = system['system_version']\n if 'os_version' in system:\n container['build'] = system['os_version']\n\n extra = container.setdefault('data', {})\n if 'cpu_arch' in system:\n extra['cpu_arch'] = system['cpu_arch']\n if 'model' in system:\n extra['device_model_id'] = system['model']\n if 'machine' in system:\n 
extra['device_model'] = system['machine']\n if 'kernel_version' in system:\n extra['kernel_version'] = system['kernel_version']\n\n\ndef preprocess_apple_crash_event(data):\n crash_report = data.get('sentry.interfaces.AppleCrashReport')\n if crash_report is None:\n return\n\n project = Project.objects.get_from_cache(\n id=data['project'],\n )\n\n crash = crash_report['crash']\n crashed_thread = None\n for thread in crash['threads']:\n if thread['crashed']:\n crashed_thread = thread\n if crashed_thread is None:\n append_error(data, {\n 'type': EventError.NATIVE_NO_CRASHED_THREAD,\n })\n\n else:\n system = crash_report.get('system')\n try:\n sym = Symbolizer(project, crash_report['binary_images'],\n threads=[crashed_thread])\n with sym:\n bt = sym.symbolize_backtrace(\n crashed_thread['backtrace']['contents'], system)\n inject_apple_backtrace(data, bt, crash.get('diagnosis'),\n crash.get('error'), system)\n except Exception as e:\n logger.exception('Failed to symbolicate')\n append_error(data, {\n 'type': EventError.NATIVE_INTERNAL_FAILURE,\n 'error': '%s: %s' % (e.__class__.__name__, str(e)),\n })\n return\n\n if system:\n inject_apple_device_data(data, system)\n\n return data\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_event_preprocessors(self, **kwargs):\n if not have_symsynd:\n return []\n return [preprocess_apple_crash_event]\n", "path": "src/sentry/lang/native/plugin.py"}]}
| 1,967 | 180 |
gh_patches_debug_2205 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-891 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Device Support Request] Lidl _TZ3000_oh7jddmx TS0502A
**Is your feature request related to a problem? Please describe.**
Very similar to #808 I have a LIDL ceiling light panel, which only supports CCT but is incorrectly reported to HA.
**Describe the solution you'd like**
Only exposing the color temperature.
**Device signature**
```
{
"node_descriptor": "NodeDescriptor(byte1=1, byte2=64, mac_capability_flags=142, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=0, *allocate_address=True, *complex_descriptor_available=False, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False, *is_valid=True, *logical_type=<LogicalType.Router: 1>, *user_descriptor_available=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x010c",
"in_clusters": [
"0x0000",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0008",
"0x0300",
"0x1000"
],
"out_clusters": [
"0x000a",
"0x0019"
]
},
"242": {
"profile_id": 41440,
"device_type": "0x0061",
"in_clusters": [],
"out_clusters": [
"0x0021"
]
}
},
"manufacturer": "_TZ3000_oh7jddmx",
"model": "TS0502A",
"class": "zigpy.device.Device"
}
```
**Additional context**
I'm assuming adding the signature into
https://github.com/zigpy/zha-device-handlers/blob/b180e4f7ab4a096688f4d4ad9b47ac1b3efa9fe2/zhaquirks/lidl/cct.py#L40-L46
will fix this. ~~I'll test it and open a PR if I find time for it.~~
Update: Successfully tested. PR opened.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/lidl/cct.py`
Content:
```
1 """Quirk for LIDL CCT bulb."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 from zigpy.zcl.clusters.general import (
5 Basic,
6 GreenPowerProxy,
7 Groups,
8 Identify,
9 LevelControl,
10 OnOff,
11 Ota,
12 Scenes,
13 Time,
14 )
15 from zigpy.zcl.clusters.lighting import Color
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27
28 class LidlCCTColorCluster(CustomCluster, Color):
29 """Lidl CCT Lighting custom cluster."""
30
31 # Remove RGB color wheel for CCT Lighting: only expose color temperature
32 # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
33 _CONSTANT_ATTRIBUTES = {0x400A: 16}
34
35
36 class CCTLight(CustomDevice):
37 """Lidl CCT Lighting device."""
38
39 signature = {
40 MODELS_INFO: [
41 ("_TZ3000_49qchf10", "TS0502A"),
42 ("_TZ3000_oborybow", "TS0502A"),
43 ("_TZ3000_9evm3otq", "TS0502A"),
44 ("_TZ3000_rylaozuc", "TS0502A"),
45 ("_TZ3000_el5kt5im", "TS0502A"),
46 ],
47 ENDPOINTS: {
48 1: {
49 # <SimpleDescriptor endpoint=1 profile=260 device_type=268
50 # device_version=1
51 # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
52 # output_clusters=[10, 25]
53 PROFILE_ID: zha.PROFILE_ID,
54 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
55 INPUT_CLUSTERS: [
56 Basic.cluster_id,
57 Identify.cluster_id,
58 Groups.cluster_id,
59 Scenes.cluster_id,
60 OnOff.cluster_id,
61 LevelControl.cluster_id,
62 Color.cluster_id,
63 LightLink.cluster_id,
64 ],
65 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
66 },
67 242: {
68 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
69 # device_version=0
70 # input_clusters=[]
71 # output_clusters=[33]
72 PROFILE_ID: 41440,
73 DEVICE_TYPE: 97,
74 INPUT_CLUSTERS: [],
75 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
76 },
77 },
78 }
79
80 replacement = {
81 ENDPOINTS: {
82 1: {
83 PROFILE_ID: zha.PROFILE_ID,
84 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
85 INPUT_CLUSTERS: [
86 Basic.cluster_id,
87 Identify.cluster_id,
88 Groups.cluster_id,
89 Scenes.cluster_id,
90 OnOff.cluster_id,
91 LevelControl.cluster_id,
92 LidlCCTColorCluster,
93 LightLink.cluster_id,
94 ],
95 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
96 },
97 242: {
98 PROFILE_ID: 41440,
99 DEVICE_TYPE: 97,
100 INPUT_CLUSTERS: [],
101 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
102 },
103 }
104 }
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py
--- a/zhaquirks/lidl/cct.py
+++ b/zhaquirks/lidl/cct.py
@@ -43,6 +43,7 @@
("_TZ3000_9evm3otq", "TS0502A"),
("_TZ3000_rylaozuc", "TS0502A"),
("_TZ3000_el5kt5im", "TS0502A"),
+ ("_TZ3000_oh7jddmx", "TS0502A"),
],
ENDPOINTS: {
1: {
|
{"golden_diff": "diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py\n--- a/zhaquirks/lidl/cct.py\n+++ b/zhaquirks/lidl/cct.py\n@@ -43,6 +43,7 @@\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n+ (\"_TZ3000_oh7jddmx\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n", "issue": "[Device Support Request] Lidl _TZ3000_oh7jddmx TS0502A\n**Is your feature request related to a problem? Please describe.**\r\nVery similar to #808 I have a LIDL ceiling light panel, which only supports CCT but is incorrectly reported to HA.\r\n\r\n**Describe the solution you'd like**\r\nOnly exposing the color temperature.\r\n\r\n**Device signature**\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(byte1=1, byte2=64, mac_capability_flags=142, manufacturer_code=4098, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=0, *allocate_address=True, *complex_descriptor_available=False, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False, *is_valid=True, *logical_type=<LogicalType.Router: 1>, *user_descriptor_available=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x010c\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x0300\",\r\n \"0x1000\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x000a\",\r\n \"0x0019\"\r\n ]\r\n },\r\n \"242\": {\r\n \"profile_id\": 41440,\r\n \"device_type\": \"0x0061\",\r\n \"in_clusters\": [],\r\n \"out_clusters\": [\r\n \"0x0021\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"_TZ3000_oh7jddmx\",\r\n \"model\": \"TS0502A\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\r\n**Additional context**\r\nI'm assuming adding the signature into\r\nhttps://github.com/zigpy/zha-device-handlers/blob/b180e4f7ab4a096688f4d4ad9b47ac1b3efa9fe2/zhaquirks/lidl/cct.py#L40-L46\r\nwill fix this. ~~I'll test it and open a PR if I find time for it.~~\r\nUpdate: Successfully tested. 
PR opened.\n", "before_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}], "after_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [\n (\"_TZ3000_49qchf10\", \"TS0502A\"),\n (\"_TZ3000_oborybow\", \"TS0502A\"),\n 
(\"_TZ3000_9evm3otq\", \"TS0502A\"),\n (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n (\"_TZ3000_el5kt5im\", \"TS0502A\"),\n (\"_TZ3000_oh7jddmx\", \"TS0502A\"),\n ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}]}
| 1,864 | 165 |
gh_patches_debug_31834 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-881 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix segmentation example
# Before submitting
- [x] Was this discussed/approved via a Github issue? (no need for typos, doc improvements)
- [x] Did you read the [contributor guideline](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/.github/CONTRIBUTING.md)?
- [x] Did you make sure to update the docs?
- [ ] Did you write any new necessary tests?
## What does this PR do?
Fixes #874 (issue).
## PR review
Anyone in the community is free to review the PR once the tests have passed.
## Some comments
I have added a custom model (UNet) instead of the previous torchvision segmentation model as it was causing tests to fail (mostly because those torchvision models were introduced in version 0.3 and it will fail for previous versions).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pl_examples/full_examples/semantic_segmentation/models/unet/model.py`
Content:
```
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 from parts import DoubleConv, Down, Up
6
7
8 class UNet(nn.Module):
9 '''
10 Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation
11 Link - https://arxiv.org/abs/1505.04597
12 '''
13 def __init__(self, num_classes=19, bilinear=False):
14 super().__init__()
15 self.bilinear = bilinear
16 self.num_classes = num_classes
17 self.layer1 = DoubleConv(3, 64)
18 self.layer2 = Down(64, 128)
19 self.layer3 = Down(128, 256)
20 self.layer4 = Down(256, 512)
21 self.layer5 = Down(512, 1024)
22
23 self.layer6 = Up(1024, 512, bilinear=self.bilinear)
24 self.layer7 = Up(512, 256, bilinear=self.bilinear)
25 self.layer8 = Up(256, 128, bilinear=self.bilinear)
26 self.layer9 = Up(128, 64, bilinear=self.bilinear)
27
28 self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)
29
30 def forward(self, x):
31 x1 = self.layer1(x)
32 x2 = self.layer2(x1)
33 x3 = self.layer3(x2)
34 x4 = self.layer4(x3)
35 x5 = self.layer5(x4)
36
37 x6 = self.layer6(x5, x4)
38 x6 = self.layer7(x6, x3)
39 x6 = self.layer8(x6, x2)
40 x6 = self.layer9(x6, x1)
41
42 return self.layer10(x6)
43
```
Path: `pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py`
Content:
```
1 # For relative imports to work in Python 3.6
2 import os
3 import sys
4 sys.path.append(os.path.dirname(os.path.realpath(__file__)))
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py b/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py
deleted file mode 100644
--- a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# For relative imports to work in Python 3.6
-import os
-import sys
-sys.path.append(os.path.dirname(os.path.realpath(__file__)))
diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
--- a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
+++ b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py
@@ -2,30 +2,33 @@
import torch.nn as nn
import torch.nn.functional as F
-from parts import DoubleConv, Down, Up
+from models.unet.parts import DoubleConv, Down, Up
class UNet(nn.Module):
'''
Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation
Link - https://arxiv.org/abs/1505.04597
+
+ Parameters:
+ num_classes (int) - Number of output classes required (default 19 for KITTI dataset)
+ bilinear (bool) - Whether to use bilinear interpolation or transposed
+ convolutions for upsampling.
'''
def __init__(self, num_classes=19, bilinear=False):
super().__init__()
- self.bilinear = bilinear
- self.num_classes = num_classes
self.layer1 = DoubleConv(3, 64)
self.layer2 = Down(64, 128)
self.layer3 = Down(128, 256)
self.layer4 = Down(256, 512)
self.layer5 = Down(512, 1024)
- self.layer6 = Up(1024, 512, bilinear=self.bilinear)
- self.layer7 = Up(512, 256, bilinear=self.bilinear)
- self.layer8 = Up(256, 128, bilinear=self.bilinear)
- self.layer9 = Up(128, 64, bilinear=self.bilinear)
+ self.layer6 = Up(1024, 512, bilinear=bilinear)
+ self.layer7 = Up(512, 256, bilinear=bilinear)
+ self.layer8 = Up(256, 128, bilinear=bilinear)
+ self.layer9 = Up(128, 64, bilinear=bilinear)
- self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)
+ self.layer10 = nn.Conv2d(64, num_classes, kernel_size=1)
def forward(self, x):
x1 = self.layer1(x)
|
{"golden_diff": "diff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py b/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py\ndeleted file mode 100644\n--- a/pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py\n+++ /dev/null\n@@ -1,4 +0,0 @@\n-# For relative imports to work in Python 3.6\n-import os\n-import sys\n-sys.path.append(os.path.dirname(os.path.realpath(__file__)))\ndiff --git a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n--- a/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n+++ b/pl_examples/full_examples/semantic_segmentation/models/unet/model.py\n@@ -2,30 +2,33 @@\n import torch.nn as nn\n import torch.nn.functional as F\n \n-from parts import DoubleConv, Down, Up\n+from models.unet.parts import DoubleConv, Down, Up\n \n \n class UNet(nn.Module):\n '''\n Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation\n Link - https://arxiv.org/abs/1505.04597\n+\n+ Parameters:\n+ num_classes (int) - Number of output classes required (default 19 for KITTI dataset)\n+ bilinear (bool) - Whether to use bilinear interpolation or transposed\n+ convolutions for upsampling.\n '''\n def __init__(self, num_classes=19, bilinear=False):\n super().__init__()\n- self.bilinear = bilinear\n- self.num_classes = num_classes\n self.layer1 = DoubleConv(3, 64)\n self.layer2 = Down(64, 128)\n self.layer3 = Down(128, 256)\n self.layer4 = Down(256, 512)\n self.layer5 = Down(512, 1024)\n \n- self.layer6 = Up(1024, 512, bilinear=self.bilinear)\n- self.layer7 = Up(512, 256, bilinear=self.bilinear)\n- self.layer8 = Up(256, 128, bilinear=self.bilinear)\n- self.layer9 = Up(128, 64, bilinear=self.bilinear)\n+ self.layer6 = Up(1024, 512, bilinear=bilinear)\n+ self.layer7 = Up(512, 256, bilinear=bilinear)\n+ self.layer8 = Up(256, 128, bilinear=bilinear)\n+ self.layer9 = Up(128, 64, bilinear=bilinear)\n \n- self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)\n+ self.layer10 = nn.Conv2d(64, num_classes, kernel_size=1)\n \n def forward(self, x):\n x1 = self.layer1(x)\n", "issue": "Fix segmentation example\n# Before submitting\r\n\r\n- [x] Was this discussed/approved via a Github issue? (no need for typos, doc improvements)\r\n- [x] Did you read the [contributor guideline](https://github.com/PyTorchLightning/pytorch-lightning/blob/master/.github/CONTRIBUTING.md)?\r\n- [x] Did you make sure to update the docs? \r\n- [ ] Did you write any new necessary tests? \r\n\r\n## What does this PR do?\r\nFixes #874 (issue).\r\n\r\n## PR review \r\nAnyone in the community is free to review the PR once the tests have passed. 
\r\n\r\n## Some comments\r\nI have added a custom model (UNet) instead of the previous torchvision segmentation model as it was causing tests to fail (mostly because those torchvision models were introduced in version 0.3 and it will fail for previous versions).\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom parts import DoubleConv, Down, Up\n\n\nclass UNet(nn.Module):\n '''\n Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation\n Link - https://arxiv.org/abs/1505.04597\n '''\n def __init__(self, num_classes=19, bilinear=False):\n super().__init__()\n self.bilinear = bilinear\n self.num_classes = num_classes\n self.layer1 = DoubleConv(3, 64)\n self.layer2 = Down(64, 128)\n self.layer3 = Down(128, 256)\n self.layer4 = Down(256, 512)\n self.layer5 = Down(512, 1024)\n\n self.layer6 = Up(1024, 512, bilinear=self.bilinear)\n self.layer7 = Up(512, 256, bilinear=self.bilinear)\n self.layer8 = Up(256, 128, bilinear=self.bilinear)\n self.layer9 = Up(128, 64, bilinear=self.bilinear)\n\n self.layer10 = nn.Conv2d(64, self.num_classes, kernel_size=1)\n\n def forward(self, x):\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n x5 = self.layer5(x4)\n\n x6 = self.layer6(x5, x4)\n x6 = self.layer7(x6, x3)\n x6 = self.layer8(x6, x2)\n x6 = self.layer9(x6, x1)\n\n return self.layer10(x6)\n", "path": "pl_examples/full_examples/semantic_segmentation/models/unet/model.py"}, {"content": "# For relative imports to work in Python 3.6\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\n", "path": "pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py"}], "after_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.unet.parts import DoubleConv, Down, Up\n\n\nclass UNet(nn.Module):\n '''\n Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation\n Link - https://arxiv.org/abs/1505.04597\n\n Parameters:\n num_classes (int) - Number of output classes required (default 19 for KITTI dataset)\n bilinear (bool) - Whether to use bilinear interpolation or transposed\n convolutions for upsampling.\n '''\n def __init__(self, num_classes=19, bilinear=False):\n super().__init__()\n self.layer1 = DoubleConv(3, 64)\n self.layer2 = Down(64, 128)\n self.layer3 = Down(128, 256)\n self.layer4 = Down(256, 512)\n self.layer5 = Down(512, 1024)\n\n self.layer6 = Up(1024, 512, bilinear=bilinear)\n self.layer7 = Up(512, 256, bilinear=bilinear)\n self.layer8 = Up(256, 128, bilinear=bilinear)\n self.layer9 = Up(128, 64, bilinear=bilinear)\n\n self.layer10 = nn.Conv2d(64, num_classes, kernel_size=1)\n\n def forward(self, x):\n x1 = self.layer1(x)\n x2 = self.layer2(x1)\n x3 = self.layer3(x2)\n x4 = self.layer4(x3)\n x5 = self.layer5(x4)\n\n x6 = self.layer6(x5, x4)\n x6 = self.layer7(x6, x3)\n x6 = self.layer8(x6, x2)\n x6 = self.layer9(x6, x1)\n\n return self.layer10(x6)\n", "path": "pl_examples/full_examples/semantic_segmentation/models/unet/model.py"}, {"content": null, "path": "pl_examples/full_examples/semantic_segmentation/models/unet/__init__.py"}]}
| 1,022 | 698 |
gh_patches_debug_18383 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-430 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch to importlib.metadata for package versions
Python 3.8 comes with the new standard library [module `importlib.metadata`](https://docs.python.org/3.8/library/importlib.metadata.html). This is the new de-facto way of discovering installed package versions.
For older versions there's the [`importlib-metadata` backport](https://importlib-metadata.readthedocs.io/en/latest/).
We currently use `pkg_resources` to do this, we'd be better off long term switching to the backport and standard library module. The backport supports down to Python 2.7 so it'll work for everything we do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/scout_apm/core/metadata.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import datetime as dt
5 import sys
6 from os import getpid
7
8 from scout_apm.core.commands import ApplicationEvent
9 from scout_apm.core.config import scout_config
10 from scout_apm.core.socket import CoreAgentSocket
11
12
13 def report_app_metadata():
14 CoreAgentSocket.instance().send(
15 ApplicationEvent(
16 event_type="scout.metadata",
17 event_value=get_metadata(),
18 source="Pid: " + str(getpid()),
19 timestamp=dt.datetime.utcnow(),
20 )
21 )
22
23
24 def get_metadata():
25 data = {
26 "language": "python",
27 "language_version": "{}.{}.{}".format(*sys.version_info[:3]),
28 "server_time": dt.datetime.utcnow().isoformat() + "Z",
29 "framework": scout_config.value("framework"),
30 "framework_version": scout_config.value("framework_version"),
31 "environment": "",
32 "app_server": scout_config.value("app_server"),
33 "hostname": scout_config.value("hostname"),
34 "database_engine": "",
35 "database_adapter": "",
36 "application_name": "",
37 "libraries": get_python_packages_versions(),
38 "paas": "",
39 "application_root": scout_config.value("application_root"),
40 "scm_subdirectory": scout_config.value("scm_subdirectory"),
41 "git_sha": scout_config.value("revision_sha"),
42 }
43 # Deprecated - see #327:
44 data["version"] = data["language_version"]
45 return data
46
47
48 def get_python_packages_versions():
49 try:
50 import pkg_resources
51 except ImportError:
52 return []
53
54 return sorted(
55 (distribution.project_name, distribution.version)
56 for distribution in pkg_resources.working_set
57 )
58
```
Path: `setup.py`
Content:
```
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5
6 from setuptools import Extension, find_packages, setup
7
8 with open("README.md", "r") as fp:
9 long_description = fp.read()
10
11 packages = find_packages("src")
12 if sys.version_info < (3, 6):
13 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
14
15 compile_extensions = (
16 # Python 3+
17 sys.version_info >= (3,)
18 # Not Jython
19 and not sys.platform.startswith("java")
20 # Not PyPy
21 and "__pypy__" not in sys.builtin_module_names
22 )
23 if compile_extensions:
24 ext_modules = [
25 Extension(
26 str("scout_apm.core._objtrace"), [str("src/scout_apm/core/_objtrace.c")]
27 )
28 ]
29 else:
30 ext_modules = []
31
32 setup(
33 name="scout_apm",
34 version="2.9.1",
35 description="Scout Application Performance Monitoring Agent",
36 long_description=long_description,
37 long_description_content_type="text/markdown",
38 url="https://github.com/scoutapp/scout_apm_python",
39 project_urls={
40 "Documentation": "https://docs.scoutapm.com/#python-agent",
41 "Changelog": (
42 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
43 ),
44 },
45 author="Scout",
46 author_email="[email protected]",
47 license="MIT",
48 zip_safe=False,
49 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
50 packages=packages,
51 package_dir={str(""): str("src")},
52 ext_modules=ext_modules,
53 entry_points={
54 "console_scripts": [
55 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
56 ]
57 },
58 install_requires=[
59 'asgiref ; python_version >= "3.5"',
60 "psutil>=5,<6",
61 "requests>=2,<3",
62 "wrapt>=1.10,<2.0",
63 ],
64 keywords="apm performance monitoring development",
65 classifiers=[
66 "Development Status :: 5 - Production/Stable",
67 "Framework :: Bottle",
68 "Framework :: Django",
69 "Framework :: Django :: 1.8",
70 "Framework :: Django :: 1.9",
71 "Framework :: Django :: 1.10",
72 "Framework :: Django :: 1.11",
73 "Framework :: Django :: 2.0",
74 "Framework :: Django :: 2.1",
75 "Framework :: Django :: 2.2",
76 "Framework :: Django :: 3.0",
77 "Framework :: Flask",
78 "Framework :: Pyramid",
79 "Intended Audience :: Developers",
80 "Topic :: System :: Monitoring",
81 "License :: OSI Approved :: MIT License",
82 "Operating System :: MacOS",
83 "Operating System :: POSIX",
84 "Operating System :: POSIX :: Linux",
85 "Programming Language :: Python :: 2",
86 "Programming Language :: Python :: 2.7",
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.4",
89 "Programming Language :: Python :: 3.5",
90 "Programming Language :: Python :: 3.6",
91 "Programming Language :: Python :: 3.7",
92 "Programming Language :: Python :: 3.8",
93 ],
94 )
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,6 +57,7 @@
},
install_requires=[
'asgiref ; python_version >= "3.5"',
+ 'importlib-metadata ; python_version < "3.8"',
"psutil>=5,<6",
"requests>=2,<3",
"wrapt>=1.10,<2.0",
diff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py
--- a/src/scout_apm/core/metadata.py
+++ b/src/scout_apm/core/metadata.py
@@ -47,11 +47,15 @@
def get_python_packages_versions():
try:
- import pkg_resources
+ if sys.version_info >= (3, 8):
+ from importlib.metadata import distributions
+ else:
+ from importlib_metadata import distributions
except ImportError:
+ # For some reason it is unavailable
return []
return sorted(
- (distribution.project_name, distribution.version)
- for distribution in pkg_resources.working_set
+ (distribution.metadata["Name"], distribution.metadata["Version"])
+ for distribution in distributions()
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,6 +57,7 @@\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n+ 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n \"requests>=2,<3\",\n \"wrapt>=1.10,<2.0\",\ndiff --git a/src/scout_apm/core/metadata.py b/src/scout_apm/core/metadata.py\n--- a/src/scout_apm/core/metadata.py\n+++ b/src/scout_apm/core/metadata.py\n@@ -47,11 +47,15 @@\n \n def get_python_packages_versions():\n try:\n- import pkg_resources\n+ if sys.version_info >= (3, 8):\n+ from importlib.metadata import distributions\n+ else:\n+ from importlib_metadata import distributions\n except ImportError:\n+ # For some reason it is unavailable\n return []\n \n return sorted(\n- (distribution.project_name, distribution.version)\n- for distribution in pkg_resources.working_set\n+ (distribution.metadata[\"Name\"], distribution.metadata[\"Version\"])\n+ for distribution in distributions()\n )\n", "issue": "Switch to importlib.metadata for package versions\nPython 3.8 comes with the new standard library [module `importlib.metadata`](https://docs.python.org/3.8/library/importlib.metadata.html). This is the new de-facto way of discovering installed package versions.\r\n\r\nFor older versions there's the [`importlib-metadata` backport](https://importlib-metadata.readthedocs.io/en/latest/).\r\n\r\nWe currently use `pkg_resources` to do this, we'd be better off long term switching to the backport and standard library module. The backport supports down to Python 2.7 so it'll work for everything we do.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport sys\nfrom os import getpid\n\nfrom scout_apm.core.commands import ApplicationEvent\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.socket import CoreAgentSocket\n\n\ndef report_app_metadata():\n CoreAgentSocket.instance().send(\n ApplicationEvent(\n event_type=\"scout.metadata\",\n event_value=get_metadata(),\n source=\"Pid: \" + str(getpid()),\n timestamp=dt.datetime.utcnow(),\n )\n )\n\n\ndef get_metadata():\n data = {\n \"language\": \"python\",\n \"language_version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n \"server_time\": dt.datetime.utcnow().isoformat() + \"Z\",\n \"framework\": scout_config.value(\"framework\"),\n \"framework_version\": scout_config.value(\"framework_version\"),\n \"environment\": \"\",\n \"app_server\": scout_config.value(\"app_server\"),\n \"hostname\": scout_config.value(\"hostname\"),\n \"database_engine\": \"\",\n \"database_adapter\": \"\",\n \"application_name\": \"\",\n \"libraries\": get_python_packages_versions(),\n \"paas\": \"\",\n \"application_root\": scout_config.value(\"application_root\"),\n \"scm_subdirectory\": scout_config.value(\"scm_subdirectory\"),\n \"git_sha\": scout_config.value(\"revision_sha\"),\n }\n # Deprecated - see #327:\n data[\"version\"] = data[\"language_version\"]\n return data\n\n\ndef get_python_packages_versions():\n try:\n import pkg_resources\n except ImportError:\n return []\n\n return sorted(\n (distribution.project_name, distribution.version)\n for distribution in pkg_resources.working_set\n )\n", "path": "src/scout_apm/core/metadata.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n 
long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n str(\"scout_apm.core._objtrace\"), [str(\"src/scout_apm/core/_objtrace.c\")]\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.9.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n \"psutil>=5,<6\",\n \"requests>=2,<3\",\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport sys\nfrom os import getpid\n\nfrom scout_apm.core.commands import ApplicationEvent\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.socket import CoreAgentSocket\n\n\ndef report_app_metadata():\n CoreAgentSocket.instance().send(\n ApplicationEvent(\n event_type=\"scout.metadata\",\n event_value=get_metadata(),\n source=\"Pid: \" + str(getpid()),\n timestamp=dt.datetime.utcnow(),\n )\n )\n\n\ndef get_metadata():\n data = {\n \"language\": \"python\",\n \"language_version\": \"{}.{}.{}\".format(*sys.version_info[:3]),\n \"server_time\": dt.datetime.utcnow().isoformat() + \"Z\",\n \"framework\": scout_config.value(\"framework\"),\n \"framework_version\": scout_config.value(\"framework_version\"),\n \"environment\": \"\",\n \"app_server\": 
scout_config.value(\"app_server\"),\n \"hostname\": scout_config.value(\"hostname\"),\n \"database_engine\": \"\",\n \"database_adapter\": \"\",\n \"application_name\": \"\",\n \"libraries\": get_python_packages_versions(),\n \"paas\": \"\",\n \"application_root\": scout_config.value(\"application_root\"),\n \"scm_subdirectory\": scout_config.value(\"scm_subdirectory\"),\n \"git_sha\": scout_config.value(\"revision_sha\"),\n }\n # Deprecated - see #327:\n data[\"version\"] = data[\"language_version\"]\n return data\n\n\ndef get_python_packages_versions():\n try:\n if sys.version_info >= (3, 8):\n from importlib.metadata import distributions\n else:\n from importlib_metadata import distributions\n except ImportError:\n # For some reason it is unavailable\n return []\n\n return sorted(\n (distribution.metadata[\"Name\"], distribution.metadata[\"Version\"])\n for distribution in distributions()\n )\n", "path": "src/scout_apm/core/metadata.py"}, {"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n str(\"scout_apm.core._objtrace\"), [str(\"src/scout_apm/core/_objtrace.c\")]\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.9.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n \"requests>=2,<3\",\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n 
\"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
| 1,833 | 277 |
gh_patches_debug_35203
|
rasdani/github-patches
|
git_diff
|
acl-org__acl-anthology-255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How to annotate first initials in XML
@mjpost and I have been discussing offline: When an author's name is written using a first initial on the paper itself, but the complete first name is known (at least probably), how should it be written in the XML? Some ideas (others are possible):
```
a) <author><first>Matt</first><last>Post</last></author>
b) <author><first>M[att]</first><last>Post</last></author>
c) <author><first complete="Matt">M.</first><last>Post</last></author>
d) <author><first initials="M.">Matt</first><last>Post</last></author>
e) <author><first>M<completion>att</completion></first><last>Post</last></author>
```
We have a big influx of these, thanks to the hard work of the LREC organizers supplying complete first names, so if the answer is anything other than (a), it would be nice to decide before merging in the new ones.
JATS uses (d) but without the period (https://jats.nlm.nih.gov/archiving/tag-library/1.1d1/n-zu82.html).
Some special cases to consider:
- Initials are not always initials: `Wm.` to `William`, `R.` to `Bob`
- Middle initials, double first names, second last names, etc. can be expanded from initials as well.
- ADDED: Possibly related, possibly a can of worms: There are some (but not many) papers where the author's name is written _only_ in a non-Latin script (e.g., https://www.aclweb.org/anthology/O00-1008), so the editor has filled in a transliteration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/check_name_variants.py`
Content:
```
1 import yaml
2 import sys
3 import lxml.etree as etree
4
5 person_fields = {'canonical', 'variants', 'comment'}
6 name_fields = {'first', 'last', 'papers'}
7
8 def text(node):
9 """Extract text from an XML node."""
10 if node is None: return ''
11 s = ''.join(node.itertext())
12 return ' '.join(s.split())
13
14 def name(d):
15 return (d['first'], d['last'])
16
17 if len(sys.argv) > 2:
18 names = set()
19 for xmlfilename in sys.argv[2:]:
20 try:
21 tree = etree.parse(xmlfilename)
22 except:
23 print(xmlfilename)
24 raise
25 for paper in tree.getroot().findall('paper'):
26 for person in paper.xpath('./author|./editor'):
27 first = text(person.find('first'))
28 last = text(person.find('last'))
29 names.add((first,last))
30 else:
31 names = None
32
33 doc = yaml.load(open(sys.argv[1]))
34
35 assert isinstance(doc, list)
36 for person in doc:
37 assert isinstance(person, dict), person
38 assert set(person.keys()).issubset(person_fields), person
39 assert 'canonical' in person, person
40 assert isinstance(person['canonical'], dict), person
41 assert set(person['canonical']).issubset(name_fields), person
42 if names is not None and name(person['canonical']) not in names:
43 print('unused name', person['canonical'])
44 dupes = {name(person['canonical'])}
45 assert 'variants' in person, person
46 assert isinstance(person['variants'], list), person
47 for variant in person['variants']:
48 assert set(variant).issubset(name_fields), person
49 if names is not None and name(variant) not in names:
50 print('unused name', variant)
51 assert name(variant) not in dupes, variant
52 dupes.add(name(variant))
53
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bin/check_name_variants.py b/bin/check_name_variants.py
--- a/bin/check_name_variants.py
+++ b/bin/check_name_variants.py
@@ -1,14 +1,19 @@
import yaml
import sys
import lxml.etree as etree
+import logging
person_fields = {'canonical', 'variants', 'comment'}
name_fields = {'first', 'last', 'papers'}
def text(node):
"""Extract text from an XML node."""
- if node is None: return ''
- s = ''.join(node.itertext())
+ if node is None:
+ return ''
+ if 'complete' in node.attrib:
+ s = node.attrib['complete']
+ else:
+ s = ''.join(node.itertext())
return ' '.join(s.split())
def name(d):
@@ -20,7 +25,7 @@
try:
tree = etree.parse(xmlfilename)
except:
- print(xmlfilename)
+ logging.error("couldn't parse {}".format(xmlfilename))
raise
for paper in tree.getroot().findall('paper'):
for person in paper.xpath('./author|./editor'):
@@ -40,14 +45,15 @@
assert isinstance(person['canonical'], dict), person
assert set(person['canonical']).issubset(name_fields), person
if names is not None and name(person['canonical']) not in names:
- print('unused name', person['canonical'])
+ logging.warning('unused name: {}'.format(person['canonical']))
dupes = {name(person['canonical'])}
assert 'variants' in person, person
assert isinstance(person['variants'], list), person
for variant in person['variants']:
assert set(variant).issubset(name_fields), person
if names is not None and name(variant) not in names:
- print('unused name', variant)
+ logging.warning('unused name: {}'.format(variant))
assert name(variant) not in dupes, variant
dupes.add(name(variant))
+print(yaml.dump(doc, allow_unicode=True))
|
{"golden_diff": "diff --git a/bin/check_name_variants.py b/bin/check_name_variants.py\n--- a/bin/check_name_variants.py\n+++ b/bin/check_name_variants.py\n@@ -1,14 +1,19 @@\n import yaml\n import sys\n import lxml.etree as etree\n+import logging\n \n person_fields = {'canonical', 'variants', 'comment'}\n name_fields = {'first', 'last', 'papers'}\n \n def text(node):\n \"\"\"Extract text from an XML node.\"\"\"\n- if node is None: return ''\n- s = ''.join(node.itertext())\n+ if node is None:\n+ return ''\n+ if 'complete' in node.attrib:\n+ s = node.attrib['complete']\n+ else:\n+ s = ''.join(node.itertext())\n return ' '.join(s.split())\n \n def name(d):\n@@ -20,7 +25,7 @@\n try:\n tree = etree.parse(xmlfilename)\n except:\n- print(xmlfilename)\n+ logging.error(\"couldn't parse {}\".format(xmlfilename))\n raise\n for paper in tree.getroot().findall('paper'):\n for person in paper.xpath('./author|./editor'):\n@@ -40,14 +45,15 @@\n assert isinstance(person['canonical'], dict), person\n assert set(person['canonical']).issubset(name_fields), person\n if names is not None and name(person['canonical']) not in names:\n- print('unused name', person['canonical'])\n+ logging.warning('unused name: {}'.format(person['canonical']))\n dupes = {name(person['canonical'])}\n assert 'variants' in person, person\n assert isinstance(person['variants'], list), person\n for variant in person['variants']:\n assert set(variant).issubset(name_fields), person\n if names is not None and name(variant) not in names:\n- print('unused name', variant)\n+ logging.warning('unused name: {}'.format(variant))\n assert name(variant) not in dupes, variant\n dupes.add(name(variant))\n \n+print(yaml.dump(doc, allow_unicode=True))\n", "issue": "How to annotate first initials in XML\n@mjpost and I have been discussing offline: When an author's name is written using a first initial on the paper itself, but the complete first name is known (at least probably), how should it be written in the XML? Some ideas (others are possible):\r\n\r\n```\r\na) <author><first>Matt</first><last>Post</last></author>\r\nb) <author><first>M[att]</first><last>Post</last></author>\r\nc) <author><first complete=\"Matt\">M.</first><last>Post</last></author>\r\nd) <author><first initials=\"M.\">Matt</first><last>Post</last></author>\r\ne) <author><first>M<completion>att</completion></first><last>Post</last></author>\r\n```\r\n\r\nWe have a big influx of these, thanks to the hard work of the LREC organizers supplying complete first names, so if the answer is anything other than (a), it would be nice to decide before merging in the new ones.\r\n\r\nJATS uses (d) but without the period (https://jats.nlm.nih.gov/archiving/tag-library/1.1d1/n-zu82.html).\r\n\r\nSome special cases to consider:\r\n\r\n- Initials are not always initials: `Wm.` to `William`, `R.` to `Bob`\r\n- Middle initials, double first names, second last names, etc. 
can be expanded from initials as well.\r\n- ADDED: Possibly related, possibly a can of worms: There are some (but not many) papers where the author's name is written _only_ in a non-Latin script (e.g., https://www.aclweb.org/anthology/O00-1008), so the editor has filled in a transliteration.\n", "before_files": [{"content": "import yaml\nimport sys\nimport lxml.etree as etree\n\nperson_fields = {'canonical', 'variants', 'comment'}\nname_fields = {'first', 'last', 'papers'}\n\ndef text(node):\n \"\"\"Extract text from an XML node.\"\"\"\n if node is None: return ''\n s = ''.join(node.itertext())\n return ' '.join(s.split())\n\ndef name(d):\n return (d['first'], d['last'])\n\nif len(sys.argv) > 2:\n names = set()\n for xmlfilename in sys.argv[2:]:\n try:\n tree = etree.parse(xmlfilename)\n except:\n print(xmlfilename)\n raise\n for paper in tree.getroot().findall('paper'):\n for person in paper.xpath('./author|./editor'):\n first = text(person.find('first'))\n last = text(person.find('last'))\n names.add((first,last))\nelse:\n names = None\n\ndoc = yaml.load(open(sys.argv[1]))\n\nassert isinstance(doc, list)\nfor person in doc:\n assert isinstance(person, dict), person\n assert set(person.keys()).issubset(person_fields), person\n assert 'canonical' in person, person\n assert isinstance(person['canonical'], dict), person\n assert set(person['canonical']).issubset(name_fields), person\n if names is not None and name(person['canonical']) not in names:\n print('unused name', person['canonical'])\n dupes = {name(person['canonical'])}\n assert 'variants' in person, person\n assert isinstance(person['variants'], list), person\n for variant in person['variants']:\n assert set(variant).issubset(name_fields), person\n if names is not None and name(variant) not in names:\n print('unused name', variant)\n assert name(variant) not in dupes, variant\n dupes.add(name(variant))\n \n", "path": "bin/check_name_variants.py"}], "after_files": [{"content": "import yaml\nimport sys\nimport lxml.etree as etree\nimport logging\n\nperson_fields = {'canonical', 'variants', 'comment'}\nname_fields = {'first', 'last', 'papers'}\n\ndef text(node):\n \"\"\"Extract text from an XML node.\"\"\"\n if node is None:\n return ''\n if 'complete' in node.attrib:\n s = node.attrib['complete']\n else:\n s = ''.join(node.itertext())\n return ' '.join(s.split())\n\ndef name(d):\n return (d['first'], d['last'])\n\nif len(sys.argv) > 2:\n names = set()\n for xmlfilename in sys.argv[2:]:\n try:\n tree = etree.parse(xmlfilename)\n except:\n logging.error(\"couldn't parse {}\".format(xmlfilename))\n raise\n for paper in tree.getroot().findall('paper'):\n for person in paper.xpath('./author|./editor'):\n first = text(person.find('first'))\n last = text(person.find('last'))\n names.add((first,last))\nelse:\n names = None\n\ndoc = yaml.load(open(sys.argv[1]))\n\nassert isinstance(doc, list)\nfor person in doc:\n assert isinstance(person, dict), person\n assert set(person.keys()).issubset(person_fields), person\n assert 'canonical' in person, person\n assert isinstance(person['canonical'], dict), person\n assert set(person['canonical']).issubset(name_fields), person\n if names is not None and name(person['canonical']) not in names:\n logging.warning('unused name: {}'.format(person['canonical']))\n dupes = {name(person['canonical'])}\n assert 'variants' in person, person\n assert isinstance(person['variants'], list), person\n for variant in person['variants']:\n assert set(variant).issubset(name_fields), person\n if names is not None and 
name(variant) not in names:\n logging.warning('unused name: {}'.format(variant))\n assert name(variant) not in dupes, variant\n dupes.add(name(variant))\n \nprint(yaml.dump(doc, allow_unicode=True))\n", "path": "bin/check_name_variants.py"}]}
| 1,142 | 457 |
gh_patches_debug_10203
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-746
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set up integration testing environment
## Problem
We need to setup an integration test environment for Mathesar, to test all user action scenarios.
## Proposed solution
Based on the discussions we've had previously (#89), the most suitable choice for us is [Playwright](https://playwright.dev/).
Integ tests require a complete mathesar setup, including a database. We should be able to test `user action -> api calls -> changes in db`.
## Additional context
- #89
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `install.py`
Content:
```
1 """
2 This script installs functions and types for Mathesar onto the configured DB.
3 """
4 from config.settings import DATABASES
5 from db import install
6 import sys
7 import getopt
8
9
10 def main():
11 skip_confirm = False
12 (opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"])
13 for (opt, value) in opts:
14 if (opt == "-s") or (opt == "--skip-confirm"):
15 skip_confirm = True
16 for database_key in [key for key in DATABASES if key != "default"]:
17 install_on_db_with_key(database_key, skip_confirm)
18
19
20 def install_on_db_with_key(database_key, skip_confirm):
21 if DATABASES[database_key]["HOST"] == "mathesar_db":
22 # if we're going to install on the docker-created Postgres, we'll
23 # create the DB
24 print("Creating Mathesar DB on docker-created PostgreSQL instance")
25 install.create_mathesar_database(
26 DATABASES[database_key]["NAME"],
27 DATABASES["default"]["USER"],
28 DATABASES["default"]["PASSWORD"],
29 DATABASES["default"]["HOST"],
30 DATABASES["default"]["NAME"],
31 DATABASES["default"]["PORT"],
32 )
33 print(f"Created DB is {DATABASES['mathesar_tables']['NAME']}")
34 else:
35 # if we're installing anywhere else, we require the DB to exist in
36 # advance.
37 username = DATABASES[database_key]["USER"]
38 password = DATABASES[database_key]["PASSWORD"]
39 host = DATABASES[database_key]["HOST"]
40 db_name = DATABASES[database_key]["NAME"]
41 port = DATABASES[database_key]["PORT"]
42 print("Installing Mathesar DB on preexisting PostgreSQL instance...")
43 if skip_confirm is False:
44 confirmation = input(
45 f"Mathesar will be installed on DB {db_name} at host {host}."
46 "Confirm? (y/n) > "
47 )
48 if (confirmation.lower() in ["y", "yes"]) or (skip_confirm is True):
49 print("Installing...")
50 install.install_mathesar_on_preexisting_database(
51 username,
52 password,
53 host,
54 db_name,
55 port,
56 )
57 else:
58 print("Skipping DB with key {database_key}.")
59
60
61 if __name__ == "__main__":
62 main()
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/install.py b/install.py
--- a/install.py
+++ b/install.py
@@ -39,7 +39,7 @@
host = DATABASES[database_key]["HOST"]
db_name = DATABASES[database_key]["NAME"]
port = DATABASES[database_key]["PORT"]
- print("Installing Mathesar DB on preexisting PostgreSQL instance...")
+ print(f"Installing Mathesar DB {db_name} on preexisting PostgreSQL instance at host {host}...")
if skip_confirm is False:
confirmation = input(
f"Mathesar will be installed on DB {db_name} at host {host}."
|
{"golden_diff": "diff --git a/install.py b/install.py\n--- a/install.py\n+++ b/install.py\n@@ -39,7 +39,7 @@\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n- print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n+ print(f\"Installing Mathesar DB {db_name} on preexisting PostgreSQL instance at host {host}...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n", "issue": "Set up integration testing environment\n## Problem\r\nWe need to setup an integration test environment for Mathesar, to test all user action scenarios.\r\n\r\n## Proposed solution\r\nBased on the discussions we've had previously (#89), the most suitable choice for us is [Playwright](https://playwright.dev/).\r\n\r\nInteg tests require a complete mathesar setup, including a database. We should be able to test `user action -> api calls -> changes in db`.\r\n\r\n## Additional context\r\n- #89 \n", "before_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nfrom config.settings import DATABASES\nfrom db import install\nimport sys\nimport getopt\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n for database_key in [key for key in DATABASES if key != \"default\"]:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n if DATABASES[database_key][\"HOST\"] == \"mathesar_db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n install.create_mathesar_database(\n DATABASES[database_key][\"NAME\"],\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n DATABASES[\"default\"][\"NAME\"],\n DATABASES[\"default\"][\"PORT\"],\n )\n print(f\"Created DB is {DATABASES['mathesar_tables']['NAME']}\")\n else:\n # if we're installing anywhere else, we require the DB to exist in\n # advance.\n username = DATABASES[database_key][\"USER\"]\n password = DATABASES[database_key][\"PASSWORD\"]\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n \"Confirm? 
(y/n) > \"\n )\n if (confirmation.lower() in [\"y\", \"yes\"]) or (skip_confirm is True):\n print(\"Installing...\")\n install.install_mathesar_on_preexisting_database(\n username,\n password,\n host,\n db_name,\n port,\n )\n else:\n print(\"Skipping DB with key {database_key}.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}], "after_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nfrom config.settings import DATABASES\nfrom db import install\nimport sys\nimport getopt\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n for database_key in [key for key in DATABASES if key != \"default\"]:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n if DATABASES[database_key][\"HOST\"] == \"mathesar_db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n install.create_mathesar_database(\n DATABASES[database_key][\"NAME\"],\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n DATABASES[\"default\"][\"NAME\"],\n DATABASES[\"default\"][\"PORT\"],\n )\n print(f\"Created DB is {DATABASES['mathesar_tables']['NAME']}\")\n else:\n # if we're installing anywhere else, we require the DB to exist in\n # advance.\n username = DATABASES[database_key][\"USER\"]\n password = DATABASES[database_key][\"PASSWORD\"]\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n print(f\"Installing Mathesar DB {db_name} on preexisting PostgreSQL instance at host {host}...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n \"Confirm? (y/n) > \"\n )\n if (confirmation.lower() in [\"y\", \"yes\"]) or (skip_confirm is True):\n print(\"Installing...\")\n install.install_mathesar_on_preexisting_database(\n username,\n password,\n host,\n db_name,\n port,\n )\n else:\n print(\"Skipping DB with key {database_key}.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}]}
| 967 | 136 |
gh_patches_debug_29514
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-250
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No warning/help text when password entered incorrectly
When I try to log in with the wrong log in details, I don't get a message informing me what didn't work - i.e. wrong password/log in (Firefox, 47 on Mac).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/user_management/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import authenticate, get_user_model
3 from django.contrib.auth.hashers import make_password
4 from django.core.exceptions import ValidationError
5 from django.utils.translation import ugettext as _
6
7 from .models import Registration, Reset
8
9 User = get_user_model()
10
11
12 class LoginForm(forms.Form):
13 email = forms.EmailField(max_length=255, required=True)
14 password = forms.CharField(widget=forms.PasswordInput, required=True)
15
16 def clean(self):
17 email = self.cleaned_data.get('email')
18 password = self.cleaned_data.get('password')
19 user = authenticate(username=email, password=password)
20 if not user or not user.is_active:
21 raise ValidationError(_('password mismatch'))
22 return self.cleaned_data
23
24 def login(self, request):
25 email = self.cleaned_data.get('email')
26 password = self.cleaned_data.get('password')
27 user = authenticate(username=email, password=password)
28 return user
29
30
31 class RegisterForm(forms.Form):
32 email = forms.EmailField(max_length=255, required=True)
33 username = forms.CharField(max_length=255, required=True)
34 password = forms.CharField(
35 widget=forms.PasswordInput,
36 min_length=8,
37 required=True)
38 password_repeat = forms.CharField(
39 widget=forms.PasswordInput, required=True)
40
41 def clean_password_repeat(self):
42 password1 = self.cleaned_data.get('password')
43 password2 = self.cleaned_data.get('password_repeat')
44 if password1 != password2:
45 raise ValidationError(_('passwords dont match'))
46 return password2
47
48 def clean_username(self):
49 username = self.cleaned_data.get('username')
50 user_exists = User.objects.filter(
51 username=username).first() is not None
52 register_exits = Registration.objects.filter(
53 username=username).first() is not None
54
55 if user_exists or register_exits:
56 raise ValidationError(_('username taken'))
57 return username
58
59 def clean_email(self):
60 email = self.cleaned_data.get('email')
61 user_exists = User.objects.filter(email=email).first()
62 register_exists = Registration.objects.filter(email=email).first()
63 if user_exists or register_exists:
64 raise ValidationError(_('email in use'))
65 return email
66
67 def register(self, request):
68 username = self.cleaned_data.get('username')
69 password = self.cleaned_data.get('password')
70 email = self.cleaned_data.get('email')
71 registration = Registration(username=username,
72 email=email,
73 password=make_password(password))
74 return registration
75
76
77 class ActivateForm(forms.Form):
78 token = forms.UUIDField(widget=forms.HiddenInput(), required=True)
79
80 def clean_token(self):
81 token = self.cleaned_data.get('token')
82 registration = Registration.objects.filter(token=token).first()
83 if not registration:
84 raise ValidationError(_('invalid token'))
85 else:
86 self.cleaned_data['registration'] = registration
87 return token
88
89 def activate(self, request):
90 registration = self.cleaned_data.get('registration')
91 user = User(username=registration.username,
92 email=registration.email,
93 password=registration.password)
94 return user, registration
95
96
97 class RequestResetForm(forms.Form):
98 username_or_email = forms.CharField(max_length=255)
99
100 def clean_username_or_email(self):
101 username_or_email = self.cleaned_data.get('username_or_email')
102 user = (User.objects.filter(username=username_or_email).first() or
103 User.objects.filter(email=username_or_email).first())
104 if not user:
105 raise ValidationError(_('unkown user'))
106 else:
107 self.cleaned_data['user'] = user
108 return username_or_email
109
110 def request_reset(self, request):
111 user = self.cleaned_data.get('user')
112 return Reset(user=user)
113
114
115 class ResetForm(forms.Form):
116 token = forms.UUIDField(widget=forms.HiddenInput(), required=True)
117 password = forms.CharField(
118 widget=forms.PasswordInput,
119 min_length=8,
120 required=True)
121 password_repeat = forms.CharField(
122 widget=forms.PasswordInput,
123 required=True)
124
125 def clean_token(self):
126 token = self.cleaned_data.get('token')
127 reset = Reset.objects.filter(token=token).first()
128 if not reset:
129 ValidationError(_('invalid token'))
130 else:
131 self.cleaned_data['reset'] = reset
132 return token
133
134 def clean_password_repeat(self):
135 password1 = self.cleaned_data.get('password')
136 password2 = self.cleaned_data.get('password_repeat')
137 if password1 != password2:
138 raise ValidationError(_('passwords dont match'))
139 return password2
140
141 def reset_password(self, request):
142 reset = self.cleaned_data.get('reset')
143 password = self.cleaned_data.get('password')
144 user = reset.user
145 user.password = make_password(password)
146 return user, reset
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/user_management/forms.py b/euth/user_management/forms.py
--- a/euth/user_management/forms.py
+++ b/euth/user_management/forms.py
@@ -15,6 +15,12 @@
def clean(self):
email = self.cleaned_data.get('email')
+ if email and not User.objects.filter(email=email):
+ if Registration.objects.filter(email=email):
+ raise ValidationError(_('account not activated'))
+ else:
+ raise ValidationError(_('account doesn\'t exist'))
+
password = self.cleaned_data.get('password')
user = authenticate(username=email, password=password)
if not user or not user.is_active:
@@ -42,7 +48,7 @@
password1 = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password_repeat')
if password1 != password2:
- raise ValidationError(_('passwords dont match'))
+ raise ValidationError(_('passwords don\'t match'))
return password2
def clean_username(self):
@@ -134,8 +140,8 @@
def clean_password_repeat(self):
password1 = self.cleaned_data.get('password')
password2 = self.cleaned_data.get('password_repeat')
- if password1 != password2:
- raise ValidationError(_('passwords dont match'))
+ if password1 and password1 != password2:
+ raise ValidationError(_('passwords don\'t match'))
return password2
def reset_password(self, request):
|
{"golden_diff": "diff --git a/euth/user_management/forms.py b/euth/user_management/forms.py\n--- a/euth/user_management/forms.py\n+++ b/euth/user_management/forms.py\n@@ -15,6 +15,12 @@\n \n def clean(self):\n email = self.cleaned_data.get('email')\n+ if email and not User.objects.filter(email=email):\n+ if Registration.objects.filter(email=email):\n+ raise ValidationError(_('account not activated'))\n+ else:\n+ raise ValidationError(_('account doesn\\'t exist'))\n+\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n if not user or not user.is_active:\n@@ -42,7 +48,7 @@\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n- raise ValidationError(_('passwords dont match'))\n+ raise ValidationError(_('passwords don\\'t match'))\n return password2\n \n def clean_username(self):\n@@ -134,8 +140,8 @@\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n- if password1 != password2:\n- raise ValidationError(_('passwords dont match'))\n+ if password1 and password1 != password2:\n+ raise ValidationError(_('passwords don\\'t match'))\n return password2\n \n def reset_password(self, request):\n", "issue": "No warning/help text when password entered incorrectly\nWhen I try to log in with the wrong log in details, I don't get a message informing me what didn't work - i.e. wrong password/log in (Firefox, 47 on Mac).\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Registration, Reset\n\nUser = get_user_model()\n\n\nclass LoginForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n password = forms.CharField(widget=forms.PasswordInput, required=True)\n\n def clean(self):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n if not user or not user.is_active:\n raise ValidationError(_('password mismatch'))\n return self.cleaned_data\n\n def login(self, request):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n return user\n\n\nclass RegisterForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n username = forms.CharField(max_length=255, required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput, required=True)\n\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n raise ValidationError(_('passwords dont match'))\n return password2\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n user_exists = User.objects.filter(\n username=username).first() is not None\n register_exits = Registration.objects.filter(\n username=username).first() is not None\n\n if user_exists or register_exits:\n raise ValidationError(_('username taken'))\n return username\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n user_exists = 
User.objects.filter(email=email).first()\n register_exists = Registration.objects.filter(email=email).first()\n if user_exists or register_exists:\n raise ValidationError(_('email in use'))\n return email\n\n def register(self, request):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n email = self.cleaned_data.get('email')\n registration = Registration(username=username,\n email=email,\n password=make_password(password))\n return registration\n\n\nclass ActivateForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n registration = Registration.objects.filter(token=token).first()\n if not registration:\n raise ValidationError(_('invalid token'))\n else:\n self.cleaned_data['registration'] = registration\n return token\n\n def activate(self, request):\n registration = self.cleaned_data.get('registration')\n user = User(username=registration.username,\n email=registration.email,\n password=registration.password)\n return user, registration\n\n\nclass RequestResetForm(forms.Form):\n username_or_email = forms.CharField(max_length=255)\n\n def clean_username_or_email(self):\n username_or_email = self.cleaned_data.get('username_or_email')\n user = (User.objects.filter(username=username_or_email).first() or\n User.objects.filter(email=username_or_email).first())\n if not user:\n raise ValidationError(_('unkown user'))\n else:\n self.cleaned_data['user'] = user\n return username_or_email\n\n def request_reset(self, request):\n user = self.cleaned_data.get('user')\n return Reset(user=user)\n\n\nclass ResetForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput,\n required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n reset = Reset.objects.filter(token=token).first()\n if not reset:\n ValidationError(_('invalid token'))\n else:\n self.cleaned_data['reset'] = reset\n return token\n\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n raise ValidationError(_('passwords dont match'))\n return password2\n\n def reset_password(self, request):\n reset = self.cleaned_data.get('reset')\n password = self.cleaned_data.get('password')\n user = reset.user\n user.password = make_password(password)\n return user, reset\n", "path": "euth/user_management/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate, get_user_model\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Registration, Reset\n\nUser = get_user_model()\n\n\nclass LoginForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n password = forms.CharField(widget=forms.PasswordInput, required=True)\n\n def clean(self):\n email = self.cleaned_data.get('email')\n if email and not User.objects.filter(email=email):\n if Registration.objects.filter(email=email):\n raise ValidationError(_('account not activated'))\n else:\n raise ValidationError(_('account doesn\\'t exist'))\n\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, 
password=password)\n if not user or not user.is_active:\n raise ValidationError(_('password mismatch'))\n return self.cleaned_data\n\n def login(self, request):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n user = authenticate(username=email, password=password)\n return user\n\n\nclass RegisterForm(forms.Form):\n email = forms.EmailField(max_length=255, required=True)\n username = forms.CharField(max_length=255, required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput, required=True)\n\n def clean_password_repeat(self):\n password1 = self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 != password2:\n raise ValidationError(_('passwords don\\'t match'))\n return password2\n\n def clean_username(self):\n username = self.cleaned_data.get('username')\n user_exists = User.objects.filter(\n username=username).first() is not None\n register_exits = Registration.objects.filter(\n username=username).first() is not None\n\n if user_exists or register_exits:\n raise ValidationError(_('username taken'))\n return username\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n user_exists = User.objects.filter(email=email).first()\n register_exists = Registration.objects.filter(email=email).first()\n if user_exists or register_exists:\n raise ValidationError(_('email in use'))\n return email\n\n def register(self, request):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n email = self.cleaned_data.get('email')\n registration = Registration(username=username,\n email=email,\n password=make_password(password))\n return registration\n\n\nclass ActivateForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n registration = Registration.objects.filter(token=token).first()\n if not registration:\n raise ValidationError(_('invalid token'))\n else:\n self.cleaned_data['registration'] = registration\n return token\n\n def activate(self, request):\n registration = self.cleaned_data.get('registration')\n user = User(username=registration.username,\n email=registration.email,\n password=registration.password)\n return user, registration\n\n\nclass RequestResetForm(forms.Form):\n username_or_email = forms.CharField(max_length=255)\n\n def clean_username_or_email(self):\n username_or_email = self.cleaned_data.get('username_or_email')\n user = (User.objects.filter(username=username_or_email).first() or\n User.objects.filter(email=username_or_email).first())\n if not user:\n raise ValidationError(_('unkown user'))\n else:\n self.cleaned_data['user'] = user\n return username_or_email\n\n def request_reset(self, request):\n user = self.cleaned_data.get('user')\n return Reset(user=user)\n\n\nclass ResetForm(forms.Form):\n token = forms.UUIDField(widget=forms.HiddenInput(), required=True)\n password = forms.CharField(\n widget=forms.PasswordInput,\n min_length=8,\n required=True)\n password_repeat = forms.CharField(\n widget=forms.PasswordInput,\n required=True)\n\n def clean_token(self):\n token = self.cleaned_data.get('token')\n reset = Reset.objects.filter(token=token).first()\n if not reset:\n ValidationError(_('invalid token'))\n else:\n self.cleaned_data['reset'] = reset\n return token\n\n def clean_password_repeat(self):\n password1 = 
self.cleaned_data.get('password')\n password2 = self.cleaned_data.get('password_repeat')\n if password1 and password1 != password2:\n raise ValidationError(_('passwords don\\'t match'))\n return password2\n\n def reset_password(self, request):\n reset = self.cleaned_data.get('reset')\n password = self.cleaned_data.get('password')\n user = reset.user\n user.password = make_password(password)\n return user, reset\n", "path": "euth/user_management/forms.py"}]}
| 1,640 | 317 |
gh_patches_debug_17
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-2071
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update data on the Ebola map
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/version.py`
Content:
```
1 hdx_version = 'v0.5.13'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.13'
+hdx_version = 'v0.5.14'
|
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.14'\n", "issue": "Update data on the Ebola map\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}], "after_files": [{"content": "hdx_version = 'v0.5.14'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
| 291 | 108 |
gh_patches_debug_3433
|
rasdani/github-patches
|
git_diff
|
facebookresearch__mmf-74
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ERROR: 'BaseTrainer' object has no attribute 'args'
I followed the instructions on the https://learnpythia.readthedocs.io/ to setup. I seem to have everything installed, and I could run the collab notebook locally.
However, when I am trying to run the below command to try out inferences:
python tools/run.py --tasks vqa --datasets textvqa --model lorra --config configs/vqa/textvqa/lorra.yml \
--run_type val --evalai_inference 1 --resume_file data/models/lorra_best.pth
I am getting below error:
}
2019-05-29T20:25:41 INFO: Loading tasks and data
2019-05-29T20:25:44 INFO: CUDA Device 0 is: GeForce GTX 1080 Ti
2019-05-29T20:25:46 INFO: Torch version is: 1.0.0
2019-05-29T20:25:46 ERROR: 'BaseTrainer' object has no attribute 'args'
Traceback (most recent call last):
File "tools/run.py", line 94, in <module>
run()
File "tools/run.py", line 82, in run
trainer.load()
File "/root/pythia/pythia/trainers/base_trainer.py", line 46, in load
self.load_extras()
File "/root/pythia/pythia/trainers/base_trainer.py", line 139, in load_extras
self.checkpoint = Checkpoint(self)
File "/root/pythia/pythia/utils/checkpoint.py", line 28, in __init__
self.ckpt_foldername += foldername_from_config_override(self.trainer.args)
AttributeError: 'BaseTrainer' object has no attribute 'args'
I seem to be doing something wrong, any help would be great. I didn't change any code or config yet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythia/utils/build_utils.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 import torch
3 import warnings
4
5 from pythia.utils.configuration import Configuration
6 from pythia.common.registry import registry
7 from pythia.utils.general import get_optimizer_parameters
8
9
10 def build_trainer(args, *rest, **kwargs):
11 configuration = Configuration(args.config)
12
13 # Update with the config override if passed
14 configuration.override_with_cmd_config(args.config_override)
15
16 # Now, update with opts args that were passed
17 configuration.override_with_cmd_opts(args.opts)
18
19 # Finally, update with args that were specifically passed
20 # as arguments
21 configuration.update_with_args(args)
22 configuration.freeze()
23
24 config = configuration.get_config()
25 registry.register("config", config)
26 registry.register("configuration", configuration)
27
28 trainer_type = config.training_parameters.trainer
29 trainer_cls = registry.get_trainer_class(trainer_type)
30 return trainer_cls(config)
31
32
33 def build_model(config):
34 model_name = config.model
35
36 model_class = registry.get_model_class(model_name)
37
38 if model_class is None:
39 registry.get("writer").write("No model registered for name: %s" % model_name)
40 model = model_class(config)
41
42 if hasattr(model, "build"):
43 model.build()
44 model.init_losses_and_metrics()
45
46 return model
47
48
49 def build_optimizer(model, config):
50 optimizer_config = config.optimizer_attributes
51 if not hasattr(optimizer_config, "type"):
52 raise ValueError(
53 "Optimizer attributes must have a 'type' key "
54 "specifying the type of optimizer. "
55 "(Custom or PyTorch)"
56 )
57 optimizer_type = optimizer_config.type
58
59 if not hasattr(optimizer_config, "params"):
60 warnings.warn(
61 "optimizer attributes has no params defined, defaulting to {}."
62 )
63
64 params = getattr(optimizer_config, "params", {})
65
66 if hasattr(torch.optim, optimizer_type):
67 optimizer_class = getattr(torch.optim, optimizer_type)
68 else:
69 optimizer_class = registry.get_optimizer_class(optimizer_type)
70 if optimizer_class is None:
71 raise ValueError(
72 "No optimizer class of type {} present in "
73 "either torch or registered to registry"
74 )
75
76 parameters = get_optimizer_parameters(model, config)
77 optimizer = optimizer_class(parameters, **params)
78 return optimizer
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pythia/utils/build_utils.py b/pythia/utils/build_utils.py
--- a/pythia/utils/build_utils.py
+++ b/pythia/utils/build_utils.py
@@ -27,7 +27,12 @@
trainer_type = config.training_parameters.trainer
trainer_cls = registry.get_trainer_class(trainer_type)
- return trainer_cls(config)
+ trainer_obj = trainer_cls(config)
+
+ # Set args as an attribute for future use
+ setattr(trainer_obj, 'args', args)
+
+ return trainer_obj
def build_model(config):
|
{"golden_diff": "diff --git a/pythia/utils/build_utils.py b/pythia/utils/build_utils.py\n--- a/pythia/utils/build_utils.py\n+++ b/pythia/utils/build_utils.py\n@@ -27,7 +27,12 @@\n \n trainer_type = config.training_parameters.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n- return trainer_cls(config)\n+ trainer_obj = trainer_cls(config)\n+\n+ # Set args as an attribute for future use\n+ setattr(trainer_obj, 'args', args)\n+\n+ return trainer_obj\n \n \n def build_model(config):\n", "issue": "ERROR: 'BaseTrainer' object has no attribute 'args'\nI followed the instructions on the https://learnpythia.readthedocs.io/ to setup. I seem to have everything installed, and I could run the collab notebook locally. \r\n\r\nHowever, when I am trying to run the below command to try out inferences:\r\n\r\npython tools/run.py --tasks vqa --datasets textvqa --model lorra --config configs/vqa/textvqa/lorra.yml \\\r\n--run_type val --evalai_inference 1 --resume_file data/models/lorra_best.pth\r\n\r\nI am getting below error:\r\n\r\n\r\n}\r\n2019-05-29T20:25:41 INFO: Loading tasks and data\r\n2019-05-29T20:25:44 INFO: CUDA Device 0 is: GeForce GTX 1080 Ti\r\n2019-05-29T20:25:46 INFO: Torch version is: 1.0.0\r\n2019-05-29T20:25:46 ERROR: 'BaseTrainer' object has no attribute 'args'\r\nTraceback (most recent call last):\r\n File \"tools/run.py\", line 94, in <module>\r\n run()\r\n File \"tools/run.py\", line 82, in run\r\n trainer.load()\r\n File \"/root/pythia/pythia/trainers/base_trainer.py\", line 46, in load\r\n self.load_extras()\r\n File \"/root/pythia/pythia/trainers/base_trainer.py\", line 139, in load_extras\r\n self.checkpoint = Checkpoint(self)\r\n File \"/root/pythia/pythia/utils/checkpoint.py\", line 28, in __init__\r\n self.ckpt_foldername += foldername_from_config_override(self.trainer.args)\r\nAttributeError: 'BaseTrainer' object has no attribute 'args'\r\n\r\n\r\nI seem to be doing something wrong, any help would be great. I didn't change any code or config yet.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\nimport torch\nimport warnings\n\nfrom pythia.utils.configuration import Configuration\nfrom pythia.common.registry import registry\nfrom pythia.utils.general import get_optimizer_parameters\n\n\ndef build_trainer(args, *rest, **kwargs):\n configuration = Configuration(args.config)\n\n # Update with the config override if passed\n configuration.override_with_cmd_config(args.config_override)\n\n # Now, update with opts args that were passed\n configuration.override_with_cmd_opts(args.opts)\n\n # Finally, update with args that were specifically passed\n # as arguments\n configuration.update_with_args(args)\n configuration.freeze()\n\n config = configuration.get_config()\n registry.register(\"config\", config)\n registry.register(\"configuration\", configuration)\n\n trainer_type = config.training_parameters.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n return trainer_cls(config)\n\n\ndef build_model(config):\n model_name = config.model\n\n model_class = registry.get_model_class(model_name)\n\n if model_class is None:\n registry.get(\"writer\").write(\"No model registered for name: %s\" % model_name)\n model = model_class(config)\n\n if hasattr(model, \"build\"):\n model.build()\n model.init_losses_and_metrics()\n\n return model\n\n\ndef build_optimizer(model, config):\n optimizer_config = config.optimizer_attributes\n if not hasattr(optimizer_config, \"type\"):\n raise ValueError(\n \"Optimizer attributes must have a 'type' key \"\n \"specifying the type of optimizer. \"\n \"(Custom or PyTorch)\"\n )\n optimizer_type = optimizer_config.type\n\n if not hasattr(optimizer_config, \"params\"):\n warnings.warn(\n \"optimizer attributes has no params defined, defaulting to {}.\"\n )\n\n params = getattr(optimizer_config, \"params\", {})\n\n if hasattr(torch.optim, optimizer_type):\n optimizer_class = getattr(torch.optim, optimizer_type)\n else:\n optimizer_class = registry.get_optimizer_class(optimizer_type)\n if optimizer_class is None:\n raise ValueError(\n \"No optimizer class of type {} present in \"\n \"either torch or registered to registry\"\n )\n\n parameters = get_optimizer_parameters(model, config)\n optimizer = optimizer_class(parameters, **params)\n return optimizer\n", "path": "pythia/utils/build_utils.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\nimport torch\nimport warnings\n\nfrom pythia.utils.configuration import Configuration\nfrom pythia.common.registry import registry\nfrom pythia.utils.general import get_optimizer_parameters\n\n\ndef build_trainer(args, *rest, **kwargs):\n configuration = Configuration(args.config)\n\n # Update with the config override if passed\n configuration.override_with_cmd_config(args.config_override)\n\n # Now, update with opts args that were passed\n configuration.override_with_cmd_opts(args.opts)\n\n # Finally, update with args that were specifically passed\n # as arguments\n configuration.update_with_args(args)\n configuration.freeze()\n\n config = configuration.get_config()\n registry.register(\"config\", config)\n registry.register(\"configuration\", configuration)\n\n trainer_type = config.training_parameters.trainer\n trainer_cls = registry.get_trainer_class(trainer_type)\n trainer_obj = trainer_cls(config)\n\n # Set args as an attribute for future use\n setattr(trainer_obj, 'args', args)\n\n return trainer_obj\n\n\ndef build_model(config):\n model_name = config.model\n\n model_class = registry.get_model_class(model_name)\n\n if model_class is None:\n registry.get(\"writer\").write(\"No model registered for name: %s\" % model_name)\n model = model_class(config)\n\n if hasattr(model, \"build\"):\n model.build()\n model.init_losses_and_metrics()\n\n return model\n\n\ndef build_optimizer(model, config):\n optimizer_config = config.optimizer_attributes\n if not hasattr(optimizer_config, \"type\"):\n raise ValueError(\n \"Optimizer attributes must have a 'type' key \"\n \"specifying the type of optimizer. \"\n \"(Custom or PyTorch)\"\n )\n optimizer_type = optimizer_config.type\n\n if not hasattr(optimizer_config, \"params\"):\n warnings.warn(\n \"optimizer attributes has no params defined, defaulting to {}.\"\n )\n\n params = getattr(optimizer_config, \"params\", {})\n\n if hasattr(torch.optim, optimizer_type):\n optimizer_class = getattr(torch.optim, optimizer_type)\n else:\n optimizer_class = registry.get_optimizer_class(optimizer_type)\n if optimizer_class is None:\n raise ValueError(\n \"No optimizer class of type {} present in \"\n \"either torch or registered to registry\"\n )\n\n parameters = get_optimizer_parameters(model, config)\n optimizer = optimizer_class(parameters, **params)\n return optimizer\n", "path": "pythia/utils/build_utils.py"}]}
| 1,351 | 131 |
gh_patches_debug_3708
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-163
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hasher.hexdigest() is documented as returning a str, but returns unicode under python2
It should return a native string under both py2 and py3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/primitives/hashes.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import abc
17
18 import binascii
19
20 import six
21
22 from cryptography.bindings import _default_api
23
24
25 class BaseHash(six.with_metaclass(abc.ABCMeta)):
26 def __init__(self, data=None, api=None, ctx=None):
27 if api is None:
28 api = _default_api
29 self._api = api
30 self._ctx = self._api.create_hash_context(self) if ctx is None else ctx
31 if data is not None:
32 self.update(data)
33
34 def update(self, data):
35 if isinstance(data, six.text_type):
36 raise TypeError("Unicode-objects must be encoded before hashing")
37 self._api.update_hash_context(self._ctx, data)
38
39 def copy(self):
40 return self.__class__(ctx=self._copy_ctx())
41
42 def digest(self):
43 return self._api.finalize_hash_context(self._copy_ctx(),
44 self.digest_size)
45
46 def hexdigest(self):
47 return binascii.hexlify(self.digest()).decode("ascii")
48
49 def _copy_ctx(self):
50 return self._api.copy_hash_context(self._ctx)
51
52
53 class SHA1(BaseHash):
54 name = "sha1"
55 digest_size = 20
56 block_size = 64
57
58
59 class SHA224(BaseHash):
60 name = "sha224"
61 digest_size = 28
62 block_size = 64
63
64
65 class SHA256(BaseHash):
66 name = "sha256"
67 digest_size = 32
68 block_size = 64
69
70
71 class SHA384(BaseHash):
72 name = "sha384"
73 digest_size = 48
74 block_size = 128
75
76
77 class SHA512(BaseHash):
78 name = "sha512"
79 digest_size = 64
80 block_size = 128
81
82
83 class RIPEMD160(BaseHash):
84 name = "ripemd160"
85 digest_size = 20
86 block_size = 64
87
88
89 class Whirlpool(BaseHash):
90 name = "whirlpool"
91 digest_size = 64
92 block_size = 64
93
94
95 class MD5(BaseHash):
96 name = "md5"
97 digest_size = 16
98 block_size = 64
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cryptography/primitives/hashes.py b/cryptography/primitives/hashes.py
--- a/cryptography/primitives/hashes.py
+++ b/cryptography/primitives/hashes.py
@@ -44,7 +44,7 @@
self.digest_size)
def hexdigest(self):
- return binascii.hexlify(self.digest()).decode("ascii")
+ return str(binascii.hexlify(self.digest()).decode("ascii"))
def _copy_ctx(self):
return self._api.copy_hash_context(self._ctx)
|
{"golden_diff": "diff --git a/cryptography/primitives/hashes.py b/cryptography/primitives/hashes.py\n--- a/cryptography/primitives/hashes.py\n+++ b/cryptography/primitives/hashes.py\n@@ -44,7 +44,7 @@\n self.digest_size)\n \n def hexdigest(self):\n- return binascii.hexlify(self.digest()).decode(\"ascii\")\n+ return str(binascii.hexlify(self.digest()).decode(\"ascii\"))\n \n def _copy_ctx(self):\n return self._api.copy_hash_context(self._ctx)\n", "issue": "Hasher.hexdigest() is documented as returning a str, but returns unicode under python2\nIt should return a native string under boht py2 and py3\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport binascii\n\nimport six\n\nfrom cryptography.bindings import _default_api\n\n\nclass BaseHash(six.with_metaclass(abc.ABCMeta)):\n def __init__(self, data=None, api=None, ctx=None):\n if api is None:\n api = _default_api\n self._api = api\n self._ctx = self._api.create_hash_context(self) if ctx is None else ctx\n if data is not None:\n self.update(data)\n\n def update(self, data):\n if isinstance(data, six.text_type):\n raise TypeError(\"Unicode-objects must be encoded before hashing\")\n self._api.update_hash_context(self._ctx, data)\n\n def copy(self):\n return self.__class__(ctx=self._copy_ctx())\n\n def digest(self):\n return self._api.finalize_hash_context(self._copy_ctx(),\n self.digest_size)\n\n def hexdigest(self):\n return binascii.hexlify(self.digest()).decode(\"ascii\")\n\n def _copy_ctx(self):\n return self._api.copy_hash_context(self._ctx)\n\n\nclass SHA1(BaseHash):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\nclass SHA224(BaseHash):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\nclass SHA256(BaseHash):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\nclass SHA384(BaseHash):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\nclass SHA512(BaseHash):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\nclass RIPEMD160(BaseHash):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\nclass Whirlpool(BaseHash):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\nclass MD5(BaseHash):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n", "path": "cryptography/primitives/hashes.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, 
print_function\n\nimport abc\n\nimport binascii\n\nimport six\n\nfrom cryptography.bindings import _default_api\n\n\nclass BaseHash(six.with_metaclass(abc.ABCMeta)):\n def __init__(self, data=None, api=None, ctx=None):\n if api is None:\n api = _default_api\n self._api = api\n self._ctx = self._api.create_hash_context(self) if ctx is None else ctx\n if data is not None:\n self.update(data)\n\n def update(self, data):\n if isinstance(data, six.text_type):\n raise TypeError(\"Unicode-objects must be encoded before hashing\")\n self._api.update_hash_context(self._ctx, data)\n\n def copy(self):\n return self.__class__(ctx=self._copy_ctx())\n\n def digest(self):\n return self._api.finalize_hash_context(self._copy_ctx(),\n self.digest_size)\n\n def hexdigest(self):\n return str(binascii.hexlify(self.digest()).decode(\"ascii\"))\n\n def _copy_ctx(self):\n return self._api.copy_hash_context(self._ctx)\n\n\nclass SHA1(BaseHash):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\nclass SHA224(BaseHash):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\nclass SHA256(BaseHash):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\nclass SHA384(BaseHash):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\nclass SHA512(BaseHash):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\nclass RIPEMD160(BaseHash):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\nclass Whirlpool(BaseHash):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\nclass MD5(BaseHash):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n", "path": "cryptography/primitives/hashes.py"}]}
| 1,127 | 113 |
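The hexdigest fix above is easiest to verify with the string types spelled out. A minimal standalone sketch (assuming only stock CPython 2.7/3.x string semantics; the free function and its `digest_bytes` parameter are illustrative, not repository code):

```python
import binascii

def hexdigest(digest_bytes):
    # hexlify() returns bytes; .decode("ascii") yields unicode on Python 2
    # and str on Python 3. Wrapping the result in str() converts the
    # Python 2 value back to the native str type and is a no-op on Python 3.
    return str(binascii.hexlify(digest_bytes).decode("ascii"))

print(type(hexdigest(b"\x01\xff")))  # <type 'str'> on py2, <class 'str'> on py3
```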
gh_patches_debug_3139
|
rasdani/github-patches
|
git_diff
|
UTNkar__moore-53
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Confirmation e-mails are not sent
For some reason the confirmation e-mails are no longer being sent.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/signals.py`
Content:
```
1 import datetime
2 from django.conf import settings
3 from django.core.mail import EmailMultiAlternatives
4 from django.db.models.signals import pre_save
5 from django.dispatch import receiver
6 from django.template import loader
7 from django.utils import timezone
8 from simple_email_confirmation import unconfirmed_email_created
9
10 from members.models import Member
11
12
13 @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
14 def send_confirmation_email(sender, email, user=None, **kwargs):
15 if user is not None:
16 context = {
17 'email': email,
18 'domain': settings.BASE_URL,
19 'site_name': settings.WAGTAIL_SITE_NAME,
20 'token': user.get_confirmation_key(email),
21 }
22
23 subject = loader.render_to_string(
24 'members/email_change_subject.txt', context)
25 # Email subject *must not* contain newlines
26 subject = ''.join(subject.splitlines())
27 body = loader.render_to_string('members/email_change_email.html',
28 context)
29
30 email_message = EmailMultiAlternatives(subject, body, None, [email])
31 email_message.send()
32
33
34 @receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')
35 def check_membership(sender, instance, **kwargs):
36 if timezone.now() - instance.status_changed > datetime.timedelta(1):
37 instance.update_status()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/members/signals.py b/website/members/signals.py
--- a/website/members/signals.py
+++ b/website/members/signals.py
@@ -12,6 +12,7 @@
@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')
def send_confirmation_email(sender, email, user=None, **kwargs):
+ user = user or sender
if user is not None:
context = {
'email': email,
|
{"golden_diff": "diff --git a/website/members/signals.py b/website/members/signals.py\n--- a/website/members/signals.py\n+++ b/website/members/signals.py\n@@ -12,6 +12,7 @@\n \n @receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\n def send_confirmation_email(sender, email, user=None, **kwargs):\n+ user = user or sender\n if user is not None:\n context = {\n 'email': email,\n", "issue": "Confirmation e-mails are not sent\nFor some reason the confirmation e-mails are no longer being send.\n", "before_files": [{"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n", "path": "website/members/signals.py"}], "after_files": [{"content": "import datetime\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\nfrom django.template import loader\nfrom django.utils import timezone\nfrom simple_email_confirmation import unconfirmed_email_created\n\nfrom members.models import Member\n\n\n@receiver(unconfirmed_email_created, dispatch_uid='send_email_confirmation')\ndef send_confirmation_email(sender, email, user=None, **kwargs):\n user = user or sender\n if user is not None:\n context = {\n 'email': email,\n 'domain': settings.BASE_URL,\n 'site_name': settings.WAGTAIL_SITE_NAME,\n 'token': user.get_confirmation_key(email),\n }\n\n subject = loader.render_to_string(\n 'members/email_change_subject.txt', context)\n # Email subject *must not* contain newlines\n subject = ''.join(subject.splitlines())\n body = loader.render_to_string('members/email_change_email.html',\n context)\n\n email_message = EmailMultiAlternatives(subject, body, None, [email])\n email_message.send()\n\n\n@receiver(pre_save, sender=Member, dispatch_uid='member_check_membership')\ndef check_membership(sender, instance, **kwargs):\n if timezone.now() - instance.status_changed > datetime.timedelta(1):\n instance.update_status()\n", "path": "website/members/signals.py"}]}
| 617 | 106 |
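The one-line patch above works because the handler previously did nothing when the user was not passed via the `user` keyword. A hedged sketch of the resulting guard (same signature as the repository handler; the assumption that the `unconfirmed_email_created` signal delivers the user object as `sender` is inferred from the patch, not verified against the library):

```python
def send_confirmation_email(sender, email, user=None, **kwargs):
    # Fall back to `sender` when `user` is not supplied explicitly
    # (assumed to be how the signal hands over the user object).
    user = user or sender
    if user is None:
        return  # still no user: nothing to send
    # ... render subject/body and send the confirmation e-mail as before
```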
gh_patches_debug_21111
|
rasdani/github-patches
|
git_diff
|
gammapy__gammapy-4314
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs
**Gammapy version**
dev
**Bug description**
The latex rendering of equations in the following doc page is broken.
https://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html
I think that it is due to the `\a` that appears multiple times in `\frac`, but I am not really sure as I don't understand HTML.
Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs
**Gammapy version**
dev
**Bug description**
The latex rendering of equations in the following doc page is broken.
https://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html
I think that it is due to the `\a` that appears multiple times in `\frac`, but I am not really sure as I don't understand HTML.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py`
Content:
```
1 r"""
2 .. _super-exp-cutoff-powerlaw-4fgl-dr3-spectral-model:
3
4 Super Exponential Cutoff Power Law Model used for 4FGL-DR3
5 ==========================================================
6
7 This model parametrises super exponential cutoff power-law model spectrum used for 4FGL.
8
9 It is defined by the following equation:
10
11 .. math::
12
13
14 \phi(e) =
15 \begin{cases}
16 \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} -\Gamma_1} \cdot \exp \left(
17 \frac{\a}{\Gamma_2^2} \left( 1 - \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} \right)
18 \right)&
19 \\
20 \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{\a}{2} \ln \frac{E}{E_0} - \frac{\a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{\a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}}\\
21 0 & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right|
22 \end{cases}
23
24 See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf
25 """
26
27 # %%
28 # Example plot
29 # ------------
30 # Here is an example plot of the model:
31
32 from astropy import units as u
33 import matplotlib.pyplot as plt
34 from gammapy.modeling.models import (
35 Models,
36 SkyModel,
37 SuperExpCutoffPowerLaw4FGLDR3SpectralModel,
38 )
39
40 energy_range = [0.1, 100] * u.TeV
41 model = SuperExpCutoffPowerLaw4FGLDR3SpectralModel(
42 index_1=1,
43 index_2=2,
44 amplitude="1e-12 TeV-1 cm-2 s-1",
45 reference="1 TeV",
46 expfactor=1e-2,
47 )
48 model.plot(energy_range)
49 plt.grid(which="both")
50 plt.ylim(1e-24, 1e-10)
51
52 # %%
53 # YAML representation
54 # -------------------
55 # Here is an example YAML file using the model:
56
57 model = SkyModel(spectral_model=model, name="super-exp-cutoff-power-law-4fgl-dr3-model")
58 models = Models([model])
59
60 print(models.to_yaml())
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
--- a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
+++ b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py
@@ -10,16 +10,7 @@
.. math::
-
- \phi(e) =
- \begin{cases}
- \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} -\Gamma_1} \cdot \exp \left(
- \frac{\a}{\Gamma_2^2} \left( 1 - \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} \right)
- \right)&
- \\
- \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{\a}{2} \ln \frac{E}{E_0} - \frac{\a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{\a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}}\\
- 0 & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right|
- \end{cases}
+\phi(E) = \begin{cases} \phi_0 \cdot \left(\frac{E}{E_0}\right)^{\frac{a}{\Gamma_2} -\Gamma_1} \cdot \exp \left( \frac{a}{\Gamma_2^2}\left( 1 - \left(\frac{E}{E_0}\right)^{\Gamma_2} \right) \right) \\ \phi_0 \cdot \left(\frac{E}{E_0}\right)^{ -\Gamma_1 - \frac{a}{2} \ln \frac{E}{E_0} - \frac{a \Gamma_2}{6} \ln^2 \frac{E}{E_0} - \frac{a \Gamma_2^2}{24} \ln^3 \frac{E}{E_0}} & \text{for } \left| \Gamma_2 \ln \frac{E}{E_0} \right| < 10^{-2} \end{cases}
See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf
"""
|
{"golden_diff": "diff --git a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n--- a/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n+++ b/examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py\n@@ -10,16 +10,7 @@\n \n .. math::\n \n-\n- \\phi(e) =\n- \\begin{cases}\n- \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left(\n- \\frac{\\a}{\\Gamma_2^2} \\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} \\right)\n- \\right)&\n- \\\\\n- \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{\\a}{2} \\ln \\frac{E}{E_0} - \\frac{\\a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{\\a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}}\\\\\n- 0 & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right|\n- \\end{cases}\n+\\phi(E) = \\begin{cases} \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left( \\frac{a}{\\Gamma_2^2}\\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\Gamma_2} \\right) \\right) \\\\ \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{a}{2} \\ln \\frac{E}{E_0} - \\frac{a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}} & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right| < 10^{-2} \\end{cases}\n \n See Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf\n \"\"\"\n", "issue": "Bad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\n\r\nThe latex rendering of equations in the following doc page is broken. \r\nhttps://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html\r\n\r\nI think that it is due to the `\\a`that appears multiple times in `\\frac`, but I am not really sure as I don't understand HTML.\r\n\nBad rendering of Super Exponential Cutoff Power Law Model used for 4FGL-DR3 equations in docs\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\n\r\nThe latex rendering of equations in the following doc page is broken. \r\nhttps://docs.gammapy.org/dev/user-guide/model-gallery/spectral/plot_super_exp_cutoff_powerlaw_4fgl.html\r\n\r\nI think that it is due to the `\\a`that appears multiple times in `\\frac`, but I am not really sure as I don't understand HTML.\r\n\n", "before_files": [{"content": "r\"\"\"\n.. _super-exp-cutoff-powerlaw-4fgl-dr3-spectral-model:\n\nSuper Exponential Cutoff Power Law Model used for 4FGL-DR3\n==========================================================\n\nThis model parametrises super exponential cutoff power-law model spectrum used for 4FGL.\n\nIt is defined by the following equation:\n\n.. 
math::\n\n\n \\phi(e) =\n \\begin{cases}\n \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left(\n \\frac{\\a}{\\Gamma_2^2} \\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\frac{\\a}{\\Gamma_2} \\right)\n \\right)&\n \\\\\n \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{\\a}{2} \\ln \\frac{E}{E_0} - \\frac{\\a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{\\a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}}\\\\\n 0 & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right|\n \\end{cases}\n\nSee Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf\n\"\"\"\n\n# %%\n# Example plot\n# ------------\n# Here is an example plot of the model:\n\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\nfrom gammapy.modeling.models import (\n Models,\n SkyModel,\n SuperExpCutoffPowerLaw4FGLDR3SpectralModel,\n)\n\nenergy_range = [0.1, 100] * u.TeV\nmodel = SuperExpCutoffPowerLaw4FGLDR3SpectralModel(\n index_1=1,\n index_2=2,\n amplitude=\"1e-12 TeV-1 cm-2 s-1\",\n reference=\"1 TeV\",\n expfactor=1e-2,\n)\nmodel.plot(energy_range)\nplt.grid(which=\"both\")\nplt.ylim(1e-24, 1e-10)\n\n# %%\n# YAML representation\n# -------------------\n# Here is an example YAML file using the model:\n\nmodel = SkyModel(spectral_model=model, name=\"super-exp-cutoff-power-law-4fgl-dr3-model\")\nmodels = Models([model])\n\nprint(models.to_yaml())\n", "path": "examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py"}], "after_files": [{"content": "r\"\"\"\n.. _super-exp-cutoff-powerlaw-4fgl-dr3-spectral-model:\n\nSuper Exponential Cutoff Power Law Model used for 4FGL-DR3\n==========================================================\n\nThis model parametrises super exponential cutoff power-law model spectrum used for 4FGL.\n\nIt is defined by the following equation:\n\n.. math::\n\n\\phi(E) = \\begin{cases} \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{\\frac{a}{\\Gamma_2} -\\Gamma_1} \\cdot \\exp \\left( \\frac{a}{\\Gamma_2^2}\\left( 1 - \\left(\\frac{E}{E_0}\\right)^{\\Gamma_2} \\right) \\right) \\\\ \\phi_0 \\cdot \\left(\\frac{E}{E_0}\\right)^{ -\\Gamma_1 - \\frac{a}{2} \\ln \\frac{E}{E_0} - \\frac{a \\Gamma_2}{6} \\ln^2 \\frac{E}{E_0} - \\frac{a \\Gamma_2^2}{24} \\ln^3 \\frac{E}{E_0}} & \\text{for } \\left| \\Gamma_2 \\ln \\frac{E}{E_0} \\right| < 10^{-2} \\end{cases}\n\nSee Equation (2) and (3) in https://arxiv.org/pdf/2201.11184.pdf\n\"\"\"\n\n# %%\n# Example plot\n# ------------\n# Here is an example plot of the model:\n\nfrom astropy import units as u\nimport matplotlib.pyplot as plt\nfrom gammapy.modeling.models import (\n Models,\n SkyModel,\n SuperExpCutoffPowerLaw4FGLDR3SpectralModel,\n)\n\nenergy_range = [0.1, 100] * u.TeV\nmodel = SuperExpCutoffPowerLaw4FGLDR3SpectralModel(\n index_1=1,\n index_2=2,\n amplitude=\"1e-12 TeV-1 cm-2 s-1\",\n reference=\"1 TeV\",\n expfactor=1e-2,\n)\nmodel.plot(energy_range)\nplt.grid(which=\"both\")\nplt.ylim(1e-24, 1e-10)\n\n# %%\n# YAML representation\n# -------------------\n# Here is an example YAML file using the model:\n\nmodel = SkyModel(spectral_model=model, name=\"super-exp-cutoff-power-law-4fgl-dr3-model\")\nmodels = Models([model])\n\nprint(models.to_yaml())\n", "path": "examples/models/spectral/plot_super_exp_cutoff_powerlaw_4fgl.py"}]}
| 1,200 | 609 |
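The rendering failure is consistent with the issue author's guess: `\a` is not a macro MathJax accepts in math mode, so every `\frac{\a}{...}` aborts the equation, and the patched docstring also rebalances the `\left( ... \right)` groups. A minimal contrast, with the valid line taken from the patched equation:

```latex
% broken: \a is undefined in math mode, so the whole expression fails to render
% \left(\frac{E}{E_0}\right)^{\frac{\a}{\Gamma_2} -\Gamma_1}

% patched form: a plain symbol a
\left(\frac{E}{E_0}\right)^{\frac{a}{\Gamma_2} -\Gamma_1}
```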
gh_patches_debug_60369
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-1426
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Flash CI (special examples failing)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154
16
17 """## Train file https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
18
19 ## Validation File
20 https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1
21
22 Followed by renaming the pickle files
23 cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'
24 cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'
25 """
26
27 import warnings
28 from dataclasses import dataclass
29 from typing import Tuple, Union
30
31 import kornia.augmentation as Ka
32 import kornia.geometry as Kg
33 import learn2learn as l2l
34 import torch
35 import torchvision.transforms as T
36
37 import flash
38 from flash.core.data.io.input import DataKeys
39 from flash.core.data.io.input_transform import InputTransform
40 from flash.core.data.transforms import ApplyToKeys, kornia_collate
41 from flash.image import ImageClassificationData, ImageClassifier
42
43 warnings.simplefilter("ignore")
44
45 # download MiniImagenet
46 train_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="train", download=False)
47 val_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="validation", download=False)
48
49
50 @dataclass
51 class ImageClassificationInputTransform(InputTransform):
52
53 image_size: Tuple[int, int] = (196, 196)
54 mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)
55 std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)
56
57 def per_sample_transform(self):
58 return T.Compose(
59 [
60 ApplyToKeys(
61 DataKeys.INPUT,
62 T.Compose(
63 [
64 T.ToTensor(),
65 Kg.Resize((196, 196)),
66 # SPATIAL
67 Ka.RandomHorizontalFlip(p=0.25),
68 Ka.RandomRotation(degrees=90.0, p=0.25),
69 Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),
70 Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),
71 # PIXEL-LEVEL
72 Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness
73 Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation
74 Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast
75 Ka.ColorJitter(hue=1 / 30, p=0.25), # hue
76 Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),
77 Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),
78 ]
79 ),
80 ),
81 ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
82 ]
83 )
84
85 def train_per_sample_transform(self):
86 return T.Compose(
87 [
88 ApplyToKeys(
89 DataKeys.INPUT,
90 T.Compose(
91 [
92 T.ToTensor(),
93 T.Resize(self.image_size),
94 T.Normalize(self.mean, self.std),
95 T.RandomHorizontalFlip(),
96 T.ColorJitter(),
97 T.RandomAutocontrast(),
98 T.RandomPerspective(),
99 ]
100 ),
101 ),
102 ApplyToKeys("target", torch.as_tensor),
103 ]
104 )
105
106 def per_batch_transform_on_device(self):
107 return ApplyToKeys(
108 DataKeys.INPUT,
109 Ka.RandomHorizontalFlip(p=0.25),
110 )
111
112 def collate(self):
113 return kornia_collate
114
115
116 # construct datamodule
117
118 datamodule = ImageClassificationData.from_tensors(
119 train_data=train_dataset.x,
120 train_targets=torch.from_numpy(train_dataset.y.astype(int)),
121 val_data=val_dataset.x,
122 val_targets=torch.from_numpy(val_dataset.y.astype(int)),
123 train_transform=ImageClassificationInputTransform,
124 val_transform=ImageClassificationInputTransform,
125 batch_size=1,
126 )
127
128 model = ImageClassifier(
129 backbone="resnet18",
130 training_strategy="prototypicalnetworks",
131 training_strategy_kwargs={
132 "epoch_length": 10 * 16,
133 "meta_batch_size": 1,
134 "num_tasks": 200,
135 "test_num_tasks": 2000,
136 "ways": datamodule.num_classes,
137 "shots": 1,
138 "test_ways": 5,
139 "test_shots": 1,
140 "test_queries": 15,
141 },
142 optimizer=torch.optim.Adam,
143 learning_rate=0.001,
144 )
145
146 trainer = flash.Trainer(
147 max_epochs=1,
148 gpus=1,
149 precision=16,
150 )
151
152 trainer.finetune(model, datamodule=datamodule, strategy="no_freeze")
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -146,6 +146,7 @@
trainer = flash.Trainer(
max_epochs=1,
gpus=1,
+ accelerator="gpu",
precision=16,
)
|
{"golden_diff": "diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py\n--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py\n+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py\n@@ -146,6 +146,7 @@\n trainer = flash.Trainer(\n max_epochs=1,\n gpus=1,\n+ accelerator=\"gpu\",\n precision=16,\n )\n", "issue": "Fix Flash CI (special examples failing)\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154\n\n\"\"\"## Train file https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1\n\n## Validation File\nhttps://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1\n\nFollowed by renaming the pickle files\ncp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\ncp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'\n\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Tuple, Union\n\nimport kornia.augmentation as Ka\nimport kornia.geometry as Kg\nimport learn2learn as l2l\nimport torch\nimport torchvision.transforms as T\n\nimport flash\nfrom flash.core.data.io.input import DataKeys\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys, kornia_collate\nfrom flash.image import ImageClassificationData, ImageClassifier\n\nwarnings.simplefilter(\"ignore\")\n\n# download MiniImagenet\ntrain_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\nval_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)\n\n\n@dataclass\nclass ImageClassificationInputTransform(InputTransform):\n\n image_size: Tuple[int, int] = (196, 196)\n mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n\n def per_sample_transform(self):\n return T.Compose(\n [\n ApplyToKeys(\n DataKeys.INPUT,\n T.Compose(\n [\n T.ToTensor(),\n Kg.Resize((196, 196)),\n # SPATIAL\n Ka.RandomHorizontalFlip(p=0.25),\n Ka.RandomRotation(degrees=90.0, p=0.25),\n Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n # PIXEL-LEVEL\n Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n ]\n ),\n ),\n ApplyToKeys(DataKeys.TARGET, torch.as_tensor),\n ]\n )\n\n 
def train_per_sample_transform(self):\n return T.Compose(\n [\n ApplyToKeys(\n DataKeys.INPUT,\n T.Compose(\n [\n T.ToTensor(),\n T.Resize(self.image_size),\n T.Normalize(self.mean, self.std),\n T.RandomHorizontalFlip(),\n T.ColorJitter(),\n T.RandomAutocontrast(),\n T.RandomPerspective(),\n ]\n ),\n ),\n ApplyToKeys(\"target\", torch.as_tensor),\n ]\n )\n\n def per_batch_transform_on_device(self):\n return ApplyToKeys(\n DataKeys.INPUT,\n Ka.RandomHorizontalFlip(p=0.25),\n )\n\n def collate(self):\n return kornia_collate\n\n\n# construct datamodule\n\ndatamodule = ImageClassificationData.from_tensors(\n train_data=train_dataset.x,\n train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n val_data=val_dataset.x,\n val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n train_transform=ImageClassificationInputTransform,\n val_transform=ImageClassificationInputTransform,\n batch_size=1,\n)\n\nmodel = ImageClassifier(\n backbone=\"resnet18\",\n training_strategy=\"prototypicalnetworks\",\n training_strategy_kwargs={\n \"epoch_length\": 10 * 16,\n \"meta_batch_size\": 1,\n \"num_tasks\": 200,\n \"test_num_tasks\": 2000,\n \"ways\": datamodule.num_classes,\n \"shots\": 1,\n \"test_ways\": 5,\n \"test_shots\": 1,\n \"test_queries\": 15,\n },\n optimizer=torch.optim.Adam,\n learning_rate=0.001,\n)\n\ntrainer = flash.Trainer(\n max_epochs=1,\n gpus=1,\n precision=16,\n)\n\ntrainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")\n", "path": "flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154\n\n\"\"\"## Train file https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1\n\n## Validation File\nhttps://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1\n\nFollowed by renaming the pickle files\ncp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\ncp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'\n\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Tuple, Union\n\nimport kornia.augmentation as Ka\nimport kornia.geometry as Kg\nimport learn2learn as l2l\nimport torch\nimport torchvision.transforms as T\n\nimport flash\nfrom flash.core.data.io.input import DataKeys\nfrom flash.core.data.io.input_transform import InputTransform\nfrom flash.core.data.transforms import ApplyToKeys, kornia_collate\nfrom flash.image import ImageClassificationData, ImageClassifier\n\nwarnings.simplefilter(\"ignore\")\n\n# download MiniImagenet\ntrain_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\nval_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)\n\n\n@dataclass\nclass ImageClassificationInputTransform(InputTransform):\n\n 
image_size: Tuple[int, int] = (196, 196)\n mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n\n def per_sample_transform(self):\n return T.Compose(\n [\n ApplyToKeys(\n DataKeys.INPUT,\n T.Compose(\n [\n T.ToTensor(),\n Kg.Resize((196, 196)),\n # SPATIAL\n Ka.RandomHorizontalFlip(p=0.25),\n Ka.RandomRotation(degrees=90.0, p=0.25),\n Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n # PIXEL-LEVEL\n Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n ]\n ),\n ),\n ApplyToKeys(DataKeys.TARGET, torch.as_tensor),\n ]\n )\n\n def train_per_sample_transform(self):\n return T.Compose(\n [\n ApplyToKeys(\n DataKeys.INPUT,\n T.Compose(\n [\n T.ToTensor(),\n T.Resize(self.image_size),\n T.Normalize(self.mean, self.std),\n T.RandomHorizontalFlip(),\n T.ColorJitter(),\n T.RandomAutocontrast(),\n T.RandomPerspective(),\n ]\n ),\n ),\n ApplyToKeys(\"target\", torch.as_tensor),\n ]\n )\n\n def per_batch_transform_on_device(self):\n return ApplyToKeys(\n DataKeys.INPUT,\n Ka.RandomHorizontalFlip(p=0.25),\n )\n\n def collate(self):\n return kornia_collate\n\n\n# construct datamodule\n\ndatamodule = ImageClassificationData.from_tensors(\n train_data=train_dataset.x,\n train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n val_data=val_dataset.x,\n val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n train_transform=ImageClassificationInputTransform,\n val_transform=ImageClassificationInputTransform,\n batch_size=1,\n)\n\nmodel = ImageClassifier(\n backbone=\"resnet18\",\n training_strategy=\"prototypicalnetworks\",\n training_strategy_kwargs={\n \"epoch_length\": 10 * 16,\n \"meta_batch_size\": 1,\n \"num_tasks\": 200,\n \"test_num_tasks\": 2000,\n \"ways\": datamodule.num_classes,\n \"shots\": 1,\n \"test_ways\": 5,\n \"test_shots\": 1,\n \"test_queries\": 15,\n },\n optimizer=torch.optim.Adam,\n learning_rate=0.001,\n)\n\ntrainer = flash.Trainer(\n max_epochs=1,\n gpus=1,\n accelerator=\"gpu\",\n precision=16,\n)\n\ntrainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")\n", "path": "flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py"}]}
| 1,962 | 125 |
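The CI fix above is a single trainer argument. A sketch of the updated construction with the values from the example script (whether additional arguments are needed on newer Lightning releases is outside the scope of the patch):

```python
import flash

trainer = flash.Trainer(
    max_epochs=1,
    gpus=1,
    accelerator="gpu",  # added by the patch so the device request is explicit
    precision=16,
)
```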
gh_patches_debug_13125
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-812
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Links to BioViL paper need to be updated
We are still using the arXiv preprint. But this should be solved first:
- https://github.com/microsoft/hi-ml/pull/730#issuecomment-1419298653
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py`
Content:
```
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 from typing import Any, Optional, Tuple, Union
7
8 import torch
9 import torch.nn.functional as F
10 from torch import nn
11 from torch import Tensor as T
12 from transformers import BertForMaskedLM
13 from transformers.modeling_outputs import ModelOutput
14
15 from health_multimodal.text.model.configuration_cxrbert import CXRBertConfig
16
17 BERTTupleOutput = Tuple[T, T, T, T, T]
18
19
20 class CXRBertOutput(ModelOutput):
21 last_hidden_state: torch.FloatTensor
22 logits: torch.FloatTensor
23 cls_projected_embedding: Optional[torch.FloatTensor] = None
24 hidden_states: Optional[Tuple[torch.FloatTensor]] = None
25 attentions: Optional[Tuple[torch.FloatTensor]] = None
26
27
28 class BertProjectionHead(nn.Module):
29 """Projection head to be used with BERT CLS token.
30
31 This is similar to ``BertPredictionHeadTransform`` in HuggingFace.
32
33 :param config: Configuration for BERT.
34 """
35
36 def __init__(self, config: CXRBertConfig) -> None:
37 super().__init__()
38 self.dense_to_hidden = nn.Linear(config.hidden_size, config.projection_size)
39 self.transform_act_fn = nn.functional.gelu
40 self.LayerNorm = nn.LayerNorm(config.projection_size, eps=1e-12)
41 self.dense_to_output = nn.Linear(config.projection_size, config.projection_size)
42
43 def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
44 hidden_states = self.dense_to_hidden(hidden_states)
45 hidden_states = self.transform_act_fn(hidden_states)
46 hidden_states = self.LayerNorm(hidden_states)
47 hidden_states = self.dense_to_output(hidden_states)
48
49 return hidden_states
50
51
52 class CXRBertModel(BertForMaskedLM):
53 """
54 Implements the CXR-BERT model outlined in the manuscript:
55 Boecking et al. "Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing", 2022
56 https://arxiv.org/abs/2204.09817
57
58 Extends the HuggingFace BertForMaskedLM model by adding a separate projection head. The projection "[CLS]" token is
59 used to align the latent vectors of image and text modalities.
60 """
61
62 config_class = CXRBertConfig # type: ignore
63
64 def __init__(self, config: CXRBertConfig):
65 super().__init__(config)
66
67 self.cls_projection_head = BertProjectionHead(config)
68 self.init_weights()
69
70 def forward(
71 self,
72 input_ids: torch.Tensor,
73 attention_mask: torch.Tensor,
74 token_type_ids: Optional[torch.Tensor] = None,
75 position_ids: Optional[torch.Tensor] = None,
76 head_mask: Optional[torch.Tensor] = None,
77 inputs_embeds: Optional[torch.Tensor] = None,
78 output_attentions: Optional[bool] = None,
79 output_hidden_states: Optional[bool] = None,
80 output_cls_projected_embedding: Optional[bool] = None,
81 return_dict: Optional[bool] = None,
82 **kwargs: Any
83 ) -> Union[BERTTupleOutput, CXRBertOutput]:
84
85 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
86
87 bert_for_masked_lm_output = super().forward(input_ids=input_ids,
88 attention_mask=attention_mask,
89 token_type_ids=token_type_ids,
90 position_ids=position_ids,
91 head_mask=head_mask,
92 inputs_embeds=inputs_embeds,
93 output_attentions=output_attentions,
94 output_hidden_states=True,
95 return_dict=True)
96
97 last_hidden_state = bert_for_masked_lm_output.hidden_states[-1]
98 cls_projected_embedding = self.cls_projection_head(
99 last_hidden_state[:, 0, :]) if output_cls_projected_embedding else None
100
101 if return_dict:
102 return CXRBertOutput(
103 last_hidden_state=last_hidden_state,
104 logits=bert_for_masked_lm_output.logits,
105 cls_projected_embedding=cls_projected_embedding,
106 hidden_states=bert_for_masked_lm_output.hidden_states if output_hidden_states else None,
107 attentions=bert_for_masked_lm_output.attentions,
108 )
109 else:
110 return (
111 last_hidden_state,
112 bert_for_masked_lm_output.logits,
113 cls_projected_embedding,
114 bert_for_masked_lm_output.hidden_states,
115 bert_for_masked_lm_output.attentions,)
116
117 def get_projected_text_embeddings(self,
118 input_ids: torch.Tensor,
119 attention_mask: torch.Tensor,
120 normalize_embeddings: bool = True) -> torch.Tensor:
121 """
122 Returns l2-normalised projected cls token embeddings for the given input token ids and attention mask.
123 The joint latent space is trained using a contrastive objective between image and text data modalities.
124
125 :param input_ids: (batch_size, sequence_length)
126 :param attention_mask: (batch_size, sequence_length)
127 :param normalize_embeddings: Whether to l2-normalise the embeddings.
128 :return: (batch_size, projection_size)
129 """
130
131 outputs = self.forward(input_ids=input_ids, attention_mask=attention_mask,
132 output_cls_projected_embedding=True, return_dict=True)
133 assert isinstance(outputs, CXRBertOutput)
134
135 cls_projected_embedding = outputs.cls_projected_embedding
136 assert cls_projected_embedding is not None
137
138 if normalize_embeddings:
139 return F.normalize(cls_projected_embedding, dim=1)
140
141 return cls_projected_embedding
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py b/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py
--- a/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py
+++ b/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py
@@ -53,7 +53,7 @@
"""
Implements the CXR-BERT model outlined in the manuscript:
Boecking et al. "Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing", 2022
- https://arxiv.org/abs/2204.09817
+ https://link.springer.com/chapter/10.1007/978-3-031-20059-5_1
Extends the HuggingFace BertForMaskedLM model by adding a separate projection head. The projection "[CLS]" token is
used to align the latent vectors of image and text modalities.
|
{"golden_diff": "diff --git a/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py b/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py\n--- a/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py\n+++ b/hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py\n@@ -53,7 +53,7 @@\n \"\"\"\n Implements the CXR-BERT model outlined in the manuscript:\n Boecking et al. \"Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing\", 2022\n- https://arxiv.org/abs/2204.09817\n+ https://link.springer.com/chapter/10.1007/978-3-031-20059-5_1\n \n Extends the HuggingFace BertForMaskedLM model by adding a separate projection head. The projection \"[CLS]\" token is\n used to align the latent vectors of image and text modalities.\n", "issue": "Links to BioViL paper need to be updated\nWe are still using the arXiv preprint. But this should be solved first:\r\n- https://github.com/microsoft/hi-ml/pull/730#issuecomment-1419298653\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\nfrom typing import Any, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch import Tensor as T\nfrom transformers import BertForMaskedLM\nfrom transformers.modeling_outputs import ModelOutput\n\nfrom health_multimodal.text.model.configuration_cxrbert import CXRBertConfig\n\nBERTTupleOutput = Tuple[T, T, T, T, T]\n\n\nclass CXRBertOutput(ModelOutput):\n last_hidden_state: torch.FloatTensor\n logits: torch.FloatTensor\n cls_projected_embedding: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass BertProjectionHead(nn.Module):\n \"\"\"Projection head to be used with BERT CLS token.\n\n This is similar to ``BertPredictionHeadTransform`` in HuggingFace.\n\n :param config: Configuration for BERT.\n \"\"\"\n\n def __init__(self, config: CXRBertConfig) -> None:\n super().__init__()\n self.dense_to_hidden = nn.Linear(config.hidden_size, config.projection_size)\n self.transform_act_fn = nn.functional.gelu\n self.LayerNorm = nn.LayerNorm(config.projection_size, eps=1e-12)\n self.dense_to_output = nn.Linear(config.projection_size, config.projection_size)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense_to_hidden(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.dense_to_output(hidden_states)\n\n return hidden_states\n\n\nclass CXRBertModel(BertForMaskedLM):\n \"\"\"\n Implements the CXR-BERT model outlined in the manuscript:\n Boecking et al. \"Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing\", 2022\n https://arxiv.org/abs/2204.09817\n\n Extends the HuggingFace BertForMaskedLM model by adding a separate projection head. 
The projection \"[CLS]\" token is\n used to align the latent vectors of image and text modalities.\n \"\"\"\n\n config_class = CXRBertConfig # type: ignore\n\n def __init__(self, config: CXRBertConfig):\n super().__init__(config)\n\n self.cls_projection_head = BertProjectionHead(config)\n self.init_weights()\n\n def forward(\n self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n token_type_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_cls_projected_embedding: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n **kwargs: Any\n ) -> Union[BERTTupleOutput, CXRBertOutput]:\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n bert_for_masked_lm_output = super().forward(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=True,\n return_dict=True)\n\n last_hidden_state = bert_for_masked_lm_output.hidden_states[-1]\n cls_projected_embedding = self.cls_projection_head(\n last_hidden_state[:, 0, :]) if output_cls_projected_embedding else None\n\n if return_dict:\n return CXRBertOutput(\n last_hidden_state=last_hidden_state,\n logits=bert_for_masked_lm_output.logits,\n cls_projected_embedding=cls_projected_embedding,\n hidden_states=bert_for_masked_lm_output.hidden_states if output_hidden_states else None,\n attentions=bert_for_masked_lm_output.attentions,\n )\n else:\n return (\n last_hidden_state,\n bert_for_masked_lm_output.logits,\n cls_projected_embedding,\n bert_for_masked_lm_output.hidden_states,\n bert_for_masked_lm_output.attentions,)\n\n def get_projected_text_embeddings(self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n normalize_embeddings: bool = True) -> torch.Tensor:\n \"\"\"\n Returns l2-normalised projected cls token embeddings for the given input token ids and attention mask.\n The joint latent space is trained using a contrastive objective between image and text data modalities.\n\n :param input_ids: (batch_size, sequence_length)\n :param attention_mask: (batch_size, sequence_length)\n :param normalize_embeddings: Whether to l2-normalise the embeddings.\n :return: (batch_size, projection_size)\n \"\"\"\n\n outputs = self.forward(input_ids=input_ids, attention_mask=attention_mask,\n output_cls_projected_embedding=True, return_dict=True)\n assert isinstance(outputs, CXRBertOutput)\n\n cls_projected_embedding = outputs.cls_projected_embedding\n assert cls_projected_embedding is not None\n\n if normalize_embeddings:\n return F.normalize(cls_projected_embedding, dim=1)\n\n return cls_projected_embedding\n", "path": "hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\nfrom typing import Any, Optional, Tuple, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch import Tensor as T\nfrom transformers import BertForMaskedLM\nfrom transformers.modeling_outputs import ModelOutput\n\nfrom health_multimodal.text.model.configuration_cxrbert import CXRBertConfig\n\nBERTTupleOutput = Tuple[T, T, T, T, T]\n\n\nclass CXRBertOutput(ModelOutput):\n last_hidden_state: torch.FloatTensor\n logits: torch.FloatTensor\n cls_projected_embedding: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass BertProjectionHead(nn.Module):\n \"\"\"Projection head to be used with BERT CLS token.\n\n This is similar to ``BertPredictionHeadTransform`` in HuggingFace.\n\n :param config: Configuration for BERT.\n \"\"\"\n\n def __init__(self, config: CXRBertConfig) -> None:\n super().__init__()\n self.dense_to_hidden = nn.Linear(config.hidden_size, config.projection_size)\n self.transform_act_fn = nn.functional.gelu\n self.LayerNorm = nn.LayerNorm(config.projection_size, eps=1e-12)\n self.dense_to_output = nn.Linear(config.projection_size, config.projection_size)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense_to_hidden(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.dense_to_output(hidden_states)\n\n return hidden_states\n\n\nclass CXRBertModel(BertForMaskedLM):\n \"\"\"\n Implements the CXR-BERT model outlined in the manuscript:\n Boecking et al. \"Making the Most of Text Semantics to Improve Biomedical Vision-Language Processing\", 2022\n https://link.springer.com/chapter/10.1007/978-3-031-20059-5_1\n\n Extends the HuggingFace BertForMaskedLM model by adding a separate projection head. 
The projection \"[CLS]\" token is\n used to align the latent vectors of image and text modalities.\n \"\"\"\n\n config_class = CXRBertConfig # type: ignore\n\n def __init__(self, config: CXRBertConfig):\n super().__init__(config)\n\n self.cls_projection_head = BertProjectionHead(config)\n self.init_weights()\n\n def forward(\n self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n token_type_ids: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_cls_projected_embedding: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n **kwargs: Any\n ) -> Union[BERTTupleOutput, CXRBertOutput]:\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n bert_for_masked_lm_output = super().forward(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=True,\n return_dict=True)\n\n last_hidden_state = bert_for_masked_lm_output.hidden_states[-1]\n cls_projected_embedding = self.cls_projection_head(\n last_hidden_state[:, 0, :]) if output_cls_projected_embedding else None\n\n if return_dict:\n return CXRBertOutput(\n last_hidden_state=last_hidden_state,\n logits=bert_for_masked_lm_output.logits,\n cls_projected_embedding=cls_projected_embedding,\n hidden_states=bert_for_masked_lm_output.hidden_states if output_hidden_states else None,\n attentions=bert_for_masked_lm_output.attentions,\n )\n else:\n return (\n last_hidden_state,\n bert_for_masked_lm_output.logits,\n cls_projected_embedding,\n bert_for_masked_lm_output.hidden_states,\n bert_for_masked_lm_output.attentions,)\n\n def get_projected_text_embeddings(self,\n input_ids: torch.Tensor,\n attention_mask: torch.Tensor,\n normalize_embeddings: bool = True) -> torch.Tensor:\n \"\"\"\n Returns l2-normalised projected cls token embeddings for the given input token ids and attention mask.\n The joint latent space is trained using a contrastive objective between image and text data modalities.\n\n :param input_ids: (batch_size, sequence_length)\n :param attention_mask: (batch_size, sequence_length)\n :param normalize_embeddings: Whether to l2-normalise the embeddings.\n :return: (batch_size, projection_size)\n \"\"\"\n\n outputs = self.forward(input_ids=input_ids, attention_mask=attention_mask,\n output_cls_projected_embedding=True, return_dict=True)\n assert isinstance(outputs, CXRBertOutput)\n\n cls_projected_embedding = outputs.cls_projected_embedding\n assert cls_projected_embedding is not None\n\n if normalize_embeddings:\n return F.normalize(cls_projected_embedding, dim=1)\n\n return cls_projected_embedding\n", "path": "hi-ml-multimodal/src/health_multimodal/text/model/modelling_cxrbert.py"}]}
| 1,865 | 255 |
gh_patches_debug_15716
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1198
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WebView in MacOS not accepting keyboard input
**Describe the bug**
I am unable to type text in HTML input fields within a Toga WebView on Mac OS.
**To Reproduce**
Steps to reproduce the behavior:
1. Follow the tutorial at https://docs.beeware.org/en/latest/tutorial/tutorial-1.html until you are able to run `briefcase dev` successfully.
2. Modify the `startup` function so that its body is:
```
main_box = toga.Box()
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = toga.WebView(style=Pack(flex=1))
self.main_window.content.url = 'https://www.google.ca/'
self.main_window.show()
```
3. Attempt to search for something in the Google search input.
4. When I try, I am unable to type characters. Nothing happens with my key presses. Note that specified command shortcuts do seem to work.
**Expected behavior**
I expect to be able to enter text into text boxes in a WebView in Toga.
**Environment:**
- Operating System: Mac OS 10.15.6
- Python version: 3.6.5
- Software versions:
- Briefcase: 0.3.0
- Toga: 0.3.0.dev26
**Additional context**
I discovered that if I just comment out the `keyDown_` method in toga_cocoa [here](https://github.com/beeware/toga/blob/master/src/cocoa/toga_cocoa/widgets/webview.py#L23-L27), then I am able to enter text in a WebView.
--- END ISSUE ---
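For reference, the reproduction steps above assemble into a single runnable app along the following lines — a sketch only, with a placeholder class name and app id; everything inside `startup` is taken from step 2:
```python
import toga
from toga.style import Pack


class WebViewRepro(toga.App):
    """Placeholder app wrapping the snippet from step 2."""

    def startup(self):
        main_box = toga.Box()  # created as in the report, though never attached
        self.main_window = toga.MainWindow(title=self.formal_name)
        self.main_window.content = toga.WebView(style=Pack(flex=1))
        self.main_window.content.url = 'https://www.google.ca/'
        self.main_window.show()


def main():
    # Placeholder formal name / app id of the kind `briefcase dev` generates.
    return WebViewRepro('WebView Repro', 'org.example.webviewrepro')
```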
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cocoa/toga_cocoa/widgets/webview.py`
Content:
```
1 from asyncio import get_event_loop
2
3 from travertino.size import at_least
4
5 from toga_cocoa.keys import toga_key
6 from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView
7 from rubicon.objc import objc_method, py_from_ns
8 from rubicon.objc.runtime import objc_id
9
10 from .base import Widget
11
12
13 class TogaWebView(WKWebView):
14 @objc_method
15 def webView_didFinish_navigation_(self, sender, wkNavigation) -> None:
16 if self.interface.on_webview_load:
17 self.interface.on_webview_load(self.interface)
18
19 @objc_method
20 def acceptsFirstResponder(self) -> bool:
21 return True
22
23 @objc_method
24 def keyDown_(self, event) -> None:
25 if self.interface.on_key_down:
26 self.interface.on_key_down(self.interface, **toga_key(event))
27
28 @objc_method
29 def touchBar(self):
30 # Disable the touchbar.
31 return None
32
33
34 class WebView(Widget):
35 def create(self):
36 self.native = TogaWebView.alloc().init()
37 self.native.interface = self.interface
38
39 self.native.downloadDelegate = self.native
40 self.native.frameLoadDelegate = self.native
41 self.native.policyDelegate = self.native
42 self.native.resourceLoadDelegate = self.native
43 self.native.uIDelegate = self.native
44
45 # Add the layout constraints
46 self.add_constraints()
47
48 def set_on_key_down(self, handler):
49 pass
50
51 def set_on_webview_load(self, handler):
52 pass
53
54 def get_dom(self):
55 # Utilises Step 2) of:
56 # https://developer.apple.com/library/content/documentation/
57 # Cocoa/Conceptual/DisplayWebContent/Tasks/SaveAndLoad.html
58 html = self.native.mainframe.DOMDocument.documentElement.outerHTML
59 return html
60
61 def set_url(self, value):
62 if value:
63 request = NSURLRequest.requestWithURL(NSURL.URLWithString(self.interface.url))
64 self.native.loadRequest(request)
65
66 def set_content(self, root_url, content):
67 self.native.loadHTMLString(content, baseURL=NSURL.URLWithString(root_url))
68
69 def set_user_agent(self, value):
70 user_agent = value if value else "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8" # NOQA
71 self.native.customUserAgent = user_agent
72
73 async def evaluate_javascript(self, javascript):
74 """
75 Evaluate a JavaScript expression.
76
77 **This method is asynchronous**. It will return when the expression has been
78 evaluated and a result is available.
79
80 :param javascript: The javascript expression to evaluate
81 :type javascript: ``str``
82 """
83
84 loop = get_event_loop()
85 future = loop.create_future()
86
87 def completion_handler(res: objc_id, error: objc_id) -> None:
88
89 if error:
90 error = py_from_ns(error)
91 exc = RuntimeError(str(error))
92 future.set_exception(exc)
93 else:
94 future.set_result(py_from_ns(res))
95
96 self.native.evaluateJavaScript(javascript, completionHandler=completion_handler)
97
98 return await future
99
100 def invoke_javascript(self, javascript):
101 """
102 Invoke a block of javascript.
103
104 :param javascript: The javascript expression to invoke
105 """
106 self.native.evaluateJavaScript(javascript, completionHandler=None)
107
108 def rehint(self):
109 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
110 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cocoa/toga_cocoa/widgets/webview.py b/src/cocoa/toga_cocoa/widgets/webview.py
--- a/src/cocoa/toga_cocoa/widgets/webview.py
+++ b/src/cocoa/toga_cocoa/widgets/webview.py
@@ -4,7 +4,7 @@
from toga_cocoa.keys import toga_key
from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView
-from rubicon.objc import objc_method, py_from_ns
+from rubicon.objc import objc_method, py_from_ns, send_super
from rubicon.objc.runtime import objc_id
from .base import Widget
@@ -24,6 +24,7 @@
def keyDown_(self, event) -> None:
if self.interface.on_key_down:
self.interface.on_key_down(self.interface, **toga_key(event))
+ send_super(__class__, self, 'keyDown:', event)
@objc_method
def touchBar(self):
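Reassembled from the hunk above, the patched handler reads as below — a sketch of the resulting method, assuming the rest of the class and its imports stay as in the file shown earlier:
```python
from rubicon.objc import objc_method, send_super
from toga_cocoa.keys import toga_key
from toga_cocoa.libs import WKWebView


class TogaWebView(WKWebView):
    @objc_method
    def keyDown_(self, event) -> None:
        # Fire Toga's own handler first, then forward the event to the
        # superclass so WebKit still receives the keystroke for text input.
        if self.interface.on_key_down:
            self.interface.on_key_down(self.interface, **toga_key(event))
        send_super(__class__, self, 'keyDown:', event)
```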
|
{"golden_diff": "diff --git a/src/cocoa/toga_cocoa/widgets/webview.py b/src/cocoa/toga_cocoa/widgets/webview.py\n--- a/src/cocoa/toga_cocoa/widgets/webview.py\n+++ b/src/cocoa/toga_cocoa/widgets/webview.py\n@@ -4,7 +4,7 @@\n \n from toga_cocoa.keys import toga_key\n from toga_cocoa.libs import NSURL, NSURLRequest, WKWebView\n-from rubicon.objc import objc_method, py_from_ns\n+from rubicon.objc import objc_method, py_from_ns, send_super\n from rubicon.objc.runtime import objc_id\n \n from .base import Widget\n@@ -24,6 +24,7 @@\n def keyDown_(self, event) -> None:\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(event))\n+ send_super(__class__, self, 'keyDown:', event)\n \n @objc_method\n def touchBar(self):\n", "issue": "WebView in MacOS not accepting keyboard input\n**Describe the bug**\r\n\r\nI am unable to type text in HTML input fields within a Toga WebView on Mac OS.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Follow the tutorial at https://docs.beeware.org/en/latest/tutorial/tutorial-1.html until you are able to run `briefcase dev` successfully.\r\n2. Modify the `startup` function so that its body is:\r\n ```main_box = toga.Box()\r\n self.main_window = toga.MainWindow(title=self.formal_name)\r\n self.main_window.content = toga.WebView(style=Pack(flex=1))\r\n self.main_window.content.url = 'https://www.google.ca/'\r\n self.main_window.show()\r\n ```\r\n3. Attempt to search for something in the Google search input.\r\n4. When I try, I am unable to type characters. Nothing happens with my key presses. Note that specified command shortcuts do seem to work.\r\n\r\n**Expected behavior**\r\nI expect to be able to enter text into text boxes in a WebView in Toga.\r\n\r\n**Environment:**\r\n - Operating System: Mac OS 10.15.6\r\n - Python version: 3.6.5\r\n - Software versions:\r\n - Briefcase: 0.3.0\r\n - Toga: 0.3.0.dev26\r\n\r\n**Additional context**\r\nI discovered that If I just comment out the `keyDown_` method in toga_cocoa [here](https://github.com/beeware/toga/blob/master/src/cocoa/toga_cocoa/widgets/webview.py#L23-L27) then I am able to enter text in a WebView.\n", "before_files": [{"content": "from asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nfrom toga_cocoa.keys import toga_key\nfrom toga_cocoa.libs import NSURL, NSURLRequest, WKWebView\nfrom rubicon.objc import objc_method, py_from_ns\nfrom rubicon.objc.runtime import objc_id\n\nfrom .base import Widget\n\n\nclass TogaWebView(WKWebView):\n @objc_method\n def webView_didFinish_navigation_(self, sender, wkNavigation) -> None:\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n @objc_method\n def acceptsFirstResponder(self) -> bool:\n return True\n\n @objc_method\n def keyDown_(self, event) -> None:\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(event))\n\n @objc_method\n def touchBar(self):\n # Disable the touchbar.\n return None\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebView.alloc().init()\n self.native.interface = self.interface\n\n self.native.downloadDelegate = self.native\n self.native.frameLoadDelegate = self.native\n self.native.policyDelegate = self.native\n self.native.resourceLoadDelegate = self.native\n self.native.uIDelegate = self.native\n\n # Add the layout constraints\n self.add_constraints()\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_dom(self):\n # Utilises 
Step 2) of:\n # https://developer.apple.com/library/content/documentation/\n # Cocoa/Conceptual/DisplayWebContent/Tasks/SaveAndLoad.html\n html = self.native.mainframe.DOMDocument.documentElement.outerHTML\n return html\n\n def set_url(self, value):\n if value:\n request = NSURLRequest.requestWithURL(NSURL.URLWithString(self.interface.url))\n self.native.loadRequest(request)\n\n def set_content(self, root_url, content):\n self.native.loadHTMLString(content, baseURL=NSURL.URLWithString(root_url))\n\n def set_user_agent(self, value):\n user_agent = value if value else \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8\" # NOQA\n self.native.customUserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n \"\"\"\n Evaluate a JavaScript expression.\n\n **This method is asynchronous**. It will return when the expression has been\n evaluated and a result is available.\n\n :param javascript: The javascript expression to evaluate\n :type javascript: ``str``\n \"\"\"\n\n loop = get_event_loop()\n future = loop.create_future()\n\n def completion_handler(res: objc_id, error: objc_id) -> None:\n\n if error:\n error = py_from_ns(error)\n exc = RuntimeError(str(error))\n future.set_exception(exc)\n else:\n future.set_result(py_from_ns(res))\n\n self.native.evaluateJavaScript(javascript, completionHandler=completion_handler)\n\n return await future\n\n def invoke_javascript(self, javascript):\n \"\"\"\n Invoke a block of javascript.\n\n :param javascript: The javascript expression to invoke\n \"\"\"\n self.native.evaluateJavaScript(javascript, completionHandler=None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/cocoa/toga_cocoa/widgets/webview.py"}], "after_files": [{"content": "from asyncio import get_event_loop\n\nfrom travertino.size import at_least\n\nfrom toga_cocoa.keys import toga_key\nfrom toga_cocoa.libs import NSURL, NSURLRequest, WKWebView\nfrom rubicon.objc import objc_method, py_from_ns, send_super\nfrom rubicon.objc.runtime import objc_id\n\nfrom .base import Widget\n\n\nclass TogaWebView(WKWebView):\n @objc_method\n def webView_didFinish_navigation_(self, sender, wkNavigation) -> None:\n if self.interface.on_webview_load:\n self.interface.on_webview_load(self.interface)\n\n @objc_method\n def acceptsFirstResponder(self) -> bool:\n return True\n\n @objc_method\n def keyDown_(self, event) -> None:\n if self.interface.on_key_down:\n self.interface.on_key_down(self.interface, **toga_key(event))\n send_super(__class__, self, 'keyDown:', event)\n\n @objc_method\n def touchBar(self):\n # Disable the touchbar.\n return None\n\n\nclass WebView(Widget):\n def create(self):\n self.native = TogaWebView.alloc().init()\n self.native.interface = self.interface\n\n self.native.downloadDelegate = self.native\n self.native.frameLoadDelegate = self.native\n self.native.policyDelegate = self.native\n self.native.resourceLoadDelegate = self.native\n self.native.uIDelegate = self.native\n\n # Add the layout constraints\n self.add_constraints()\n\n def set_on_key_down(self, handler):\n pass\n\n def set_on_webview_load(self, handler):\n pass\n\n def get_dom(self):\n # Utilises Step 2) of:\n # https://developer.apple.com/library/content/documentation/\n # Cocoa/Conceptual/DisplayWebContent/Tasks/SaveAndLoad.html\n html = self.native.mainframe.DOMDocument.documentElement.outerHTML\n return 
html\n\n def set_url(self, value):\n if value:\n request = NSURLRequest.requestWithURL(NSURL.URLWithString(self.interface.url))\n self.native.loadRequest(request)\n\n def set_content(self, root_url, content):\n self.native.loadHTMLString(content, baseURL=NSURL.URLWithString(root_url))\n\n def set_user_agent(self, value):\n user_agent = value if value else \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/603.3.8 (KHTML, like Gecko) Version/10.1.2 Safari/603.3.8\" # NOQA\n self.native.customUserAgent = user_agent\n\n async def evaluate_javascript(self, javascript):\n \"\"\"\n Evaluate a JavaScript expression.\n\n **This method is asynchronous**. It will return when the expression has been\n evaluated and a result is available.\n\n :param javascript: The javascript expression to evaluate\n :type javascript: ``str``\n \"\"\"\n\n loop = get_event_loop()\n future = loop.create_future()\n\n def completion_handler(res: objc_id, error: objc_id) -> None:\n\n if error:\n error = py_from_ns(error)\n exc = RuntimeError(str(error))\n future.set_exception(exc)\n else:\n future.set_result(py_from_ns(res))\n\n self.native.evaluateJavaScript(javascript, completionHandler=completion_handler)\n\n return await future\n\n def invoke_javascript(self, javascript):\n \"\"\"\n Invoke a block of javascript.\n\n :param javascript: The javascript expression to invoke\n \"\"\"\n self.native.evaluateJavaScript(javascript, completionHandler=None)\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/cocoa/toga_cocoa/widgets/webview.py"}]}
| 1,624 | 213 |
gh_patches_debug_15799
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-3453
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What is the expected Kinetics400 dataset directory structure?
Given that the dataset does not come with official downloader scripts and that most roll their own or hack some third-party scripts, it would be much clearer if https://pytorch.org/docs/stable/torchvision/datasets.html#kinetics-400 explained what directory structure is expected by `torchvision.datasets.Kinetics400`
What is the expected dataset size? And the video file extensions?
Thanks!
cc @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/kinetics.py`
Content:
```
1 from .utils import list_dir
2 from .folder import make_dataset
3 from .video_utils import VideoClips
4 from .vision import VisionDataset
5
6
7 class Kinetics400(VisionDataset):
8 """
9 `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_
10 dataset.
11
12 Kinetics-400 is an action recognition video dataset.
13 This dataset consider every video as a collection of video clips of fixed size, specified
14 by ``frames_per_clip``, where the step in frames between each clip is given by
15 ``step_between_clips``.
16
17 To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
18 and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
19 elements will come from video 1, and the next three elements from video 2.
20 Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
21 frames in a video might be present.
22
23 Internally, it uses a VideoClips object to handle clip creation.
24
25 Args:
26 root (string): Root directory of the Kinetics-400 Dataset.
27 frames_per_clip (int): number of frames in a clip
28 step_between_clips (int): number of frames between each clip
29 transform (callable, optional): A function/transform that takes in a TxHxWxC video
30 and returns a transformed version.
31
32 Returns:
33 tuple: A 3-tuple with the following entries:
34
35 - video (Tensor[T, H, W, C]): the `T` video frames
36 - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
37 and `L` is the number of points
38 - label (int): class of the video clip
39 """
40
41 def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,
42 extensions=('avi',), transform=None, _precomputed_metadata=None,
43 num_workers=1, _video_width=0, _video_height=0,
44 _video_min_dimension=0, _audio_samples=0, _audio_channels=0):
45 super(Kinetics400, self).__init__(root)
46
47 classes = list(sorted(list_dir(root)))
48 class_to_idx = {classes[i]: i for i in range(len(classes))}
49 self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
50 self.classes = classes
51 video_list = [x[0] for x in self.samples]
52 self.video_clips = VideoClips(
53 video_list,
54 frames_per_clip,
55 step_between_clips,
56 frame_rate,
57 _precomputed_metadata,
58 num_workers=num_workers,
59 _video_width=_video_width,
60 _video_height=_video_height,
61 _video_min_dimension=_video_min_dimension,
62 _audio_samples=_audio_samples,
63 _audio_channels=_audio_channels,
64 )
65 self.transform = transform
66
67 @property
68 def metadata(self):
69 return self.video_clips.metadata
70
71 def __len__(self):
72 return self.video_clips.num_clips()
73
74 def __getitem__(self, idx):
75 video, audio, info, video_idx = self.video_clips.get_clip(idx)
76 label = self.samples[video_idx][1]
77
78 if self.transform is not None:
79 video = self.transform(video)
80
81 return video, audio, label
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py
--- a/torchvision/datasets/kinetics.py
+++ b/torchvision/datasets/kinetics.py
@@ -23,7 +23,18 @@
Internally, it uses a VideoClips object to handle clip creation.
Args:
- root (string): Root directory of the Kinetics-400 Dataset.
+ root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows:
+ .. code::
+
+ root/
+ ├── class1
+ │ ├── clip1.avi
+ │ ├── clip2.avi
+ │ └── ...
+ └── class2
+ ├── clipx.avi
+ └── ...
+
frames_per_clip (int): number of frames in a clip
step_between_clips (int): number of frames between each clip
transform (callable, optional): A function/transform that takes in a TxHxWxC video
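With a layout matching the docstring added above, usage looks roughly like the sketch below; the directory and class names are illustrative, and real `.avi` clips are of course required:
```python
# Illustrative layout (names invented):
#   kinetics400/
#       abseiling/
#           clip_0001.avi
#       zumba/
#           clip_0002.avi
from torchvision.datasets import Kinetics400

dataset = Kinetics400(
    root="kinetics400",
    frames_per_clip=16,
    step_between_clips=16,
    extensions=("avi",),
)
video, audio, label = dataset[0]  # Tensor[T, H, W, C], Tensor[K, L], int
```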
|
{"golden_diff": "diff --git a/torchvision/datasets/kinetics.py b/torchvision/datasets/kinetics.py\n--- a/torchvision/datasets/kinetics.py\n+++ b/torchvision/datasets/kinetics.py\n@@ -23,7 +23,18 @@\n Internally, it uses a VideoClips object to handle clip creation.\n \n Args:\n- root (string): Root directory of the Kinetics-400 Dataset.\n+ root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows:\n+ .. code::\n+\n+ root/\n+ \u251c\u2500\u2500 class1\n+ \u2502 \u251c\u2500\u2500 clip1.avi\n+ \u2502 \u251c\u2500\u2500 clip2.avi\n+ \u2502 \u2514\u2500\u2500 ...\n+ \u2514\u2500\u2500 class2\n+ \u251c\u2500\u2500 clipx.avi\n+ \u2514\u2500\u2500 ...\n+\n frames_per_clip (int): number of frames in a clip\n step_between_clips (int): number of frames between each clip\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n", "issue": "What is expected Kinetics400 dataset directory structure?\nGiven that the dataset does not come with official downloader scripts and that most roll their own or hack some third-party scripts, it would be much clearer if https://pytorch.org/docs/stable/torchvision/datasets.html#kinetics-400 explained what directory structure is expected by `torchvision.datasets.Kinetics400`\r\n\r\nWhat is the expected dataset size? and the video file extensions?\r\n\r\nThanks!\n\ncc @pmeier\n", "before_files": [{"content": "from .utils import list_dir\nfrom .folder import make_dataset\nfrom .video_utils import VideoClips\nfrom .vision import VisionDataset\n\n\nclass Kinetics400(VisionDataset):\n \"\"\"\n `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_\n dataset.\n\n Kinetics-400 is an action recognition video dataset.\n This dataset consider every video as a collection of video clips of fixed size, specified\n by ``frames_per_clip``, where the step in frames between each clip is given by\n ``step_between_clips``.\n\n To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``\n and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two\n elements will come from video 1, and the next three elements from video 2.\n Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all\n frames in a video might be present.\n\n Internally, it uses a VideoClips object to handle clip creation.\n\n Args:\n root (string): Root directory of the Kinetics-400 Dataset.\n frames_per_clip (int): number of frames in a clip\n step_between_clips (int): number of frames between each clip\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n and returns a transformed version.\n\n Returns:\n tuple: A 3-tuple with the following entries:\n\n - video (Tensor[T, H, W, C]): the `T` video frames\n - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels\n and `L` is the number of points\n - label (int): class of the video clip\n \"\"\"\n\n def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,\n extensions=('avi',), transform=None, _precomputed_metadata=None,\n num_workers=1, _video_width=0, _video_height=0,\n _video_min_dimension=0, _audio_samples=0, _audio_channels=0):\n super(Kinetics400, self).__init__(root)\n\n classes = list(sorted(list_dir(root)))\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)\n self.classes = classes\n video_list = [x[0] 
for x in self.samples]\n self.video_clips = VideoClips(\n video_list,\n frames_per_clip,\n step_between_clips,\n frame_rate,\n _precomputed_metadata,\n num_workers=num_workers,\n _video_width=_video_width,\n _video_height=_video_height,\n _video_min_dimension=_video_min_dimension,\n _audio_samples=_audio_samples,\n _audio_channels=_audio_channels,\n )\n self.transform = transform\n\n @property\n def metadata(self):\n return self.video_clips.metadata\n\n def __len__(self):\n return self.video_clips.num_clips()\n\n def __getitem__(self, idx):\n video, audio, info, video_idx = self.video_clips.get_clip(idx)\n label = self.samples[video_idx][1]\n\n if self.transform is not None:\n video = self.transform(video)\n\n return video, audio, label\n", "path": "torchvision/datasets/kinetics.py"}], "after_files": [{"content": "from .utils import list_dir\nfrom .folder import make_dataset\nfrom .video_utils import VideoClips\nfrom .vision import VisionDataset\n\n\nclass Kinetics400(VisionDataset):\n \"\"\"\n `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_\n dataset.\n\n Kinetics-400 is an action recognition video dataset.\n This dataset consider every video as a collection of video clips of fixed size, specified\n by ``frames_per_clip``, where the step in frames between each clip is given by\n ``step_between_clips``.\n\n To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``\n and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two\n elements will come from video 1, and the next three elements from video 2.\n Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all\n frames in a video might be present.\n\n Internally, it uses a VideoClips object to handle clip creation.\n\n Args:\n root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows:\n .. 
code::\n\n root/\n \u251c\u2500\u2500 class1\n \u2502 \u251c\u2500\u2500 clip1.avi\n \u2502 \u251c\u2500\u2500 clip2.avi\n \u2502 \u2514\u2500\u2500 ...\n \u2514\u2500\u2500 class2\n \u251c\u2500\u2500 clipx.avi\n \u2514\u2500\u2500 ...\n\n frames_per_clip (int): number of frames in a clip\n step_between_clips (int): number of frames between each clip\n transform (callable, optional): A function/transform that takes in a TxHxWxC video\n and returns a transformed version.\n\n Returns:\n tuple: A 3-tuple with the following entries:\n\n - video (Tensor[T, H, W, C]): the `T` video frames\n - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels\n and `L` is the number of points\n - label (int): class of the video clip\n \"\"\"\n\n def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None,\n extensions=('avi',), transform=None, _precomputed_metadata=None,\n num_workers=1, _video_width=0, _video_height=0,\n _video_min_dimension=0, _audio_samples=0, _audio_channels=0):\n super(Kinetics400, self).__init__(root)\n\n classes = list(sorted(list_dir(root)))\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)\n self.classes = classes\n video_list = [x[0] for x in self.samples]\n self.video_clips = VideoClips(\n video_list,\n frames_per_clip,\n step_between_clips,\n frame_rate,\n _precomputed_metadata,\n num_workers=num_workers,\n _video_width=_video_width,\n _video_height=_video_height,\n _video_min_dimension=_video_min_dimension,\n _audio_samples=_audio_samples,\n _audio_channels=_audio_channels,\n )\n self.transform = transform\n\n @property\n def metadata(self):\n return self.video_clips.metadata\n\n def __len__(self):\n return self.video_clips.num_clips()\n\n def __getitem__(self, idx):\n video, audio, info, video_idx = self.video_clips.get_clip(idx)\n label = self.samples[video_idx][1]\n\n if self.transform is not None:\n video = self.transform(video)\n\n return video, audio, label\n", "path": "torchvision/datasets/kinetics.py"}]}
| 1,301 | 239 |
gh_patches_debug_40895
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-4414
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Completed' attribute missing from order-statistics API
**I'm submitting a ...** (check one with "x")
- [x] bug report
- [ ] feature request
- [ ] support request => Please do not submit support requests here, instead ask your query in our Gitter channel at https://gitter.im/fossasia/open-event-orga-server
**Current behavior:**
Right now the response contains the following fields
```
"placed": 0,
"draft": 0,
"cancelled": 0,
"total": 0,
"expired": 0,
"pending": 0
```
The attribute `completed` is missing from the response.
Image for reference: https://user-images.githubusercontent.com/13910561/29230756-f5dabe20-7f01-11e7-8c5f-58ee69c38e65.png
**Expected behavior:**
`completed` attribute should be there.
--- END ISSUE ---
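In other words, each statistics block should carry one more key alongside the existing ones, along these lines (values illustrative):
```python
expected_block = {
    "placed": 0,
    "draft": 0,
    "cancelled": 0,
    "pending": 0,
    "expired": 0,
    "completed": 0,  # currently missing
    "total": 0,
}
```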
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/order_statistics/tickets.py`
Content:
```
1 from flask_rest_jsonapi import ResourceDetail
2 from marshmallow_jsonapi.flask import Schema
3 from marshmallow_jsonapi import fields
4 from sqlalchemy import func
5
6 from app.api.helpers.utilities import dasherize
7 from app.api.bootstrap import api
8 from app.models import db
9 from app.models.order import Order, OrderTicket
10 from app.models.ticket import Ticket
11 from app.api.helpers.db import get_count
12
13
14 class OrderStatisticsTicketSchema(Schema):
15 """
16 Api schema
17 """
18
19 class Meta:
20 """
21 Meta class
22 """
23 type_ = 'order-statistics-ticket'
24 self_view = 'v1.order_statistics_ticket_detail'
25 self_view_kwargs = {'id': '<id>'}
26 inflect = dasherize
27
28 id = fields.Str()
29 identifier = fields.Str()
30 tickets = fields.Method("tickets_count")
31 orders = fields.Method("orders_count")
32 sales = fields.Method("sales_count")
33
34 def tickets_count(self, obj):
35 obj_id = obj.id
36 total = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
37 OrderTicket.ticket_id == obj_id).scalar()
38 draft = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
39 OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()
40 cancelled = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
41 OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()
42 pending = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
43 OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()
44 expired = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
45 OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()
46 placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
47 OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()
48 result = {
49 'total': total or 0,
50 'draft': draft or 0,
51 'cancelled': cancelled or 0,
52 'pending': pending or 0,
53 'expired': expired or 0,
54 'placed': placed or 0
55 }
56 return result
57
58 def orders_count(self, obj):
59 obj_id = obj.id
60 total = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id))
61 draft = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
62 Order.status == 'draft'))
63 cancelled = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
64 Order.status == 'cancelled'))
65 pending = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
66 Order.status == 'pending'))
67 expired = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
68 Order.status == 'expired'))
69 placed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
70 Order.status == 'placed'))
71 result = {
72 'total': total or 0,
73 'draft': draft or 0,
74 'cancelled': cancelled or 0,
75 'pending': pending or 0,
76 'expired': expired or 0,
77 'placed': placed or 0
78 }
79 return result
80
81 def sales_count(self, obj):
82 obj_id = obj.id
83 total = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
84 OrderTicket.ticket_id == obj_id).scalar()
85 draft = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
86 OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()
87 cancelled = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
88 OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()
89 pending = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
90 OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()
91 expired = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
92 OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()
93 placed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
94 OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()
95 result = {
96 'total': total or 0,
97 'draft': draft or 0,
98 'cancelled': cancelled or 0,
99 'pending': pending or 0,
100 'expired': expired or 0,
101 'placed': placed or 0
102 }
103 return result
104
105
106 class OrderStatisticsTicketDetail(ResourceDetail):
107 """
108 detail by id
109 """
110 methods = ['GET']
111 decorators = (api.has_permission('is_coorganizer', fetch="event_id", fetch_as="event_id", model=Ticket),)
112 schema = OrderStatisticsTicketSchema
113 data_layer = {'session': db.session,
114 'model': Ticket}
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/order_statistics/tickets.py b/app/api/order_statistics/tickets.py
--- a/app/api/order_statistics/tickets.py
+++ b/app/api/order_statistics/tickets.py
@@ -45,13 +45,16 @@
OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()
placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()
+ completed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(
+ OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
@@ -68,13 +71,16 @@
Order.status == 'expired'))
placed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
Order.status == 'placed'))
+ completed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,
+ Order.status == 'completed'))
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
@@ -92,13 +98,16 @@
OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()
placed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()
+ completed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(
+ OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()
result = {
'total': total or 0,
'draft': draft or 0,
'cancelled': cancelled or 0,
'pending': pending or 0,
'expired': expired or 0,
- 'placed': placed or 0
+ 'placed': placed or 0,
+ 'completed': completed or 0
}
return result
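The patch applies the same one-query-per-status pattern in all three methods; a more compact variant (purely a sketch, not what the project ships) could loop over the statuses instead:
```python
# Hypothetical refactor sketch; db, func, Order and OrderTicket as imported above.
ORDER_STATUSES = ('draft', 'cancelled', 'pending', 'expired', 'placed', 'completed')


def ticket_quantities(obj_id):
    base = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(
        Order.order_tickets).filter(OrderTicket.ticket_id == obj_id)
    result = {'total': base.scalar() or 0}
    for status in ORDER_STATUSES:
        result[status] = base.filter(Order.status == status).scalar() or 0
    return result
```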
|
{"golden_diff": "diff --git a/app/api/order_statistics/tickets.py b/app/api/order_statistics/tickets.py\n--- a/app/api/order_statistics/tickets.py\n+++ b/app/api/order_statistics/tickets.py\n@@ -45,13 +45,16 @@\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n+ completed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n+ OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n \n@@ -68,13 +71,16 @@\n Order.status == 'expired'))\n placed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'placed'))\n+ completed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n+ Order.status == 'completed'))\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n \n@@ -92,13 +98,16 @@\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n+ completed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n+ OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n- 'placed': placed or 0\n+ 'placed': placed or 0,\n+ 'completed': completed or 0\n }\n return result\n", "issue": "'Completed' attribute missing from order-statistics API\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-orga-server\r\n\r\n**Current behavior:**\r\nRight now the response contains the following fields\r\n ```\r\n \"placed\": 0,\r\n \"draft\": 0,\r\n \"cancelled\": 0,\r\n \"total\": 0,\r\n \"expired\": 0,\r\n \"pending\": 0\r\n```\r\nThe attribute `completed` is missing from the response. 
\r\nImage for reference : https://user-images.githubusercontent.com/13910561/29230756-f5dabe20-7f01-11e7-8c5f-58ee69c38e65.png\r\n\r\n**Expected behavior:**\r\n`completed` attribute should be there.\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\nfrom marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom sqlalchemy import func\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.bootstrap import api\nfrom app.models import db\nfrom app.models.order import Order, OrderTicket\nfrom app.models.ticket import Ticket\nfrom app.api.helpers.db import get_count\n\n\nclass OrderStatisticsTicketSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'order-statistics-ticket'\n self_view = 'v1.order_statistics_ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str()\n identifier = fields.Str()\n tickets = fields.Method(\"tickets_count\")\n orders = fields.Method(\"orders_count\")\n sales = fields.Method(\"sales_count\")\n\n def tickets_count(self, obj):\n obj_id = obj.id\n total = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id).scalar()\n draft = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n def orders_count(self, obj):\n obj_id = obj.id\n total = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id))\n draft = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'draft'))\n cancelled = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'cancelled'))\n pending = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'pending'))\n expired = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'expired'))\n placed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'placed'))\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n def sales_count(self, obj):\n obj_id = obj.id\n total = 
db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id).scalar()\n draft = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0\n }\n return result\n\n\nclass OrderStatisticsTicketDetail(ResourceDetail):\n \"\"\"\n detail by id\n \"\"\"\n methods = ['GET']\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\", fetch_as=\"event_id\", model=Ticket),)\n schema = OrderStatisticsTicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/order_statistics/tickets.py"}], "after_files": [{"content": "from flask_rest_jsonapi import ResourceDetail\nfrom marshmallow_jsonapi.flask import Schema\nfrom marshmallow_jsonapi import fields\nfrom sqlalchemy import func\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.bootstrap import api\nfrom app.models import db\nfrom app.models.order import Order, OrderTicket\nfrom app.models.ticket import Ticket\nfrom app.api.helpers.db import get_count\n\n\nclass OrderStatisticsTicketSchema(Schema):\n \"\"\"\n Api schema\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class\n \"\"\"\n type_ = 'order-statistics-ticket'\n self_view = 'v1.order_statistics_ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str()\n identifier = fields.Str()\n tickets = fields.Method(\"tickets_count\")\n orders = fields.Method(\"orders_count\")\n sales = fields.Method(\"sales_count\")\n\n def tickets_count(self, obj):\n obj_id = obj.id\n total = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id).scalar()\n draft = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n completed = 
db.session.query(func.sum(OrderTicket.quantity.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0,\n 'completed': completed or 0\n }\n return result\n\n def orders_count(self, obj):\n obj_id = obj.id\n total = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id))\n draft = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'draft'))\n cancelled = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'cancelled'))\n pending = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'pending'))\n expired = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'expired'))\n placed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'placed'))\n completed = get_count(db.session.query(Order).join(Order.order_tickets).filter(OrderTicket.ticket_id == obj_id,\n Order.status == 'completed'))\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0,\n 'completed': completed or 0\n }\n return result\n\n def sales_count(self, obj):\n obj_id = obj.id\n total = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id).scalar()\n draft = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'draft').scalar()\n cancelled = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'cancelled').scalar()\n pending = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'pending').scalar()\n expired = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'expired').scalar()\n placed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'placed').scalar()\n completed = db.session.query(func.sum(Order.amount.label('sum'))).join(Order.order_tickets).filter(\n OrderTicket.ticket_id == obj_id, Order.status == 'completed').scalar()\n result = {\n 'total': total or 0,\n 'draft': draft or 0,\n 'cancelled': cancelled or 0,\n 'pending': pending or 0,\n 'expired': expired or 0,\n 'placed': placed or 0,\n 'completed': completed or 0\n }\n return result\n\n\nclass OrderStatisticsTicketDetail(ResourceDetail):\n \"\"\"\n detail by id\n \"\"\"\n methods = ['GET']\n decorators = (api.has_permission('is_coorganizer', fetch=\"event_id\", fetch_as=\"event_id\", model=Ticket),)\n schema = OrderStatisticsTicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/order_statistics/tickets.py"}]}
| 1,880 | 607 |
gh_patches_debug_2451
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-17429
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
empty_like
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/creation.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.4.2 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.4.2 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.4.2 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.4.2 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -71,3 +71,8 @@
@to_ivy_arrays_and_back
def eye(num_rows, num_columns=None, dtype=None, name=None):
return ivy.eye(num_rows, num_columns, dtype=dtype)
+
+
+@to_ivy_arrays_and_back
+def empty_like(x, dtype=None, name=None):
+ return ivy.empty_like(x, dtype=dtype)
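
A minimal usage sketch of the behaviour the new frontend wrapper delegates to — assuming an ivy installation with the NumPy backend available; the array values below are purely illustrative, not part of the patch:

```python
# Illustrative only: shows the ivy.empty_like semantics that the new
# paddle-frontend empty_like() forwards to (same shape, optional dtype).
import ivy

ivy.set_backend("numpy")  # assumption: the numpy backend is installed
x = ivy.array([[1.0, 2.0], [3.0, 4.0]])
y = ivy.empty_like(x, dtype="float32")
assert y.shape == x.shape  # contents are uninitialized; only shape/dtype carry over
```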
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -71,3 +71,8 @@\n @to_ivy_arrays_and_back\r\n def eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n+\r\n+\r\n+@to_ivy_arrays_and_back\r\n+def empty_like(x, dtype=None, name=None):\r\n+ return ivy.empty_like(x, dtype=dtype)\n", "issue": "empty_like\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}], "after_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, 
place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.4.2 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]}
| 1,090 | 152 |
gh_patches_debug_19096
|
rasdani/github-patches
|
git_diff
|
spack__spack-28354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting LD_LIBRARY_PATH to be "helpful" considered harmful
(with apologies to Dijkstra and everyone else who's recycled that title meme)
**TL;DR**: the library that one of my spack binaries uses depends on what other spack packages I've `module load`-ed. **YIKES**. See also #3926.
I was trying to understand why @JusticeForMikeBrown was having trouble building bowtie2 (see #3950) when I've built it successfully with `[email protected]`.
His problem with `[email protected]` was zlib related; I checked the package and noticed that it doesn't have a dependency on zlib. Perhaps it should, I thought. Wonder what zlib my "production" copy was linked against?
```
$ ldd bowtie2-align-l | grep libz
libz.so.1 => /blah/spack/v0.0.8/opt/spack/linux-centos7-x86_64/gcc-5.4.0/zlib-1.2.11-ec535e2ikkpl7hd4y454t3yydjqorja6/lib/libz.so.1 (0x00002aaaaaf32000)
```
That surprised me, because there's no zlib dependency in the package.
Sure enough, it's because I have something else `module load`-ed that has the side effect of adding zlib's directory to `LD_LIBRARY_PATH`.
```
$ (unset LD_LIBRARY_PATH; ldd bowtie2-align-l) | grep libz
libz.so.1 => /lib64/libz.so.1 (0x00002aaaaaf2f000)
```
My "newer" version of CentOS has a `/lib64/libz.so.1` that includes gzbuffer (`nm` didn't help, library's stripped...):
```
$ strings /lib64/libz.so.1 | grep buffer
gzbuffer
buffer error
```
so it (probably) works for me either way.
But imagine if there were two versions of a library (perhaps something mathematical) that give different results. Now you have a program giving different results depending on what other Spack applications are also loaded.
**THAT** would be fun to track down (assuming you even noticed...).
W.R.T. the main problem, bowtie2 should probably have a dependency on a new-ish version of zlib, but stuff like this is why LD_LIBRARY_PATH is a slippery tool to reach for.
I'll argue that this kind of unpredictability is a bigger negative than being helpful and always setting `LD_LIBRARY_PATH`. This comment in the docs isn't actually correct:
> Spack avoids library misconfiguration by using RPATH to link dependencies. When a user links a library or runs a program, it is tied to the dependencies it was built with, so there is no need to manipulate LD_LIBRARY_PATH at runtime.
>
> [clipped from here](http://spack.readthedocs.io/en/latest/features.html?highlight=RPATH#packages-can-peacefully-coexist)
What would happen if `LD_LIBRARY_PATH` became *opt-in*, so that packages that need it specify it in their package definitions?
Looking at [the list of cases where RPATH support doesn't work](http://spack.readthedocs.io/en/latest/workflows.html?highlight=LD_LIBRARY_PATH#transitive-dependencies), it seems like 1) is not relevant (I think it's referring to `PERL5LIB`, etc...) and 3) are simply bugs. That leaves 2), python extensions. Is `RPATH` unworkable there or just not yet working?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/user_environment.py`
Content:
```
1 # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5 import os
6 import sys
7
8 import spack.build_environment
9 import spack.config
10 import spack.util.environment as environment
11 import spack.util.prefix as prefix
12
13 #: Environment variable name Spack uses to track individually loaded packages
14 spack_loaded_hashes_var = "SPACK_LOADED_HASHES"
15
16
17 def prefix_inspections(platform):
18 """Get list of prefix inspections for platform
19
20 Arguments:
21 platform (str): the name of the platform to consider. The platform
22 determines what environment variables Spack will use for some
23 inspections.
24
25 Returns:
26 A dictionary mapping subdirectory names to lists of environment
27 variables to modify with that directory if it exists.
28 """
29 inspections = spack.config.get("modules:prefix_inspections", {})
30 if inspections:
31 return inspections
32
33 inspections = {
34 "bin": ["PATH"],
35 "lib": ["LD_LIBRARY_PATH", "LIBRARY_PATH"],
36 "lib64": ["LD_LIBRARY_PATH", "LIBRARY_PATH"],
37 "man": ["MANPATH"],
38 "share/man": ["MANPATH"],
39 "share/aclocal": ["ACLOCAL_PATH"],
40 "include": ["CPATH"],
41 "lib/pkgconfig": ["PKG_CONFIG_PATH"],
42 "lib64/pkgconfig": ["PKG_CONFIG_PATH"],
43 "share/pkgconfig": ["PKG_CONFIG_PATH"],
44 "": ["CMAKE_PREFIX_PATH"],
45 }
46
47 if platform == "darwin":
48 for subdir in ("lib", "lib64"):
49 inspections[subdir].append("DYLD_FALLBACK_LIBRARY_PATH")
50
51 return inspections
52
53
54 def unconditional_environment_modifications(view):
55 """List of environment (shell) modifications to be processed for view.
56
57 This list does not depend on the specs in this environment"""
58 env = environment.EnvironmentModifications()
59
60 for subdir, vars in prefix_inspections(sys.platform).items():
61 full_subdir = os.path.join(view.root, subdir)
62 for var in vars:
63 env.prepend_path(var, full_subdir)
64
65 return env
66
67
68 def environment_modifications_for_spec(spec, view=None, set_package_py_globals=True):
69 """List of environment (shell) modifications to be processed for spec.
70
71 This list is specific to the location of the spec or its projection in
72 the view.
73
74 Args:
75 spec (spack.spec.Spec): spec for which to list the environment modifications
76 view: view associated with the spec passed as first argument
77 set_package_py_globals (bool): whether or not to set the global variables in the
78 package.py files (this may be problematic when using buildcaches that have
79 been built on a different but compatible OS)
80 """
81 spec = spec.copy()
82 if view and not spec.external:
83 spec.prefix = prefix.Prefix(view.get_projection_for_spec(spec))
84
85 # generic environment modifications determined by inspecting the spec
86 # prefix
87 env = environment.inspect_path(
88 spec.prefix, prefix_inspections(spec.platform), exclude=environment.is_system_path
89 )
90
91 # Let the extendee/dependency modify their extensions/dependents
92 # before asking for package-specific modifications
93 env.extend(
94 spack.build_environment.modifications_from_dependencies(
95 spec, context="run", set_package_py_globals=set_package_py_globals
96 )
97 )
98
99 if set_package_py_globals:
100 spack.build_environment.set_module_variables_for_package(spec.package)
101
102 spec.package.setup_run_environment(env)
103
104 return env
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/spack/spack/user_environment.py b/lib/spack/spack/user_environment.py
--- a/lib/spack/spack/user_environment.py
+++ b/lib/spack/spack/user_environment.py
@@ -32,12 +32,9 @@
inspections = {
"bin": ["PATH"],
- "lib": ["LD_LIBRARY_PATH", "LIBRARY_PATH"],
- "lib64": ["LD_LIBRARY_PATH", "LIBRARY_PATH"],
"man": ["MANPATH"],
"share/man": ["MANPATH"],
"share/aclocal": ["ACLOCAL_PATH"],
- "include": ["CPATH"],
"lib/pkgconfig": ["PKG_CONFIG_PATH"],
"lib64/pkgconfig": ["PKG_CONFIG_PATH"],
"share/pkgconfig": ["PKG_CONFIG_PATH"],
@@ -45,8 +42,8 @@
}
if platform == "darwin":
- for subdir in ("lib", "lib64"):
- inspections[subdir].append("DYLD_FALLBACK_LIBRARY_PATH")
+ inspections["lib"] = ["DYLD_FALLBACK_LIBRARY_PATH"]
+ inspections["lib64"] = ["DYLD_FALLBACK_LIBRARY_PATH"]
return inspections
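
A small verification sketch of what the patched helper returns — assuming it is run under `spack python` so the `spack` package is importable, and that no `modules:prefix_inspections` override is configured (the platform strings are illustrative):

```python
# Hypothetical check of the post-patch defaults: library and include
# directories no longer feed LD_LIBRARY_PATH/LIBRARY_PATH/CPATH.
from spack.user_environment import prefix_inspections

linux = prefix_inspections("linux")
assert "lib" not in linux and "lib64" not in linux and "include" not in linux
assert linux["bin"] == ["PATH"]

darwin = prefix_inspections("darwin")
assert darwin["lib"] == ["DYLD_FALLBACK_LIBRARY_PATH"]  # macOS keeps its fallback path
```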
|
{"golden_diff": "diff --git a/lib/spack/spack/user_environment.py b/lib/spack/spack/user_environment.py\n--- a/lib/spack/spack/user_environment.py\n+++ b/lib/spack/spack/user_environment.py\n@@ -32,12 +32,9 @@\n \n inspections = {\n \"bin\": [\"PATH\"],\n- \"lib\": [\"LD_LIBRARY_PATH\", \"LIBRARY_PATH\"],\n- \"lib64\": [\"LD_LIBRARY_PATH\", \"LIBRARY_PATH\"],\n \"man\": [\"MANPATH\"],\n \"share/man\": [\"MANPATH\"],\n \"share/aclocal\": [\"ACLOCAL_PATH\"],\n- \"include\": [\"CPATH\"],\n \"lib/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"lib64/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"share/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n@@ -45,8 +42,8 @@\n }\n \n if platform == \"darwin\":\n- for subdir in (\"lib\", \"lib64\"):\n- inspections[subdir].append(\"DYLD_FALLBACK_LIBRARY_PATH\")\n+ inspections[\"lib\"] = [\"DYLD_FALLBACK_LIBRARY_PATH\"]\n+ inspections[\"lib64\"] = [\"DYLD_FALLBACK_LIBRARY_PATH\"]\n \n return inspections\n", "issue": "Setting LD_LIBRARY_PATH to be \"helpful\" considered harmful\n(with apologies to Djikstra and everyone else who's recycled that title meme)\r\n\r\n**TL;DR**: the library that one of my spack binaries uses depends on what other spack packages I've `module load`-ed. **YIKES**. See also #3926.\r\n\r\nI was trying to understand why @JusticeForMikeBrown was having trouble building bowtie2 (see #3950) when I've built it successfully with `[email protected]`.\r\n\r\nHis problem with `[email protected]` was zlib related; I checked the package and noticed that it doesn't have a dependency on zlib. Perhaps it should, I thought. Wonder what zlib my \"production\" copy was linked against?\r\n\r\n```\r\n$ ldd bowtie2-align-l | grep libz\r\n\tlibz.so.1 => /blah/spack/v0.0.8/opt/spack/linux-centos7-x86_64/gcc-5.4.0/zlib-1.2.11-ec535e2ikkpl7hd4y454t3yydjqorja6/lib/libz.so.1 (0x00002aaaaaf32000)\r\n```\r\n\r\nThat surprised me, because there's no zlib dependency in the package.\r\n\r\nSure enough, it's because I have something else `module load`-ed that has the side effect of adding zlib's directory to `LD_LIBRARY_PATH`.\r\n\r\n```\r\n$ (unset LD_LIBRARY_PATH; ldd bowtie2-align-l) | grep libz\r\n\tlibz.so.1 => /lib64/libz.so.1 (0x00002aaaaaf2f000)\r\n```\r\n\r\nMy \"newer\" version of CentOS has a `/lib64/libz.so.1` that includes gzbuffer (`nm` didn't help, library's stripped...):\r\n\r\n```\r\n$ strings /lib64/libz.so.1 | grep buffer\r\ngzbuffer\r\nbuffer error\r\n```\r\n\r\nso it (probably) works for me either way.\r\n\r\nBut imagine if there were two versions of a library (perhaps something mathematical) that give different results. Now you have a program giving different results depending on what other Spack applications are also loaded. \r\n\r\n**THAT** would be fun to track down (assuming you even noticed...).\r\n\r\nW.R.T. the main problem, bowtie2 should probably have a dependency on a new-ish version of zlib, but stuff like this is why LD_LIBRARY_PATH is a slippery tool to reach for.\r\n\r\nI'll argue that this kind of unpredictability is a bigger negative than being helpful and always setting `LD_LIBRARY_PATH`. This comment in the docs isn't actually correct:\r\n\r\n> Spack avoids library misconfiguration by using RPATH to link dependencies. 
When a user links a library or runs a program, it is tied to the dependencies it was built with, so there is no need to manipulate LD_LIBRARY_PATH at runtime.\r\n>\r\n> [clipped from here](http://spack.readthedocs.io/en/latest/features.html?highlight=RPATH#packages-can-peacefully-coexist)\r\n\r\nWhat would happen if `LD_LIBRARY_PATH` became *opt-in*, packages that need it specify it in their package definitions?\r\n\r\nLooking at [the list of cases where RPATH support doesn't work](http://spack.readthedocs.io/en/latest/workflows.html?highlight=LD_LIBRARY_PATH#transitive-dependencies), it seems like 1) is not relevant (I think it's referring to `PERL5LIB`, etc...) and 3) are simply bugs. That leaves 2), python extensions. Is `RPATH` unworkable there or just not yet working?\n", "before_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\nimport os\nimport sys\n\nimport spack.build_environment\nimport spack.config\nimport spack.util.environment as environment\nimport spack.util.prefix as prefix\n\n#: Environment variable name Spack uses to track individually loaded packages\nspack_loaded_hashes_var = \"SPACK_LOADED_HASHES\"\n\n\ndef prefix_inspections(platform):\n \"\"\"Get list of prefix inspections for platform\n\n Arguments:\n platform (str): the name of the platform to consider. The platform\n determines what environment variables Spack will use for some\n inspections.\n\n Returns:\n A dictionary mapping subdirectory names to lists of environment\n variables to modify with that directory if it exists.\n \"\"\"\n inspections = spack.config.get(\"modules:prefix_inspections\", {})\n if inspections:\n return inspections\n\n inspections = {\n \"bin\": [\"PATH\"],\n \"lib\": [\"LD_LIBRARY_PATH\", \"LIBRARY_PATH\"],\n \"lib64\": [\"LD_LIBRARY_PATH\", \"LIBRARY_PATH\"],\n \"man\": [\"MANPATH\"],\n \"share/man\": [\"MANPATH\"],\n \"share/aclocal\": [\"ACLOCAL_PATH\"],\n \"include\": [\"CPATH\"],\n \"lib/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"lib64/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"share/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"\": [\"CMAKE_PREFIX_PATH\"],\n }\n\n if platform == \"darwin\":\n for subdir in (\"lib\", \"lib64\"):\n inspections[subdir].append(\"DYLD_FALLBACK_LIBRARY_PATH\")\n\n return inspections\n\n\ndef unconditional_environment_modifications(view):\n \"\"\"List of environment (shell) modifications to be processed for view.\n\n This list does not depend on the specs in this environment\"\"\"\n env = environment.EnvironmentModifications()\n\n for subdir, vars in prefix_inspections(sys.platform).items():\n full_subdir = os.path.join(view.root, subdir)\n for var in vars:\n env.prepend_path(var, full_subdir)\n\n return env\n\n\ndef environment_modifications_for_spec(spec, view=None, set_package_py_globals=True):\n \"\"\"List of environment (shell) modifications to be processed for spec.\n\n This list is specific to the location of the spec or its projection in\n the view.\n\n Args:\n spec (spack.spec.Spec): spec for which to list the environment modifications\n view: view associated with the spec passed as first argument\n set_package_py_globals (bool): whether or not to set the global variables in the\n package.py files (this may be problematic when using buildcaches that have\n been built on a different but compatible OS)\n \"\"\"\n spec = spec.copy()\n if view and not spec.external:\n spec.prefix = 
prefix.Prefix(view.get_projection_for_spec(spec))\n\n # generic environment modifications determined by inspecting the spec\n # prefix\n env = environment.inspect_path(\n spec.prefix, prefix_inspections(spec.platform), exclude=environment.is_system_path\n )\n\n # Let the extendee/dependency modify their extensions/dependents\n # before asking for package-specific modifications\n env.extend(\n spack.build_environment.modifications_from_dependencies(\n spec, context=\"run\", set_package_py_globals=set_package_py_globals\n )\n )\n\n if set_package_py_globals:\n spack.build_environment.set_module_variables_for_package(spec.package)\n\n spec.package.setup_run_environment(env)\n\n return env\n", "path": "lib/spack/spack/user_environment.py"}], "after_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\nimport os\nimport sys\n\nimport spack.build_environment\nimport spack.config\nimport spack.util.environment as environment\nimport spack.util.prefix as prefix\n\n#: Environment variable name Spack uses to track individually loaded packages\nspack_loaded_hashes_var = \"SPACK_LOADED_HASHES\"\n\n\ndef prefix_inspections(platform):\n \"\"\"Get list of prefix inspections for platform\n\n Arguments:\n platform (str): the name of the platform to consider. The platform\n determines what environment variables Spack will use for some\n inspections.\n\n Returns:\n A dictionary mapping subdirectory names to lists of environment\n variables to modify with that directory if it exists.\n \"\"\"\n inspections = spack.config.get(\"modules:prefix_inspections\", {})\n if inspections:\n return inspections\n\n inspections = {\n \"bin\": [\"PATH\"],\n \"man\": [\"MANPATH\"],\n \"share/man\": [\"MANPATH\"],\n \"share/aclocal\": [\"ACLOCAL_PATH\"],\n \"lib/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"lib64/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"share/pkgconfig\": [\"PKG_CONFIG_PATH\"],\n \"\": [\"CMAKE_PREFIX_PATH\"],\n }\n\n if platform == \"darwin\":\n inspections[\"lib\"] = [\"DYLD_FALLBACK_LIBRARY_PATH\"]\n inspections[\"lib64\"] = [\"DYLD_FALLBACK_LIBRARY_PATH\"]\n\n return inspections\n\n\ndef unconditional_environment_modifications(view):\n \"\"\"List of environment (shell) modifications to be processed for view.\n\n This list does not depend on the specs in this environment\"\"\"\n env = environment.EnvironmentModifications()\n\n for subdir, vars in prefix_inspections(sys.platform).items():\n full_subdir = os.path.join(view.root, subdir)\n for var in vars:\n env.prepend_path(var, full_subdir)\n\n return env\n\n\ndef environment_modifications_for_spec(spec, view=None, set_package_py_globals=True):\n \"\"\"List of environment (shell) modifications to be processed for spec.\n\n This list is specific to the location of the spec or its projection in\n the view.\n\n Args:\n spec (spack.spec.Spec): spec for which to list the environment modifications\n view: view associated with the spec passed as first argument\n set_package_py_globals (bool): whether or not to set the global variables in the\n package.py files (this may be problematic when using buildcaches that have\n been built on a different but compatible OS)\n \"\"\"\n spec = spec.copy()\n if view and not spec.external:\n spec.prefix = prefix.Prefix(view.get_projection_for_spec(spec))\n\n # generic environment modifications determined by inspecting the spec\n # prefix\n env = 
environment.inspect_path(\n spec.prefix, prefix_inspections(spec.platform), exclude=environment.is_system_path\n )\n\n # Let the extendee/dependency modify their extensions/dependents\n # before asking for package-specific modifications\n env.extend(\n spack.build_environment.modifications_from_dependencies(\n spec, context=\"run\", set_package_py_globals=set_package_py_globals\n )\n )\n\n if set_package_py_globals:\n spack.build_environment.set_module_variables_for_package(spec.package)\n\n spec.package.setup_run_environment(env)\n\n return env\n", "path": "lib/spack/spack/user_environment.py"}]}
| 2,044 | 260 |
gh_patches_debug_561
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-822
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.0.3
On the docket:
+ [x] Pex should trust any host passed via `--index` or `--find-links`. #812
+ [x] A cache should always be used by `pex.resolver.resolve`. #809
+ [x] Use the resolve cache to skip installs. #815
+ [x] Parallelize resolve. #818
+ [x] Cache sdist & local project builds #817
+ [x] Unify resolve and runtime wheel caches. #820
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.0.2'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.0.2'
+__version__ = '2.0.3'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.2'\n+__version__ = '2.0.3'\n", "issue": "Release 2.0.3\nOn the docket:\r\n\r\n+ [x] Pex should trust any host passed via `--index` or `--find-links`. #812\r\n+ [x] A cache should always be used by `pex.resolver.resolve`. #809\r\n+ [x] Use the resolve cache to skip installs. #815\r\n+ [x] Parallelize resolve. #818\r\n+ [x] Cache sdist & local project builds #817\r\n+ [x] Unify resolve and runtime wheel caches. #820\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.2'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.3'\n", "path": "pex/version.py"}]}
| 432 | 94 |
gh_patches_debug_1738
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-94
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: url on Storage Exception when key not found
When attempting to get a key that does not exist the exception for the `NotFoundError` is trying to reference `request.url` which does not exist.
``` py
Traceback (most recent call last):
[...]
file_key = self.bucket.get_key(path)
File "gcloud/storage/bucket.py", line 83, in get_key
response = self.connection.api_request(method='GET', path=key.path)
File "gcloud/storage/connection.py", line 212, in api_request
raise exceptions.NotFoundError(response, content)
File "gcloud/storage/exceptions.py", line 17, in __init__
self.message = 'GET %s returned a 404.' % (response.url)
File "httplib2/__init__.py", line 1680, in __getattr__
raise AttributeError, name
AttributeError: url
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gcloud/storage/exceptions.py`
Content:
```
1 # TODO: Make these super useful.
2
3 class StorageError(Exception):
4 pass
5
6
7 class ConnectionError(StorageError):
8
9 def __init__(self, response, content):
10 message = str(response) + content
11 super(ConnectionError, self).__init__(message)
12
13
14 class NotFoundError(ConnectionError):
15
16 def __init__(self, response, content):
17 self.message = 'GET %s returned a 404.' % (response.url)
18
19
20 class StorageDataError(StorageError):
21 pass
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gcloud/storage/exceptions.py b/gcloud/storage/exceptions.py
--- a/gcloud/storage/exceptions.py
+++ b/gcloud/storage/exceptions.py
@@ -14,7 +14,7 @@
class NotFoundError(ConnectionError):
def __init__(self, response, content):
- self.message = 'GET %s returned a 404.' % (response.url)
+ self.message = 'Request returned a 404. Headers: %s' % (response)
class StorageDataError(StorageError):
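
A standalone illustration of why the new message construction avoids the crash — the response dict below is hypothetical, and the class is simplified to inherit from `Exception` so the snippet runs on its own:

```python
# httplib2 response objects have no .url attribute, but any object can be
# interpolated with %s, so the patched message never raises AttributeError.
class NotFoundError(Exception):
    def __init__(self, response, content):
        self.message = 'Request returned a 404. Headers: %s' % (response)

err = NotFoundError({'status': '404', 'content-length': '9'}, 'Not Found')
print(err.message)  # Request returned a 404. Headers: {'status': '404', 'content-length': '9'}
```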
|
{"golden_diff": "diff --git a/gcloud/storage/exceptions.py b/gcloud/storage/exceptions.py\n--- a/gcloud/storage/exceptions.py\n+++ b/gcloud/storage/exceptions.py\n@@ -14,7 +14,7 @@\n class NotFoundError(ConnectionError):\n \n def __init__(self, response, content):\n- self.message = 'GET %s returned a 404.' % (response.url)\n+ self.message = 'Request returned a 404. Headers: %s' % (response)\n \n \n class StorageDataError(StorageError):\n", "issue": "AttributeError: url on Storage Exception when key not found\nWhen attempting to get a key that does not exist the exception for the `NotFoundError` is trying to reference `request.url` which does not exist.\n\n``` py\nTraceback (most recent call last):\n [...]\n file_key = self.bucket.get_key(path)\n File \"gcloud/storage/bucket.py\", line 83, in get_key\n response = self.connection.api_request(method='GET', path=key.path)\n File \"gcloud/storage/connection.py\", line 212, in api_request\n raise exceptions.NotFoundError(response, content)\n File \"gcloud/storage/exceptions.py\", line 17, in __init__\n self.message = 'GET %s returned a 404.' % (response.url)\n File \"httplib2/__init__.py\", line 1680, in __getattr__\n raise AttributeError, name\nAttributeError: url\n```\n\n", "before_files": [{"content": "# TODO: Make these super useful.\n\nclass StorageError(Exception):\n pass\n\n\nclass ConnectionError(StorageError):\n\n def __init__(self, response, content):\n message = str(response) + content\n super(ConnectionError, self).__init__(message)\n\n\nclass NotFoundError(ConnectionError):\n\n def __init__(self, response, content):\n self.message = 'GET %s returned a 404.' % (response.url)\n\n\nclass StorageDataError(StorageError):\n pass\n", "path": "gcloud/storage/exceptions.py"}], "after_files": [{"content": "# TODO: Make these super useful.\n\nclass StorageError(Exception):\n pass\n\n\nclass ConnectionError(StorageError):\n\n def __init__(self, response, content):\n message = str(response) + content\n super(ConnectionError, self).__init__(message)\n\n\nclass NotFoundError(ConnectionError):\n\n def __init__(self, response, content):\n self.message = 'Request returned a 404. Headers: %s' % (response)\n\n\nclass StorageDataError(StorageError):\n pass\n", "path": "gcloud/storage/exceptions.py"}]}
| 608 | 119 |
gh_patches_debug_15959
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-21059
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Race conditions in muting topics and users
Our pattern in [muting topics](https://github.com/zulip/zulip/blob/b4075b78eb6e128bce7ef3d36b86d176ef2ecfa5/zerver/views/muting.py#L39-L42) is to check if the topic is muted, and if not then to add a row:
```py3
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
do_mute_topic(user_profile, stream, topic_name, date_muted)
return json_success()
```
This pattern is inherently prone to race conditions. Luckily, we catch those due to database constraints, in the form of `UserTopic.objects.create` raising an IntegrityError, but those bubble up as 500s, not 400s.
We should catch those IntegrityErrors and re-raise them as `JsonableError(_("Topic already muted"))`. That applies to the mute-topic codepath as well as the mute-user codepath.
Though it doesn't affect correctness in this case, since the duplicate row is the first database change operation, these actions should be done inside of transactions.
Un-muting is technically also subject to this race, though it doesn't matter -- both processes calling `.delete()` on the same object is a mostly-silent no-op for the second process.
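
For illustration, a hedged sketch of the combined pattern the issue asks for — catching the constraint violation and wrapping the write in a transaction — using only names that already exist in the codebase (this is a sketch, not the actual patch):

```python
# Sketch only: race-safe muting that converts the IntegrityError raised by
# the duplicate UserTopic row into a 400-style JsonableError.
from django.db import IntegrityError, transaction
from django.utils.translation import gettext as _

from zerver.lib.actions import do_mute_topic
from zerver.lib.exceptions import JsonableError

def mute_topic_safely(user_profile, stream, topic_name, date_muted):
    try:
        with transaction.atomic():
            do_mute_topic(user_profile, stream, topic_name, date_muted)
    except IntegrityError:
        # A concurrent request inserted the row first; report 400, not 500.
        raise JsonableError(_("Topic already muted"))
```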
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/views/muting.py`
Content:
```
1 import datetime
2 from typing import Optional
3
4 from django.http import HttpRequest, HttpResponse
5 from django.utils.timezone import now as timezone_now
6 from django.utils.translation import gettext as _
7
8 from zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user
9 from zerver.lib.exceptions import JsonableError
10 from zerver.lib.request import REQ, has_request_variables
11 from zerver.lib.response import json_success
12 from zerver.lib.streams import (
13 access_stream_by_id,
14 access_stream_by_name,
15 access_stream_for_unmute_topic_by_id,
16 access_stream_for_unmute_topic_by_name,
17 check_for_exactly_one_stream_arg,
18 )
19 from zerver.lib.topic_mutes import topic_is_muted
20 from zerver.lib.user_mutes import get_mute_object
21 from zerver.lib.users import access_user_by_id
22 from zerver.lib.validator import check_int
23 from zerver.models import UserProfile
24
25
26 def mute_topic(
27 user_profile: UserProfile,
28 stream_id: Optional[int],
29 stream_name: Optional[str],
30 topic_name: str,
31 date_muted: datetime.datetime,
32 ) -> None:
33 if stream_name is not None:
34 (stream, sub) = access_stream_by_name(user_profile, stream_name)
35 else:
36 assert stream_id is not None
37 (stream, sub) = access_stream_by_id(user_profile, stream_id)
38
39 if topic_is_muted(user_profile, stream.id, topic_name):
40 raise JsonableError(_("Topic already muted"))
41
42 do_mute_topic(user_profile, stream, topic_name, date_muted)
43
44
45 def unmute_topic(
46 user_profile: UserProfile,
47 stream_id: Optional[int],
48 stream_name: Optional[str],
49 topic_name: str,
50 ) -> None:
51 error = _("Topic is not muted")
52
53 if stream_name is not None:
54 stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
55 else:
56 assert stream_id is not None
57 stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
58
59 do_unmute_topic(user_profile, stream, topic_name)
60
61
62 @has_request_variables
63 def update_muted_topic(
64 request: HttpRequest,
65 user_profile: UserProfile,
66 stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
67 stream: Optional[str] = REQ(default=None),
68 topic: str = REQ(),
69 op: str = REQ(),
70 ) -> HttpResponse:
71
72 check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
73
74 if op == "add":
75 mute_topic(
76 user_profile=user_profile,
77 stream_id=stream_id,
78 stream_name=stream,
79 topic_name=topic,
80 date_muted=timezone_now(),
81 )
82 return json_success(request)
83 elif op == "remove":
84 unmute_topic(
85 user_profile=user_profile,
86 stream_id=stream_id,
87 stream_name=stream,
88 topic_name=topic,
89 )
90 return json_success(request)
91
92
93 def mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:
94 if user_profile.id == muted_user_id:
95 raise JsonableError(_("Cannot mute self"))
96
97 muted_user = access_user_by_id(
98 user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
99 )
100 date_muted = timezone_now()
101
102 if get_mute_object(user_profile, muted_user) is not None:
103 raise JsonableError(_("User already muted"))
104
105 do_mute_user(user_profile, muted_user, date_muted)
106 return json_success(request)
107
108
109 def unmute_user(
110 request: HttpRequest, user_profile: UserProfile, muted_user_id: int
111 ) -> HttpResponse:
112 muted_user = access_user_by_id(
113 user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
114 )
115 mute_object = get_mute_object(user_profile, muted_user)
116
117 if mute_object is None:
118 raise JsonableError(_("User is not muted"))
119
120 do_unmute_user(mute_object)
121 return json_success(request)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/views/muting.py b/zerver/views/muting.py
--- a/zerver/views/muting.py
+++ b/zerver/views/muting.py
@@ -1,6 +1,7 @@
import datetime
from typing import Optional
+from django.db import IntegrityError
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
@@ -39,7 +40,10 @@
if topic_is_muted(user_profile, stream.id, topic_name):
raise JsonableError(_("Topic already muted"))
- do_mute_topic(user_profile, stream, topic_name, date_muted)
+ try:
+ do_mute_topic(user_profile, stream, topic_name, date_muted)
+ except IntegrityError:
+ raise JsonableError(_("Topic already muted"))
def unmute_topic(
|
{"golden_diff": "diff --git a/zerver/views/muting.py b/zerver/views/muting.py\n--- a/zerver/views/muting.py\n+++ b/zerver/views/muting.py\n@@ -1,6 +1,7 @@\n import datetime\n from typing import Optional\n \n+from django.db import IntegrityError\n from django.http import HttpRequest, HttpResponse\n from django.utils.timezone import now as timezone_now\n from django.utils.translation import gettext as _\n@@ -39,7 +40,10 @@\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n \n- do_mute_topic(user_profile, stream, topic_name, date_muted)\n+ try:\n+ do_mute_topic(user_profile, stream, topic_name, date_muted)\n+ except IntegrityError:\n+ raise JsonableError(_(\"Topic already muted\"))\n \n \n def unmute_topic(\n", "issue": "Race conditions in muting topics and users\nOur pattern in [muting topics](https://github.com/zulip/zulip/blob/b4075b78eb6e128bce7ef3d36b86d176ef2ecfa5/zerver/views/muting.py#L39-L42) is to check if the topic is muted, and if not then to add a row:\r\n```py3\r\n if topic_is_muted(user_profile, stream.id, topic_name):\r\n raise JsonableError(_(\"Topic already muted\"))\r\n\r\n do_mute_topic(user_profile, stream, topic_name, date_muted)\r\n return json_success()\r\n```\r\n\r\nThis pattern is inherently prone to race conditions. Luckily, we catch those due to database constraints, in the form of `UserTopic.objects.create` raising an IntegrityError, but those bubble up as 500's, not 400's.\r\n\r\nWe should catch those IntegrityError's and re-raise them as `JsonableError(_(\"Topic already muted\"))`. That applies to the mute-topic codepath, as well as the mute-user codepath.\r\n\r\nThough it doesn't affect correctness in this case, since the duplicate row is the first database change operation, these actions should be done inside of transactions.\r\n\r\nUn-muting is technically also subject to this race, though it doesn't matter -- both processes calling `.delete()` on the same object is a mostly-silent no-op for the second process.\n", "before_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> None:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n\n\ndef unmute_topic(\n user_profile: 
UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n) -> None:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n return json_success(request)\n elif op == \"remove\":\n unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n return json_success(request)\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success(request)\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success(request)\n", "path": "zerver/views/muting.py"}], "after_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.db import IntegrityError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> None:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, 
topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n try:\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n except IntegrityError:\n raise JsonableError(_(\"Topic already muted\"))\n\n\ndef unmute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n) -> None:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n return json_success(request)\n elif op == \"remove\":\n unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n return json_success(request)\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success(request)\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success(request)\n", "path": "zerver/views/muting.py"}]}
| 1,716 | 190 |
gh_patches_debug_9218
|
rasdani/github-patches
|
git_diff
|
marshmallow-code__webargs-498
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suggest including URL variable parameters in more examples
I'm totally new to Flask, Marshmallow, SQLAlchemy, webargs and this ecosystem. I was given a coding test and, while trying to complete it, I had to use flaskparser together with a URL parameter. Not knowing how to configure them to be used together, I googled "flaskparser". The top 5 results are:
- https://webargs.readthedocs.io/en/latest/_modules/webargs/flaskparser.html
- https://webargs.readthedocs.io/en/latest/
- https://webargs.readthedocs.io/en/latest/api.html
- https://webargs.readthedocs.io/en/latest/advanced.html
- https://webargs.readthedocs.io/en/latest/quickstart.html
None of these include an example where a URL variable parameter and webargs are used together. I found the example that I need in [Framework Support](https://github.com/marshmallow-code/webargs/blob/dev/docs/framework_support.rst) (which does not appear on the first page of the Google results), as follows:
```
from webargs import fields
from webargs.flaskparser import use_args
@app.route("/user/<int:uid>")
@use_args({"per_page": fields.Int()}, location="query")
def user_detail(args, uid):
return ("The user page for user {uid}, showing {per_page} posts.").format(
uid=uid, per_page=args["per_page"]
)
```
Since the simpler use case can be inferred from the more complex one, but not vice versa, I would suggest either including this code example in the [flaskparser.py example](https://github.com/marshmallow-code/webargs/edit/dev/src/webargs/flaskparser.py), or somehow tweaking the SEO for https://webargs.readthedocs.io/en/latest/framework_support.html so that it will appear on the first page of Google search results.
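
To make the combined pattern concrete, here is a hedged end-to-end sketch (assuming Flask and webargs are installed); it pairs the route above with Flask's test client so both the URL variable and the query argument are visible in one run:

```python
from flask import Flask
from webargs import fields
from webargs.flaskparser import use_args

app = Flask(__name__)

@app.route("/user/<int:uid>")
@use_args({"per_page": fields.Int()}, location="query")
def user_detail(args, uid):
    # webargs injects the parsed args dict first, then Flask's view args.
    return "The user page for user {uid}, showing {per_page} posts.".format(
        uid=uid, per_page=args["per_page"]
    )

with app.test_client() as client:
    body = client.get("/user/42?per_page=10").get_data(as_text=True)
    print(body)  # The user page for user 42, showing 10 posts.
```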
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/flaskparser.py`
Content:
```
1 """Flask request argument parsing module.
2
3 Example: ::
4
5 from flask import Flask
6
7 from webargs import fields
8 from webargs.flaskparser import use_args
9
10 app = Flask(__name__)
11
12 hello_args = {
13 'name': fields.Str(required=True)
14 }
15
16 @app.route('/')
17 @use_args(hello_args)
18 def index(args):
19 return 'Hello ' + args['name']
20 """
21 import flask
22 from werkzeug.exceptions import HTTPException
23
24 from webargs import core
25 from webargs.compat import MARSHMALLOW_VERSION_INFO
26 from webargs.multidictproxy import MultiDictProxy
27
28
29 def abort(http_status_code, exc=None, **kwargs):
30 """Raise a HTTPException for the given http_status_code. Attach any keyword
31 arguments to the exception for later processing.
32
33 From Flask-Restful. See NOTICE file for license information.
34 """
35 try:
36 flask.abort(http_status_code)
37 except HTTPException as err:
38 err.data = kwargs
39 err.exc = exc
40 raise err
41
42
43 def is_json_request(req):
44 return core.is_json(req.mimetype)
45
46
47 class FlaskParser(core.Parser):
48 """Flask request argument parser."""
49
50 __location_map__ = dict(
51 view_args="load_view_args",
52 path="load_view_args",
53 **core.Parser.__location_map__,
54 )
55
56 def _raw_load_json(self, req):
57 """Return a json payload from the request for the core parser's load_json
58
59 Checks the input mimetype and may return 'missing' if the mimetype is
60 non-json, even if the request body is parseable as json."""
61 if not is_json_request(req):
62 return core.missing
63
64 return core.parse_json(req.get_data(cache=True))
65
66 def _handle_invalid_json_error(self, error, req, *args, **kwargs):
67 abort(400, exc=error, messages={"json": ["Invalid JSON body."]})
68
69 def load_view_args(self, req, schema):
70 """Return the request's ``view_args`` or ``missing`` if there are none."""
71 return req.view_args or core.missing
72
73 def load_querystring(self, req, schema):
74 """Return query params from the request as a MultiDictProxy."""
75 return MultiDictProxy(req.args, schema)
76
77 def load_form(self, req, schema):
78 """Return form values from the request as a MultiDictProxy."""
79 return MultiDictProxy(req.form, schema)
80
81 def load_headers(self, req, schema):
82 """Return headers from the request as a MultiDictProxy."""
83 return MultiDictProxy(req.headers, schema)
84
85 def load_cookies(self, req, schema):
86 """Return cookies from the request."""
87 return req.cookies
88
89 def load_files(self, req, schema):
90 """Return files from the request as a MultiDictProxy."""
91 return MultiDictProxy(req.files, schema)
92
93 def handle_error(self, error, req, schema, *, error_status_code, error_headers):
94 """Handles errors during parsing. Aborts the current HTTP request and
95 responds with a 422 error.
96 """
97 status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
98 # on marshmallow 2, a many schema receiving a non-list value will
99 # produce this specific error back -- reformat it to match the
100 # marshmallow 3 message so that Flask can properly encode it
101 messages = error.messages
102 if (
103 MARSHMALLOW_VERSION_INFO[0] < 3
104 and schema.many
105 and messages == {0: {}, "_schema": ["Invalid input type."]}
106 ):
107 messages.pop(0)
108 abort(
109 status_code,
110 exc=error,
111 messages=error.messages,
112 schema=schema,
113 headers=error_headers,
114 )
115
116 def get_default_request(self):
117 """Override to use Flask's thread-local request object by default"""
118 return flask.request
119
120
121 parser = FlaskParser()
122 use_args = parser.use_args
123 use_kwargs = parser.use_kwargs
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/webargs/flaskparser.py b/src/webargs/flaskparser.py
--- a/src/webargs/flaskparser.py
+++ b/src/webargs/flaskparser.py
@@ -9,14 +9,16 @@
app = Flask(__name__)
- hello_args = {
- 'name': fields.Str(required=True)
+ user_detail_args = {
+ 'per_page': fields.Int()
}
- @app.route('/')
- @use_args(hello_args)
- def index(args):
- return 'Hello ' + args['name']
+ @app.route("/user/<int:uid>")
+ @use_args(user_detail_args)
+ def user_detail(args, uid):
+ return ("The user page for user {uid}, showing {per_page} posts.").format(
+ uid=uid, per_page=args["per_page"]
+ )
"""
import flask
from werkzeug.exceptions import HTTPException
|
{"golden_diff": "diff --git a/src/webargs/flaskparser.py b/src/webargs/flaskparser.py\n--- a/src/webargs/flaskparser.py\n+++ b/src/webargs/flaskparser.py\n@@ -9,14 +9,16 @@\n \n app = Flask(__name__)\n \n- hello_args = {\n- 'name': fields.Str(required=True)\n+ user_detail_args = {\n+ 'per_page': fields.Int()\n }\n \n- @app.route('/')\n- @use_args(hello_args)\n- def index(args):\n- return 'Hello ' + args['name']\n+ @app.route(\"/user/<int:uid>\")\n+ @use_args(user_detail_args)\n+ def user_detail(args, uid):\n+ return (\"The user page for user {uid}, showing {per_page} posts.\").format(\n+ uid=uid, per_page=args[\"per_page\"]\n+ )\n \"\"\"\n import flask\n from werkzeug.exceptions import HTTPException\n", "issue": "Suggest including URL variable parameters in more examples\nI'm totally new to Flask, Marshmallow, SQLAlchemy, webargs and this ecosystem. I was given a coding test and while trying to complete it, I had to use flaskparser together with an URL parameter. Not knowing how to configure them to be used together, I googled \"flaskparser\". The top 5 results are:\r\n\r\n- https://webargs.readthedocs.io/en/latest/_modules/webargs/flaskparser.html\r\n- https://webargs.readthedocs.io/en/latest/\r\n- https://webargs.readthedocs.io/en/latest/api.html\r\n- https://webargs.readthedocs.io/en/latest/advanced.html\r\n- https://webargs.readthedocs.io/en/latest/quickstart.html\r\n\r\nNone of these include an example where an URL variable parameter and webargs are used together. I found the example that I need in [Framework Support](https://github.com/marshmallow-code/webargs/blob/dev/docs/framework_support.rst) (which does not appear on the first page of the Google results) as following:\r\n\r\n```\r\nfrom webargs import fields\r\nfrom webargs.flaskparser import use_args\r\n\r\n\r\[email protected](\"/user/<int:uid>\")\r\n@use_args({\"per_page\": fields.Int()}, location=\"query\")\r\ndef user_detail(args, uid):\r\n return (\"The user page for user {uid}, showing {per_page} posts.\").format(\r\n uid=uid, per_page=args[\"per_page\"]\r\n )\r\n```\r\n\r\nSince the simpler use can can be inferred from the more complex use case, but not vice-versa, I would suggest to either include this code example in [flaskparser.py example](https://github.com/marshmallow-code/webargs/edit/dev/src/webargs/flaskparser.py), or somehow tweak the SEO for https://webargs.readthedocs.io/en/latest/framework_support.html so that it will appear on the first page of Google search results.\n", "before_files": [{"content": "\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n from flask import Flask\n\n from webargs import fields\n from webargs.flaskparser import use_args\n\n app = Flask(__name__)\n\n hello_args = {\n 'name': fields.Str(required=True)\n }\n\n @app.route('/')\n @use_args(hello_args)\n def index(args):\n return 'Hello ' + args['name']\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef abort(http_status_code, exc=None, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n\n From Flask-Restful. 
See NOTICE file for license information.\n \"\"\"\n try:\n flask.abort(http_status_code)\n except HTTPException as err:\n err.data = kwargs\n err.exc = exc\n raise err\n\n\ndef is_json_request(req):\n return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n \"\"\"Flask request argument parser.\"\"\"\n\n __location_map__ = dict(\n view_args=\"load_view_args\",\n path=\"load_view_args\",\n **core.Parser.__location_map__,\n )\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.get_data(cache=True))\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n abort(400, exc=error, messages={\"json\": [\"Invalid JSON body.\"]})\n\n def load_view_args(self, req, schema):\n \"\"\"Return the request's ``view_args`` or ``missing`` if there are none.\"\"\"\n return req.view_args or core.missing\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.args, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.form, schema)\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.headers, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.cookies\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.files, schema)\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing. 
Aborts the current HTTP request and\n responds with a 422 error.\n \"\"\"\n status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS\n # on marshmallow 2, a many schema receiving a non-list value will\n # produce this specific error back -- reformat it to match the\n # marshmallow 3 message so that Flask can properly encode it\n messages = error.messages\n if (\n MARSHMALLOW_VERSION_INFO[0] < 3\n and schema.many\n and messages == {0: {}, \"_schema\": [\"Invalid input type.\"]}\n ):\n messages.pop(0)\n abort(\n status_code,\n exc=error,\n messages=error.messages,\n schema=schema,\n headers=error_headers,\n )\n\n def get_default_request(self):\n \"\"\"Override to use Flask's thread-local request object by default\"\"\"\n return flask.request\n\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/flaskparser.py"}], "after_files": [{"content": "\"\"\"Flask request argument parsing module.\n\nExample: ::\n\n from flask import Flask\n\n from webargs import fields\n from webargs.flaskparser import use_args\n\n app = Flask(__name__)\n\n user_detail_args = {\n 'per_page': fields.Int()\n }\n\n @app.route(\"/user/<int:uid>\")\n @use_args(user_detail_args)\n def user_detail(args, uid):\n return (\"The user page for user {uid}, showing {per_page} posts.\").format(\n uid=uid, per_page=args[\"per_page\"]\n )\n\"\"\"\nimport flask\nfrom werkzeug.exceptions import HTTPException\n\nfrom webargs import core\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.multidictproxy import MultiDictProxy\n\n\ndef abort(http_status_code, exc=None, **kwargs):\n \"\"\"Raise a HTTPException for the given http_status_code. Attach any keyword\n arguments to the exception for later processing.\n\n From Flask-Restful. 
See NOTICE file for license information.\n \"\"\"\n try:\n flask.abort(http_status_code)\n except HTTPException as err:\n err.data = kwargs\n err.exc = exc\n raise err\n\n\ndef is_json_request(req):\n return core.is_json(req.mimetype)\n\n\nclass FlaskParser(core.Parser):\n \"\"\"Flask request argument parser.\"\"\"\n\n __location_map__ = dict(\n view_args=\"load_view_args\",\n path=\"load_view_args\",\n **core.Parser.__location_map__,\n )\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req):\n return core.missing\n\n return core.parse_json(req.get_data(cache=True))\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n abort(400, exc=error, messages={\"json\": [\"Invalid JSON body.\"]})\n\n def load_view_args(self, req, schema):\n \"\"\"Return the request's ``view_args`` or ``missing`` if there are none.\"\"\"\n return req.view_args or core.missing\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.args, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.form, schema)\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.headers, schema)\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n return req.cookies\n\n def load_files(self, req, schema):\n \"\"\"Return files from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.files, schema)\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing. Aborts the current HTTP request and\n responds with a 422 error.\n \"\"\"\n status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS\n # on marshmallow 2, a many schema receiving a non-list value will\n # produce this specific error back -- reformat it to match the\n # marshmallow 3 message so that Flask can properly encode it\n messages = error.messages\n if (\n MARSHMALLOW_VERSION_INFO[0] < 3\n and schema.many\n and messages == {0: {}, \"_schema\": [\"Invalid input type.\"]}\n ):\n messages.pop(0)\n abort(\n status_code,\n exc=error,\n messages=error.messages,\n schema=schema,\n headers=error_headers,\n )\n\n def get_default_request(self):\n \"\"\"Override to use Flask's thread-local request object by default\"\"\"\n return flask.request\n\n\nparser = FlaskParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/flaskparser.py"}]}
| 1,790 | 207 |
gh_patches_debug_1074
|
rasdani/github-patches
|
git_diff
|
huggingface__diffusers-1052
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the precision of our integration tests
We currently have a rather low precision when testing our pipeline due to two reasons. 
1. - Our reference is an image and not a numpy array. This means that when we created our reference image we lost float precision which is unnecessary
2. - We only test for `.max() < 1e-2`. IMO we should test for `.max() < 1e-4` with the numpy arrays. In my experiments across multiple devices I have **not** seen differences bigger than `.max() < 1e-4` when using full precision.
IMO this could have also prevented: https://github.com/huggingface/diffusers/issues/902
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/diffusers/utils/__init__.py`
Content:
```
1 # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import os
17
18 from .deprecation_utils import deprecate
19 from .import_utils import (
20 ENV_VARS_TRUE_AND_AUTO_VALUES,
21 ENV_VARS_TRUE_VALUES,
22 USE_JAX,
23 USE_TF,
24 USE_TORCH,
25 DummyObject,
26 is_accelerate_available,
27 is_flax_available,
28 is_inflect_available,
29 is_modelcards_available,
30 is_onnx_available,
31 is_scipy_available,
32 is_tf_available,
33 is_torch_available,
34 is_transformers_available,
35 is_unidecode_available,
36 requires_backends,
37 )
38 from .logging import get_logger
39 from .outputs import BaseOutput
40
41
42 if is_torch_available():
43 from .testing_utils import (
44 floats_tensor,
45 load_image,
46 load_numpy,
47 parse_flag_from_env,
48 require_torch_gpu,
49 slow,
50 torch_all_close,
51 torch_device,
52 )
53
54
55 logger = get_logger(__name__)
56
57
58 hf_cache_home = os.path.expanduser(
59 os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
60 )
61 default_cache_path = os.path.join(hf_cache_home, "diffusers")
62
63
64 CONFIG_NAME = "config.json"
65 WEIGHTS_NAME = "diffusion_pytorch_model.bin"
66 FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
67 ONNX_WEIGHTS_NAME = "model.onnx"
68 HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
69 DIFFUSERS_CACHE = default_cache_path
70 DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
71 HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py
--- a/src/diffusers/utils/__init__.py
+++ b/src/diffusers/utils/__init__.py
@@ -42,6 +42,7 @@
if is_torch_available():
from .testing_utils import (
floats_tensor,
+ load_hf_numpy,
load_image,
load_numpy,
parse_flag_from_env,
|
{"golden_diff": "diff --git a/src/diffusers/utils/__init__.py b/src/diffusers/utils/__init__.py\n--- a/src/diffusers/utils/__init__.py\n+++ b/src/diffusers/utils/__init__.py\n@@ -42,6 +42,7 @@\n if is_torch_available():\n from .testing_utils import (\n floats_tensor,\n+ load_hf_numpy,\n load_image,\n load_numpy,\n parse_flag_from_env,\n", "issue": "Improve the precision of our integration tests\nWe currently have a rather low precision when testing our pipeline due to due reasons. \r\n1. - Our reference is an image and not a numpy array. This means that when we created our reference image we lost float precision which is unnecessary\r\n2. - We only test for `.max() < 1e-2` . IMO we should test for `.max() < 1e-4` with the numpy arrays. In my experiements across multiple devices I have **not** seen differences bigger than `.max() < 1e-4` when using full precision.\r\n\r\nIMO this could have also prevented: https://github.com/huggingface/diffusers/issues/902\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\n\nfrom .deprecation_utils import deprecate\nfrom .import_utils import (\n ENV_VARS_TRUE_AND_AUTO_VALUES,\n ENV_VARS_TRUE_VALUES,\n USE_JAX,\n USE_TF,\n USE_TORCH,\n DummyObject,\n is_accelerate_available,\n is_flax_available,\n is_inflect_available,\n is_modelcards_available,\n is_onnx_available,\n is_scipy_available,\n is_tf_available,\n is_torch_available,\n is_transformers_available,\n is_unidecode_available,\n requires_backends,\n)\nfrom .logging import get_logger\nfrom .outputs import BaseOutput\n\n\nif is_torch_available():\n from .testing_utils import (\n floats_tensor,\n load_image,\n load_numpy,\n parse_flag_from_env,\n require_torch_gpu,\n slow,\n torch_all_close,\n torch_device,\n )\n\n\nlogger = get_logger(__name__)\n\n\nhf_cache_home = os.path.expanduser(\n os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n)\ndefault_cache_path = os.path.join(hf_cache_home, \"diffusers\")\n\n\nCONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"diffusion_pytorch_model.bin\"\nFLAX_WEIGHTS_NAME = \"diffusion_flax_model.msgpack\"\nONNX_WEIGHTS_NAME = \"model.onnx\"\nHUGGINGFACE_CO_RESOLVE_ENDPOINT = \"https://huggingface.co\"\nDIFFUSERS_CACHE = default_cache_path\nDIFFUSERS_DYNAMIC_MODULE_NAME = \"diffusers_modules\"\nHF_MODULES_CACHE = os.getenv(\"HF_MODULES_CACHE\", os.path.join(hf_cache_home, \"modules\"))\n", "path": "src/diffusers/utils/__init__.py"}], "after_files": [{"content": "# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\n\nfrom .deprecation_utils import deprecate\nfrom .import_utils import (\n ENV_VARS_TRUE_AND_AUTO_VALUES,\n ENV_VARS_TRUE_VALUES,\n USE_JAX,\n USE_TF,\n USE_TORCH,\n DummyObject,\n is_accelerate_available,\n is_flax_available,\n is_inflect_available,\n is_modelcards_available,\n is_onnx_available,\n is_scipy_available,\n is_tf_available,\n is_torch_available,\n is_transformers_available,\n is_unidecode_available,\n requires_backends,\n)\nfrom .logging import get_logger\nfrom .outputs import BaseOutput\n\n\nif is_torch_available():\n from .testing_utils import (\n floats_tensor,\n load_hf_numpy,\n load_image,\n load_numpy,\n parse_flag_from_env,\n require_torch_gpu,\n slow,\n torch_all_close,\n torch_device,\n )\n\n\nlogger = get_logger(__name__)\n\n\nhf_cache_home = os.path.expanduser(\n os.getenv(\"HF_HOME\", os.path.join(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\"), \"huggingface\"))\n)\ndefault_cache_path = os.path.join(hf_cache_home, \"diffusers\")\n\n\nCONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"diffusion_pytorch_model.bin\"\nFLAX_WEIGHTS_NAME = \"diffusion_flax_model.msgpack\"\nONNX_WEIGHTS_NAME = \"model.onnx\"\nHUGGINGFACE_CO_RESOLVE_ENDPOINT = \"https://huggingface.co\"\nDIFFUSERS_CACHE = default_cache_path\nDIFFUSERS_DYNAMIC_MODULE_NAME = \"diffusers_modules\"\nHF_MODULES_CACHE = os.getenv(\"HF_MODULES_CACHE\", os.path.join(hf_cache_home, \"modules\"))\n", "path": "src/diffusers/utils/__init__.py"}]}
| 1,046 | 98 |
gh_patches_debug_24220
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-4407
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Schedule editor icons need to be more distinct
From @flynnliz
The various “person” icons are confusing. It’s hard to know at a glance in the grid which conflicts are “person who must be present” and which are “chair conflict,” and it’s even more confusing that in the session request data box on the bottom right, the “requested by” icon is the same as the chair conflict. Can these three be more distinct from each other?

- The “technology overlap” chain icon shows up really faintly and it’s very tiny, so it’s easy to miss. Same with the “key participant overlap” key icon — those two are really difficult to distinguish from each other when they are so small. Can these be made larger or even just changed to something that takes up more vertical space so they’re easier to distinguish?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/meeting/templatetags/editor_tags.py`
Content:
```
1 # Copyright The IETF Trust 2022, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4 """Custom tags for the schedule editor"""
5 import debug # pyflakes: ignore
6
7 from django import template
8 from django.utils.html import format_html
9
10 register = template.Library()
11
12
13 @register.simple_tag
14 def constraint_icon_for(constraint_name, count=None):
15 # icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()
16 icons = {
17 'conflict': '<span class="encircled">{reversed}1</span>',
18 'conflic2': '<span class="encircled">{reversed}2</span>',
19 'conflic3': '<span class="encircled">{reversed}3</span>',
20 'bethere': '<i class="bi bi-person"></i>{count}',
21 'timerange': '<i class="bi bi-calendar"></i>',
22 'time_relation': 'Δ',
23 'wg_adjacent': '{reversed}<i class="bi bi-skip-end"></i>',
24 'chair_conflict': '{reversed}<i class="bi bi-person-circle"></i>',
25 'tech_overlap': '{reversed}<i class="bi bi-link"></i>',
26 'key_participant': '{reversed}<i class="bi bi-key"></i>',
27 'joint_with_groups': '<i class="bi bi-merge"></i>',
28 'responsible_ad': '<span class="encircled">AD</span>',
29 }
30 reversed_suffix = '-reversed'
31 if constraint_name.slug.endswith(reversed_suffix):
32 reversed = True
33 cn = constraint_name.slug[: -len(reversed_suffix)]
34 else:
35 reversed = False
36 cn = constraint_name.slug
37 return format_html(
38 icons[cn],
39 count=count or '',
40 reversed='-' if reversed else '',
41 )
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/meeting/templatetags/editor_tags.py b/ietf/meeting/templatetags/editor_tags.py
--- a/ietf/meeting/templatetags/editor_tags.py
+++ b/ietf/meeting/templatetags/editor_tags.py
@@ -17,13 +17,13 @@
'conflict': '<span class="encircled">{reversed}1</span>',
'conflic2': '<span class="encircled">{reversed}2</span>',
'conflic3': '<span class="encircled">{reversed}3</span>',
- 'bethere': '<i class="bi bi-person"></i>{count}',
+ 'bethere': '<i class="bi bi-people-fill"></i>{count}',
'timerange': '<i class="bi bi-calendar"></i>',
'time_relation': 'Δ',
'wg_adjacent': '{reversed}<i class="bi bi-skip-end"></i>',
- 'chair_conflict': '{reversed}<i class="bi bi-person-circle"></i>',
- 'tech_overlap': '{reversed}<i class="bi bi-link"></i>',
- 'key_participant': '{reversed}<i class="bi bi-key"></i>',
+ 'chair_conflict': '{reversed}<i class="bi bi-circle-fill"></i>',
+ 'tech_overlap': '{reversed}<i class="bi bi-link-45deg"></i>',
+ 'key_participant': '{reversed}<i class="bi bi-star"></i>',
'joint_with_groups': '<i class="bi bi-merge"></i>',
'responsible_ad': '<span class="encircled">AD</span>',
}
|
{"golden_diff": "diff --git a/ietf/meeting/templatetags/editor_tags.py b/ietf/meeting/templatetags/editor_tags.py\n--- a/ietf/meeting/templatetags/editor_tags.py\n+++ b/ietf/meeting/templatetags/editor_tags.py\n@@ -17,13 +17,13 @@\n 'conflict': '<span class=\"encircled\">{reversed}1</span>',\n 'conflic2': '<span class=\"encircled\">{reversed}2</span>',\n 'conflic3': '<span class=\"encircled\">{reversed}3</span>',\n- 'bethere': '<i class=\"bi bi-person\"></i>{count}',\n+ 'bethere': '<i class=\"bi bi-people-fill\"></i>{count}',\n 'timerange': '<i class=\"bi bi-calendar\"></i>',\n 'time_relation': 'Δ',\n 'wg_adjacent': '{reversed}<i class=\"bi bi-skip-end\"></i>',\n- 'chair_conflict': '{reversed}<i class=\"bi bi-person-circle\"></i>',\n- 'tech_overlap': '{reversed}<i class=\"bi bi-link\"></i>',\n- 'key_participant': '{reversed}<i class=\"bi bi-key\"></i>',\n+ 'chair_conflict': '{reversed}<i class=\"bi bi-circle-fill\"></i>',\n+ 'tech_overlap': '{reversed}<i class=\"bi bi-link-45deg\"></i>',\n+ 'key_participant': '{reversed}<i class=\"bi bi-star\"></i>',\n 'joint_with_groups': '<i class=\"bi bi-merge\"></i>',\n 'responsible_ad': '<span class=\"encircled\">AD</span>',\n }\n", "issue": "Schedule editor icons need to be more distinct\nFrom @flynnliz\r\n\r\nThe various \u201cperson\u201d icons are confusing. It\u2019s hard to know at a glance in the grid which conflicts are \u201cperson who must be present\u201d and which are \u201cchair conflict,\u201d and it\u2019s even more confusing that in the session request data box on the bottom right, the \u201crequested by\u201d icon is the same as the chair conflict. Can these three be more distinct from each other? \r\n\r\n\r\n\r\n\r\n- The \u201ctechnology overlap\u201d chain icon shows up really faintly and it\u2019s very tiny, so it\u2019s easy to miss. Same with the \u201ckey participant overlap\u201d key icon \u2014 those two are really difficult to distinguish from each other when they are so small. 
Can these be made larger or even just changed to something that takes up more vertical space so they\u2019re easier to distinguish?\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\"\"\"Custom tags for the schedule editor\"\"\"\nimport debug # pyflakes: ignore\n\nfrom django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\n\n\[email protected]_tag\ndef constraint_icon_for(constraint_name, count=None):\n # icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()\n icons = {\n 'conflict': '<span class=\"encircled\">{reversed}1</span>',\n 'conflic2': '<span class=\"encircled\">{reversed}2</span>',\n 'conflic3': '<span class=\"encircled\">{reversed}3</span>',\n 'bethere': '<i class=\"bi bi-person\"></i>{count}',\n 'timerange': '<i class=\"bi bi-calendar\"></i>',\n 'time_relation': 'Δ',\n 'wg_adjacent': '{reversed}<i class=\"bi bi-skip-end\"></i>',\n 'chair_conflict': '{reversed}<i class=\"bi bi-person-circle\"></i>',\n 'tech_overlap': '{reversed}<i class=\"bi bi-link\"></i>',\n 'key_participant': '{reversed}<i class=\"bi bi-key\"></i>',\n 'joint_with_groups': '<i class=\"bi bi-merge\"></i>',\n 'responsible_ad': '<span class=\"encircled\">AD</span>',\n }\n reversed_suffix = '-reversed'\n if constraint_name.slug.endswith(reversed_suffix):\n reversed = True\n cn = constraint_name.slug[: -len(reversed_suffix)]\n else:\n reversed = False\n cn = constraint_name.slug\n return format_html(\n icons[cn],\n count=count or '',\n reversed='-' if reversed else '',\n )\n", "path": "ietf/meeting/templatetags/editor_tags.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2022, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\"\"\"Custom tags for the schedule editor\"\"\"\nimport debug # pyflakes: ignore\n\nfrom django import template\nfrom django.utils.html import format_html\n\nregister = template.Library()\n\n\[email protected]_tag\ndef constraint_icon_for(constraint_name, count=None):\n # icons must be valid HTML and kept up to date with tests.EditorTagTests.test_constraint_icon_for()\n icons = {\n 'conflict': '<span class=\"encircled\">{reversed}1</span>',\n 'conflic2': '<span class=\"encircled\">{reversed}2</span>',\n 'conflic3': '<span class=\"encircled\">{reversed}3</span>',\n 'bethere': '<i class=\"bi bi-people-fill\"></i>{count}',\n 'timerange': '<i class=\"bi bi-calendar\"></i>',\n 'time_relation': 'Δ',\n 'wg_adjacent': '{reversed}<i class=\"bi bi-skip-end\"></i>',\n 'chair_conflict': '{reversed}<i class=\"bi bi-circle-fill\"></i>',\n 'tech_overlap': '{reversed}<i class=\"bi bi-link-45deg\"></i>',\n 'key_participant': '{reversed}<i class=\"bi bi-star\"></i>',\n 'joint_with_groups': '<i class=\"bi bi-merge\"></i>',\n 'responsible_ad': '<span class=\"encircled\">AD</span>',\n }\n reversed_suffix = '-reversed'\n if constraint_name.slug.endswith(reversed_suffix):\n reversed = True\n cn = constraint_name.slug[: -len(reversed_suffix)]\n else:\n reversed = False\n cn = constraint_name.slug\n return format_html(\n icons[cn],\n count=count or '',\n reversed='-' if reversed else '',\n )\n", "path": "ietf/meeting/templatetags/editor_tags.py"}]}
| 1,041 | 379 |
gh_patches_debug_23481
|
rasdani/github-patches
|
git_diff
|
opensearch-project__opensearch-build-900
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename "bundle" to "distribution"?
**Is your feature request related to a problem? Please describe.**
We've been calling our output a bundle, but it's really a distribution.
**Describe the solution you'd like**
Rename bundle to distribution everywhere.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/assemble_workflow/bundle_recorder.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8 from urllib.parse import urljoin
9
10 from manifests.bundle_manifest import BundleManifest
11
12
13 class BundleRecorder:
14 def __init__(self, build, output_dir, artifacts_dir, base_url):
15 self.output_dir = output_dir
16 self.build_id = build.id
17 self.base_url = base_url
18 self.version = build.version
19 self.package_name = self.__get_package_name(build)
20 self.artifacts_dir = artifacts_dir
21 self.architecture = build.architecture
22 self.bundle_manifest = self.BundleManifestBuilder(
23 build.id,
24 build.name,
25 build.version,
26 build.platform,
27 build.architecture,
28 self.__get_package_location(),
29 )
30
31 def __get_package_name(self, build):
32 parts = [
33 build.name.lower().replace(" ", "-"),
34 build.version,
35 build.platform,
36 build.architecture,
37 ]
38 return "-".join(parts) + (".zip" if build.platform == "windows" else ".tar.gz")
39
40 def __get_public_url_path(self, folder, rel_path):
41 path = "/".join((folder, rel_path))
42 return urljoin(self.base_url + "/", path)
43
44 def __get_location(self, folder_name, rel_path, abs_path):
45 if self.base_url:
46 return self.__get_public_url_path(folder_name, rel_path)
47 return abs_path
48
49 # Assembled bundles are expected to be served from a separate "bundles" folder
50 # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
51 def __get_package_location(self):
52 return self.__get_location("dist", self.package_name, os.path.join(self.output_dir, self.package_name))
53
54 # Build artifacts are expected to be served from a "builds" folder
55 # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
56 def __get_component_location(self, component_rel_path):
57 abs_path = os.path.join(self.artifacts_dir, component_rel_path)
58 return self.__get_location("builds", component_rel_path, abs_path)
59
60 def record_component(self, component, rel_path):
61 self.bundle_manifest.append_component(
62 component.name,
63 component.repository,
64 component.ref,
65 component.commit_id,
66 self.__get_component_location(rel_path),
67 )
68
69 def get_manifest(self):
70 return self.bundle_manifest.to_manifest()
71
72 def write_manifest(self, folder):
73 manifest_path = os.path.join(folder, "manifest.yml")
74 self.get_manifest().to_file(manifest_path)
75
76 class BundleManifestBuilder:
77 def __init__(self, build_id, name, version, platform, architecture, location):
78 self.data = {}
79 self.data["build"] = {}
80 self.data["build"]["id"] = build_id
81 self.data["build"]["name"] = name
82 self.data["build"]["version"] = str(version)
83 self.data["build"]["platform"] = platform
84 self.data["build"]["architecture"] = architecture
85 self.data["build"]["location"] = location
86 self.data["schema-version"] = "1.1"
87 # We need to store components as a hash so that we can append artifacts by component name
88 # When we convert to a BundleManifest this will get converted back into a list
89 self.data["components"] = []
90
91 def append_component(self, name, repository_url, ref, commit_id, location):
92 component = {
93 "name": name,
94 "repository": repository_url,
95 "ref": ref,
96 "commit_id": commit_id,
97 "location": location,
98 }
99 self.data["components"].append(component)
100
101 def to_manifest(self):
102 return BundleManifest(self.data)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py
--- a/src/assemble_workflow/bundle_recorder.py
+++ b/src/assemble_workflow/bundle_recorder.py
@@ -46,13 +46,13 @@
return self.__get_public_url_path(folder_name, rel_path)
return abs_path
- # Assembled bundles are expected to be served from a separate "bundles" folder
- # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id
+ # Assembled output are expected to be served from a separate "dist" folder
+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/
def __get_package_location(self):
return self.__get_location("dist", self.package_name, os.path.join(self.output_dir, self.package_name))
# Build artifacts are expected to be served from a "builds" folder
- # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>
+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/
def __get_component_location(self, component_rel_path):
abs_path = os.path.join(self.artifacts_dir, component_rel_path)
return self.__get_location("builds", component_rel_path, abs_path)
|
{"golden_diff": "diff --git a/src/assemble_workflow/bundle_recorder.py b/src/assemble_workflow/bundle_recorder.py\n--- a/src/assemble_workflow/bundle_recorder.py\n+++ b/src/assemble_workflow/bundle_recorder.py\n@@ -46,13 +46,13 @@\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n \n- # Assembled bundles are expected to be served from a separate \"bundles\" folder\n- # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n+ # Assembled output are expected to be served from a separate \"dist\" folder\n+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/\n def __get_package_location(self):\n return self.__get_location(\"dist\", self.package_name, os.path.join(self.output_dir, self.package_name))\n \n # Build artifacts are expected to be served from a \"builds\" folder\n- # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n+ # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n", "issue": "Rename \"bundle\" to \"distribution\"?\n**Is your feature request related to a problem? Please describe.**\r\nWe've been calling our output a bundle, but it's really a distribution.\r\n\r\n**Describe the solution you'd like**\r\nRename bundle to distribution everywhere.\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir, base_url):\n self.output_dir = output_dir\n self.build_id = build.id\n self.base_url = base_url\n self.version = build.version\n self.package_name = self.__get_package_name(build)\n self.artifacts_dir = artifacts_dir\n self.architecture = build.architecture\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.platform,\n build.architecture,\n self.__get_package_location(),\n )\n\n def __get_package_name(self, build):\n parts = [\n build.name.lower().replace(\" \", \"-\"),\n build.version,\n build.platform,\n build.architecture,\n ]\n return \"-\".join(parts) + (\".zip\" if build.platform == \"windows\" else \".tar.gz\")\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"/\".join((folder, rel_path))\n return urljoin(self.base_url + \"/\", path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.base_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled bundles are expected to be served from a separate \"bundles\" folder\n # Example: https://artifacts.opensearch.org/bundles/1.0.0/<build-id\n def __get_package_location(self):\n return self.__get_location(\"dist\", self.package_name, os.path.join(self.output_dir, self.package_name))\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://artifacts.opensearch.org/builds/1.0.0/<build-id>\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", component_rel_path, abs_path)\n\n def 
record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, platform, architecture, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"platform\"] = platform\n self.data[\"build\"][\"architecture\"] = architecture\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.1\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": "src/assemble_workflow/bundle_recorder.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nfrom urllib.parse import urljoin\n\nfrom manifests.bundle_manifest import BundleManifest\n\n\nclass BundleRecorder:\n def __init__(self, build, output_dir, artifacts_dir, base_url):\n self.output_dir = output_dir\n self.build_id = build.id\n self.base_url = base_url\n self.version = build.version\n self.package_name = self.__get_package_name(build)\n self.artifacts_dir = artifacts_dir\n self.architecture = build.architecture\n self.bundle_manifest = self.BundleManifestBuilder(\n build.id,\n build.name,\n build.version,\n build.platform,\n build.architecture,\n self.__get_package_location(),\n )\n\n def __get_package_name(self, build):\n parts = [\n build.name.lower().replace(\" \", \"-\"),\n build.version,\n build.platform,\n build.architecture,\n ]\n return \"-\".join(parts) + (\".zip\" if build.platform == \"windows\" else \".tar.gz\")\n\n def __get_public_url_path(self, folder, rel_path):\n path = \"/\".join((folder, rel_path))\n return urljoin(self.base_url + \"/\", path)\n\n def __get_location(self, folder_name, rel_path, abs_path):\n if self.base_url:\n return self.__get_public_url_path(folder_name, rel_path)\n return abs_path\n\n # Assembled output are expected to be served from a separate \"dist\" folder\n # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/\n def __get_package_location(self):\n return self.__get_location(\"dist\", self.package_name, os.path.join(self.output_dir, self.package_name))\n\n # Build artifacts are expected to be served from a \"builds\" folder\n # Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/\n def __get_component_location(self, component_rel_path):\n abs_path = os.path.join(self.artifacts_dir, component_rel_path)\n return self.__get_location(\"builds\", 
component_rel_path, abs_path)\n\n def record_component(self, component, rel_path):\n self.bundle_manifest.append_component(\n component.name,\n component.repository,\n component.ref,\n component.commit_id,\n self.__get_component_location(rel_path),\n )\n\n def get_manifest(self):\n return self.bundle_manifest.to_manifest()\n\n def write_manifest(self, folder):\n manifest_path = os.path.join(folder, \"manifest.yml\")\n self.get_manifest().to_file(manifest_path)\n\n class BundleManifestBuilder:\n def __init__(self, build_id, name, version, platform, architecture, location):\n self.data = {}\n self.data[\"build\"] = {}\n self.data[\"build\"][\"id\"] = build_id\n self.data[\"build\"][\"name\"] = name\n self.data[\"build\"][\"version\"] = str(version)\n self.data[\"build\"][\"platform\"] = platform\n self.data[\"build\"][\"architecture\"] = architecture\n self.data[\"build\"][\"location\"] = location\n self.data[\"schema-version\"] = \"1.1\"\n # We need to store components as a hash so that we can append artifacts by component name\n # When we convert to a BundleManifest this will get converted back into a list\n self.data[\"components\"] = []\n\n def append_component(self, name, repository_url, ref, commit_id, location):\n component = {\n \"name\": name,\n \"repository\": repository_url,\n \"ref\": ref,\n \"commit_id\": commit_id,\n \"location\": location,\n }\n self.data[\"components\"].append(component)\n\n def to_manifest(self):\n return BundleManifest(self.data)\n", "path": "src/assemble_workflow/bundle_recorder.py"}]}
| 1,357 | 331 |
gh_patches_debug_8433
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-1350
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Post release bump of version to 2.4.1-dev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import re
6 import platform
7
8 from pkg_resources import parse_version
9 from setuptools import setup, find_packages
10
11 current_platform = platform.system().lower()
12 extra_includes = []
13 if current_platform == "windows":
14 extra_includes += ["pypyodbc"]
15
16 if os.path.exists(".git/hooks"): # check if we are in git repo
17 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
18 os.system("chmod +x .git/hooks/pre-commit")
19
20 app_data = "~/.retriever/scripts"
21 if os.path.exists(app_data):
22 os.system("rm -r {}".format(app_data))
23
24 __version__ = 'v2.4.0'
25 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
26 version_file.write("__version__ = " + "'" + __version__ + "'\n")
27 version_file.close()
28
29
30 def clean_version(v):
31 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
32
33
34 def read(*names, **kwargs):
35 return open(
36 os.path.join(os.path.dirname(__file__), *names),
37 ).read()
38
39 includes = [
40 'xlrd',
41 'future',
42 'argcomplete',
43 'pymysql',
44 'psycopg2-binary',
45 'sqlite3',
46 ] + extra_includes
47
48 excludes = [
49 'pyreadline',
50 'doctest',
51 'pickle',
52 'pdb',
53 'pywin', 'pywin.debugger',
54 'pywin.debugger.dbgcon',
55 'pywin.dialogs', 'pywin.dialogs.list',
56 'Tkconstants', 'Tkinter', 'tcl', 'tk'
57 ]
58
59 setup(
60 name='retriever',
61 version=clean_version(__version__),
62 description='Data Retriever',
63 long_description='{a}'.format(a=read('README.md')),
64 long_description_content_type='text/markdown',
65 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
66 author_email='[email protected]',
67 url='https://github.com/weecology/retriever',
68 classifiers=[
69 'Intended Audience :: Science/Research',
70 'License :: OSI Approved :: MIT License',
71 'Operating System :: Microsoft :: Windows',
72 'Operating System :: POSIX',
73 'Operating System :: Unix',
74 'Programming Language :: Python',
75 'Programming Language :: Python :: 3',
76 'Programming Language :: Python :: 3.4',
77 'Programming Language :: Python :: 3.5',
78 'Programming Language :: Python :: 3.6',
79 'Programming Language :: Python :: 3.7',
80 'Programming Language :: Python :: Implementation :: PyPy',
81 'Topic :: Software Development :: Libraries :: Python Modules',
82 'Topic :: Scientific/Engineering :: GIS',
83 'Topic :: Scientific/Engineering :: Information Analysis',
84 'Topic :: Database',
85 ],
86 packages=find_packages(
87 exclude=['hooks',
88 'docs',
89 'tests',
90 'scripts',
91 'docker',
92 ".cache"]),
93 entry_points={
94 'console_scripts': [
95 'retriever = retriever.__main__:main',
96 ],
97 },
98 install_requires=[
99 'xlrd',
100 'future',
101 'argcomplete',
102 'tqdm',
103 'requests',
104 'pandas'
105 ],
106 data_files=[('', ['CITATION'])],
107 setup_requires=[],
108 )
109
110 # windows doesn't have bash. No point in using bash-completion
111 if current_platform != "windows":
112 # if platform is OS X use "~/.bash_profile"
113 if current_platform == "darwin":
114 bash_file = "~/.bash_profile"
115 # if platform is Linux use "~/.bashrc
116 elif current_platform == "linux":
117 bash_file = "~/.bashrc"
118 # else write and discard
119 else:
120 bash_file = "/dev/null"
121
122 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
123 with open(os.path.expanduser(bash_file), "a+") as bashrc:
124 bashrc.seek(0)
125 # register retriever for arg-completion if not already registered
126 # whenever a new shell is spawned
127 if argcomplete_command not in bashrc.read():
128 bashrc.write(argcomplete_command + "\n")
129 bashrc.close()
130 os.system("activate-global-python-argcomplete")
131 # register for the current shell
132 os.system(argcomplete_command)
133
134 try:
135 from retriever.compile import compile
136 from retriever.lib.repository import check_for_updates
137
138 check_for_updates()
139 compile()
140 except:
141 pass
142
```
Path: `retriever/_version.py`
Content:
```
1 __version__ = 'v2.4.0'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/retriever/_version.py b/retriever/_version.py
--- a/retriever/_version.py
+++ b/retriever/_version.py
@@ -1 +1 @@
-__version__ = 'v2.4.0'
+__version__ = 'v2.4.1.dev'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
if os.path.exists(app_data):
os.system("rm -r {}".format(app_data))
-__version__ = 'v2.4.0'
+__version__ = 'v2.4.1.dev'
with open(os.path.join("retriever", "_version.py"), "w") as version_file:
version_file.write("__version__ = " + "'" + __version__ + "'\n")
version_file.close()
|
{"golden_diff": "diff --git a/retriever/_version.py b/retriever/_version.py\n--- a/retriever/_version.py\n+++ b/retriever/_version.py\n@@ -1 +1 @@\n-__version__ = 'v2.4.0'\n+__version__ = 'v2.4.1.dev'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n if os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n \n-__version__ = 'v2.4.0'\n+__version__ = 'v2.4.1.dev'\n with open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n", "issue": "Post release bump of version to 2.4.1-dev\n\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.4.0'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\n\ndef read(*names, **kwargs):\n return open(\n os.path.join(os.path.dirname(__file__), *names),\n ).read()\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2-binary',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(\n name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n long_description='{a}'.format(a=read('README.md')),\n long_description_content_type='text/markdown',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Database',\n ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm',\n 'requests',\n 'pandas'\n ],\n data_files=[('', ['CITATION'])],\n 
setup_requires=[],\n)\n\n# windows doesn't have bash. No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates()\n compile()\nexcept:\n pass\n", "path": "setup.py"}, {"content": "__version__ = 'v2.4.0'\n", "path": "retriever/_version.py"}], "after_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.4.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\n\ndef read(*names, **kwargs):\n return open(\n os.path.join(os.path.dirname(__file__), *names),\n ).read()\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2-binary',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(\n name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n long_description='{a}'.format(a=read('README.md')),\n long_description_content_type='text/markdown',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development 
:: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Database',\n ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm',\n 'requests',\n 'pandas'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n)\n\n# windows doesn't have bash. No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates()\n compile()\nexcept:\n pass\n", "path": "setup.py"}, {"content": "__version__ = 'v2.4.1.dev'\n", "path": "retriever/_version.py"}]}
| 1,641 | 194 |
gh_patches_debug_3437
|
rasdani/github-patches
|
git_diff
|
vacanza__python-holidays-794
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning upon "import holidays" in version 0.17
The implementation of deprecating the Swaziland calendar contains a bug. Just importing the holidays package is enough to fire the `DeprecationWarning`.
**Steps to reproduce (in bash):**
```bash
# Setup
python -m venv demo
source demo/bin/activate
pip install --upgrade pip
# Bad version
pip install holidays==0.17
# Expose bug
python -W error::DeprecationWarning -c 'import holidays'
# Workoround
pip uninstall -y holidays
pip install holidays!=0.17
python -W error::DeprecationWarning -c 'import holidays'
# Cleanup
deactivate
rm -rf demo
```
**Expected behavior:**
The `DeprecationWarning` should only fire when the user constructs an instance of the `Swaziland` class or a subclass.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/eswatini.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 import warnings
13 from datetime import date
14
15 from dateutil.easter import easter
16 from dateutil.relativedelta import relativedelta as rd
17
18 from holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC
19 from holidays.holiday_base import HolidayBase
20
21
22 class Eswatini(HolidayBase):
23 """
24 https://swazilii.org/sz/legislation/act/1938/71
25 https://www.officeholidays.com/countries/swaziland
26 """
27
28 country = "SZ"
29
30 def _populate(self, year):
31 super()._populate(year)
32
33 # Observed since 1938
34 if year > 1938:
35 self[date(year, JAN, 1)] = "New Year's Day"
36
37 e = easter(year)
38 good_friday = e - rd(days=2)
39 easter_monday = e + rd(days=1)
40 ascension_day = e + rd(days=39)
41 self[good_friday] = "Good Friday"
42 self[easter_monday] = "Easter Monday"
43 self[ascension_day] = "Ascension Day"
44
45 if year > 1968:
46 self[date(year, APR, 25)] = "National Flag Day"
47
48 if year > 1982:
49 # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza
50 self[date(year, JUL, 22)] = "Birthday of Late King Sobhuza"
51
52 if year > 1986:
53 # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii
54 self[date(year, APR, 19)] = "King's Birthday"
55
56 self[date(year, MAY, 1)] = "Worker's Day"
57 self[date(year, SEP, 6)] = "Independence Day"
58 self[date(year, DEC, 25)] = "Christmas Day"
59 self[date(year, DEC, 26)] = "Boxing Day"
60
61 # Once-off public holidays
62 y2k = "Y2K changeover"
63
64 if year == 1999:
65 # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/
66 self[date(1999, DEC, 31)] = y2k
67 if year == 2000:
68 self[date(2000, JAN, 3)] = y2k
69
70 # As of 2021/1/1, whenever a public holiday falls on a
71 # Sunday
72 # it rolls over to the following Monday
73 for k, v in list(self.items()):
74
75 if self.observed and k.weekday() == SUN and k.year == year:
76 add_days = 1
77 while self.get(k + rd(days=add_days)) is not None:
78 add_days += 1
79 self[k + rd(days=add_days)] = v + " (Day Off)"
80
81
82 class Swaziland(Eswatini):
83 warnings.warn(
84 "Swaziland is deprecated, use Eswatini instead.",
85 DeprecationWarning,
86 )
87 pass
88
89
90 class SZ(Eswatini):
91 pass
92
93
94 class SZW(Eswatini):
95 pass
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py
--- a/holidays/countries/eswatini.py
+++ b/holidays/countries/eswatini.py
@@ -80,11 +80,13 @@
class Swaziland(Eswatini):
- warnings.warn(
- "Swaziland is deprecated, use Eswatini instead.",
- DeprecationWarning,
- )
- pass
+ def __init__(self, *args, **kwargs) -> None:
+ warnings.warn(
+ "Swaziland is deprecated, use Eswatini instead.",
+ DeprecationWarning,
+ )
+
+ super().__init__(*args, **kwargs)
class SZ(Eswatini):
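For context, the root cause is that statements in a Python class body run at import time, so a `warnings.warn` placed directly in the body fires on `import holidays`. A minimal, self-contained sketch (class names invented for illustration) of the difference the patch relies on:

```python
import warnings

class EagerDeprecation:
    # Runs while this module is being imported, before any instance exists.
    warnings.warn("deprecated as soon as the module is imported", DeprecationWarning)

class LazyDeprecation:
    def __init__(self, *args, **kwargs):
        # Runs only when an instance is actually constructed.
        warnings.warn("deprecated only on construction", DeprecationWarning)
        super().__init__(*args, **kwargs)

LazyDeprecation()  # the second warning is issued here, not at import
```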
|
{"golden_diff": "diff --git a/holidays/countries/eswatini.py b/holidays/countries/eswatini.py\n--- a/holidays/countries/eswatini.py\n+++ b/holidays/countries/eswatini.py\n@@ -80,11 +80,13 @@\n \n \n class Swaziland(Eswatini):\n- warnings.warn(\n- \"Swaziland is deprecated, use Eswatini instead.\",\n- DeprecationWarning,\n- )\n- pass\n+ def __init__(self, *args, **kwargs) -> None:\n+ warnings.warn(\n+ \"Swaziland is deprecated, use Eswatini instead.\",\n+ DeprecationWarning,\n+ )\n+\n+ super().__init__(*args, **kwargs)\n \n \n class SZ(Eswatini):\n", "issue": "DeprecationWarning upon \"import holidays\" in version 0.17\nThe implementation of deprecating the Swaziland calendar contains a bug. Just importing the holidays package is enough to fire the `DeprecationWarning`.\r\n\r\n**Steps to reproduce (in bash):**\r\n\r\n```bash\r\n# Setup\r\npython -m venv demo\r\nsource demo/bin/activate\r\npip install --upgrade pip\r\n\r\n# Bad version\r\npip install holidays==0.17\r\n\r\n# Expose bug\r\npython -W error::DeprecationWarning -c 'import holidays'\r\n\r\n# Workoround\r\npip uninstall -y holidays\r\npip install holidays!=0.17\r\npython -W error::DeprecationWarning -c 'import holidays'\r\n\r\n# Cleanup\r\ndeactivate\r\nrm -rf demo\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nThe `DeprecationWarning` should only fire when the user constructs an instance of the `Swaziland` or a subclass.\r\n\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 
2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n pass\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}], "after_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport warnings\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import SUN, JAN, APR, MAY, JUL, SEP, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Eswatini(HolidayBase):\n \"\"\"\n https://swazilii.org/sz/legislation/act/1938/71\n https://www.officeholidays.com/countries/swaziland\n \"\"\"\n\n country = \"SZ\"\n\n def _populate(self, year):\n super()._populate(year)\n\n # Observed since 1938\n if year > 1938:\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n e = easter(year)\n good_friday = e - rd(days=2)\n easter_monday = e + rd(days=1)\n ascension_day = e + rd(days=39)\n self[good_friday] = \"Good Friday\"\n self[easter_monday] = \"Easter Monday\"\n self[ascension_day] = \"Ascension Day\"\n\n if year > 1968:\n self[date(year, APR, 25)] = \"National Flag Day\"\n\n if year > 1982:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-late-king-sobhuza\n self[date(year, JUL, 22)] = \"Birthday of Late King Sobhuza\"\n\n if year > 1986:\n # https://www.officeholidays.com/holidays/swaziland/birthday-of-king-mswati-iii\n self[date(year, APR, 19)] = \"King's Birthday\"\n\n self[date(year, MAY, 1)] = \"Worker's Day\"\n self[date(year, SEP, 6)] = \"Independence Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n self[date(year, DEC, 26)] = \"Boxing Day\"\n\n # Once-off public holidays\n y2k = \"Y2K changeover\"\n\n if year == 1999:\n # https://mg.co.za/article/1999-12-09-swaziland-declares-bank-holidays/\n self[date(1999, DEC, 31)] = y2k\n if year == 2000:\n self[date(2000, JAN, 3)] = y2k\n\n # As of 2021/1/1, whenever a public holiday falls on a\n # Sunday\n # it rolls over to the following Monday\n for k, v in list(self.items()):\n\n if self.observed and k.weekday() == SUN and k.year == year:\n add_days = 1\n while self.get(k + rd(days=add_days)) is not None:\n add_days += 1\n self[k + rd(days=add_days)] = v + \" (Day Off)\"\n\n\nclass Swaziland(Eswatini):\n def __init__(self, *args, **kwargs) -> None:\n warnings.warn(\n \"Swaziland is deprecated, use Eswatini instead.\",\n DeprecationWarning,\n )\n\n super().__init__(*args, **kwargs)\n\n\nclass SZ(Eswatini):\n pass\n\n\nclass SZW(Eswatini):\n pass\n", "path": "holidays/countries/eswatini.py"}]}
| 1,542 | 171 |
gh_patches_debug_32462
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-59557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate the ProjectCombinedRuleIndexEndpoint
[GCP API access logs](https://console.cloud.google.com/logs/query;query=resource.type%20%3D%20k8s_container%0Aresource.labels.namespace_name%20%3D%20default%0Aresource.labels.container_name%20%3D%20sentry%0Alabels.name%20%3D%20sentry.access.api%0AjsonPayload.view%3D~%22ProjectCombinedRuleIndexEndpoint%22;summaryFields=:true:32:beginning;lfeCustomFields=jsonPayload%252Fview,jsonPayload%252Forganization_id;cursorTimestamp=2023-09-06T18:29:05.855473577Z;startTime=2023-09-06T16:51:17.461Z;endTime=2023-09-06T23:51:17.461482Z?project=internal-sentry) show that it's not used by us, and only by 2 customers. It's an undocumented endpoint so we can [set the deprecation header](https://www.notion.so/sentry/Sentry-API-Deprecation-Policy-ccbdea15a34c4fdeb50985685adc3368) and get rid of it.
Related to https://github.com/getsentry/sentry/issues/54005
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/incidents/endpoints/project_alert_rule_index.py`
Content:
```
1 from __future__ import annotations
2
3 from rest_framework.request import Request
4 from rest_framework.response import Response
5
6 from sentry import features
7 from sentry.api.api_publish_status import ApiPublishStatus
8 from sentry.api.base import region_silo_endpoint
9 from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
10 from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator
11 from sentry.api.serializers import CombinedRuleSerializer, serialize
12 from sentry.constants import ObjectStatus
13 from sentry.incidents.endpoints.organization_alert_rule_index import AlertRuleIndexMixin
14 from sentry.incidents.models import AlertRule
15 from sentry.models.rule import Rule
16 from sentry.snuba.dataset import Dataset
17
18
19 @region_silo_endpoint
20 class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):
21 publish_status = {
22 "GET": ApiPublishStatus.UNKNOWN,
23 }
24
25 def get(self, request: Request, project) -> Response:
26 """
27 Fetches alert rules and legacy rules for a project
28 """
29 alert_rules = AlertRule.objects.fetch_for_project(project)
30 if not features.has("organizations:performance-view", project.organization):
31 # Filter to only error alert rules
32 alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)
33
34 alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, ["date_added"])
35 rule_intermediary = CombinedQuerysetIntermediary(
36 Rule.objects.filter(
37 project=project,
38 status=ObjectStatus.ACTIVE,
39 ),
40 ["date_added"],
41 )
42
43 return self.paginate(
44 request,
45 paginator_cls=CombinedQuerysetPaginator,
46 on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer()),
47 default_per_page=25,
48 intermediaries=[alert_rule_intermediary, rule_intermediary],
49 desc=True,
50 )
51
52
53 @region_silo_endpoint
54 class ProjectAlertRuleIndexEndpoint(ProjectEndpoint, AlertRuleIndexMixin):
55 publish_status = {
56 "GET": ApiPublishStatus.UNKNOWN,
57 "POST": ApiPublishStatus.UNKNOWN,
58 }
59 permission_classes = (ProjectAlertRulePermission,)
60
61 def get(self, request: Request, project) -> Response:
62 """
63 Fetches metric alert rules for a project - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.
64 """
65 return self.fetch_metric_alert(request, project.organization, project)
66
67 def post(self, request: Request, project) -> Response:
68 """
69 Create an alert rule - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.
70 """
71 return self.create_metric_alert(request, project.organization, project)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/incidents/endpoints/project_alert_rule_index.py b/src/sentry/incidents/endpoints/project_alert_rule_index.py
--- a/src/sentry/incidents/endpoints/project_alert_rule_index.py
+++ b/src/sentry/incidents/endpoints/project_alert_rule_index.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+from datetime import datetime
+
from rest_framework.request import Request
from rest_framework.response import Response
@@ -7,6 +9,7 @@
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
+from sentry.api.helpers.deprecation import deprecated
from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator
from sentry.api.serializers import CombinedRuleSerializer, serialize
from sentry.constants import ObjectStatus
@@ -18,13 +21,15 @@
@region_silo_endpoint
class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):
+ DEPRECATION_DATE = datetime.fromisoformat("2024-02-07T00:00:00+00:00:00")
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
+ @deprecated(DEPRECATION_DATE, "sentry-api-0-organization-combined-rules")
def get(self, request: Request, project) -> Response:
"""
- Fetches alert rules and legacy rules for a project
+ Fetches alert rules and legacy rules for a project. @deprecated. Use OrganizationCombinedRuleIndexEndpoint instead.
"""
alert_rules = AlertRule.objects.fetch_for_project(project)
if not features.has("organizations:performance-view", project.organization):
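For context, the `@deprecated` decorator used above is imported from `sentry.api.helpers.deprecation`; its internals are not shown in this record. A generic, self-contained sketch of the idea — stamping deprecation metadata onto a response — where the header names and the dict return type are chosen purely for illustration and are not Sentry's actual implementation:

```python
from datetime import datetime, timezone
from functools import wraps

def deprecated(deprecation_date, suggested_api):
    """Illustrative only: attach Deprecation/Sunset-style metadata to a
    view that returns a plain dict of response headers."""
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            headers = view(*args, **kwargs)
            headers["Deprecation"] = deprecation_date.isoformat()
            headers["Link"] = f'<{suggested_api}>; rel="successor-version"'
            return headers
        return wrapper
    return decorator

@deprecated(datetime(2024, 2, 7, tzinfo=timezone.utc), "organization-combined-rules")
def combined_rules_view():
    return {}

print(combined_rules_view())  # {'Deprecation': '2024-02-07T00:00:00+00:00', 'Link': '<organization-combined-rules>; rel="successor-version"'}
```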
|
{"golden_diff": "diff --git a/src/sentry/incidents/endpoints/project_alert_rule_index.py b/src/sentry/incidents/endpoints/project_alert_rule_index.py\n--- a/src/sentry/incidents/endpoints/project_alert_rule_index.py\n+++ b/src/sentry/incidents/endpoints/project_alert_rule_index.py\n@@ -1,5 +1,7 @@\n from __future__ import annotations\n \n+from datetime import datetime\n+\n from rest_framework.request import Request\n from rest_framework.response import Response\n \n@@ -7,6 +9,7 @@\n from sentry.api.api_publish_status import ApiPublishStatus\n from sentry.api.base import region_silo_endpoint\n from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\n+from sentry.api.helpers.deprecation import deprecated\n from sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator\n from sentry.api.serializers import CombinedRuleSerializer, serialize\n from sentry.constants import ObjectStatus\n@@ -18,13 +21,15 @@\n \n @region_silo_endpoint\n class ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):\n+ DEPRECATION_DATE = datetime.fromisoformat(\"2024-02-07T00:00:00+00:00:00\")\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n \n+ @deprecated(DEPRECATION_DATE, \"sentry-api-0-organization-combined-rules\")\n def get(self, request: Request, project) -> Response:\n \"\"\"\n- Fetches alert rules and legacy rules for a project\n+ Fetches alert rules and legacy rules for a project. @deprecated. Use OrganizationCombinedRuleIndexEndpoint instead.\n \"\"\"\n alert_rules = AlertRule.objects.fetch_for_project(project)\n if not features.has(\"organizations:performance-view\", project.organization):\n", "issue": "Deprecate the ProjectCombinedRuleIndexEndpoint\n[GCP API access logs](https://console.cloud.google.com/logs/query;query=resource.type%20%3D%20k8s_container%0Aresource.labels.namespace_name%20%3D%20default%0Aresource.labels.container_name%20%3D%20sentry%0Alabels.name%20%3D%20sentry.access.api%0AjsonPayload.view%3D~%22ProjectCombinedRuleIndexEndpoint%22;summaryFields=:true:32:beginning;lfeCustomFields=jsonPayload%252Fview,jsonPayload%252Forganization_id;cursorTimestamp=2023-09-06T18:29:05.855473577Z;startTime=2023-09-06T16:51:17.461Z;endTime=2023-09-06T23:51:17.461482Z?project=internal-sentry) show that it's not used by us, and only by 2 customers. It's an undocumented endpoint so we can [set the deprecation header](https://www.notion.so/sentry/Sentry-API-Deprecation-Policy-ccbdea15a34c4fdeb50985685adc3368) and get rid of it. 
\n\nRelated to https://github.com/getsentry/sentry/issues/54005\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.api_publish_status import ApiPublishStatus\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator\nfrom sentry.api.serializers import CombinedRuleSerializer, serialize\nfrom sentry.constants import ObjectStatus\nfrom sentry.incidents.endpoints.organization_alert_rule_index import AlertRuleIndexMixin\nfrom sentry.incidents.models import AlertRule\nfrom sentry.models.rule import Rule\nfrom sentry.snuba.dataset import Dataset\n\n\n@region_silo_endpoint\nclass ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for a project\n \"\"\"\n alert_rules = AlertRule.objects.fetch_for_project(project)\n if not features.has(\"organizations:performance-view\", project.organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, [\"date_added\"])\n rule_intermediary = CombinedQuerysetIntermediary(\n Rule.objects.filter(\n project=project,\n status=ObjectStatus.ACTIVE,\n ),\n [\"date_added\"],\n )\n\n return self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer()),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=True,\n )\n\n\n@region_silo_endpoint\nclass ProjectAlertRuleIndexEndpoint(ProjectEndpoint, AlertRuleIndexMixin):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n \"POST\": ApiPublishStatus.UNKNOWN,\n }\n permission_classes = (ProjectAlertRulePermission,)\n\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches metric alert rules for a project - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.fetch_metric_alert(request, project.organization, project)\n\n def post(self, request: Request, project) -> Response:\n \"\"\"\n Create an alert rule - @deprecated. 
Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.create_metric_alert(request, project.organization, project)\n", "path": "src/sentry/incidents/endpoints/project_alert_rule_index.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import features\nfrom sentry.api.api_publish_status import ApiPublishStatus\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.helpers.deprecation import deprecated\nfrom sentry.api.paginator import CombinedQuerysetIntermediary, CombinedQuerysetPaginator\nfrom sentry.api.serializers import CombinedRuleSerializer, serialize\nfrom sentry.constants import ObjectStatus\nfrom sentry.incidents.endpoints.organization_alert_rule_index import AlertRuleIndexMixin\nfrom sentry.incidents.models import AlertRule\nfrom sentry.models.rule import Rule\nfrom sentry.snuba.dataset import Dataset\n\n\n@region_silo_endpoint\nclass ProjectCombinedRuleIndexEndpoint(ProjectEndpoint):\n DEPRECATION_DATE = datetime.fromisoformat(\"2024-02-07T00:00:00+00:00:00\")\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n\n @deprecated(DEPRECATION_DATE, \"sentry-api-0-organization-combined-rules\")\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches alert rules and legacy rules for a project. @deprecated. Use OrganizationCombinedRuleIndexEndpoint instead.\n \"\"\"\n alert_rules = AlertRule.objects.fetch_for_project(project)\n if not features.has(\"organizations:performance-view\", project.organization):\n # Filter to only error alert rules\n alert_rules = alert_rules.filter(snuba_query__dataset=Dataset.Events.value)\n\n alert_rule_intermediary = CombinedQuerysetIntermediary(alert_rules, [\"date_added\"])\n rule_intermediary = CombinedQuerysetIntermediary(\n Rule.objects.filter(\n project=project,\n status=ObjectStatus.ACTIVE,\n ),\n [\"date_added\"],\n )\n\n return self.paginate(\n request,\n paginator_cls=CombinedQuerysetPaginator,\n on_results=lambda x: serialize(x, request.user, CombinedRuleSerializer()),\n default_per_page=25,\n intermediaries=[alert_rule_intermediary, rule_intermediary],\n desc=True,\n )\n\n\n@region_silo_endpoint\nclass ProjectAlertRuleIndexEndpoint(ProjectEndpoint, AlertRuleIndexMixin):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n \"POST\": ApiPublishStatus.UNKNOWN,\n }\n permission_classes = (ProjectAlertRulePermission,)\n\n def get(self, request: Request, project) -> Response:\n \"\"\"\n Fetches metric alert rules for a project - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.fetch_metric_alert(request, project.organization, project)\n\n def post(self, request: Request, project) -> Response:\n \"\"\"\n Create an alert rule - @deprecated. Use OrganizationAlertRuleIndexEndpoint instead.\n \"\"\"\n return self.create_metric_alert(request, project.organization, project)\n", "path": "src/sentry/incidents/endpoints/project_alert_rule_index.py"}]}
| 1,298 | 389 |
gh_patches_debug_20394
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1522
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Collection events for sbazv_de double in calendar
### I Have A Problem With:
A specific source
### What's Your Problem
Collection dates and types are pulled correctly into the calendar but events show up twice per day. Sensor for next collection ('AbfallTermin') reads 'Gelber Sack, Gelber Sack in 7 Tagen'. Config and logs look ok.

### Source (if relevant)
sbazv_de
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: sbazv_de
args:
city: Schönefeld
district: Großziethen
street: Kxxxxxxxx
customize:
- type: Restmülltonnen
alias: Restmuell
icon: mdi:trash-can
- type: Gelbe Säcke
alias: GelberSack
icon: mdi:recycle
- type: Papiertonnen
alias: Altpapier
icon: mdi:file-document
- type: Laubsäcke
alias: Laubsack
icon: mdi:trash-can
use_dedicated_calendar: false
dedicated_calendar_title: SBAZV
fetch_time: "04:00"
day_switch_time: "12:00"
sensor:
# ------- Waste Collection Schedule -------
# Nächster Abholtermin
- platform: waste_collection_schedule
name: "AbfallTermin"
value_template: '{{value.types|join(", ")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py`
Content:
```
1 import requests
2 from waste_collection_schedule import Collection # type: ignore[attr-defined]
3 from waste_collection_schedule.service.ICS import ICS
4
5 TITLE = "Südbrandenburgischer Abfallzweckverband"
6 DESCRIPTION = "SBAZV Brandenburg, Deutschland"
7 URL = "https://www.sbazv.de"
8 TEST_CASES = {
9 "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."}
10 }
11
12 ICON_MAP = {
13 "Restmülltonnen": "mdi:trash-can",
14 "Laubsäcke": "mdi:leaf",
15 "Gelbe Säcke": "mdi:sack",
16 "Papiertonnen": "mdi:package-variant",
17 "Weihnachtsbäume": "mdi:pine-tree",
18 }
19
20 # _LOGGER = logging.getLogger(__name__)
21
22
23 class Source:
24 def __init__(self, city, district, street=None):
25 self._city = city
26 self._district = district
27 self._street = street
28 self._ics = ICS()
29
30 def fetch(self):
31 args = {
32 "city": self._city,
33 "district": self._district,
34 "street": self._street,
35 }
36
37 # get ics file
38 # https://www.sbazv.de/entsorgungstermine/klein.ics?city=Wildau&district=Wildau&street=Miersdorfer+Str.
39 r = requests.get(
40 "https://www.sbazv.de/entsorgungstermine/klein.ics", params=args
41 )
42
43 # parse ics file
44 dates = self._ics.convert(r.text)
45
46 entries = []
47 for d in dates:
48 waste_type = d[1].strip()
49 next_pickup_date = d[0]
50
51 entries.append(
52 Collection(
53 date=next_pickup_date,
54 t=waste_type,
55 icon=ICON_MAP.get(waste_type),
56 )
57 )
58
59 return entries
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py
@@ -6,7 +6,12 @@
DESCRIPTION = "SBAZV Brandenburg, Deutschland"
URL = "https://www.sbazv.de"
TEST_CASES = {
- "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."}
+ "Wildau": {"city": "wildau", "district": "Wildau", "street": "Miersdorfer Str."},
+ "Schönefeld": {
+ "city": "Schönefeld",
+ "district": "Großziethen",
+ "street": "kleistring",
+ },
}
ICON_MAP = {
@@ -47,7 +52,11 @@
for d in dates:
waste_type = d[1].strip()
next_pickup_date = d[0]
-
+ # remove duplicates
+ if any(
+ e.date == next_pickup_date and e.type == waste_type for e in entries
+ ):
+ continue
entries.append(
Collection(
date=next_pickup_date,
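For context, the patch above removes duplicates with a linear `any()` scan over the entries collected so far. A self-contained sketch (dates and type strings invented for the example) of an equivalent set-based de-duplication, which keeps the duplicate check O(1) per event:

```python
from datetime import date

# Simulated ICS output in which the same pickup appears twice.
raw_events = [
    (date(2023, 12, 4), "Gelbe Säcke"),
    (date(2023, 12, 4), "Gelbe Säcke"),   # duplicate entry from the feed
    (date(2023, 12, 6), "Restmülltonnen"),
]

seen = set()
deduplicated = []
for pickup_date, waste_type in raw_events:
    key = (pickup_date, waste_type.strip())
    if key in seen:
        continue                           # drop the duplicate occurrence
    seen.add(key)
    deduplicated.append(key)

print(deduplicated)                        # each (date, type) pair appears once
```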
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py\n@@ -6,7 +6,12 @@\n DESCRIPTION = \"SBAZV Brandenburg, Deutschland\"\n URL = \"https://www.sbazv.de\"\n TEST_CASES = {\n- \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"}\n+ \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"},\n+ \"Sch\u00f6nefeld\": {\n+ \"city\": \"Sch\u00f6nefeld\",\n+ \"district\": \"Gro\u00dfziethen\",\n+ \"street\": \"kleistring\",\n+ },\n }\n \n ICON_MAP = {\n@@ -47,7 +52,11 @@\n for d in dates:\n waste_type = d[1].strip()\n next_pickup_date = d[0]\n-\n+ # remove duplicates\n+ if any(\n+ e.date == next_pickup_date and e.type == waste_type for e in entries\n+ ):\n+ continue\n entries.append(\n Collection(\n date=next_pickup_date,\n", "issue": "[Bug]: Collection events for sbazv_de double in calendar\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nCollection dates and types are pulled correctly into the calendar but events show up twice per day. Sensor for next collection ('AbfallTermin') reads 'Gelber Sack, Gelber Sack in 7 Tagen'. Config and logs look ok.\r\n\r\n\r\n\n\n### Source (if relevant)\n\nsbazv_de\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: sbazv_de\r\n args:\r\n city: Sch\u00f6nefeld\r\n district: Gro\u00dfziethen\r\n street: Kxxxxxxxx\r\n customize:\r\n - type: Restm\u00fclltonnen\r\n alias: Restmuell\r\n icon: mdi:trash-can \r\n - type: Gelbe S\u00e4cke\r\n alias: GelberSack\r\n icon: mdi:recycle\r\n - type: Papiertonnen\r\n alias: Altpapier\r\n icon: mdi:file-document\r\n - type: Laubs\u00e4cke\r\n alias: Laubsack\r\n icon: mdi:trash-can\r\n use_dedicated_calendar: false\r\n dedicated_calendar_title: SBAZV\r\n fetch_time: \"04:00\"\r\n day_switch_time: \"12:00\"\r\n\r\nsensor:\r\n # ------- Waste Collection Schedule ------- \r\n # N\u00e4chster Abholtermin\r\n - platform: waste_collection_schedule\r\n name: \"AbfallTermin\"\r\n value_template: '{{value.types|join(\", \")}}{% if value.daysTo == 0 %} Heute{% elif value.daysTo == 1 %} Morgen{% else %} in {{value.daysTo}} Tagen{% endif %}'\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", 
"before_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"S\u00fcdbrandenburgischer Abfallzweckverband\"\nDESCRIPTION = \"SBAZV Brandenburg, Deutschland\"\nURL = \"https://www.sbazv.de\"\nTEST_CASES = {\n \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"}\n}\n\nICON_MAP = {\n \"Restm\u00fclltonnen\": \"mdi:trash-can\",\n \"Laubs\u00e4cke\": \"mdi:leaf\",\n \"Gelbe S\u00e4cke\": \"mdi:sack\",\n \"Papiertonnen\": \"mdi:package-variant\",\n \"Weihnachtsb\u00e4ume\": \"mdi:pine-tree\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, city, district, street=None):\n self._city = city\n self._district = district\n self._street = street\n self._ics = ICS()\n\n def fetch(self):\n args = {\n \"city\": self._city,\n \"district\": self._district,\n \"street\": self._street,\n }\n\n # get ics file\n # https://www.sbazv.de/entsorgungstermine/klein.ics?city=Wildau&district=Wildau&street=Miersdorfer+Str.\n r = requests.get(\n \"https://www.sbazv.de/entsorgungstermine/klein.ics\", params=args\n )\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n waste_type = d[1].strip()\n next_pickup_date = d[0]\n\n entries.append(\n Collection(\n date=next_pickup_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py"}], "after_files": [{"content": "import requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"S\u00fcdbrandenburgischer Abfallzweckverband\"\nDESCRIPTION = \"SBAZV Brandenburg, Deutschland\"\nURL = \"https://www.sbazv.de\"\nTEST_CASES = {\n \"Wildau\": {\"city\": \"wildau\", \"district\": \"Wildau\", \"street\": \"Miersdorfer Str.\"},\n \"Sch\u00f6nefeld\": {\n \"city\": \"Sch\u00f6nefeld\",\n \"district\": \"Gro\u00dfziethen\",\n \"street\": \"kleistring\",\n },\n}\n\nICON_MAP = {\n \"Restm\u00fclltonnen\": \"mdi:trash-can\",\n \"Laubs\u00e4cke\": \"mdi:leaf\",\n \"Gelbe S\u00e4cke\": \"mdi:sack\",\n \"Papiertonnen\": \"mdi:package-variant\",\n \"Weihnachtsb\u00e4ume\": \"mdi:pine-tree\",\n}\n\n# _LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, city, district, street=None):\n self._city = city\n self._district = district\n self._street = street\n self._ics = ICS()\n\n def fetch(self):\n args = {\n \"city\": self._city,\n \"district\": self._district,\n \"street\": self._street,\n }\n\n # get ics file\n # https://www.sbazv.de/entsorgungstermine/klein.ics?city=Wildau&district=Wildau&street=Miersdorfer+Str.\n r = requests.get(\n \"https://www.sbazv.de/entsorgungstermine/klein.ics\", params=args\n )\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n waste_type = d[1].strip()\n next_pickup_date = d[0]\n # remove duplicates\n if any(\n e.date == next_pickup_date and e.type == waste_type for e in entries\n ):\n continue\n entries.append(\n Collection(\n date=next_pickup_date,\n t=waste_type,\n icon=ICON_MAP.get(waste_type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/sbazv_de.py"}]}
| 1,489 | 319 |
gh_patches_debug_2340
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-18776
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
download from d.tube fails
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2019.01.02*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2019.01.02**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [x] Site support request (request for __re__adding support for an existing site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add the `-v` flag to **your command line**
```
user@mymachine:~$ youtube-dl --verbose
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['--verbose']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2019.01.02
[debug] Python version 3.5.3 (CPython) - Linux-4.9.0-4-amd64-x86_64-with-debian-9.2
[debug] exe versions: none
[debug] Proxy map: {}
Usage: youtube-dl [OPTIONS] URL [URL...]
youtube-dl: error: You must provide at least one URL.
Type youtube-dl --help to see a list of all options.
user@ mymachine:~$ youtube-dl --verbose "https://d.tube/#!/v/dennisxxx/lgfrcata"
bash: !/v/dennisxxx/lgfrcata: event not found
```
### Description of your *issue*, suggested solution and other information
Download from d.tube failed. I am sorry not to be able to provide more information. If I can help with or try anything else, I will gladly do so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/dtube.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import json
5 import re
6 from socket import timeout
7
8 from .common import InfoExtractor
9 from ..utils import (
10 int_or_none,
11 parse_iso8601,
12 )
13
14
15 class DTubeIE(InfoExtractor):
16 _VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
17 _TEST = {
18 'url': 'https://d.tube/#!/v/benswann/zqd630em',
19 'md5': 'a03eaa186618ffa7a3145945543a251e',
20 'info_dict': {
21 'id': 'zqd630em',
22 'ext': 'mp4',
23 'title': 'Reality Check: FDA\'s Disinformation Campaign on Kratom',
24 'description': 'md5:700d164e066b87f9eac057949e4227c2',
25 'uploader_id': 'benswann',
26 'upload_date': '20180222',
27 'timestamp': 1519328958,
28 },
29 'params': {
30 'format': '480p',
31 },
32 }
33
34 def _real_extract(self, url):
35 uploader_id, video_id = re.match(self._VALID_URL, url).groups()
36 result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({
37 'jsonrpc': '2.0',
38 'method': 'get_content',
39 'params': [uploader_id, video_id],
40 }).encode())['result']
41
42 metadata = json.loads(result['json_metadata'])
43 video = metadata['video']
44 content = video['content']
45 info = video.get('info', {})
46 title = info.get('title') or result['title']
47
48 def canonical_url(h):
49 if not h:
50 return None
51 return 'https://ipfs.io/ipfs/' + h
52
53 formats = []
54 for q in ('240', '480', '720', '1080', ''):
55 video_url = canonical_url(content.get('video%shash' % q))
56 if not video_url:
57 continue
58 format_id = (q + 'p') if q else 'Source'
59 try:
60 self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))
61 self._downloader._opener.open(video_url, timeout=5).close()
62 except timeout:
63 self.to_screen(
64 '%s: %s URL is invalid, skipping' % (video_id, format_id))
65 continue
66 formats.append({
67 'format_id': format_id,
68 'url': video_url,
69 'height': int_or_none(q),
70 'ext': 'mp4',
71 })
72
73 return {
74 'id': video_id,
75 'title': title,
76 'description': content.get('description'),
77 'thumbnail': canonical_url(info.get('snaphash')),
78 'tags': content.get('tags') or metadata.get('tags'),
79 'duration': info.get('duration'),
80 'formats': formats,
81 'timestamp': parse_iso8601(result.get('created')),
82 'uploader_id': uploader_id,
83 }
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/dtube.py b/youtube_dl/extractor/dtube.py
--- a/youtube_dl/extractor/dtube.py
+++ b/youtube_dl/extractor/dtube.py
@@ -48,7 +48,7 @@
def canonical_url(h):
if not h:
return None
- return 'https://ipfs.io/ipfs/' + h
+ return 'https://video.dtube.top/ipfs/' + h
formats = []
for q in ('240', '480', '720', '1080', ''):
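For context, the fix above simply swaps one hard-coded IPFS gateway for another. A self-contained sketch (the gateway list and helper name are assumptions, not part of the extractor) of a fallback approach that probes several gateways and keeps the first one that answers:

```python
import urllib.request

# Candidate public gateways, tried in order; the list is illustrative.
GATEWAYS = [
    "https://video.dtube.top/ipfs/",
    "https://ipfs.io/ipfs/",
]

def resolve_ipfs_url(ipfs_hash, timeout=5):
    for base in GATEWAYS:
        url = base + ipfs_hash
        try:
            urllib.request.urlopen(url, timeout=timeout).close()
            return url                     # first gateway that responds wins
        except Exception:
            continue                       # unreachable or timed out: try next
    return None
```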
|
{"golden_diff": "diff --git a/youtube_dl/extractor/dtube.py b/youtube_dl/extractor/dtube.py\n--- a/youtube_dl/extractor/dtube.py\n+++ b/youtube_dl/extractor/dtube.py\n@@ -48,7 +48,7 @@\n def canonical_url(h):\n if not h:\n return None\n- return 'https://ipfs.io/ipfs/' + h\n+ return 'https://video.dtube.top/ipfs/' + h\n \n formats = []\n for q in ('240', '480', '720', '1080', ''):\n", "issue": "download from d.tube fails\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2019.01.02*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2019.01.02**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [x] Site support request (request for __re__adding support for an existing site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd the `-v` flag to **your command line** \r\n\r\n```\r\nuser@mymachine:~$ youtube-dl --verbose\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['--verbose']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2019.01.02\r\n[debug] Python version 3.5.3 (CPython) - Linux-4.9.0-4-amd64-x86_64-with-debian-9.2\r\n[debug] exe versions: none\r\n[debug] Proxy map: {}\r\nUsage: youtube-dl [OPTIONS] URL [URL...]\r\n\r\nyoutube-dl: error: You must provide at least one URL.\r\nType youtube-dl --help to see a list of all options.\r\nuser@ mymachine:~$ youtube-dl --verbose \"https://d.tube/#!/v/dennisxxx/lgfrcata\"\r\nbash: !/v/dennisxxx/lgfrcata: event not found\r\n```\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nDownload from d.tube failed. I am sorry to not being able to provide more information. 
If I can help/try anything else I will gladly do though....\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\nfrom socket import timeout\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n parse_iso8601,\n)\n\n\nclass DTubeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?d\\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'\n _TEST = {\n 'url': 'https://d.tube/#!/v/benswann/zqd630em',\n 'md5': 'a03eaa186618ffa7a3145945543a251e',\n 'info_dict': {\n 'id': 'zqd630em',\n 'ext': 'mp4',\n 'title': 'Reality Check: FDA\\'s Disinformation Campaign on Kratom',\n 'description': 'md5:700d164e066b87f9eac057949e4227c2',\n 'uploader_id': 'benswann',\n 'upload_date': '20180222',\n 'timestamp': 1519328958,\n },\n 'params': {\n 'format': '480p',\n },\n }\n\n def _real_extract(self, url):\n uploader_id, video_id = re.match(self._VALID_URL, url).groups()\n result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'get_content',\n 'params': [uploader_id, video_id],\n }).encode())['result']\n\n metadata = json.loads(result['json_metadata'])\n video = metadata['video']\n content = video['content']\n info = video.get('info', {})\n title = info.get('title') or result['title']\n\n def canonical_url(h):\n if not h:\n return None\n return 'https://ipfs.io/ipfs/' + h\n\n formats = []\n for q in ('240', '480', '720', '1080', ''):\n video_url = canonical_url(content.get('video%shash' % q))\n if not video_url:\n continue\n format_id = (q + 'p') if q else 'Source'\n try:\n self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))\n self._downloader._opener.open(video_url, timeout=5).close()\n except timeout:\n self.to_screen(\n '%s: %s URL is invalid, skipping' % (video_id, format_id))\n continue\n formats.append({\n 'format_id': format_id,\n 'url': video_url,\n 'height': int_or_none(q),\n 'ext': 'mp4',\n })\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': content.get('description'),\n 'thumbnail': canonical_url(info.get('snaphash')),\n 'tags': content.get('tags') or metadata.get('tags'),\n 'duration': info.get('duration'),\n 'formats': formats,\n 'timestamp': parse_iso8601(result.get('created')),\n 'uploader_id': uploader_id,\n }\n", "path": "youtube_dl/extractor/dtube.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\nfrom socket import timeout\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n parse_iso8601,\n)\n\n\nclass DTubeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?d\\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'\n _TEST = {\n 'url': 'https://d.tube/#!/v/benswann/zqd630em',\n 'md5': 'a03eaa186618ffa7a3145945543a251e',\n 'info_dict': {\n 'id': 'zqd630em',\n 'ext': 'mp4',\n 'title': 'Reality Check: FDA\\'s Disinformation Campaign on Kratom',\n 'description': 'md5:700d164e066b87f9eac057949e4227c2',\n 'uploader_id': 'benswann',\n 'upload_date': '20180222',\n 'timestamp': 1519328958,\n },\n 'params': {\n 'format': '480p',\n },\n }\n\n def _real_extract(self, url):\n uploader_id, video_id = re.match(self._VALID_URL, url).groups()\n result = self._download_json('https://api.steemit.com/', video_id, data=json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'get_content',\n 'params': [uploader_id, video_id],\n }).encode())['result']\n\n metadata = json.loads(result['json_metadata'])\n video = 
metadata['video']\n content = video['content']\n info = video.get('info', {})\n title = info.get('title') or result['title']\n\n def canonical_url(h):\n if not h:\n return None\n return 'https://video.dtube.top/ipfs/' + h\n\n formats = []\n for q in ('240', '480', '720', '1080', ''):\n video_url = canonical_url(content.get('video%shash' % q))\n if not video_url:\n continue\n format_id = (q + 'p') if q else 'Source'\n try:\n self.to_screen('%s: Checking %s video format URL' % (video_id, format_id))\n self._downloader._opener.open(video_url, timeout=5).close()\n except timeout:\n self.to_screen(\n '%s: %s URL is invalid, skipping' % (video_id, format_id))\n continue\n formats.append({\n 'format_id': format_id,\n 'url': video_url,\n 'height': int_or_none(q),\n 'ext': 'mp4',\n })\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': content.get('description'),\n 'thumbnail': canonical_url(info.get('snaphash')),\n 'tags': content.get('tags') or metadata.get('tags'),\n 'duration': info.get('duration'),\n 'formats': formats,\n 'timestamp': parse_iso8601(result.get('created')),\n 'uploader_id': uploader_id,\n }\n", "path": "youtube_dl/extractor/dtube.py"}]}
| 1,862 | 137 |