| problem_id (string, length 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13-58) | prompt (string, length 1.1k-10.2k) | golden_diff (string, length 151-4.94k) | verification_info (string, length 582-21k) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_7002 | rasdani/github-patches | git_diff | streamlit__streamlit-7050 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove number_input -/+ step toggles option
Is there an option to remove the -/+ number_input step toggles? If not, I would suggest that for a future release. Thank you!
Also, is it possible to increase the precision?
Right now I am just using a text_input and type casting to float to get around this.
---
Community voting on feature requests enables the Streamlit team to understand which features are most important to our users.
**If you'd like the Streamlit team to prioritize this feature request, please use the 👍 (thumbs up emoji) reaction in response to the initial post.**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_number_input.py`
Content:
```
1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16 from streamlit import runtime
17
18 i1 = st.number_input("number input 1")
19 st.write('value 1: "', i1, '"')
20
21 i2 = st.number_input("number input 2", value=1)
22 st.write('value 2: "', i2, '"')
23
24 i3 = st.number_input("number input 3", 1, 10)
25 st.write('value 3: "', i3, '"')
26
27 i4 = st.number_input("number input 4", step=2)
28 st.write('value 4: "', i4, '"')
29
30 i5 = st.number_input("number input 5", max_value=10)
31 st.write('value 5: "', i5, '"')
32
33 i6 = st.number_input("number input 6", disabled=True)
34 st.write('value 6: "', i6, '"')
35
36 i7 = st.number_input("number input 7", label_visibility="hidden")
37 st.write('value 7: "', i7, '"')
38
39 i8 = st.number_input("number input 8", label_visibility="collapsed")
40 st.write('value 8: "', i8, '"')
41
42 if runtime.exists():
43
44 def on_change():
45 st.session_state.number_input_changed = True
46
47 st.number_input("number input 9", key="number_input9", on_change=on_change)
48 st.write('value 9: "', st.session_state.number_input9, '"')
49 st.write("number input changed:", "number_input_changed" in st.session_state)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_number_input.py b/e2e/scripts/st_number_input.py
--- a/e2e/scripts/st_number_input.py
+++ b/e2e/scripts/st_number_input.py
@@ -47,3 +47,9 @@
st.number_input("number input 9", key="number_input9", on_change=on_change)
st.write('value 9: "', st.session_state.number_input9, '"')
st.write("number input changed:", "number_input_changed" in st.session_state)
+
+[col1, col2, col3, col4, col5, col6] = st.columns(6)
+
+with col1:
+ i10 = st.number_input("number input 10", max_value=10)
+ st.write('value 10: "', i10, '"')
| {"golden_diff": "diff --git a/e2e/scripts/st_number_input.py b/e2e/scripts/st_number_input.py\n--- a/e2e/scripts/st_number_input.py\n+++ b/e2e/scripts/st_number_input.py\n@@ -47,3 +47,9 @@\n st.number_input(\"number input 9\", key=\"number_input9\", on_change=on_change)\n st.write('value 9: \"', st.session_state.number_input9, '\"')\n st.write(\"number input changed:\", \"number_input_changed\" in st.session_state)\n+\n+[col1, col2, col3, col4, col5, col6] = st.columns(6)\n+\n+with col1:\n+ i10 = st.number_input(\"number input 10\", max_value=10)\n+ st.write('value 10: \"', i10, '\"')\n", "issue": "Remove number_input -/+ step toggles option\nIs there an option to remove the -/+ number_input step toggles? If not, I would suggest that for a future release. Thank you! \r\n\r\nAlso, is it possible to increase the precision? \r\n\r\nRight now I am just using a text_input and type casting to float to get around this.\r\n\r\n---\r\n\r\nCommunity voting on feature requests enables the Streamlit team to understand which features are most important to our users.\r\n\r\n**If you'd like the Streamlit team to prioritize this feature request, please use the \ud83d\udc4d (thumbs up emoji) reaction in response to the initial post.**\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\ni1 = st.number_input(\"number input 1\")\nst.write('value 1: \"', i1, '\"')\n\ni2 = st.number_input(\"number input 2\", value=1)\nst.write('value 2: \"', i2, '\"')\n\ni3 = st.number_input(\"number input 3\", 1, 10)\nst.write('value 3: \"', i3, '\"')\n\ni4 = st.number_input(\"number input 4\", step=2)\nst.write('value 4: \"', i4, '\"')\n\ni5 = st.number_input(\"number input 5\", max_value=10)\nst.write('value 5: \"', i5, '\"')\n\ni6 = st.number_input(\"number input 6\", disabled=True)\nst.write('value 6: \"', i6, '\"')\n\ni7 = st.number_input(\"number input 7\", label_visibility=\"hidden\")\nst.write('value 7: \"', i7, '\"')\n\ni8 = st.number_input(\"number input 8\", label_visibility=\"collapsed\")\nst.write('value 8: \"', i8, '\"')\n\nif runtime.exists():\n\n def on_change():\n st.session_state.number_input_changed = True\n\n st.number_input(\"number input 9\", key=\"number_input9\", on_change=on_change)\n st.write('value 9: \"', st.session_state.number_input9, '\"')\n st.write(\"number input changed:\", \"number_input_changed\" in st.session_state)\n", "path": "e2e/scripts/st_number_input.py"}], "after_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\nfrom streamlit import runtime\n\ni1 = st.number_input(\"number input 1\")\nst.write('value 1: \"', i1, '\"')\n\ni2 = st.number_input(\"number input 2\", value=1)\nst.write('value 2: \"', i2, '\"')\n\ni3 = st.number_input(\"number input 3\", 1, 10)\nst.write('value 3: \"', i3, '\"')\n\ni4 = st.number_input(\"number input 4\", step=2)\nst.write('value 4: \"', i4, '\"')\n\ni5 = st.number_input(\"number input 5\", max_value=10)\nst.write('value 5: \"', i5, '\"')\n\ni6 = st.number_input(\"number input 6\", disabled=True)\nst.write('value 6: \"', i6, '\"')\n\ni7 = st.number_input(\"number input 7\", label_visibility=\"hidden\")\nst.write('value 7: \"', i7, '\"')\n\ni8 = st.number_input(\"number input 8\", label_visibility=\"collapsed\")\nst.write('value 8: \"', i8, '\"')\n\nif runtime.exists():\n\n def on_change():\n st.session_state.number_input_changed = True\n\n st.number_input(\"number input 9\", key=\"number_input9\", on_change=on_change)\n st.write('value 9: \"', st.session_state.number_input9, '\"')\n st.write(\"number input changed:\", \"number_input_changed\" in st.session_state)\n\n[col1, col2, col3, col4, col5, col6] = st.columns(6)\n\nwith col1:\n i10 = st.number_input(\"number input 10\", max_value=10)\n st.write('value 10: \"', i10, '\"')\n", "path": "e2e/scripts/st_number_input.py"}]} | 966 | 185 |
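
For reference, a minimal sketch of loading records with the schema shown at the top of this dump, using the `datasets` library. The dataset id is taken from the `source` column (`rasdani/github-patches`); the split name is an assumption.

```python
from datasets import load_dataset

# Dataset id taken from the `source` column; the "train" split name is an assumption.
ds = load_dataset("rasdani/github-patches", split="train")

record = ds[0]
print(record["problem_id"])         # e.g. "gh_patches_debug_7002"
print(record["in_source_id"])       # e.g. "streamlit__streamlit-7050"
print(record["prompt"][:200])       # issue text plus the relevant file contents
print(record["golden_diff"][:200])  # reference patch in `git diff` format
```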
gh_patches_debug_29309 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1131 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEAT] add tests for more model types
we should test any model with predict and predict_proba functions, including the common ones:
Scikitlearn (also pipelines)
CatBoost
LGBM
XGBoost
Custom binary classification model that implements the predict_proba and predict functions
Custom multiclass classification model that implements the predict_proba and predict functions
Custom regression model that implements the predict function
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/tabular/checks/performance/confusion_matrix_report.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The confusion_matrix_report check module."""
12 import pandas as pd
13 import sklearn
14 import plotly.express as px
15
16 from deepchecks.core import CheckResult
17 from deepchecks.tabular import Context, SingleDatasetCheck
18
19
20 __all__ = ['ConfusionMatrixReport']
21
22
23 class ConfusionMatrixReport(SingleDatasetCheck):
24 """Calculate the confusion matrix of the model on the given dataset."""
25
26 def run_logic(self, context: Context, dataset_type: str = 'train') -> CheckResult:
27 """Run check.
28
29 Returns
30 -------
31 CheckResult
32 value is numpy array of the confusion matrix, displays the confusion matrix
33
34 Raises
35 ------
36 DeepchecksValueError
37 If the data is not a Dataset instance with a label
38 """
39 if dataset_type == 'train':
40 dataset = context.train
41 else:
42 dataset = context.test
43
44 context.assert_classification_task()
45 ds_y = dataset.label_col
46 ds_x = dataset.features_columns
47 model = context.model
48
49 y_pred = model.predict(ds_x)
50 total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))
51 confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
52
53 # Figure
54 fig = px.imshow(confusion_matrix, x=total_classes, y=total_classes, text_auto=True)
55 fig.update_layout(width=600, height=600)
56 fig.update_xaxes(title='Predicted Value', type='category')
57 fig.update_yaxes(title='True value', type='category')
58
59 return CheckResult(confusion_matrix, display=fig)
60
```
Path: `deepchecks/tabular/checks/overview/model_info.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module contains model_info check."""
12 import warnings
13
14 import pandas as pd
15
16 from deepchecks.tabular import Context, ModelOnlyCheck
17 from deepchecks.core import CheckResult
18 from deepchecks.utils.model import get_model_of_pipeline
19
20
21 __all__ = ['ModelInfo']
22
23
24 class ModelInfo(ModelOnlyCheck):
25 """Summarize given model parameters."""
26
27 def run_logic(self, context: Context) -> CheckResult:
28 """Run check.
29
30 Returns
31 -------
32 CheckResult
33 value is dictionary in format {type: <model_type>, params: <model_params_dict>}
34 """
35 model = context.model
36 estimator = get_model_of_pipeline(model)
37 model_type = type(estimator).__name__
38 model_params = estimator.get_params()
39 default_params = type(estimator)().get_params()
40
41 # Create dataframe to show
42 model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
43 model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))
44
45 def highlight_not_default(data):
46 n = len(data)
47 if data['Value'] != data['Default']:
48 return n * ['background-color: lightblue']
49 else:
50 return n * ['']
51 with warnings.catch_warnings():
52 warnings.simplefilter(action='ignore', category=FutureWarning)
53 model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()
54
55 value = {'type': model_type, 'params': model_params}
56 footnote = '<p style="font-size:0.7em"><i>Colored rows are parameters with non-default values</i></p>'
57 display = [f'Model Type: {model_type}', model_param_df, footnote]
58
59 return CheckResult(value, header='Model Info', display=display)
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/tabular/checks/overview/model_info.py b/deepchecks/tabular/checks/overview/model_info.py
--- a/deepchecks/tabular/checks/overview/model_info.py
+++ b/deepchecks/tabular/checks/overview/model_info.py
@@ -35,8 +35,12 @@
model = context.model
estimator = get_model_of_pipeline(model)
model_type = type(estimator).__name__
- model_params = estimator.get_params()
- default_params = type(estimator)().get_params()
+ try:
+ model_params = estimator.get_params()
+ default_params = type(estimator)().get_params()
+ except AttributeError:
+ model_params = {}
+ default_params = {}
# Create dataframe to show
model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])
diff --git a/deepchecks/tabular/checks/performance/confusion_matrix_report.py b/deepchecks/tabular/checks/performance/confusion_matrix_report.py
--- a/deepchecks/tabular/checks/performance/confusion_matrix_report.py
+++ b/deepchecks/tabular/checks/performance/confusion_matrix_report.py
@@ -9,6 +9,7 @@
# ----------------------------------------------------------------------------
#
"""The confusion_matrix_report check module."""
+import numpy as np
import pandas as pd
import sklearn
import plotly.express as px
@@ -46,7 +47,7 @@
ds_x = dataset.features_columns
model = context.model
- y_pred = model.predict(ds_x)
+ y_pred = np.array(model.predict(ds_x)).reshape(len(ds_y), )
total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))
confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)
| {"golden_diff": "diff --git a/deepchecks/tabular/checks/overview/model_info.py b/deepchecks/tabular/checks/overview/model_info.py\n--- a/deepchecks/tabular/checks/overview/model_info.py\n+++ b/deepchecks/tabular/checks/overview/model_info.py\n@@ -35,8 +35,12 @@\n model = context.model\n estimator = get_model_of_pipeline(model)\n model_type = type(estimator).__name__\n- model_params = estimator.get_params()\n- default_params = type(estimator)().get_params()\n+ try:\n+ model_params = estimator.get_params()\n+ default_params = type(estimator)().get_params()\n+ except AttributeError:\n+ model_params = {}\n+ default_params = {}\n \n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\ndiff --git a/deepchecks/tabular/checks/performance/confusion_matrix_report.py b/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n--- a/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n+++ b/deepchecks/tabular/checks/performance/confusion_matrix_report.py\n@@ -9,6 +9,7 @@\n # ----------------------------------------------------------------------------\n #\n \"\"\"The confusion_matrix_report check module.\"\"\"\n+import numpy as np\n import pandas as pd\n import sklearn\n import plotly.express as px\n@@ -46,7 +47,7 @@\n ds_x = dataset.features_columns\n model = context.model\n \n- y_pred = model.predict(ds_x)\n+ y_pred = np.array(model.predict(ds_x)).reshape(len(ds_y), )\n total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n", "issue": "[FEAT] add tests for more model types\nwe should test any model with predict and predict_proba functions, including the common ones:\r\n\r\nScikitlearn (also pipelines)\r\nCatBoost\r\nLGBM\r\nXGBoost\r\nCustom binary classification model that implements the predict_proba and predict functions\r\nCustom multiclass classification model that implements the predict_proba and predict functions\r\nCustom regression model that implements the predict function\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport pandas as pd\nimport sklearn\nimport plotly.express as px\n\nfrom deepchecks.core import CheckResult\nfrom deepchecks.tabular import Context, SingleDatasetCheck\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run_logic(self, context: Context, dataset_type: str = 'train') -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises\n ------\n DeepchecksValueError\n If the data is not a Dataset instance with a label\n \"\"\"\n if dataset_type == 'train':\n dataset = context.train\n else:\n dataset = context.test\n\n context.assert_classification_task()\n ds_y = dataset.label_col\n ds_x = dataset.features_columns\n model = context.model\n\n y_pred = model.predict(ds_x)\n total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n # Figure\n fig = px.imshow(confusion_matrix, x=total_classes, y=total_classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/tabular/checks/performance/confusion_matrix_report.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module contains model_info check.\"\"\"\nimport warnings\n\nimport pandas as pd\n\nfrom deepchecks.tabular import Context, ModelOnlyCheck\nfrom deepchecks.core import CheckResult\nfrom deepchecks.utils.model import get_model_of_pipeline\n\n\n__all__ = ['ModelInfo']\n\n\nclass ModelInfo(ModelOnlyCheck):\n \"\"\"Summarize given model parameters.\"\"\"\n\n def run_logic(self, context: Context) -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is dictionary in format {type: <model_type>, params: <model_params_dict>}\n \"\"\"\n model = context.model\n estimator = get_model_of_pipeline(model)\n model_type = type(estimator).__name__\n model_params = estimator.get_params()\n default_params = type(estimator)().get_params()\n\n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\n model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))\n\n def highlight_not_default(data):\n n = len(data)\n if data['Value'] != data['Default']:\n return n * ['background-color: lightblue']\n else:\n return n * ['']\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()\n\n value = {'type': model_type, 'params': model_params}\n footnote = '<p style=\"font-size:0.7em\"><i>Colored rows are parameters with non-default values</i></p>'\n display = [f'Model Type: {model_type}', model_param_df, footnote]\n\n return CheckResult(value, header='Model Info', display=display)\n", "path": "deepchecks/tabular/checks/overview/model_info.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The confusion_matrix_report check module.\"\"\"\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport plotly.express as px\n\nfrom deepchecks.core import CheckResult\nfrom deepchecks.tabular import Context, SingleDatasetCheck\n\n\n__all__ = ['ConfusionMatrixReport']\n\n\nclass ConfusionMatrixReport(SingleDatasetCheck):\n \"\"\"Calculate the confusion matrix of the model on the given dataset.\"\"\"\n\n def run_logic(self, context: Context, dataset_type: str = 'train') -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is numpy array of the confusion matrix, displays the confusion matrix\n\n Raises\n ------\n DeepchecksValueError\n If the data is not a Dataset instance with a label\n \"\"\"\n if dataset_type == 'train':\n dataset = context.train\n else:\n dataset = context.test\n\n context.assert_classification_task()\n ds_y = dataset.label_col\n ds_x = dataset.features_columns\n model = context.model\n\n y_pred = np.array(model.predict(ds_x)).reshape(len(ds_y), )\n total_classes = sorted(list(set(pd.concat([ds_y, pd.Series(y_pred)]).to_list())))\n confusion_matrix = sklearn.metrics.confusion_matrix(ds_y, y_pred)\n\n # Figure\n fig = px.imshow(confusion_matrix, x=total_classes, y=total_classes, text_auto=True)\n fig.update_layout(width=600, height=600)\n fig.update_xaxes(title='Predicted Value', type='category')\n fig.update_yaxes(title='True value', type='category')\n\n return CheckResult(confusion_matrix, display=fig)\n", "path": "deepchecks/tabular/checks/performance/confusion_matrix_report.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module contains model_info check.\"\"\"\nimport warnings\n\nimport pandas as pd\n\nfrom deepchecks.tabular import Context, ModelOnlyCheck\nfrom deepchecks.core import CheckResult\nfrom deepchecks.utils.model import get_model_of_pipeline\n\n\n__all__ = ['ModelInfo']\n\n\nclass ModelInfo(ModelOnlyCheck):\n \"\"\"Summarize given model parameters.\"\"\"\n\n def run_logic(self, context: Context) -> CheckResult:\n \"\"\"Run check.\n\n Returns\n -------\n CheckResult\n value is dictionary in format {type: <model_type>, params: <model_params_dict>}\n \"\"\"\n model = context.model\n estimator = get_model_of_pipeline(model)\n model_type = type(estimator).__name__\n try:\n model_params = estimator.get_params()\n default_params = type(estimator)().get_params()\n except AttributeError:\n model_params = {}\n default_params = {}\n\n # Create dataframe to show\n model_param_df = pd.DataFrame(model_params.items(), columns=['Parameter', 'Value'])\n model_param_df['Default'] = model_param_df['Parameter'].map(lambda x: default_params.get(x, ''))\n\n def highlight_not_default(data):\n n = len(data)\n if data['Value'] != data['Default']:\n return n * ['background-color: lightblue']\n else:\n return n * ['']\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n model_param_df = model_param_df.style.apply(highlight_not_default, axis=1).hide_index()\n\n value = {'type': model_type, 'params': model_params}\n footnote = '<p style=\"font-size:0.7em\"><i>Colored rows are parameters with non-default values</i></p>'\n display = [f'Model Type: {model_type}', model_param_df, footnote]\n\n return CheckResult(value, header='Model Info', display=display)\n", "path": "deepchecks/tabular/checks/overview/model_info.py"}]} | 1,532 | 391 |
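
The `verification_info` cell of each row is a JSON string that repeats the record in structured form. Below is a minimal sketch of unpacking it, assuming `record` is one row as in the loading sketch above; the key names (`golden_diff`, `issue`, `before_files`, `after_files`) and the `content`/`path` fields are taken from the dump itself.

```python
import json

info = json.loads(record["verification_info"])
print(sorted(info.keys()))  # ['after_files', 'before_files', 'golden_diff', 'issue']

for f in info["before_files"]:
    print(f["path"])          # e.g. "deepchecks/tabular/checks/overview/model_info.py"
    print(f["content"][:80])  # file contents before the golden patch is applied
```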
gh_patches_debug_5115 | rasdani/github-patches | git_diff | magenta__magenta-1347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Melody_rnn Create Dataset get_pipeline missing arg
`melody_rnn_create_dataset --config=basic_rnn --input=notesequences.tfrecord --output_dir=sequence_examples` fails with this error:
```
File "/Users/ericcacciavillani/anaconda3/envs/Magenta_Testing/lib/python3.6/site-packages/magenta/pipelines/note_sequence_pipelines.py", line 184, in transform
for amount in self._transposition_range:
TypeError: 'float' object is not iterable
```
We're trying to train our own melody rnn and we get this error, also we don't see a mention of transposition range in the melody rnn readme?
Looks like in `melody_rnn_create_dataset.py` at line 52 its calling `melody_rnn_pipeline.get_pipeline(config, FLAGS.eval_ratio)`
but `get_pipeline` in `melody_rnn_pipeline.py` takes 3 args `config`, `transposition_range`, and `eval ratio` so it looks like transposition_range is being set as eval_ratio.
Working with @robindiddams on this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/models/melody_rnn/melody_rnn_create_dataset.py`
Content:
```
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Create a dataset of SequenceExamples from NoteSequence protos.
15
16 This script will extract melodies from NoteSequence protos and save them to
17 TensorFlow's SequenceExample protos for input to the melody RNN models.
18 """
19
20 import os
21
22 import tensorflow as tf
23
24 from magenta.models.melody_rnn import melody_rnn_config_flags
25 from magenta.models.melody_rnn import melody_rnn_pipeline
26 from magenta.pipelines import pipeline
27
28 flags = tf.app.flags
29 FLAGS = tf.app.flags.FLAGS
30 flags.DEFINE_string(
31 'input', None,
32 'TFRecord to read NoteSequence protos from.')
33 flags.DEFINE_string(
34 'output_dir', None,
35 'Directory to write training and eval TFRecord files. The TFRecord files '
36 'are populated with SequenceExample protos.')
37 flags.DEFINE_float(
38 'eval_ratio', 0.1,
39 'Fraction of input to set aside for eval set. Partition is randomly '
40 'selected.')
41 flags.DEFINE_string(
42 'log', 'INFO',
43 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
44 'or FATAL.')
45
46
47 def main(unused_argv):
48 tf.logging.set_verbosity(FLAGS.log)
49
50 config = melody_rnn_config_flags.config_from_flags()
51 pipeline_instance = melody_rnn_pipeline.get_pipeline(
52 config, FLAGS.eval_ratio)
53
54 FLAGS.input = os.path.expanduser(FLAGS.input)
55 FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
56 pipeline.run_pipeline_serial(
57 pipeline_instance,
58 pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
59 FLAGS.output_dir)
60
61
62 def console_entry_point():
63 tf.app.run(main)
64
65
66 if __name__ == '__main__':
67 console_entry_point()
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/magenta/models/melody_rnn/melody_rnn_create_dataset.py b/magenta/models/melody_rnn/melody_rnn_create_dataset.py
--- a/magenta/models/melody_rnn/melody_rnn_create_dataset.py
+++ b/magenta/models/melody_rnn/melody_rnn_create_dataset.py
@@ -49,7 +49,7 @@
config = melody_rnn_config_flags.config_from_flags()
pipeline_instance = melody_rnn_pipeline.get_pipeline(
- config, FLAGS.eval_ratio)
+ config, eval_ratio=FLAGS.eval_ratio)
FLAGS.input = os.path.expanduser(FLAGS.input)
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
| {"golden_diff": "diff --git a/magenta/models/melody_rnn/melody_rnn_create_dataset.py b/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n--- a/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n+++ b/magenta/models/melody_rnn/melody_rnn_create_dataset.py\n@@ -49,7 +49,7 @@\n \n config = melody_rnn_config_flags.config_from_flags()\n pipeline_instance = melody_rnn_pipeline.get_pipeline(\n- config, FLAGS.eval_ratio)\n+ config, eval_ratio=FLAGS.eval_ratio)\n \n FLAGS.input = os.path.expanduser(FLAGS.input)\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n", "issue": "[Bug] Melody_rnn Create Dataset get_pipeline missing arg\n`melody_rnn_create_dataset --config=basic_rnn --input=notesequences.tfrecord --output_dir=sequence_examples` fails with this error:\r\n```\r\nFile \"/Users/ericcacciavillani/anaconda3/envs/Magenta_Testing/lib/python3.6/site-packages/magenta/pipelines/note_sequence_pipelines.py\", line 184, in transform\r\n for amount in self._transposition_range:\r\nTypeError: 'float' object is not iterable\r\n```\r\nWe're trying to train our own melody rnn and we get this error, also we don't see a mention of transposition range in the melody rnn readme?\r\n\r\nLooks like in `melody_rnn_create_dataset.py` at line 52 its calling `melody_rnn_pipeline.get_pipeline(config, FLAGS.eval_ratio)`\r\nbut `get_pipeline` in `melody_rnn_pipeline.py` takes 3 args `config`, `transposition_range`, and `eval ratio` so it looks like transposition_range is being set as eval_ratio.\r\n\r\nWorking with @robindiddams on this.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create a dataset of SequenceExamples from NoteSequence protos.\n\nThis script will extract melodies from NoteSequence protos and save them to\nTensorFlow's SequenceExample protos for input to the melody RNN models.\n\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom magenta.models.melody_rnn import melody_rnn_config_flags\nfrom magenta.models.melody_rnn import melody_rnn_pipeline\nfrom magenta.pipelines import pipeline\n\nflags = tf.app.flags\nFLAGS = tf.app.flags.FLAGS\nflags.DEFINE_string(\n 'input', None,\n 'TFRecord to read NoteSequence protos from.')\nflags.DEFINE_string(\n 'output_dir', None,\n 'Directory to write training and eval TFRecord files. The TFRecord files '\n 'are populated with SequenceExample protos.')\nflags.DEFINE_float(\n 'eval_ratio', 0.1,\n 'Fraction of input to set aside for eval set. 
Partition is randomly '\n 'selected.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n\n config = melody_rnn_config_flags.config_from_flags()\n pipeline_instance = melody_rnn_pipeline.get_pipeline(\n config, FLAGS.eval_ratio)\n\n FLAGS.input = os.path.expanduser(FLAGS.input)\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n pipeline.run_pipeline_serial(\n pipeline_instance,\n pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),\n FLAGS.output_dir)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/melody_rnn/melody_rnn_create_dataset.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create a dataset of SequenceExamples from NoteSequence protos.\n\nThis script will extract melodies from NoteSequence protos and save them to\nTensorFlow's SequenceExample protos for input to the melody RNN models.\n\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom magenta.models.melody_rnn import melody_rnn_config_flags\nfrom magenta.models.melody_rnn import melody_rnn_pipeline\nfrom magenta.pipelines import pipeline\n\nflags = tf.app.flags\nFLAGS = tf.app.flags.FLAGS\nflags.DEFINE_string(\n 'input', None,\n 'TFRecord to read NoteSequence protos from.')\nflags.DEFINE_string(\n 'output_dir', None,\n 'Directory to write training and eval TFRecord files. The TFRecord files '\n 'are populated with SequenceExample protos.')\nflags.DEFINE_float(\n 'eval_ratio', 0.1,\n 'Fraction of input to set aside for eval set. Partition is randomly '\n 'selected.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n\n config = melody_rnn_config_flags.config_from_flags()\n pipeline_instance = melody_rnn_pipeline.get_pipeline(\n config, eval_ratio=FLAGS.eval_ratio)\n\n FLAGS.input = os.path.expanduser(FLAGS.input)\n FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)\n pipeline.run_pipeline_serial(\n pipeline_instance,\n pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),\n FLAGS.output_dir)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/melody_rnn/melody_rnn_create_dataset.py"}]} | 1,127 | 146 |
gh_patches_debug_22874 | rasdani/github-patches | git_diff | chainer__chainer-719 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unreadable error message appears when `nvcc` command is not found
Only "No suche file or directory" is shown in such case.
related to #698
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/cuda/compiler.py`
Content:
```
1 import hashlib
2 import os
3 import re
4 import subprocess
5 import sys
6 import tempfile
7
8 import filelock
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13
14
15 def _get_arch():
16 cc = device.Device().compute_capability
17 return 'sm_%s' % cc
18
19
20 class TemporaryDirectory(object):
21
22 def __enter__(self):
23 self.path = tempfile.mkdtemp()
24 return self.path
25
26 def __exit__(self, exc_type, exc_value, traceback):
27 if exc_value is not None:
28 return
29
30 for name in os.listdir(self.path):
31 os.unlink(os.path.join(self.path, name))
32 os.rmdir(self.path)
33
34
35 def nvcc(source, options=(), arch=None):
36 if not arch:
37 arch = _get_arch()
38 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)
39
40 with TemporaryDirectory() as root_dir:
41 path = os.path.join(root_dir, 'kern')
42 cu_path = '%s.cu' % path
43 cubin_path = '%s.cubin' % path
44
45 with open(cu_path, 'w') as cu_file:
46 cu_file.write(source)
47
48 cmd.append(cu_path)
49 subprocess.check_output(cmd, cwd=root_dir)
50
51 with open(cubin_path, 'rb') as bin_file:
52 return bin_file.read()
53
54
55 def preprocess(source, options=()):
56 cmd = ['nvcc', '--preprocess'] + list(options)
57 with TemporaryDirectory() as root_dir:
58 path = os.path.join(root_dir, 'kern')
59 cu_path = '%s.cu' % path
60
61 with open(cu_path, 'w') as cu_file:
62 cu_file.write(source)
63
64 cmd.append(cu_path)
65 pp_src = subprocess.check_output(cmd, cwd=root_dir)
66
67 if isinstance(pp_src, six.binary_type):
68 pp_src = pp_src.decode('utf-8')
69 return re.sub('(?m)^#.*$', '', pp_src)
70
71
72 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
73
74
75 def get_cache_dir():
76 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
77
78
79 _empty_file_preprocess_cache = {}
80
81
82 def compile_with_cache(source, options=(), arch=None, cache_dir=None):
83 global _empty_file_preprocess_cache
84 if cache_dir is None:
85 cache_dir = get_cache_dir()
86 if arch is None:
87 arch = _get_arch()
88
89 if 'win32' == sys.platform:
90 options += ('-Xcompiler', '/wd 4819')
91 if sys.maxsize == 9223372036854775807:
92 options += '-m64',
93 elif sys.maxsize == 2147483647:
94 options += '-m32',
95
96 env = (arch, options)
97 if '#include' in source:
98 pp_src = '%s %s' % (env, preprocess(source, options))
99 else:
100 base = _empty_file_preprocess_cache.get(env, None)
101 if base is None:
102 base = _empty_file_preprocess_cache[env] = preprocess('', options)
103 pp_src = '%s %s %s' % (env, base, source)
104
105 if isinstance(pp_src, six.text_type):
106 pp_src = pp_src.encode('utf-8')
107 name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()
108
109 mod = function.Module()
110
111 if not os.path.exists(cache_dir):
112 os.makedirs(cache_dir)
113
114 lock_path = os.path.join(cache_dir, 'lock_file.lock')
115
116 path = os.path.join(cache_dir, name)
117 with filelock.FileLock(lock_path) as lock:
118 if os.path.exists(path):
119 with open(path, 'rb') as file:
120 cubin = file.read()
121 mod.load(cubin)
122 else:
123 lock.release()
124 cubin = nvcc(source, options, arch)
125 mod.load(cubin)
126 lock.acquire()
127 with open(path, 'wb') as cubin_file:
128 cubin_file.write(cubin)
129
130 return mod
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -32,6 +32,15 @@
os.rmdir(self.path)
+def _run_nvcc(cmd, cwd):
+ try:
+ return subprocess.check_output(cmd, cwd=cwd)
+ except OSError as e:
+ trace = sys.exc_info()[2]
+ msg = 'Failed to run `nvcc` command: ' + str(e)
+ six.reraise(OSError, msg, trace)
+
+
def nvcc(source, options=(), arch=None):
if not arch:
arch = _get_arch()
@@ -46,7 +55,7 @@
cu_file.write(source)
cmd.append(cu_path)
- subprocess.check_output(cmd, cwd=root_dir)
+ _run_nvcc(cmd, root_dir)
with open(cubin_path, 'rb') as bin_file:
return bin_file.read()
@@ -62,7 +71,7 @@
cu_file.write(source)
cmd.append(cu_path)
- pp_src = subprocess.check_output(cmd, cwd=root_dir)
+ pp_src = _run_nvcc(cmd, root_dir)
if isinstance(pp_src, six.binary_type):
pp_src = pp_src.decode('utf-8')
| {"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -32,6 +32,15 @@\n os.rmdir(self.path)\n \n \n+def _run_nvcc(cmd, cwd):\n+ try:\n+ return subprocess.check_output(cmd, cwd=cwd)\n+ except OSError as e:\n+ trace = sys.exc_info()[2]\n+ msg = 'Failed to run `nvcc` command: ' + str(e)\n+ six.reraise(OSError, msg, trace)\n+\n+\n def nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n@@ -46,7 +55,7 @@\n cu_file.write(source)\n \n cmd.append(cu_path)\n- subprocess.check_output(cmd, cwd=root_dir)\n+ _run_nvcc(cmd, root_dir)\n \n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n@@ -62,7 +71,7 @@\n cu_file.write(source)\n \n cmd.append(cu_path)\n- pp_src = subprocess.check_output(cmd, cwd=root_dir)\n+ pp_src = _run_nvcc(cmd, root_dir)\n \n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n", "issue": "Unreadable error message appears when `nvcc` command is not found\nOnly \"No suche file or directory\" is shown in such case.\n\nrelated to #698\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n subprocess.check_output(cmd, cwd=root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = subprocess.check_output(cmd, cwd=root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, 
source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}], "after_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd)\n except OSError as e:\n trace = sys.exc_info()[2]\n msg = 'Failed to run `nvcc` command: ' + str(e)\n six.reraise(OSError, msg, trace)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n 
lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]} | 1,510 | 304 |
gh_patches_debug_27343 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update-locale triggers CKV_DOCKER_5
**Describe the issue**
CKV_DOCKER_5 (Ensure update instructions are not use alone in the Dockerfile) fails in anything that has `update` in it. From reading the source, it. seems that CKV_DOCKER_5 is geared towards `apt-get update` and `apt-get install` which, from the code, are cancelling each other out so the `update_cnt` variable remains 0. I have other `update` command like `update-locale`. I'm not sure if it's part of the issue in my Dockerfile that I need to deal or I could just ignore the failure message.
**Examples**
```
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
dpkg-reconfigure --frontend noninteractive locales && \
update-locale LANG=en_US.UTF-8
```
**Version (please complete the following information):**
- Checkov Version 2.2.229
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/UpdateNotAlone.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
7
8 if TYPE_CHECKING:
9 from dockerfile_parse.parser import _Instruction
10
11 install_commands = (
12 "install",
13 "source-install",
14 "reinstall",
15 "groupinstall",
16 "localinstall",
17 "add",
18 )
19 update_commands = (
20 "update",
21 "--update",
22 )
23
24
25 class UpdateNotAlone(BaseDockerfileCheck):
26 def __init__(self) -> None:
27 name = "Ensure update instructions are not use alone in the Dockerfile"
28 id = "CKV_DOCKER_5"
29 supported_instructions = ("RUN",)
30 categories = (CheckCategories.APPLICATION_SECURITY,)
31 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
32
33 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
34 update_instructions = []
35 update_cnt = 0
36 i = 0
37 for instruction in conf:
38 content = instruction["content"]
39 if instruction["instruction"] in self.supported_instructions:
40
41 if any(x in content for x in update_commands):
42 update_cnt = update_cnt + 1
43 update_instructions.append(i)
44 if any(x in content for x in install_commands):
45 update_cnt = update_cnt - 1
46 i = i + 1
47
48 if update_cnt <= 0:
49 return CheckResult.PASSED, None
50 output = []
51 for i in update_instructions:
52 output.append(conf[i])
53
54 return CheckResult.FAILED, output
55
56
57 check = UpdateNotAlone()
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/checks/UpdateNotAlone.py b/checkov/dockerfile/checks/UpdateNotAlone.py
--- a/checkov/dockerfile/checks/UpdateNotAlone.py
+++ b/checkov/dockerfile/checks/UpdateNotAlone.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import re
from typing import TYPE_CHECKING
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -8,6 +9,8 @@
if TYPE_CHECKING:
from dockerfile_parse.parser import _Instruction
+UPDATE_COMMANDS_PATTERN = re.compile(r"\s+(?:--)?update(?!\S)")
+
install_commands = (
"install",
"source-install",
@@ -15,10 +18,7 @@
"groupinstall",
"localinstall",
"add",
-)
-update_commands = (
- "update",
- "--update",
+ "upgrade"
)
@@ -38,7 +38,7 @@
content = instruction["content"]
if instruction["instruction"] in self.supported_instructions:
- if any(x in content for x in update_commands):
+ if re.search(UPDATE_COMMANDS_PATTERN, content):
update_cnt = update_cnt + 1
update_instructions.append(i)
if any(x in content for x in install_commands):
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/UpdateNotAlone.py b/checkov/dockerfile/checks/UpdateNotAlone.py\n--- a/checkov/dockerfile/checks/UpdateNotAlone.py\n+++ b/checkov/dockerfile/checks/UpdateNotAlone.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import re\n from typing import TYPE_CHECKING\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n@@ -8,6 +9,8 @@\n if TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n \n+UPDATE_COMMANDS_PATTERN = re.compile(r\"\\s+(?:--)?update(?!\\S)\")\n+\n install_commands = (\n \"install\",\n \"source-install\",\n@@ -15,10 +18,7 @@\n \"groupinstall\",\n \"localinstall\",\n \"add\",\n-)\n-update_commands = (\n- \"update\",\n- \"--update\",\n+ \"upgrade\"\n )\n \n \n@@ -38,7 +38,7 @@\n content = instruction[\"content\"]\n if instruction[\"instruction\"] in self.supported_instructions:\n \n- if any(x in content for x in update_commands):\n+ if re.search(UPDATE_COMMANDS_PATTERN, content):\n update_cnt = update_cnt + 1\n update_instructions.append(i)\n if any(x in content for x in install_commands):\n", "issue": "update-locale triggers CKV_DOCKER_5\n**Describe the issue**\r\nCKV_DOCKER_5 (Ensure update instructions are not use alone in the Dockerfile) fails in anything that has `update` in it. From reading the source, it. seems that CKV_DOCKER_5 is geared towards `apt-get update` and `apt-get install` which, from the code, are cancelling each other out so the `update_cnt` variable remains 0. I have other `update` command like `update-locale`. I'm not sure if it's part of the issue in my Dockerfile that I need to deal or I could just ignore the failure message.\r\n\r\n**Examples**\r\n```\r\nRUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \\\r\n dpkg-reconfigure --frontend noninteractive locales && \\\r\n update-locale LANG=en_US.UTF-8\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.229\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\ninstall_commands = (\n \"install\",\n \"source-install\",\n \"reinstall\",\n \"groupinstall\",\n \"localinstall\",\n \"add\",\n)\nupdate_commands = (\n \"update\",\n \"--update\",\n)\n\n\nclass UpdateNotAlone(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure update instructions are not use alone in the Dockerfile\"\n id = \"CKV_DOCKER_5\"\n supported_instructions = (\"RUN\",)\n categories = (CheckCategories.APPLICATION_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n update_instructions = []\n update_cnt = 0\n i = 0\n for instruction in conf:\n content = instruction[\"content\"]\n if instruction[\"instruction\"] in self.supported_instructions:\n\n if any(x in content for x in update_commands):\n update_cnt = update_cnt + 1\n update_instructions.append(i)\n if any(x in content for x in install_commands):\n update_cnt = update_cnt - 1\n i = i + 1\n\n if update_cnt <= 0:\n return CheckResult.PASSED, None\n output = []\n for i in update_instructions:\n output.append(conf[i])\n\n return CheckResult.FAILED, 
output\n\n\ncheck = UpdateNotAlone()\n", "path": "checkov/dockerfile/checks/UpdateNotAlone.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\nUPDATE_COMMANDS_PATTERN = re.compile(r\"\\s+(?:--)?update(?!\\S)\")\n\ninstall_commands = (\n \"install\",\n \"source-install\",\n \"reinstall\",\n \"groupinstall\",\n \"localinstall\",\n \"add\",\n \"upgrade\"\n)\n\n\nclass UpdateNotAlone(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure update instructions are not use alone in the Dockerfile\"\n id = \"CKV_DOCKER_5\"\n supported_instructions = (\"RUN\",)\n categories = (CheckCategories.APPLICATION_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n update_instructions = []\n update_cnt = 0\n i = 0\n for instruction in conf:\n content = instruction[\"content\"]\n if instruction[\"instruction\"] in self.supported_instructions:\n\n if re.search(UPDATE_COMMANDS_PATTERN, content):\n update_cnt = update_cnt + 1\n update_instructions.append(i)\n if any(x in content for x in install_commands):\n update_cnt = update_cnt - 1\n i = i + 1\n\n if update_cnt <= 0:\n return CheckResult.PASSED, None\n output = []\n for i in update_instructions:\n output.append(conf[i])\n\n return CheckResult.FAILED, output\n\n\ncheck = UpdateNotAlone()\n", "path": "checkov/dockerfile/checks/UpdateNotAlone.py"}]} | 984 | 296 |
gh_patches_debug_57147 | rasdani/github-patches | git_diff | pymeasure__pymeasure-433 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pyvisa no longer support ask, replace with query
In resources.py
`idn = res.ask('*idn?')[:-1]`
Should be:
`idn = res.query('*idn?')[:-1]`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymeasure/instruments/resources.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2021 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import pyvisa
26
27
28 def list_resources():
29 """
30 Prints the available resources, and returns a list of VISA resource names
31
32 .. code-block:: python
33
34 resources = list_resources()
35 #prints (e.g.)
36 #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******
37 #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****
38 dmm = Agilent34410(resources[0])
39
40 """
41 rm = pyvisa.ResourceManager()
42 instrs = rm.list_resources()
43 for n, instr in enumerate(instrs):
44 # trying to catch errors in comunication
45 try:
46 res = rm.open_resource(instr)
47 # try to avoid errors from *idn?
48 try:
49 # noinspection PyUnresolvedReferences
50 idn = res.ask('*idn?')[:-1]
51 except pyvisa.Error:
52 idn = "Not known"
53 finally:
54 res.close()
55 print(n, ":", instr, ":", idn)
56 except pyvisa.VisaIOError as e:
57 print(n, ":", instr, ":", "Visa IO Error: check connections")
58 print(e)
59 rm.close()
60 return instrs
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py
--- a/pymeasure/instruments/resources.py
+++ b/pymeasure/instruments/resources.py
@@ -47,7 +47,7 @@
# try to avoid errors from *idn?
try:
# noinspection PyUnresolvedReferences
- idn = res.ask('*idn?')[:-1]
+ idn = res.query('*idn?')[:-1]
except pyvisa.Error:
idn = "Not known"
finally:
| {"golden_diff": "diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py\n--- a/pymeasure/instruments/resources.py\n+++ b/pymeasure/instruments/resources.py\n@@ -47,7 +47,7 @@\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n- idn = res.ask('*idn?')[:-1]\n+ idn = res.query('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n", "issue": "Pyvisa no longer support ask, replace with query\nIn resources.py\r\n`idn = res.ask('*idn?')[:-1]`\r\nShould be:\r\n`idn = res.query('*idn?')[:-1]`\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n \n .. 
code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.ask('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2021 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n \n .. code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.query('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}]} | 960 | 122 |
gh_patches_debug_56463 | rasdani/github-patches | git_diff | acl-org__acl-anthology-3109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reingestion Request: ROCLING (10-20-2023)
### General information about this request
- [X] I confirm that I have read the [Information for Submitters](https://aclanthology.org/info/contrib/).
- [ ] I am submitting a request for a **new venue** that does not exist in the ACL Anthology yet.
### Venue Identifier
ROCLING
### Volume Title
Proceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)
### Venue Name (only if you are submitting a new venue)
Conference on Computational Linguistics and Speech Processing
### Venue Website (only if you are submitting a new venue)
https://rocling2023.github.io/
### Date of Publication
2023-10-20
### Supporting Information
Dear Anthology Director,
I'm Hou-Chiang Tseng who the publication chair of the 35th annual Conference on Computational Linguistics and Speech Processing (ROCLING 2023).
The conference website: https://rocling2023.github.io/
We want to register the ROCLING 2023 to ACL Anthology. Please see following two items:
(a) the complete list of volumes: please see the attached file,
and (b) all the new material can be downloaded from the following URL:
https://drive.google.com/drive/folders/1dxt_gYlUvmuLiNETgDRg9cGpiJxVGwbD?usp=sharing
If there is any question, please let me know.
[Anthology.Volume_ROCLING.2023.xlsx](https://github.com/acl-org/acl-anthology/files/14318157/Anthology.Volume_ROCLING.2023.xlsx)
Best regards,
Dr. Hou-Chiang Tseng
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/volumes_from_diff.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """
4 Takes a list of XML files on STDIN, and prints all the volumes
5 within each of those files. e.g.,
6
7 git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH
8
9 Used to find the list of volumes to generate previews for.
10 """
11
12 import sys
13 import argparse
14 import lxml.etree as etree
15
16
17 parser = argparse.ArgumentParser()
18 parser.add_argument("url_root")
19 args = parser.parse_args()
20
21 volumes = []
22 for filepath in sys.stdin:
23 if filepath.startswith("python/") or not filepath.endswith(".xml"):
24 continue
25
26 try:
27 tree = etree.parse(filepath.rstrip())
28 except Exception:
29 continue
30
31 root = tree.getroot()
32 collection_id = root.attrib["id"]
33 for volume in root.findall("./volume"):
34 volume_name = volume.attrib["id"]
35 volume_id = f"{collection_id}-{volume_name}"
36 volumes.append(f"[{volume_id}]({args.url_root}/{volume_id})")
37
38 if len(volumes) > 50:
39 volumes = volumes[0:50] + [f"(plus {len(volumes)-50} more...)"]
40
41 print(", ".join(volumes))
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/volumes_from_diff.py b/bin/volumes_from_diff.py
--- a/bin/volumes_from_diff.py
+++ b/bin/volumes_from_diff.py
@@ -20,6 +20,7 @@
volumes = []
for filepath in sys.stdin:
+ filepath = filepath.rstrip()
if filepath.startswith("python/") or not filepath.endswith(".xml"):
continue
| {"golden_diff": "diff --git a/bin/volumes_from_diff.py b/bin/volumes_from_diff.py\n--- a/bin/volumes_from_diff.py\n+++ b/bin/volumes_from_diff.py\n@@ -20,6 +20,7 @@\n \n volumes = []\n for filepath in sys.stdin:\n+ filepath = filepath.rstrip()\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n", "issue": "Reingestion Request: ROCLING (10-20-2023)\n### General information about this request\n\n- [X] I confirm that I have read the [Information for Submitters](https://aclanthology.org/info/contrib/).\n- [ ] I am submitting a request for a **new venue** that does not exist in the ACL Anthology yet.\n\n### Venue Identifier\n\nROCLING\n\n### Volume Title\n\nProceedings of the 35th Conference on Computational Linguistics and Speech Processing (ROCLING 2023)\n\n### Venue Name (only if you are submitting a new venue)\n\nConference on Computational Linguistics and Speech Processing\n\n### Venue Website (only if you are submitting a new venue)\n\nhttps://rocling2023.github.io/\n\n### Date of Publication\n\n2023-10-20\n\n### Supporting Information\n\nDear Anthology Director,\r\n\r\nI'm Hou-Chiang Tseng who the publication chair of the 35th annual Conference on Computational Linguistics and Speech Processing (ROCLING 2023).\r\n\r\nThe conference website: https://rocling2023.github.io/\r\n\r\nWe want to register the ROCLING 2023 to ACL Anthology. Please see following two items:\r\n(a) the complete list of volumes: please see the attached file,\r\nand (b) all the new material can be downloaded from the following URL:\r\nhttps://drive.google.com/drive/folders/1dxt_gYlUvmuLiNETgDRg9cGpiJxVGwbD?usp=sharing\r\n\r\nIf there is any question, please let me know.\r\n[Anthology.Volume_ROCLING.2023.xlsx](https://github.com/acl-org/acl-anthology/files/14318157/Anthology.Volume_ROCLING.2023.xlsx)\r\n\r\nBest regards,\r\nDr. Hou-Chiang Tseng\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nTakes a list of XML files on STDIN, and prints all the volumes\nwithin each of those files. e.g.,\n\n git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH\n\nUsed to find the list of volumes to generate previews for.\n\"\"\"\n\nimport sys\nimport argparse\nimport lxml.etree as etree\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url_root\")\nargs = parser.parse_args()\n\nvolumes = []\nfor filepath in sys.stdin:\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n\n try:\n tree = etree.parse(filepath.rstrip())\n except Exception:\n continue\n\n root = tree.getroot()\n collection_id = root.attrib[\"id\"]\n for volume in root.findall(\"./volume\"):\n volume_name = volume.attrib[\"id\"]\n volume_id = f\"{collection_id}-{volume_name}\"\n volumes.append(f\"[{volume_id}]({args.url_root}/{volume_id})\")\n\nif len(volumes) > 50:\n volumes = volumes[0:50] + [f\"(plus {len(volumes)-50} more...)\"]\n\nprint(\", \".join(volumes))\n", "path": "bin/volumes_from_diff.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nTakes a list of XML files on STDIN, and prints all the volumes\nwithin each of those files. 
e.g.,\n\n git diff --name-only master | ./bin/volumes_from_xml.py https://preview.aclanthology.org/BRANCH\n\nUsed to find the list of volumes to generate previews for.\n\"\"\"\n\nimport sys\nimport argparse\nimport lxml.etree as etree\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url_root\")\nargs = parser.parse_args()\n\nvolumes = []\nfor filepath in sys.stdin:\n filepath = filepath.rstrip()\n if filepath.startswith(\"python/\") or not filepath.endswith(\".xml\"):\n continue\n\n try:\n tree = etree.parse(filepath.rstrip())\n except Exception:\n continue\n\n root = tree.getroot()\n collection_id = root.attrib[\"id\"]\n for volume in root.findall(\"./volume\"):\n volume_name = volume.attrib[\"id\"]\n volume_id = f\"{collection_id}-{volume_name}\"\n volumes.append(f\"[{volume_id}]({args.url_root}/{volume_id})\")\n\nif len(volumes) > 50:\n volumes = volumes[0:50] + [f\"(plus {len(volumes)-50} more...)\"]\n\nprint(\", \".join(volumes))\n", "path": "bin/volumes_from_diff.py"}]} | 1,007 | 82 |
gh_patches_debug_2740 | rasdani/github-patches | git_diff | mozilla__bugbug-200 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use 'product' and 'component' features in the models
b7369ea8bf282941ce4b378ad5ad3c832db20668 introduced the features, but we are still not using them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/bug.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.over_sampling import BorderlineSMOTE
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features
13 from bugbug import bugzilla
14 from bugbug import labels
15 from bugbug.model import Model
16
17
18 class BugModel(Model):
19 def __init__(self, lemmatization=False):
20 Model.__init__(self, lemmatization)
21
22 self.sampler = BorderlineSMOTE(random_state=0)
23
24 feature_extractors = [
25 bug_features.has_str(),
26 bug_features.severity(),
27 # Ignore keywords that would make the ML completely skewed
28 # (we are going to use them as 100% rules in the evaluation phase).
29 bug_features.keywords({'regression', 'talos-regression', 'feature'}),
30 bug_features.is_coverity_issue(),
31 bug_features.has_crash_signature(),
32 bug_features.has_url(),
33 bug_features.has_w3c_url(),
34 bug_features.has_github_url(),
35 bug_features.whiteboard(),
36 bug_features.patches(),
37 bug_features.landings(),
38 bug_features.title(),
39 bug_features.blocked_bugs_number(),
40 bug_features.ever_affected(),
41 bug_features.affected_then_unaffected(),
42 ]
43
44 cleanup_functions = [
45 bug_features.cleanup_url,
46 bug_features.cleanup_fileref,
47 bug_features.cleanup_synonyms,
48 ]
49
50 self.extraction_pipeline = Pipeline([
51 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),
52 ('union', ColumnTransformer([
53 ('data', DictVectorizer(), 'data'),
54
55 ('title', self.text_vectorizer(min_df=0.001), 'title'),
56
57 ('first_comment', self.text_vectorizer(min_df=0.001), 'first_comment'),
58
59 ('comments', self.text_vectorizer(min_df=0.001), 'comments'),
60 ])),
61 ])
62
63 self.clf = xgboost.XGBClassifier(n_jobs=16)
64 self.clf.set_params(predictor='cpu_predictor')
65
66 def get_bugbug_labels(self, kind='bug'):
67 assert kind in ['bug', 'regression', 'defect_feature_task']
68
69 classes = {}
70
71 for bug_id, category in labels.get_labels('bug_nobug'):
72 assert category in ['True', 'False'], f'unexpected category {category}'
73 if kind == 'bug':
74 classes[int(bug_id)] = 1 if category == 'True' else 0
75 elif kind == 'regression':
76 if category == 'False':
77 classes[int(bug_id)] = 0
78 elif kind == 'defect_feature_task':
79 if category == 'True':
80 classes[int(bug_id)] = 'd'
81
82 for bug_id, category in labels.get_labels('regression_bug_nobug'):
83 assert category in ['nobug', 'bug_unknown_regression', 'bug_no_regression', 'regression'], f'unexpected category {category}'
84 if kind == 'bug':
85 classes[int(bug_id)] = 1 if category != 'nobug' else 0
86 elif kind == 'regression':
87 if category == 'bug_unknown_regression':
88 continue
89
90 classes[int(bug_id)] = 1 if category == 'regression' else 0
91 elif kind == 'defect_feature_task':
92 if category != 'nobug':
93 classes[int(bug_id)] = 'd'
94
95 for bug_id, category in labels.get_labels('defect_feature_task'):
96 assert category in ['d', 'f', 't']
97 if kind == 'bug':
98 classes[int(bug_id)] = 1 if category == 'd' else 0
99 elif kind == 'regression':
100 if category in ['f', 't']:
101 classes[int(bug_id)] = 0
102 elif kind == 'defect_feature_task':
103 classes[int(bug_id)] = category
104
105 # Augment labes by using bugs marked as 'regression' or 'feature', as they are basically labelled.
106 bug_ids = set()
107 for bug in bugzilla.get_bugs():
108 bug_id = int(bug['id'])
109
110 bug_ids.add(bug_id)
111
112 if bug_id in classes:
113 continue
114
115 if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):
116 if kind in ['bug', 'regression']:
117 classes[bug_id] = 1
118 else:
119 classes[bug_id] = 'd'
120 elif any(keyword in bug['keywords'] for keyword in ['feature']):
121 if kind in ['bug', 'regression']:
122 classes[bug_id] = 0
123 else:
124 classes[bug_id] = 'f'
125 elif kind == 'regression':
126 for history in bug['history']:
127 for change in history['changes']:
128 if change['field_name'] == 'keywords' and change['removed'] == 'regression':
129 classes[bug_id] = 0
130
131 # Remove labels which belong to bugs for which we have no data.
132 return {bug_id: label for bug_id, label in classes.items() if bug_id in bug_ids}
133
134 def get_labels(self):
135 return self.get_bugbug_labels('bug')
136
137 def get_feature_names(self):
138 return self.extraction_pipeline.named_steps['union'].get_feature_names()
139
140 def overwrite_classes(self, bugs, classes, probabilities):
141 for i, bug in enumerate(bugs):
142 if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):
143 classes[i] = 1 if not probabilities else [0., 1.]
144 elif 'feature' in bug['keywords']:
145 classes[i] = 0 if not probabilities else [1., 0.]
146
147 return classes
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/bug.py b/bugbug/models/bug.py
--- a/bugbug/models/bug.py
+++ b/bugbug/models/bug.py
@@ -39,6 +39,8 @@
bug_features.blocked_bugs_number(),
bug_features.ever_affected(),
bug_features.affected_then_unaffected(),
+ bug_features.product(),
+ bug_features.component(),
]
cleanup_functions = [
| {"golden_diff": "diff --git a/bugbug/models/bug.py b/bugbug/models/bug.py\n--- a/bugbug/models/bug.py\n+++ b/bugbug/models/bug.py\n@@ -39,6 +39,8 @@\n bug_features.blocked_bugs_number(),\n bug_features.ever_affected(),\n bug_features.affected_then_unaffected(),\n+ bug_features.product(),\n+ bug_features.component(),\n ]\n \n cleanup_functions = [\n", "issue": "Use 'product' and 'component' features in the models\nb7369ea8bf282941ce4b378ad5ad3c832db20668 introduced the features, but we are still not using them.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug import labels\nfrom bugbug.model import Model\n\n\nclass BugModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.keywords({'regression', 'talos-regression', 'feature'}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.blocked_bugs_number(),\n bug_features.ever_affected(),\n bug_features.affected_then_unaffected(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_url,\n bug_features.cleanup_fileref,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(min_df=0.001), 'title'),\n\n ('first_comment', self.text_vectorizer(min_df=0.001), 'first_comment'),\n\n ('comments', self.text_vectorizer(min_df=0.001), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def get_bugbug_labels(self, kind='bug'):\n assert kind in ['bug', 'regression', 'defect_feature_task']\n\n classes = {}\n\n for bug_id, category in labels.get_labels('bug_nobug'):\n assert category in ['True', 'False'], f'unexpected category {category}'\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category == 'True' else 0\n elif kind == 'regression':\n if category == 'False':\n classes[int(bug_id)] = 0\n elif kind == 'defect_feature_task':\n if category == 'True':\n classes[int(bug_id)] = 'd'\n\n for bug_id, category in labels.get_labels('regression_bug_nobug'):\n assert category in ['nobug', 'bug_unknown_regression', 'bug_no_regression', 'regression'], f'unexpected category {category}'\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category != 'nobug' else 0\n elif kind == 'regression':\n if category == 'bug_unknown_regression':\n continue\n\n classes[int(bug_id)] = 1 if category == 'regression' else 0\n elif kind == 'defect_feature_task':\n if 
category != 'nobug':\n classes[int(bug_id)] = 'd'\n\n for bug_id, category in labels.get_labels('defect_feature_task'):\n assert category in ['d', 'f', 't']\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category == 'd' else 0\n elif kind == 'regression':\n if category in ['f', 't']:\n classes[int(bug_id)] = 0\n elif kind == 'defect_feature_task':\n classes[int(bug_id)] = category\n\n # Augment labes by using bugs marked as 'regression' or 'feature', as they are basically labelled.\n bug_ids = set()\n for bug in bugzilla.get_bugs():\n bug_id = int(bug['id'])\n\n bug_ids.add(bug_id)\n\n if bug_id in classes:\n continue\n\n if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):\n if kind in ['bug', 'regression']:\n classes[bug_id] = 1\n else:\n classes[bug_id] = 'd'\n elif any(keyword in bug['keywords'] for keyword in ['feature']):\n if kind in ['bug', 'regression']:\n classes[bug_id] = 0\n else:\n classes[bug_id] = 'f'\n elif kind == 'regression':\n for history in bug['history']:\n for change in history['changes']:\n if change['field_name'] == 'keywords' and change['removed'] == 'regression':\n classes[bug_id] = 0\n\n # Remove labels which belong to bugs for which we have no data.\n return {bug_id: label for bug_id, label in classes.items() if bug_id in bug_ids}\n\n def get_labels(self):\n return self.get_bugbug_labels('bug')\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):\n classes[i] = 1 if not probabilities else [0., 1.]\n elif 'feature' in bug['keywords']:\n classes[i] = 0 if not probabilities else [1., 0.]\n\n return classes\n", "path": "bugbug/models/bug.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug import labels\nfrom bugbug.model import Model\n\n\nclass BugModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.keywords({'regression', 'talos-regression', 'feature'}),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.blocked_bugs_number(),\n bug_features.ever_affected(),\n bug_features.affected_then_unaffected(),\n bug_features.product(),\n bug_features.component(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_url,\n bug_features.cleanup_fileref,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(min_df=0.001), 'title'),\n\n ('first_comment', self.text_vectorizer(min_df=0.001), 'first_comment'),\n\n ('comments', self.text_vectorizer(min_df=0.001), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def get_bugbug_labels(self, kind='bug'):\n assert kind in ['bug', 'regression', 'defect_feature_task']\n\n classes = {}\n\n for bug_id, category in labels.get_labels('bug_nobug'):\n assert category in ['True', 'False'], f'unexpected category {category}'\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category == 'True' else 0\n elif kind == 'regression':\n if category == 'False':\n classes[int(bug_id)] = 0\n elif kind == 'defect_feature_task':\n if category == 'True':\n classes[int(bug_id)] = 'd'\n\n for bug_id, category in labels.get_labels('regression_bug_nobug'):\n assert category in ['nobug', 'bug_unknown_regression', 'bug_no_regression', 'regression'], f'unexpected category {category}'\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category != 'nobug' else 0\n elif kind == 'regression':\n if category == 'bug_unknown_regression':\n continue\n\n classes[int(bug_id)] = 1 if category == 'regression' else 0\n elif kind == 'defect_feature_task':\n if category != 'nobug':\n classes[int(bug_id)] = 'd'\n\n for bug_id, category in labels.get_labels('defect_feature_task'):\n assert category in ['d', 'f', 't']\n if kind == 'bug':\n classes[int(bug_id)] = 1 if category == 'd' else 0\n elif kind == 'regression':\n if category in ['f', 't']:\n classes[int(bug_id)] = 0\n elif kind == 'defect_feature_task':\n classes[int(bug_id)] = category\n\n # Augment labes by using bugs marked as 'regression' or 'feature', as they are basically labelled.\n bug_ids = set()\n for bug in bugzilla.get_bugs():\n bug_id = int(bug['id'])\n\n bug_ids.add(bug_id)\n\n if bug_id in classes:\n 
continue\n\n if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):\n if kind in ['bug', 'regression']:\n classes[bug_id] = 1\n else:\n classes[bug_id] = 'd'\n elif any(keyword in bug['keywords'] for keyword in ['feature']):\n if kind in ['bug', 'regression']:\n classes[bug_id] = 0\n else:\n classes[bug_id] = 'f'\n elif kind == 'regression':\n for history in bug['history']:\n for change in history['changes']:\n if change['field_name'] == 'keywords' and change['removed'] == 'regression':\n classes[bug_id] = 0\n\n # Remove labels which belong to bugs for which we have no data.\n return {bug_id: label for bug_id, label in classes.items() if bug_id in bug_ids}\n\n def get_labels(self):\n return self.get_bugbug_labels('bug')\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if any(keyword in bug['keywords'] for keyword in ['regression', 'talos-regression']) or ('cf_has_regression_range' in bug and bug['cf_has_regression_range'] == 'yes'):\n classes[i] = 1 if not probabilities else [0., 1.]\n elif 'feature' in bug['keywords']:\n classes[i] = 0 if not probabilities else [1., 0.]\n\n return classes\n", "path": "bugbug/models/bug.py"}]} | 2,013 | 98 |
gh_patches_debug_50396 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow simple loading of "dummy" configuration
A simple method to load a minimal configuration should be added.
This can be handy for some data-analysis tasks in notebooks, where no full configuration is needed or necessary.
Something like `Configuration.get_empty_config()`, which contains the minimum required keys set to default values
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/configuration/check_exchange.py`
Content:
```
1 import logging
2 from typing import Any, Dict
3
4 from freqtrade import OperationalException
5 from freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,
6 is_exchange_known_ccxt, is_exchange_bad,
7 is_exchange_officially_supported)
8 from freqtrade.state import RunMode
9
10 logger = logging.getLogger(__name__)
11
12
13 def check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:
14 """
15 Check if the exchange name in the config file is supported by Freqtrade
16 :param check_for_bad: if True, check the exchange against the list of known 'bad'
17 exchanges
18 :return: False if exchange is 'bad', i.e. is known to work with the bot with
19 critical issues or does not work at all, crashes, etc. True otherwise.
20 raises an exception if the exchange if not supported by ccxt
21 and thus is not known for the Freqtrade at all.
22 """
23
24 if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]
25 and not config.get('exchange', {}).get('name')):
26 # Skip checking exchange in plot mode, since it requires no exchange
27 return True
28 logger.info("Checking exchange...")
29
30 exchange = config.get('exchange', {}).get('name').lower()
31 if not exchange:
32 raise OperationalException(
33 f'This command requires a configured exchange. You should either use '
34 f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\n'
35 f'The following exchanges are available for Freqtrade: '
36 f'{", ".join(available_exchanges())}'
37 )
38
39 if not is_exchange_known_ccxt(exchange):
40 raise OperationalException(
41 f'Exchange "{exchange}" is not known to the ccxt library '
42 f'and therefore not available for the bot.\n'
43 f'The following exchanges are available for Freqtrade: '
44 f'{", ".join(available_exchanges())}'
45 )
46
47 if check_for_bad and is_exchange_bad(exchange):
48 raise OperationalException(f'Exchange "{exchange}" is known to not work with the bot yet. '
49 f'Reason: {get_exchange_bad_reason(exchange)}')
50
51 if is_exchange_officially_supported(exchange):
52 logger.info(f'Exchange "{exchange}" is officially supported '
53 f'by the Freqtrade development team.')
54 else:
55 logger.warning(f'Exchange "{exchange}" is known to the the ccxt library, '
56 f'available for the bot, but not officially supported '
57 f'by the Freqtrade development team. '
58 f'It may work flawlessly (please report back) or have serious issues. '
59 f'Use it at your own discretion.')
60
61 return True
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py
--- a/freqtrade/configuration/check_exchange.py
+++ b/freqtrade/configuration/check_exchange.py
@@ -21,7 +21,7 @@
and thus is not known for the Freqtrade at all.
"""
- if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]
+ if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]
and not config.get('exchange', {}).get('name')):
# Skip checking exchange in plot mode, since it requires no exchange
return True
| {"golden_diff": "diff --git a/freqtrade/configuration/check_exchange.py b/freqtrade/configuration/check_exchange.py\n--- a/freqtrade/configuration/check_exchange.py\n+++ b/freqtrade/configuration/check_exchange.py\n@@ -21,7 +21,7 @@\n and thus is not known for the Freqtrade at all.\n \"\"\"\n \n- if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]\n+ if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]\n and not config.get('exchange', {}).get('name')):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n", "issue": "Allow simple loading of \"dummy\" configuration\nA simple method to load a minimal configuration should be added.\r\n\r\nThis can be handy for some data-analysis tasks in notebooks, where no full configuration is needed or necessary.\r\n\r\nSomething like `Configuration.get_empty_config()`, which contains the minimum required keys set to default values\n", "before_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import OperationalException\nfrom freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,\n is_exchange_known_ccxt, is_exchange_bad,\n is_exchange_officially_supported)\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:\n \"\"\"\n Check if the exchange name in the config file is supported by Freqtrade\n :param check_for_bad: if True, check the exchange against the list of known 'bad'\n exchanges\n :return: False if exchange is 'bad', i.e. is known to work with the bot with\n critical issues or does not work at all, crashes, etc. True otherwise.\n raises an exception if the exchange if not supported by ccxt\n and thus is not known for the Freqtrade at all.\n \"\"\"\n\n if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE]\n and not config.get('exchange', {}).get('name')):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n logger.info(\"Checking exchange...\")\n\n exchange = config.get('exchange', {}).get('name').lower()\n if not exchange:\n raise OperationalException(\n f'This command requires a configured exchange. You should either use '\n f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if not is_exchange_known_ccxt(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not known to the ccxt library '\n f'and therefore not available for the bot.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if check_for_bad and is_exchange_bad(exchange):\n raise OperationalException(f'Exchange \"{exchange}\" is known to not work with the bot yet. '\n f'Reason: {get_exchange_bad_reason(exchange)}')\n\n if is_exchange_officially_supported(exchange):\n logger.info(f'Exchange \"{exchange}\" is officially supported '\n f'by the Freqtrade development team.')\n else:\n logger.warning(f'Exchange \"{exchange}\" is known to the the ccxt library, '\n f'available for the bot, but not officially supported '\n f'by the Freqtrade development team. '\n f'It may work flawlessly (please report back) or have serious issues. 
'\n f'Use it at your own discretion.')\n\n return True\n", "path": "freqtrade/configuration/check_exchange.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Dict\n\nfrom freqtrade import OperationalException\nfrom freqtrade.exchange import (available_exchanges, get_exchange_bad_reason,\n is_exchange_known_ccxt, is_exchange_bad,\n is_exchange_officially_supported)\nfrom freqtrade.state import RunMode\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:\n \"\"\"\n Check if the exchange name in the config file is supported by Freqtrade\n :param check_for_bad: if True, check the exchange against the list of known 'bad'\n exchanges\n :return: False if exchange is 'bad', i.e. is known to work with the bot with\n critical issues or does not work at all, crashes, etc. True otherwise.\n raises an exception if the exchange if not supported by ccxt\n and thus is not known for the Freqtrade at all.\n \"\"\"\n\n if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]\n and not config.get('exchange', {}).get('name')):\n # Skip checking exchange in plot mode, since it requires no exchange\n return True\n logger.info(\"Checking exchange...\")\n\n exchange = config.get('exchange', {}).get('name').lower()\n if not exchange:\n raise OperationalException(\n f'This command requires a configured exchange. You should either use '\n f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if not is_exchange_known_ccxt(exchange):\n raise OperationalException(\n f'Exchange \"{exchange}\" is not known to the ccxt library '\n f'and therefore not available for the bot.\\n'\n f'The following exchanges are available for Freqtrade: '\n f'{\", \".join(available_exchanges())}'\n )\n\n if check_for_bad and is_exchange_bad(exchange):\n raise OperationalException(f'Exchange \"{exchange}\" is known to not work with the bot yet. '\n f'Reason: {get_exchange_bad_reason(exchange)}')\n\n if is_exchange_officially_supported(exchange):\n logger.info(f'Exchange \"{exchange}\" is officially supported '\n f'by the Freqtrade development team.')\n else:\n logger.warning(f'Exchange \"{exchange}\" is known to the the ccxt library, '\n f'available for the bot, but not officially supported '\n f'by the Freqtrade development team. '\n f'It may work flawlessly (please report back) or have serious issues. '\n f'Use it at your own discretion.')\n\n return True\n", "path": "freqtrade/configuration/check_exchange.py"}]} | 1,034 | 154 |
gh_patches_debug_34359 | rasdani/github-patches | git_diff | bridgecrewio__checkov-975 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_103 should not trigger for TCP NLB's
Hi!
CKV_AWS_103 throws an error when using an NLB with TCP protocol.
However, setting an `ssl_policy` only make sense for ELB's with protocol HTTPS or TLS.
```
14 | resource "aws_lb_listener" "nlb_listener_https" {
15 | load_balancer_arn = aws_lb.nlb.arn
16 | port = xxxxxxxx
17 | protocol = "TCP"
18 | default_action {
19 | target_group_arn = aws_lb_target_group.nlb_target_group.id
20 | type = "forward"
21 | }
22 | }
```
`Check: CKV_AWS_103: "Ensure that load balancer is using TLS 1.2"
FAILED for resource: aws_lb_listener.nlb_listener_https
File: /nlb.tf:14-22
Guide: https://docs.bridgecrew.io/docs/bc_aws_general_43`
**Expected behavior**
Check is PASSED instead of FAILED.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class AppLoadBalancerTLS12(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure that load balancer is using TLS 1.2"
8 id = "CKV_AWS_103"
9 supported_resources = ['aws_lb_listener']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 key="protocol"
15 if key in conf.keys():
16 if conf[key] in (["HTTPS"], ["TLS"]):
17 # Only interested in HTTPS & TLS listeners
18 policy="ssl_policy"
19 if policy in conf.keys():
20 name=str(conf[policy]).strip("['']")
21 if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
22 return CheckResult.PASSED
23 else:
24 return CheckResult.FAILED
25 else:
26 return CheckResult.FAILED
27 else:
28 for action in conf.get('default_action',[]):
29 for redirect in action.get('redirect',[]):
30 if redirect.get('protocol',[]) == ["HTTPS"]:
31 return CheckResult.PASSED
32 return CheckResult.FAILED
33 else:
34 return CheckResult.FAILED
35
36
37 check = AppLoadBalancerTLS12()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
--- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
+++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py
@@ -6,28 +6,35 @@
def __init__(self):
name = "Ensure that load balancer is using TLS 1.2"
id = "CKV_AWS_103"
- supported_resources = ['aws_lb_listener']
+ supported_resources = ["aws_lb_listener"]
categories = [CheckCategories.GENERAL_SECURITY]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+ super().__init__(
+ name=name,
+ id=id,
+ categories=categories,
+ supported_resources=supported_resources,
+ )
def scan_resource_conf(self, conf):
- key="protocol"
+ key = "protocol"
if key in conf.keys():
if conf[key] in (["HTTPS"], ["TLS"]):
- # Only interested in HTTPS & TLS listeners
- policy="ssl_policy"
+ # Only interested in HTTPS & TLS listeners
+ policy = "ssl_policy"
if policy in conf.keys():
- name=str(conf[policy]).strip("['']")
- if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
- return CheckResult.PASSED
- else:
- return CheckResult.FAILED
+ name = str(conf[policy]).strip("['']")
+ if name.startswith("ELBSecurityPolicy-FS-1-2") or name.startswith("ELBSecurityPolicy-TLS-1-2"):
+ return CheckResult.PASSED
+ else:
+ return CheckResult.FAILED
else:
- return CheckResult.FAILED
+ return CheckResult.FAILED
+ elif conf[key] in (["TCP"], ["UDP"], ["TCP_UDP"]):
+ return CheckResult.PASSED
else:
- for action in conf.get('default_action',[]):
- for redirect in action.get('redirect',[]):
- if redirect.get('protocol',[]) == ["HTTPS"]:
+ for action in conf.get("default_action", []):
+ for redirect in action.get("redirect", []):
+ if redirect.get("protocol", []) == ["HTTPS"]:
return CheckResult.PASSED
return CheckResult.FAILED
else:
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n--- a/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n+++ b/checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py\n@@ -6,28 +6,35 @@\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n- supported_resources = ['aws_lb_listener']\n+ supported_resources = [\"aws_lb_listener\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n+ super().__init__(\n+ name=name,\n+ id=id,\n+ categories=categories,\n+ supported_resources=supported_resources,\n+ )\n \n def scan_resource_conf(self, conf):\n- key=\"protocol\"\n+ key = \"protocol\"\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], [\"TLS\"]):\n- # Only interested in HTTPS & TLS listeners\n- policy=\"ssl_policy\"\n+ # Only interested in HTTPS & TLS listeners\n+ policy = \"ssl_policy\"\n if policy in conf.keys():\n- name=str(conf[policy]).strip(\"['']\") \n- if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n- return CheckResult.PASSED\n- else:\n- return CheckResult.FAILED\n+ name = str(conf[policy]).strip(\"['']\")\n+ if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n+ return CheckResult.PASSED\n+ else:\n+ return CheckResult.FAILED\n else:\n- return CheckResult.FAILED\n+ return CheckResult.FAILED\n+ elif conf[key] in ([\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n+ return CheckResult.PASSED\n else:\n- for action in conf.get('default_action',[]):\n- for redirect in action.get('redirect',[]):\n- if redirect.get('protocol',[]) == [\"HTTPS\"]:\n+ for action in conf.get(\"default_action\", []):\n+ for redirect in action.get(\"redirect\", []):\n+ if redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n", "issue": "CKV_AWS_103 should not trigger for TCP NLB's\nHi!\r\n\r\nCKV_AWS_103 throws an error when using an NLB with TCP protocol.\r\nHowever, setting an `ssl_policy` only make sense for ELB's with protocol HTTPS or TLS.\r\n\r\n```\r\n 14 | resource \"aws_lb_listener\" \"nlb_listener_https\" {\r\n 15 | load_balancer_arn = aws_lb.nlb.arn\r\n 16 | port = xxxxxxxx\r\n 17 | protocol = \"TCP\"\r\n 18 | default_action {\r\n 19 | target_group_arn = aws_lb_target_group.nlb_target_group.id\r\n 20 | type = \"forward\"\r\n 21 | }\r\n 22 | }\r\n```\r\n\r\n`Check: CKV_AWS_103: \"Ensure that load balancer is using TLS 1.2\"\r\n FAILED for resource: aws_lb_listener.nlb_listener_https\r\n File: /nlb.tf:14-22\r\n Guide: https://docs.bridgecrew.io/docs/bc_aws_general_43`\r\n\r\n**Expected behavior**\r\nCheck is PASSED instead of FAILED.\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AppLoadBalancerTLS12(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n supported_resources = ['aws_lb_listener']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n key=\"protocol\"\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], 
[\"TLS\"]):\n # Only interested in HTTPS & TLS listeners\n policy=\"ssl_policy\"\n if policy in conf.keys():\n name=str(conf[policy]).strip(\"['']\") \n if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n else:\n for action in conf.get('default_action',[]):\n for redirect in action.get('redirect',[]):\n if redirect.get('protocol',[]) == [\"HTTPS\"]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n\n\ncheck = AppLoadBalancerTLS12()\n", "path": "checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AppLoadBalancerTLS12(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure that load balancer is using TLS 1.2\"\n id = \"CKV_AWS_103\"\n supported_resources = [\"aws_lb_listener\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n )\n\n def scan_resource_conf(self, conf):\n key = \"protocol\"\n if key in conf.keys():\n if conf[key] in ([\"HTTPS\"], [\"TLS\"]):\n # Only interested in HTTPS & TLS listeners\n policy = \"ssl_policy\"\n if policy in conf.keys():\n name = str(conf[policy]).strip(\"['']\")\n if name.startswith(\"ELBSecurityPolicy-FS-1-2\") or name.startswith(\"ELBSecurityPolicy-TLS-1-2\"):\n return CheckResult.PASSED\n else:\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n elif conf[key] in ([\"TCP\"], [\"UDP\"], [\"TCP_UDP\"]):\n return CheckResult.PASSED\n else:\n for action in conf.get(\"default_action\", []):\n for redirect in action.get(\"redirect\", []):\n if redirect.get(\"protocol\", []) == [\"HTTPS\"]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n\n\ncheck = AppLoadBalancerTLS12()\n", "path": "checkov/terraform/checks/resource/aws/AppLoadBalancerTLS12.py"}]} | 932 | 577 |
gh_patches_debug_29263 | rasdani/github-patches | git_diff | joke2k__faker-266 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use environment markers and reactivate universal wheels + distribute wheels
Right now pip install is broken because the released package claims to be a universal wheel but does not use environment markers.
pip builds the wheel once, with whichever Python it is first run under, and that same wheel is then reused for every other Python,
so a pip install on Python 2.6 would create a broken wheel that bakes in the extra dependency, which is then subsequently used by the other Pythons and will utterly break them.
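The fix is to express the conditional `importlib` dependency with an environment marker instead of branching on `sys.version_info` at build time, so the same wheel is correct everywhere. A minimal sketch of the `setup()` call using the marker-based `extras_require` form that the patch below adopts (illustrative only, not the full setup.py):

```python
from setuptools import setup, find_packages

setup(
    name='fake-factory',
    version='0.5.3',
    packages=find_packages(exclude=['*.tests']),
    # The importlib backport is only pulled in on interpreters that need it;
    # the built wheel itself stays identical for every Python version.
    extras_require={
        ':python_version=="2.6"': ['importlib'],
        ':python_version=="3.0"': ['importlib'],
    },
)
```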
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import os
5 import io
6 import sys
7 from setuptools import setup, find_packages
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read()
11 NEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding="utf8").read()
12
13
14 version = '0.5.3'
15
16 install_requires = []
17 if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or
18 (sys.version_info[0] == 3 and sys.version_info[1] < 1)):
19 install_requires.append('importlib')
20
21 # this module can be zip-safe if the zipimporter implements iter_modules or if
22 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
23 try:
24 import pkgutil
25 import zipimport
26 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
27 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
28 except (ImportError, AttributeError):
29 zip_safe = False
30
31 setup(
32 name='fake-factory',
33 version=version,
34 description="Faker is a Python package that generates fake data for you.",
35 long_description=README + '\n\n' + NEWS,
36 scripts=['faker/bin/faker'],
37 classifiers=[
38 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
39 'Development Status :: 3 - Alpha',
40 'Environment :: Console',
41 'Intended Audience :: Developers',
42 'Programming Language :: Python',
43 'Programming Language :: Python :: 2',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.4',
46 'Topic :: Software Development :: Libraries :: Python Modules',
47 'Topic :: Software Development :: Testing',
48 'Topic :: Utilities',
49 'License :: OSI Approved :: MIT License'
50 ],
51 keywords='faker fixtures data test mock generator',
52 author='joke2k',
53 author_email='[email protected]',
54 url='http://github.com/joke2k/faker',
55 license='MIT License',
56 packages=find_packages(exclude=['*.tests']),
57 platforms=["any"],
58 test_suite='faker.tests',
59 zip_safe=zip_safe,
60 install_requires=install_requires
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,6 @@
import os
import io
-import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
@@ -13,11 +12,6 @@
version = '0.5.3'
-install_requires = []
-if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or
- (sys.version_info[0] == 3 and sys.version_info[1] < 1)):
- install_requires.append('importlib')
-
# this module can be zip-safe if the zipimporter implements iter_modules or if
# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
try:
@@ -33,7 +27,9 @@
version=version,
description="Faker is a Python package that generates fake data for you.",
long_description=README + '\n\n' + NEWS,
- scripts=['faker/bin/faker'],
+ entry_points={
+ 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
+ },
classifiers=[
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
@@ -57,5 +53,8 @@
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
- install_requires=install_requires
+ extras_require={
+ ':python_version=="2.6"': ['importlib'],
+ ':python_version=="3.0"': ['importlib'],
+ }
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,7 +3,6 @@\n \n import os\n import io\n-import sys\n from setuptools import setup, find_packages\n \n here = os.path.abspath(os.path.dirname(__file__))\n@@ -13,11 +12,6 @@\n \n version = '0.5.3'\n \n-install_requires = []\n-if ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or\n- (sys.version_info[0] == 3 and sys.version_info[1] < 1)):\n- install_requires.append('importlib')\n-\n # this module can be zip-safe if the zipimporter implements iter_modules or if\n # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\n try:\n@@ -33,7 +27,9 @@\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n- scripts=['faker/bin/faker'],\n+ entry_points={\n+ 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n+ },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n@@ -57,5 +53,8 @@\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n- install_requires=install_requires\n+ extras_require={\n+ ':python_version==\"2.6\"': ['importlib'],\n+ ':python_version==\"3.0\"': ['importlib'],\n+ }\n )\n", "issue": "use environment markers and reactivate universal wheels + distribute wheels\nright now pip install is broken due to the released package claiming to be universal wheel but not using environment markers\n\nso pip makes a wheel for all pythons with the first python its run on, then its used on all other pythons,\nso a pip install on python2.6 would create a broken wheel with the extra dependency which is then subsequently tried in other pythons, and will utterly break them\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\nimport sys\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\nNEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.5.3'\n\ninstall_requires = []\nif ((sys.version_info[0] == 2 and sys.version_info[1] < 7) or\n (sys.version_info[0] == 3 and sys.version_info[1] < 1)):\n install_requires.append('importlib')\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n scripts=['faker/bin/faker'],\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock 
generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=install_requires\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\nNEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.5.3'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n extras_require={\n ':python_version==\"2.6\"': ['importlib'],\n ':python_version==\"3.0\"': ['importlib'],\n }\n)\n", "path": "setup.py"}]} | 984 | 372 |
gh_patches_debug_8279 | rasdani/github-patches | git_diff | microsoft__ptvsd-759 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create release version of ptvsd
Create a release version of ptvsd version 4.1.1
- [x] Change development status to production from alpha
- [x] Set version to 4.1.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 # Copyright (c) Microsoft Corporation. All rights reserved.
4 # Licensed under the MIT License. See LICENSE in the project root
5 # for license information.
6
7 import os
8 import os.path
9 import subprocess
10 import sys
11
12 from setuptools import setup
13
14 import versioneer
15 import ptvsd
16 import ptvsd._vendored
17
18
19 PYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')
20 PTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))
21
22
23 def cython_build():
24 print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')
25 subprocess.call([
26 sys.executable,
27 os.path.join(PYDEVD_ROOT, 'setup_cython.py'),
28 'build_ext',
29 '-i',
30 ])
31
32
33 def iter_vendored_files():
34 # Add pydevd files as data files for this package. They are not
35 # treated as a package of their own, because we don't actually
36 # want to provide pydevd - just use our own copy internally.
37 for project in ptvsd._vendored.list_all():
38 for filename in ptvsd._vendored.iter_packaging_files(project):
39 yield filename
40
41
42 with open('DESCRIPTION.md', 'r') as fh:
43 long_description = fh.read()
44
45
46 if __name__ == '__main__':
47 if not os.getenv('SKIP_CYTHON_BUILD'):
48 cython_build()
49
50 setup(
51 name='ptvsd',
52 version=versioneer.get_version(),
53 description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa
54 long_description=long_description,
55 long_description_content_type='text/markdown',
56 license='MIT',
57 author='Microsoft Corporation',
58 author_email='[email protected]',
59 url='https://aka.ms/ptvs',
60 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
61 classifiers=[
62 'Development Status :: 3 - Alpha',
63 'Programming Language :: Python :: 2.7',
64 'Programming Language :: Python :: 3.4',
65 'Programming Language :: Python :: 3.5',
66 'Programming Language :: Python :: 3.6',
67 'Programming Language :: Python :: 3.7',
68 'Topic :: Software Development :: Debuggers',
69 'Operating System :: OS Independent',
70 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',
71 'License :: OSI Approved :: MIT License',
72 ],
73 packages=[
74 'ptvsd',
75 'ptvsd._vendored',
76 ],
77 package_data={
78 'ptvsd': ['ThirdPartyNotices.txt'],
79 'ptvsd._vendored': list(iter_vendored_files()),
80 },
81 cmdclass=versioneer.get_cmdclass(),
82 )
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
url='https://aka.ms/ptvs',
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
classifiers=[
- 'Development Status :: 3 - Alpha',
+ 'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n classifiers=[\n- 'Development Status :: 3 - Alpha',\n+ 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n", "issue": "Create release version of ptvsd\nCreate a release version of ptvsd version 4.1.1\r\n- [x] Change development status to production from alpha\r\n- [x] Set version to 4.1.1\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nwith open('DESCRIPTION.md', 'r') as fh:\n long_description = fh.read()\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Debuggers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nfrom setuptools import setup\n\nimport versioneer\nimport ptvsd\nimport ptvsd._vendored\n\n\nPYDEVD_ROOT = ptvsd._vendored.project_root('pydevd')\nPTVSD_ROOT = os.path.dirname(os.path.abspath(ptvsd.__file__))\n\n\ndef cython_build():\n print('Compiling extension modules (set SKIP_CYTHON_BUILD=1 to omit)')\n subprocess.call([\n sys.executable,\n os.path.join(PYDEVD_ROOT, 'setup_cython.py'),\n 'build_ext',\n '-i',\n ])\n\n\ndef iter_vendored_files():\n # Add pydevd files as data files for this package. They are not\n # treated as a package of their own, because we don't actually\n # want to provide pydevd - just use our own copy internally.\n for project in ptvsd._vendored.list_all():\n for filename in ptvsd._vendored.iter_packaging_files(project):\n yield filename\n\n\nwith open('DESCRIPTION.md', 'r') as fh:\n long_description = fh.read()\n\n\nif __name__ == '__main__':\n if not os.getenv('SKIP_CYTHON_BUILD'):\n cython_build()\n\n setup(\n name='ptvsd',\n version=versioneer.get_version(),\n description='Remote debugging server for Python support in Visual Studio and Visual Studio Code', # noqa\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='MIT',\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://aka.ms/ptvs',\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Debuggers',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)',\n 'License :: OSI Approved :: MIT License',\n ],\n packages=[\n 'ptvsd',\n 'ptvsd._vendored',\n ],\n package_data={\n 'ptvsd': ['ThirdPartyNotices.txt'],\n 'ptvsd._vendored': list(iter_vendored_files()),\n },\n cmdclass=versioneer.get_cmdclass(),\n )\n", "path": "setup.py"}]} | 1,098 | 136 |
gh_patches_debug_24618 | rasdani/github-patches | git_diff | airctic__icevision-660 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fastai efficientdet fails on learn.validate() with AttributeError: 'NoneType' object has no attribute 'shape'
## 🐛 Bug
When simply trying to validate metrics for an EfficientDet model with fastai, it fails with:
```python
KeyError: 'image_id'
```
```python
AttributeError: 'NoneType' object has no attribute 'shape'
```
The failure happens when fastai tries to read the batch size automatically, in `AvgLoss.accumulate` via `find_bs`:
```python
class AvgLoss(Metric):
"Average the losses taking into account potential different batch sizes"
def reset(self): self.total,self.count = 0.,0
def accumulate(self, learn):
bs = find_bs(learn.yb)
self.total += learn.to_detach(learn.loss.mean())*bs
self.count += bs
@property
def value(self): return self.total/self.count if self.count != 0 else None
@property
def name(self): return "loss"
```
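For EfficientDet the targets in `learn.yb` are dictionaries of tensors rather than plain batched tensors, which is why `find_bs` comes back with the wrong thing. A minimal sketch of a patched loss metric that reads the batch size from the target dict instead (assuming the icevision EfficientDet dataloader, where each batch target carries a `"cls"` tensor per image):

```python
from fastai.learner import AvgLoss
from fastcore.basics import first

class EffDetAvgLoss(AvgLoss):
    "AvgLoss variant that takes the batch size from the EfficientDet target dict."
    def accumulate(self, learn):
        bs = len(first(learn.yb)["cls"])  # number of images in the batch
        self.total += learn.to_detach(learn.loss.mean()) * bs
        self.count += bs
```

The learner adapter can then swap this in on the `Recorder` callback (`recorder.loss = EffDetAvgLoss()`), which is essentially what the patch later in this entry does.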
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://colab.research.google.com/drive/1i4aXYu4wIKA7eLUK86GwTm7lq7zku_oF?usp=sharing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/torchvision/fastai/learner.py`
Content:
```
1 __all__ = ["RCNNCallback", "rcnn_learner"]
2
3 from icevision.imports import *
4 from icevision.engines.fastai import *
5 from icevision.models.torchvision.loss_fn import loss_fn
6 from icevision.models.torchvision.fastai.callbacks import *
7
8
9 def noop_watch(models, criterion=None, log="gradients", log_freq=1000, idx=None):
10 return []
11
12
13 def rcnn_learner(
14 dls: List[Union[DataLoader, fastai.DataLoader]],
15 model: nn.Module,
16 cbs=None,
17 **kwargs,
18 ):
19 learn = adapted_fastai_learner(
20 dls=dls,
21 model=model,
22 cbs=cbs,
23 loss_func=loss_fn,
24 **kwargs,
25 )
26
27 # HACK: patch AvgLoss (in original, find_bs gives errors)
28 class RCNNAvgLoss(fastai.AvgLoss):
29 def accumulate(self, learn):
30 bs = len(learn.yb)
31 self.total += fastai.to_detach(learn.loss.mean()) * bs
32 self.count += bs
33
34 recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
35 recorder.loss = RCNNAvgLoss()
36
37 is_wandb = [cb for cb in learn.cbs if "WandbCallback" in str(type(cb))]
38 if len(is_wandb) == 1:
39 logger.warning("Wandb quickfix implemented, for more info check issue #527")
40 wandb.watch = noop_watch
41 if len(is_wandb) > 1:
42 raise ValueError(
43 f"It seems you are passing {len(is_wandb)} `WandbCallback` instances to the `learner`. Only 1 is allowed."
44 )
45
46 return learn
47
```
Path: `icevision/models/ross/efficientdet/fastai/learner.py`
Content:
```
1 __all__ = ["learner"]
2
3 from icevision.imports import *
4 from icevision.engines.fastai import *
5 from icevision.models.ross.efficientdet.loss_fn import loss_fn
6 from icevision.models.ross.efficientdet.fastai.callbacks import EfficientDetCallback
7
8
9 def learner(
10 dls: List[Union[DataLoader, fastai.DataLoader]],
11 model: nn.Module,
12 cbs=None,
13 **learner_kwargs,
14 ):
15 """Fastai `Learner` adapted for EfficientDet.
16
17 # Arguments
18 dls: `Sequence` of `DataLoaders` passed to the `Learner`.
19 The first one will be used for training and the second for validation.
20 model: The model to train.
21 cbs: Optional `Sequence` of callbacks.
22 **learner_kwargs: Keyword arguments that will be internally passed to `Learner`.
23
24 # Returns
25 A fastai `Learner`.
26 """
27 cbs = [EfficientDetCallback()] + L(cbs)
28
29 learn = adapted_fastai_learner(
30 dls=dls,
31 model=model,
32 cbs=cbs,
33 loss_func=loss_fn,
34 **learner_kwargs,
35 )
36
37 # HACK: patch AvgLoss (in original, find_bs gives errors)
38 class PatchedAvgLoss(fastai.AvgLoss):
39 def accumulate(self, learn):
40 bs = len(learn.yb)
41 self.total += fastai.to_detach(learn.loss.mean()) * bs
42 self.count += bs
43
44 recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
45 recorder.loss = PatchedAvgLoss()
46
47 return learn
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/icevision/models/ross/efficientdet/fastai/learner.py b/icevision/models/ross/efficientdet/fastai/learner.py
--- a/icevision/models/ross/efficientdet/fastai/learner.py
+++ b/icevision/models/ross/efficientdet/fastai/learner.py
@@ -34,14 +34,14 @@
**learner_kwargs,
)
- # HACK: patch AvgLoss (in original, find_bs gives errors)
- class PatchedAvgLoss(fastai.AvgLoss):
+ # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)
+ class EffDetAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
- self.total += fastai.to_detach(learn.loss.mean()) * bs
+ bs = len(first(learn.yb)["cls"])
+ self.total += learn.to_detach(learn.loss.mean()) * bs
self.count += bs
recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]
- recorder.loss = PatchedAvgLoss()
+ recorder.loss = EffDetAvgLoss()
return learn
diff --git a/icevision/models/torchvision/fastai/learner.py b/icevision/models/torchvision/fastai/learner.py
--- a/icevision/models/torchvision/fastai/learner.py
+++ b/icevision/models/torchvision/fastai/learner.py
@@ -27,7 +27,7 @@
# HACK: patch AvgLoss (in original, find_bs gives errors)
class RCNNAvgLoss(fastai.AvgLoss):
def accumulate(self, learn):
- bs = len(learn.yb)
+ bs = len(first(learn.yb))
self.total += fastai.to_detach(learn.loss.mean()) * bs
self.count += bs
| {"golden_diff": "diff --git a/icevision/models/ross/efficientdet/fastai/learner.py b/icevision/models/ross/efficientdet/fastai/learner.py\n--- a/icevision/models/ross/efficientdet/fastai/learner.py\n+++ b/icevision/models/ross/efficientdet/fastai/learner.py\n@@ -34,14 +34,14 @@\n **learner_kwargs,\n )\n \n- # HACK: patch AvgLoss (in original, find_bs gives errors)\n- class PatchedAvgLoss(fastai.AvgLoss):\n+ # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)\n+ class EffDetAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n- bs = len(learn.yb)\n- self.total += fastai.to_detach(learn.loss.mean()) * bs\n+ bs = len(first(learn.yb)[\"cls\"])\n+ self.total += learn.to_detach(learn.loss.mean()) * bs\n self.count += bs\n \n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n- recorder.loss = PatchedAvgLoss()\n+ recorder.loss = EffDetAvgLoss()\n \n return learn\ndiff --git a/icevision/models/torchvision/fastai/learner.py b/icevision/models/torchvision/fastai/learner.py\n--- a/icevision/models/torchvision/fastai/learner.py\n+++ b/icevision/models/torchvision/fastai/learner.py\n@@ -27,7 +27,7 @@\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class RCNNAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n- bs = len(learn.yb)\n+ bs = len(first(learn.yb))\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n", "issue": "fastai efficientdet fails on learn.validate() with AttributeError: 'NoneType' object has no attribute 'shape'\n## \ud83d\udc1b Bug\r\nwhen trying to simply validate metrics for an efficientdet model with fastai \r\n```python\r\nKeyError: 'image_id'\r\n```\r\n```python\r\nAttributeError: 'NoneType' object has no attribute 'shape'\r\n```\r\n\r\nit fails when trying to read the batch size automatically: in `accumulate, find_bs`\r\n```python\r\nclass AvgLoss(Metric):\r\n \"Average the losses taking into account potential different batch sizes\"\r\n def reset(self): self.total,self.count = 0.,0\r\n def accumulate(self, learn):\r\n bs = find_bs(learn.yb)\r\n self.total += learn.to_detach(learn.loss.mean())*bs\r\n self.count += bs\r\n @property\r\n def value(self): return self.total/self.count if self.count != 0 else None\r\n @property\r\n def name(self): return \"loss\"\r\n```\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
Go to https://colab.research.google.com/drive/1i4aXYu4wIKA7eLUK86GwTm7lq7zku_oF?usp=sharing\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"RCNNCallback\", \"rcnn_learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.torchvision.loss_fn import loss_fn\nfrom icevision.models.torchvision.fastai.callbacks import *\n\n\ndef noop_watch(models, criterion=None, log=\"gradients\", log_freq=1000, idx=None):\n return []\n\n\ndef rcnn_learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **kwargs,\n):\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class RCNNAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(learn.yb)\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = RCNNAvgLoss()\n\n is_wandb = [cb for cb in learn.cbs if \"WandbCallback\" in str(type(cb))]\n if len(is_wandb) == 1:\n logger.warning(\"Wandb quickfix implemented, for more info check issue #527\")\n wandb.watch = noop_watch\n if len(is_wandb) > 1:\n raise ValueError(\n f\"It seems you are passing {len(is_wandb)} `WandbCallback` instances to the `learner`. Only 1 is allowed.\"\n )\n\n return learn\n", "path": "icevision/models/torchvision/fastai/learner.py"}, {"content": "__all__ = [\"learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.ross.efficientdet.loss_fn import loss_fn\nfrom icevision.models.ross.efficientdet.fastai.callbacks import EfficientDetCallback\n\n\ndef learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **learner_kwargs,\n):\n \"\"\"Fastai `Learner` adapted for EfficientDet.\n\n # Arguments\n dls: `Sequence` of `DataLoaders` passed to the `Learner`.\n The first one will be used for training and the second for validation.\n model: The model to train.\n cbs: Optional `Sequence` of callbacks.\n **learner_kwargs: Keyword arguments that will be internally passed to `Learner`.\n\n # Returns\n A fastai `Learner`.\n \"\"\"\n cbs = [EfficientDetCallback()] + L(cbs)\n\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **learner_kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class PatchedAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(learn.yb)\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = PatchedAvgLoss()\n\n return learn\n", "path": "icevision/models/ross/efficientdet/fastai/learner.py"}], "after_files": [{"content": "__all__ = [\"RCNNCallback\", \"rcnn_learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.torchvision.loss_fn import loss_fn\nfrom icevision.models.torchvision.fastai.callbacks import *\n\n\ndef noop_watch(models, criterion=None, log=\"gradients\", log_freq=1000, idx=None):\n return []\n\n\ndef rcnn_learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **kwargs,\n):\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs gives errors)\n class 
RCNNAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(first(learn.yb))\n self.total += fastai.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = RCNNAvgLoss()\n\n is_wandb = [cb for cb in learn.cbs if \"WandbCallback\" in str(type(cb))]\n if len(is_wandb) == 1:\n logger.warning(\"Wandb quickfix implemented, for more info check issue #527\")\n wandb.watch = noop_watch\n if len(is_wandb) > 1:\n raise ValueError(\n f\"It seems you are passing {len(is_wandb)} `WandbCallback` instances to the `learner`. Only 1 is allowed.\"\n )\n\n return learn\n", "path": "icevision/models/torchvision/fastai/learner.py"}, {"content": "__all__ = [\"learner\"]\n\nfrom icevision.imports import *\nfrom icevision.engines.fastai import *\nfrom icevision.models.ross.efficientdet.loss_fn import loss_fn\nfrom icevision.models.ross.efficientdet.fastai.callbacks import EfficientDetCallback\n\n\ndef learner(\n dls: List[Union[DataLoader, fastai.DataLoader]],\n model: nn.Module,\n cbs=None,\n **learner_kwargs,\n):\n \"\"\"Fastai `Learner` adapted for EfficientDet.\n\n # Arguments\n dls: `Sequence` of `DataLoaders` passed to the `Learner`.\n The first one will be used for training and the second for validation.\n model: The model to train.\n cbs: Optional `Sequence` of callbacks.\n **learner_kwargs: Keyword arguments that will be internally passed to `Learner`.\n\n # Returns\n A fastai `Learner`.\n \"\"\"\n cbs = [EfficientDetCallback()] + L(cbs)\n\n learn = adapted_fastai_learner(\n dls=dls,\n model=model,\n cbs=cbs,\n loss_func=loss_fn,\n **learner_kwargs,\n )\n\n # HACK: patch AvgLoss (in original, find_bs looks at the first element in dictionary and gives errors)\n class EffDetAvgLoss(fastai.AvgLoss):\n def accumulate(self, learn):\n bs = len(first(learn.yb)[\"cls\"])\n self.total += learn.to_detach(learn.loss.mean()) * bs\n self.count += bs\n\n recorder = [cb for cb in learn.cbs if isinstance(cb, fastai.Recorder)][0]\n recorder.loss = EffDetAvgLoss()\n\n return learn\n", "path": "icevision/models/ross/efficientdet/fastai/learner.py"}]} | 1,483 | 428 |
gh_patches_debug_24853 | rasdani/github-patches | git_diff | getnikola__nikola-2188 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use a FS cache for Jinja to speed it up a bit
http://jinja.pocoo.org/docs/dev/api/#bytecode-cache
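A minimal sketch of what that wiring looks like with jinja2's `FileSystemBytecodeCache` (paths here are placeholders; Nikola would point this at its own cache folder):

```python
import os
import jinja2

cache_dir = os.path.join('cache', 'jinja')  # placeholder location
os.makedirs(cache_dir, exist_ok=True)

env = jinja2.Environment(
    loader=jinja2.FileSystemLoader(['templates'], encoding='utf-8'),
    # Compiled templates are written to disk and reused on later builds
    # instead of being re-parsed from source every run.
    bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
)
```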
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/template/jinja.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2015 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27
28 """Jinja template handler."""
29
30 from __future__ import unicode_literals
31 import os
32 import json
33 from collections import deque
34 try:
35 import jinja2
36 from jinja2 import meta
37 except ImportError:
38 jinja2 = None # NOQA
39
40 from nikola.plugin_categories import TemplateSystem
41 from nikola.utils import makedirs, req_missing
42
43
44 class JinjaTemplates(TemplateSystem):
45 """Support for Jinja2 templates."""
46
47 name = "jinja"
48 lookup = None
49 dependency_cache = {}
50
51 def __init__(self):
52 """Initialize Jinja2 environment with extended set of filters."""
53 if jinja2 is None:
54 return
55 self.lookup = jinja2.Environment()
56 self.lookup.trim_blocks = True
57 self.lookup.lstrip_blocks = True
58 self.lookup.filters['tojson'] = json.dumps
59 self.lookup.globals['enumerate'] = enumerate
60 self.lookup.globals['isinstance'] = isinstance
61 self.lookup.globals['tuple'] = tuple
62
63 def set_directories(self, directories, cache_folder):
64 """Create a new template lookup with set directories."""
65 if jinja2 is None:
66 req_missing(['jinja2'], 'use this theme')
67 self.directories = directories
68 self.create_lookup()
69
70 def inject_directory(self, directory):
71 """Add a directory to the lookup and recreate it if it's not there yet."""
72 if directory not in self.directories:
73 self.directories.append(directory)
74 self.create_lookup()
75
76 def create_lookup(self):
77 """Create a template lookup."""
78 self.lookup.loader = jinja2.FileSystemLoader(self.directories,
79 encoding='utf-8')
80
81 def set_site(self, site):
82 """Set the Nikola site."""
83 self.site = site
84 self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])
85
86 def render_template(self, template_name, output_name, context):
87 """Render the template into output_name using context."""
88 if jinja2 is None:
89 req_missing(['jinja2'], 'use this theme')
90 template = self.lookup.get_template(template_name)
91 output = template.render(**context)
92 if output_name is not None:
93 makedirs(os.path.dirname(output_name))
94 with open(output_name, 'w+') as output:
95 output.write(output.encode('utf8'))
96 return output
97
98 def render_template_to_string(self, template, context):
99 """Render template to a string using context."""
100 return self.lookup.from_string(template).render(**context)
101
102 def template_deps(self, template_name):
103 """Generate list of dependencies for a template."""
104 # Cache the lists of dependencies for each template name.
105 if self.dependency_cache.get(template_name) is None:
106 # Use a breadth-first search to find all templates this one
107 # depends on.
108 queue = deque([template_name])
109 visited_templates = set([template_name])
110 deps = []
111 while len(queue) > 0:
112 curr = queue.popleft()
113 source, filename = self.lookup.loader.get_source(self.lookup,
114 curr)[:2]
115 deps.append(filename)
116 ast = self.lookup.parse(source)
117 dep_names = meta.find_referenced_templates(ast)
118 for dep_name in dep_names:
119 if (dep_name not in visited_templates and dep_name is not None):
120 visited_templates.add(dep_name)
121 queue.append(dep_name)
122 self.dependency_cache[template_name] = deps
123 return self.dependency_cache[template_name]
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/template/jinja.py b/nikola/plugins/template/jinja.py
--- a/nikola/plugins/template/jinja.py
+++ b/nikola/plugins/template/jinja.py
@@ -52,18 +52,21 @@
"""Initialize Jinja2 environment with extended set of filters."""
if jinja2 is None:
return
- self.lookup = jinja2.Environment()
+
+ def set_directories(self, directories, cache_folder):
+ """Create a new template lookup with set directories."""
+ if jinja2 is None:
+ req_missing(['jinja2'], 'use this theme')
+ cache_folder = os.path.join(cache_folder, 'jinja')
+ makedirs(cache_folder)
+ cache = jinja2.FileSystemBytecodeCache(cache_folder)
+ self.lookup = jinja2.Environment(bytecode_cache=cache)
self.lookup.trim_blocks = True
self.lookup.lstrip_blocks = True
self.lookup.filters['tojson'] = json.dumps
self.lookup.globals['enumerate'] = enumerate
self.lookup.globals['isinstance'] = isinstance
self.lookup.globals['tuple'] = tuple
-
- def set_directories(self, directories, cache_folder):
- """Create a new template lookup with set directories."""
- if jinja2 is None:
- req_missing(['jinja2'], 'use this theme')
self.directories = directories
self.create_lookup()
| {"golden_diff": "diff --git a/nikola/plugins/template/jinja.py b/nikola/plugins/template/jinja.py\n--- a/nikola/plugins/template/jinja.py\n+++ b/nikola/plugins/template/jinja.py\n@@ -52,18 +52,21 @@\n \"\"\"Initialize Jinja2 environment with extended set of filters.\"\"\"\n if jinja2 is None:\n return\n- self.lookup = jinja2.Environment()\n+\n+ def set_directories(self, directories, cache_folder):\n+ \"\"\"Create a new template lookup with set directories.\"\"\"\n+ if jinja2 is None:\n+ req_missing(['jinja2'], 'use this theme')\n+ cache_folder = os.path.join(cache_folder, 'jinja')\n+ makedirs(cache_folder)\n+ cache = jinja2.FileSystemBytecodeCache(cache_folder)\n+ self.lookup = jinja2.Environment(bytecode_cache=cache)\n self.lookup.trim_blocks = True\n self.lookup.lstrip_blocks = True\n self.lookup.filters['tojson'] = json.dumps\n self.lookup.globals['enumerate'] = enumerate\n self.lookup.globals['isinstance'] = isinstance\n self.lookup.globals['tuple'] = tuple\n-\n- def set_directories(self, directories, cache_folder):\n- \"\"\"Create a new template lookup with set directories.\"\"\"\n- if jinja2 is None:\n- req_missing(['jinja2'], 'use this theme')\n self.directories = directories\n self.create_lookup()\n", "issue": "Use a FS cache for Jinja to speed it up a bit\nhttp://jinja.pocoo.org/docs/dev/api/#bytecode-cache\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"Jinja template handler.\"\"\"\n\nfrom __future__ import unicode_literals\nimport os\nimport json\nfrom collections import deque\ntry:\n import jinja2\n from jinja2 import meta\nexcept ImportError:\n jinja2 = None # NOQA\n\nfrom nikola.plugin_categories import TemplateSystem\nfrom nikola.utils import makedirs, req_missing\n\n\nclass JinjaTemplates(TemplateSystem):\n \"\"\"Support for Jinja2 templates.\"\"\"\n\n name = \"jinja\"\n lookup = None\n dependency_cache = {}\n\n def __init__(self):\n \"\"\"Initialize Jinja2 environment with extended set of filters.\"\"\"\n if jinja2 is None:\n return\n self.lookup = jinja2.Environment()\n self.lookup.trim_blocks = True\n self.lookup.lstrip_blocks = True\n self.lookup.filters['tojson'] = json.dumps\n self.lookup.globals['enumerate'] = enumerate\n self.lookup.globals['isinstance'] = isinstance\n self.lookup.globals['tuple'] = tuple\n\n def set_directories(self, directories, cache_folder):\n \"\"\"Create a new template lookup with set directories.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n self.directories = directories\n self.create_lookup()\n\n def inject_directory(self, directory):\n \"\"\"Add a directory to the lookup and recreate it if it's not there yet.\"\"\"\n if directory not in self.directories:\n self.directories.append(directory)\n self.create_lookup()\n\n def create_lookup(self):\n \"\"\"Create a template lookup.\"\"\"\n self.lookup.loader = jinja2.FileSystemLoader(self.directories,\n encoding='utf-8')\n\n def set_site(self, site):\n \"\"\"Set the Nikola site.\"\"\"\n self.site = site\n self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])\n\n def render_template(self, template_name, output_name, context):\n \"\"\"Render the template into output_name using context.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n template = self.lookup.get_template(template_name)\n output = template.render(**context)\n if output_name is not None:\n makedirs(os.path.dirname(output_name))\n with open(output_name, 'w+') as output:\n output.write(output.encode('utf8'))\n return output\n\n def render_template_to_string(self, template, context):\n \"\"\"Render template to a string using context.\"\"\"\n return self.lookup.from_string(template).render(**context)\n\n def template_deps(self, template_name):\n \"\"\"Generate list of dependencies for a template.\"\"\"\n # Cache the lists of dependencies for each template name.\n if self.dependency_cache.get(template_name) is None:\n # Use a breadth-first search to find all templates this one\n # depends on.\n queue = deque([template_name])\n visited_templates = set([template_name])\n deps = []\n while len(queue) > 0:\n curr = queue.popleft()\n source, filename = self.lookup.loader.get_source(self.lookup,\n curr)[:2]\n deps.append(filename)\n ast = self.lookup.parse(source)\n dep_names = meta.find_referenced_templates(ast)\n for dep_name in dep_names:\n if (dep_name not in visited_templates and dep_name is not None):\n visited_templates.add(dep_name)\n queue.append(dep_name)\n self.dependency_cache[template_name] = deps\n return self.dependency_cache[template_name]\n", "path": "nikola/plugins/template/jinja.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# 
Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"Jinja template handler.\"\"\"\n\nfrom __future__ import unicode_literals\nimport os\nimport json\nfrom collections import deque\ntry:\n import jinja2\n from jinja2 import meta\nexcept ImportError:\n jinja2 = None # NOQA\n\nfrom nikola.plugin_categories import TemplateSystem\nfrom nikola.utils import makedirs, req_missing\n\n\nclass JinjaTemplates(TemplateSystem):\n \"\"\"Support for Jinja2 templates.\"\"\"\n\n name = \"jinja\"\n lookup = None\n dependency_cache = {}\n\n def __init__(self):\n \"\"\"Initialize Jinja2 environment with extended set of filters.\"\"\"\n if jinja2 is None:\n return\n\n def set_directories(self, directories, cache_folder):\n \"\"\"Create a new template lookup with set directories.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n cache_folder = os.path.join(cache_folder, 'jinja')\n makedirs(cache_folder)\n cache = jinja2.FileSystemBytecodeCache(cache_folder)\n self.lookup = jinja2.Environment(bytecode_cache=cache)\n self.lookup.trim_blocks = True\n self.lookup.lstrip_blocks = True\n self.lookup.filters['tojson'] = json.dumps\n self.lookup.globals['enumerate'] = enumerate\n self.lookup.globals['isinstance'] = isinstance\n self.lookup.globals['tuple'] = tuple\n self.directories = directories\n self.create_lookup()\n\n def inject_directory(self, directory):\n \"\"\"Add a directory to the lookup and recreate it if it's not there yet.\"\"\"\n if directory not in self.directories:\n self.directories.append(directory)\n self.create_lookup()\n\n def create_lookup(self):\n \"\"\"Create a template lookup.\"\"\"\n self.lookup.loader = jinja2.FileSystemLoader(self.directories,\n encoding='utf-8')\n\n def set_site(self, site):\n \"\"\"Set the Nikola site.\"\"\"\n self.site = site\n self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])\n\n def render_template(self, template_name, output_name, context):\n \"\"\"Render the template into output_name using context.\"\"\"\n if jinja2 is None:\n req_missing(['jinja2'], 'use this theme')\n template = self.lookup.get_template(template_name)\n output = template.render(**context)\n if output_name is not None:\n makedirs(os.path.dirname(output_name))\n with open(output_name, 'w+') as output:\n output.write(output.encode('utf8'))\n return output\n\n def render_template_to_string(self, template, context):\n \"\"\"Render template to a string using context.\"\"\"\n return 
self.lookup.from_string(template).render(**context)\n\n def template_deps(self, template_name):\n \"\"\"Generate list of dependencies for a template.\"\"\"\n # Cache the lists of dependencies for each template name.\n if self.dependency_cache.get(template_name) is None:\n # Use a breadth-first search to find all templates this one\n # depends on.\n queue = deque([template_name])\n visited_templates = set([template_name])\n deps = []\n while len(queue) > 0:\n curr = queue.popleft()\n source, filename = self.lookup.loader.get_source(self.lookup,\n curr)[:2]\n deps.append(filename)\n ast = self.lookup.parse(source)\n dep_names = meta.find_referenced_templates(ast)\n for dep_name in dep_names:\n if (dep_name not in visited_templates and dep_name is not None):\n visited_templates.add(dep_name)\n queue.append(dep_name)\n self.dependency_cache[template_name] = deps\n return self.dependency_cache[template_name]\n", "path": "nikola/plugins/template/jinja.py"}]} | 1,551 | 316 |
gh_patches_debug_9404 | rasdani/github-patches | git_diff | pyca__cryptography-3130 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrypt bounds checking
```
[11:23:58] <Alex_Gaynor> reaperhulk: what happens if you pass a non-even n?
[11:24:10] <Alex_Gaynor> Or a negative value for any of the params?
```
Presumably it will fail with an assertion error on return from the call to `EVP_PBE_scrypt`, but we shouldn't allow those types of errors.
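A minimal sketch of the bounds checks the constructor needs (the helper name is made up for illustration; the real change would inline these checks in `Scrypt.__init__`):

```python
def _check_scrypt_parameters(n, r, p):
    # n must be a power of two greater than 1; r and p must be positive.
    if n < 2 or (n & (n - 1)) != 0:
        raise ValueError("n must be greater than 1 and be a power of 2.")
    if r < 1:
        raise ValueError("r must be greater than or equal to 1.")
    if p < 1:
        raise ValueError("p must be greater than or equal to 1.")
```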
cc @Ayrx.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/kdf/scrypt.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from cryptography import utils
8 from cryptography.exceptions import (
9 AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
10 )
11 from cryptography.hazmat.backends.interfaces import ScryptBackend
12 from cryptography.hazmat.primitives import constant_time
13 from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
14
15
16 @utils.register_interface(KeyDerivationFunction)
17 class Scrypt(object):
18 def __init__(self, salt, length, n, r, p, backend):
19 if not isinstance(backend, ScryptBackend):
20 raise UnsupportedAlgorithm(
21 "Backend object does not implement ScryptBackend.",
22 _Reasons.BACKEND_MISSING_INTERFACE
23 )
24
25 self._length = length
26 if not isinstance(salt, bytes):
27 raise TypeError("salt must be bytes.")
28 self._used = False
29 self._salt = salt
30 self._n = n
31 self._r = r
32 self._p = p
33 self._backend = backend
34
35 def derive(self, key_material):
36 if self._used:
37 raise AlreadyFinalized("Scrypt instances can only be used once.")
38 self._used = True
39
40 if not isinstance(key_material, bytes):
41 raise TypeError("key_material must be bytes.")
42 return self._backend.derive_scrypt(
43 key_material, self._salt, self._length, self._n, self._r, self._p
44 )
45
46 def verify(self, key_material, expected_key):
47 derived_key = self.derive(key_material)
48 if not constant_time.bytes_eq(derived_key, expected_key):
49 raise InvalidKey("Keys do not match.")
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/kdf/scrypt.py b/src/cryptography/hazmat/primitives/kdf/scrypt.py
--- a/src/cryptography/hazmat/primitives/kdf/scrypt.py
+++ b/src/cryptography/hazmat/primitives/kdf/scrypt.py
@@ -25,6 +25,16 @@
self._length = length
if not isinstance(salt, bytes):
raise TypeError("salt must be bytes.")
+
+ if n < 2 or (n & (n - 1)) != 0:
+ raise ValueError("n must be greater than 1 and be a power of 2.")
+
+ if r < 1:
+ raise ValueError("r must be greater than or equal to 1.")
+
+ if p < 1:
+ raise ValueError("p must be greater than or equal to 1.")
+
self._used = False
self._salt = salt
self._n = n
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/kdf/scrypt.py b/src/cryptography/hazmat/primitives/kdf/scrypt.py\n--- a/src/cryptography/hazmat/primitives/kdf/scrypt.py\n+++ b/src/cryptography/hazmat/primitives/kdf/scrypt.py\n@@ -25,6 +25,16 @@\n self._length = length\n if not isinstance(salt, bytes):\n raise TypeError(\"salt must be bytes.\")\n+\n+ if n < 2 or (n & (n - 1)) != 0:\n+ raise ValueError(\"n must be greater than 1 and be a power of 2.\")\n+\n+ if r < 1:\n+ raise ValueError(\"r must be greater than or equal to 1.\")\n+\n+ if p < 1:\n+ raise ValueError(\"p must be greater than or equal to 1.\")\n+\n self._used = False\n self._salt = salt\n self._n = n\n", "issue": "scrypt bounds checking\n```\n[11:23:58] <Alex_Gaynor> reaperhulk: what happens if you pass a non-even n?\n[11:24:10] <Alex_Gaynor> Or a negative value for any of the params?\n```\n\nPresumably it will fail with an assertion error on return from the call to `EVP_PBE_scrypt`, but we shouldn't allow those types of errors.\n\ncc @Ayrx.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import ScryptBackend\nfrom cryptography.hazmat.primitives import constant_time\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass Scrypt(object):\n def __init__(self, salt, length, n, r, p, backend):\n if not isinstance(backend, ScryptBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement ScryptBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._length = length\n if not isinstance(salt, bytes):\n raise TypeError(\"salt must be bytes.\")\n self._used = False\n self._salt = salt\n self._n = n\n self._r = r\n self._p = p\n self._backend = backend\n\n def derive(self, key_material):\n if self._used:\n raise AlreadyFinalized(\"Scrypt instances can only be used once.\")\n self._used = True\n\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n return self._backend.derive_scrypt(\n key_material, self._salt, self._length, self._n, self._r, self._p\n )\n\n def verify(self, key_material, expected_key):\n derived_key = self.derive(key_material)\n if not constant_time.bytes_eq(derived_key, expected_key):\n raise InvalidKey(\"Keys do not match.\")\n", "path": "src/cryptography/hazmat/primitives/kdf/scrypt.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import ScryptBackend\nfrom cryptography.hazmat.primitives import constant_time\nfrom cryptography.hazmat.primitives.kdf import KeyDerivationFunction\n\n\[email protected]_interface(KeyDerivationFunction)\nclass Scrypt(object):\n def __init__(self, salt, length, n, r, p, backend):\n if not isinstance(backend, ScryptBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement ScryptBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n self._length = length\n if not isinstance(salt, bytes):\n raise TypeError(\"salt must be bytes.\")\n\n if n < 2 or (n & (n - 1)) != 0:\n raise ValueError(\"n must be greater than 1 and be a power of 2.\")\n\n if r < 1:\n raise ValueError(\"r must be greater than or equal to 1.\")\n\n if p < 1:\n raise ValueError(\"p must be greater than or equal to 1.\")\n\n self._used = False\n self._salt = salt\n self._n = n\n self._r = r\n self._p = p\n self._backend = backend\n\n def derive(self, key_material):\n if self._used:\n raise AlreadyFinalized(\"Scrypt instances can only be used once.\")\n self._used = True\n\n if not isinstance(key_material, bytes):\n raise TypeError(\"key_material must be bytes.\")\n return self._backend.derive_scrypt(\n key_material, self._salt, self._length, self._n, self._r, self._p\n )\n\n def verify(self, key_material, expected_key):\n derived_key = self.derive(key_material)\n if not constant_time.bytes_eq(derived_key, expected_key):\n raise InvalidKey(\"Keys do not match.\")\n", "path": "src/cryptography/hazmat/primitives/kdf/scrypt.py"}]} | 869 | 216 |
gh_patches_debug_32814 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-645 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
skill container is broken
When the intent skill became the intent class, the skill container wasn't updated to match the new structure and is currently not working.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/container.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import argparse
20 import sys
21 from os.path import dirname, exists, isdir
22
23 from mycroft.configuration import ConfigurationManager
24 from mycroft.messagebus.client.ws import WebsocketClient
25 from mycroft.skills.core import create_skill_descriptor, load_skill
26 from mycroft.skills.intent import create_skill as create_intent_skill
27 from mycroft.util.log import getLogger
28
29 __author__ = 'seanfitz'
30
31 LOG = getLogger("SkillContainer")
32
33
34 class SkillContainer(object):
35 def __init__(self, args):
36 params = self.__build_params(args)
37
38 if params.config:
39 ConfigurationManager.load_local([params.config])
40
41 if exists(params.lib) and isdir(params.lib):
42 sys.path.append(params.lib)
43
44 sys.path.append(params.dir)
45 self.dir = params.dir
46
47 self.enable_intent_skill = params.enable_intent_skill
48
49 self.__init_client(params)
50
51 @staticmethod
52 def __build_params(args):
53 parser = argparse.ArgumentParser()
54 parser.add_argument("--config", default="./mycroft.conf")
55 parser.add_argument("dir", nargs='?', default=dirname(__file__))
56 parser.add_argument("--lib", default="./lib")
57 parser.add_argument("--host", default=None)
58 parser.add_argument("--port", default=None)
59 parser.add_argument("--use-ssl", action='store_true', default=False)
60 parser.add_argument("--enable-intent-skill", action='store_true',
61 default=False)
62 return parser.parse_args(args)
63
64 def __init_client(self, params):
65 config = ConfigurationManager.get().get("websocket")
66
67 if not params.host:
68 params.host = config.get('host')
69 if not params.port:
70 params.port = config.get('port')
71
72 self.ws = WebsocketClient(host=params.host,
73 port=params.port,
74 ssl=params.use_ssl)
75
76 def load_skill(self):
77 if self.enable_intent_skill:
78 intent_skill = create_intent_skill()
79 intent_skill.bind(self.ws)
80 intent_skill.initialize()
81 skill_descriptor = create_skill_descriptor(self.dir)
82 self.skill = load_skill(skill_descriptor, self.ws)
83
84 def run(self):
85 try:
86 self.ws.on('message', LOG.debug)
87 self.ws.on('open', self.load_skill)
88 self.ws.on('error', LOG.error)
89 self.ws.run_forever()
90 except Exception as e:
91 LOG.error("Error: {0}".format(e))
92 self.stop()
93
94 def stop(self):
95 if self.skill:
96 self.skill.shutdown()
97
98
99 def main():
100 container = SkillContainer(sys.argv[1:])
101 try:
102 container.run()
103 except KeyboardInterrupt:
104 container.stop()
105 finally:
106 sys.exit()
107
108
109 if __name__ == "__main__":
110 main()
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/skills/container.py b/mycroft/skills/container.py
--- a/mycroft/skills/container.py
+++ b/mycroft/skills/container.py
@@ -23,7 +23,7 @@
from mycroft.configuration import ConfigurationManager
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.skills.core import create_skill_descriptor, load_skill
-from mycroft.skills.intent import create_skill as create_intent_skill
+from mycroft.skills.intent import Intent
from mycroft.util.log import getLogger
__author__ = 'seanfitz'
@@ -44,7 +44,7 @@
sys.path.append(params.dir)
self.dir = params.dir
- self.enable_intent_skill = params.enable_intent_skill
+ self.enable_intent = params.enable_intent
self.__init_client(params)
@@ -57,7 +57,7 @@
parser.add_argument("--host", default=None)
parser.add_argument("--port", default=None)
parser.add_argument("--use-ssl", action='store_true', default=False)
- parser.add_argument("--enable-intent-skill", action='store_true',
+ parser.add_argument("--enable-intent", action='store_true',
default=False)
return parser.parse_args(args)
@@ -74,10 +74,9 @@
ssl=params.use_ssl)
def load_skill(self):
- if self.enable_intent_skill:
- intent_skill = create_intent_skill()
- intent_skill.bind(self.ws)
- intent_skill.initialize()
+ if self.enable_intent:
+ Intent(self.ws)
+
skill_descriptor = create_skill_descriptor(self.dir)
self.skill = load_skill(skill_descriptor, self.ws)
| {"golden_diff": "diff --git a/mycroft/skills/container.py b/mycroft/skills/container.py\n--- a/mycroft/skills/container.py\n+++ b/mycroft/skills/container.py\n@@ -23,7 +23,7 @@\n from mycroft.configuration import ConfigurationManager\n from mycroft.messagebus.client.ws import WebsocketClient\n from mycroft.skills.core import create_skill_descriptor, load_skill\n-from mycroft.skills.intent import create_skill as create_intent_skill\n+from mycroft.skills.intent import Intent\n from mycroft.util.log import getLogger\n \n __author__ = 'seanfitz'\n@@ -44,7 +44,7 @@\n sys.path.append(params.dir)\n self.dir = params.dir\n \n- self.enable_intent_skill = params.enable_intent_skill\n+ self.enable_intent = params.enable_intent\n \n self.__init_client(params)\n \n@@ -57,7 +57,7 @@\n parser.add_argument(\"--host\", default=None)\n parser.add_argument(\"--port\", default=None)\n parser.add_argument(\"--use-ssl\", action='store_true', default=False)\n- parser.add_argument(\"--enable-intent-skill\", action='store_true',\n+ parser.add_argument(\"--enable-intent\", action='store_true',\n default=False)\n return parser.parse_args(args)\n \n@@ -74,10 +74,9 @@\n ssl=params.use_ssl)\n \n def load_skill(self):\n- if self.enable_intent_skill:\n- intent_skill = create_intent_skill()\n- intent_skill.bind(self.ws)\n- intent_skill.initialize()\n+ if self.enable_intent:\n+ Intent(self.ws)\n+\n skill_descriptor = create_skill_descriptor(self.dir)\n self.skill = load_skill(skill_descriptor, self.ws)\n", "issue": "skill container is broken\nWhen the intent skill became the intent class the skill container wasn't updated to match the new structure and is currently not working.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport argparse\nimport sys\nfrom os.path import dirname, exists, isdir\n\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.skills.core import create_skill_descriptor, load_skill\nfrom mycroft.skills.intent import create_skill as create_intent_skill\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nLOG = getLogger(\"SkillContainer\")\n\n\nclass SkillContainer(object):\n def __init__(self, args):\n params = self.__build_params(args)\n\n if params.config:\n ConfigurationManager.load_local([params.config])\n\n if exists(params.lib) and isdir(params.lib):\n sys.path.append(params.lib)\n\n sys.path.append(params.dir)\n self.dir = params.dir\n\n self.enable_intent_skill = params.enable_intent_skill\n\n self.__init_client(params)\n\n @staticmethod\n def __build_params(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", default=\"./mycroft.conf\")\n parser.add_argument(\"dir\", nargs='?', default=dirname(__file__))\n parser.add_argument(\"--lib\", default=\"./lib\")\n parser.add_argument(\"--host\", default=None)\n parser.add_argument(\"--port\", default=None)\n parser.add_argument(\"--use-ssl\", action='store_true', default=False)\n parser.add_argument(\"--enable-intent-skill\", action='store_true',\n default=False)\n return parser.parse_args(args)\n\n def __init_client(self, params):\n config = ConfigurationManager.get().get(\"websocket\")\n\n if not params.host:\n params.host = config.get('host')\n if not params.port:\n params.port = config.get('port')\n\n self.ws = WebsocketClient(host=params.host,\n port=params.port,\n ssl=params.use_ssl)\n\n def load_skill(self):\n if self.enable_intent_skill:\n intent_skill = create_intent_skill()\n intent_skill.bind(self.ws)\n intent_skill.initialize()\n skill_descriptor = create_skill_descriptor(self.dir)\n self.skill = load_skill(skill_descriptor, self.ws)\n\n def run(self):\n try:\n self.ws.on('message', LOG.debug)\n self.ws.on('open', self.load_skill)\n self.ws.on('error', LOG.error)\n self.ws.run_forever()\n except Exception as e:\n LOG.error(\"Error: {0}\".format(e))\n self.stop()\n\n def stop(self):\n if self.skill:\n self.skill.shutdown()\n\n\ndef main():\n container = SkillContainer(sys.argv[1:])\n try:\n container.run()\n except KeyboardInterrupt:\n container.stop()\n finally:\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/skills/container.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport argparse\nimport sys\nfrom os.path import dirname, exists, isdir\n\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.skills.core import create_skill_descriptor, load_skill\nfrom mycroft.skills.intent import Intent\nfrom mycroft.util.log import getLogger\n\n__author__ = 'seanfitz'\n\nLOG = getLogger(\"SkillContainer\")\n\n\nclass SkillContainer(object):\n def __init__(self, args):\n params = self.__build_params(args)\n\n if params.config:\n ConfigurationManager.load_local([params.config])\n\n if exists(params.lib) and isdir(params.lib):\n sys.path.append(params.lib)\n\n sys.path.append(params.dir)\n self.dir = params.dir\n\n self.enable_intent = params.enable_intent\n\n self.__init_client(params)\n\n @staticmethod\n def __build_params(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", default=\"./mycroft.conf\")\n parser.add_argument(\"dir\", nargs='?', default=dirname(__file__))\n parser.add_argument(\"--lib\", default=\"./lib\")\n parser.add_argument(\"--host\", default=None)\n parser.add_argument(\"--port\", default=None)\n parser.add_argument(\"--use-ssl\", action='store_true', default=False)\n parser.add_argument(\"--enable-intent\", action='store_true',\n default=False)\n return parser.parse_args(args)\n\n def __init_client(self, params):\n config = ConfigurationManager.get().get(\"websocket\")\n\n if not params.host:\n params.host = config.get('host')\n if not params.port:\n params.port = config.get('port')\n\n self.ws = WebsocketClient(host=params.host,\n port=params.port,\n ssl=params.use_ssl)\n\n def load_skill(self):\n if self.enable_intent:\n Intent(self.ws)\n\n skill_descriptor = create_skill_descriptor(self.dir)\n self.skill = load_skill(skill_descriptor, self.ws)\n\n def run(self):\n try:\n self.ws.on('message', LOG.debug)\n self.ws.on('open', self.load_skill)\n self.ws.on('error', LOG.error)\n self.ws.run_forever()\n except Exception as e:\n LOG.error(\"Error: {0}\".format(e))\n self.stop()\n\n def stop(self):\n if self.skill:\n self.skill.shutdown()\n\n\ndef main():\n container = SkillContainer(sys.argv[1:])\n try:\n container.run()\n except KeyboardInterrupt:\n container.stop()\n finally:\n sys.exit()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mycroft/skills/container.py"}]} | 1,256 | 362 |
gh_patches_debug_25137 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1072 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HDF5Exporter: throws error when curves aren't the same length
When trying to save data from a graph as an hdf5 file, the HDF5Exporter throws an error when you have multiple curves with differing lengths. This looks to be because the numpy.array(data).astype('double') can't handle lists with different lengths. Below is a traceback from the error. This occurs when trying to save data from the "Multiple curves" graph in the "Basic Plotting" example.
````
Traceback (most recent call last):
File "/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/Exporter.py", line 77, in fileSaveFinished
self.export(fileName=fileName, **self.fileDialog.opts)
File "/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/HDF5Exporter.py", line 55, in export
fdata = numpy.array(data).astype('double')
ValueError: setting an array element with a sequence.
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/exporters/HDF5Exporter.py`
Content:
```
1 from ..Qt import QtGui, QtCore
2 from .Exporter import Exporter
3 from ..parametertree import Parameter
4 from .. import PlotItem
5
6 import numpy
7 try:
8 import h5py
9 HAVE_HDF5 = True
10 except ImportError:
11 HAVE_HDF5 = False
12
13 __all__ = ['HDF5Exporter']
14
15
16 class HDF5Exporter(Exporter):
17 Name = "HDF5 Export: plot (x,y)"
18 windows = []
19 allowCopy = False
20
21 def __init__(self, item):
22 Exporter.__init__(self, item)
23 self.params = Parameter(name='params', type='group', children=[
24 {'name': 'Name', 'type': 'str', 'value': 'Export',},
25 {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},
26 ])
27
28 def parameters(self):
29 return self.params
30
31 def export(self, fileName=None):
32 if not HAVE_HDF5:
33 raise RuntimeError("This exporter requires the h5py package, "
34 "but it was not importable.")
35
36 if not isinstance(self.item, PlotItem):
37 raise Exception("Must have a PlotItem selected for HDF5 export.")
38
39 if fileName is None:
40 self.fileSaveDialog(filter=["*.h5", "*.hdf", "*.hd5"])
41 return
42 dsname = self.params['Name']
43 fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to "delete/overwrite"
44 data = []
45
46 appendAllX = self.params['columnMode'] == '(x,y) per plot'
47 #print dir(self.item.curves[0])
48 tlen = 0
49 for i, c in enumerate(self.item.curves):
50 d = c.getData()
51 if i > 0 and len(d[0]) != tlen:
52 raise ValueError ("HDF5 Export requires all curves in plot to have same length")
53 if appendAllX or i == 0:
54 data.append(d[0])
55 tlen = len(d[0])
56 data.append(d[1])
57
58
59 fdata = numpy.array(data).astype('double')
60 dset = fd.create_dataset(dsname, data=fdata)
61 fd.close()
62
63 if HAVE_HDF5:
64 HDF5Exporter.register()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/exporters/HDF5Exporter.py b/pyqtgraph/exporters/HDF5Exporter.py
--- a/pyqtgraph/exporters/HDF5Exporter.py
+++ b/pyqtgraph/exporters/HDF5Exporter.py
@@ -44,20 +44,27 @@
data = []
appendAllX = self.params['columnMode'] == '(x,y) per plot'
- #print dir(self.item.curves[0])
- tlen = 0
- for i, c in enumerate(self.item.curves):
- d = c.getData()
- if i > 0 and len(d[0]) != tlen:
- raise ValueError ("HDF5 Export requires all curves in plot to have same length")
- if appendAllX or i == 0:
- data.append(d[0])
- tlen = len(d[0])
- data.append(d[1])
+ # Check if the arrays are ragged
+ len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None
+ ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)
+ if ragged:
+ dgroup = fd.create_group(dsname)
+ for i, c in enumerate(self.item.curves):
+ d = c.getData()
+ fdata = numpy.array([d[0], d[1]]).astype('double')
+ cname = c.name() if c.name() is not None else str(i)
+ dset = dgroup.create_dataset(cname, data=fdata)
+ else:
+ for i, c in enumerate(self.item.curves):
+ d = c.getData()
+ if appendAllX or i == 0:
+ data.append(d[0])
+ data.append(d[1])
+
+ fdata = numpy.array(data).astype('double')
+ dset = fd.create_dataset(dsname, data=fdata)
- fdata = numpy.array(data).astype('double')
- dset = fd.create_dataset(dsname, data=fdata)
fd.close()
if HAVE_HDF5:
| {"golden_diff": "diff --git a/pyqtgraph/exporters/HDF5Exporter.py b/pyqtgraph/exporters/HDF5Exporter.py\n--- a/pyqtgraph/exporters/HDF5Exporter.py\n+++ b/pyqtgraph/exporters/HDF5Exporter.py\n@@ -44,20 +44,27 @@\n data = []\n \n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n- #print dir(self.item.curves[0])\n- tlen = 0\n- for i, c in enumerate(self.item.curves):\n- d = c.getData()\n- if i > 0 and len(d[0]) != tlen:\n- raise ValueError (\"HDF5 Export requires all curves in plot to have same length\")\n- if appendAllX or i == 0:\n- data.append(d[0])\n- tlen = len(d[0])\n- data.append(d[1])\n+ # Check if the arrays are ragged\n+ len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None\n+ ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)\n \n+ if ragged:\n+ dgroup = fd.create_group(dsname)\n+ for i, c in enumerate(self.item.curves):\n+ d = c.getData()\n+ fdata = numpy.array([d[0], d[1]]).astype('double')\n+ cname = c.name() if c.name() is not None else str(i)\n+ dset = dgroup.create_dataset(cname, data=fdata)\n+ else:\n+ for i, c in enumerate(self.item.curves):\n+ d = c.getData()\n+ if appendAllX or i == 0:\n+ data.append(d[0])\n+ data.append(d[1])\n+\n+ fdata = numpy.array(data).astype('double')\n+ dset = fd.create_dataset(dsname, data=fdata)\n \n- fdata = numpy.array(data).astype('double')\n- dset = fd.create_dataset(dsname, data=fdata)\n fd.close()\n \n if HAVE_HDF5:\n", "issue": "HDF5Exporter: throws error when curves aren't the same length\nWhen trying to save data from a graph as an hdf5 file, the HDF5Exporter throws and error when you have multiple curves with differing lengths. This looks to be because the numpy.array(data).astype('double') can't handle lists with different lengths. Below is a traceback from the error. This occurs when trying to save data from the \"Multiple curves\" graph in the \"Basic Plotting\" example.\r\n\r\n````\r\nTraceback (most recent call last):\r\n File \"/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/Exporter.py\", line 77, in fileSaveFinished\r\n self.export(fileName=fileName, **self.fileDialog.opts)\r\n File \"/home/jchrist/PycharmProjects/lib/python3.5/site-packages/pyqtgraph/exporters/HDF5Exporter.py\", line 55, in export\r\n fdata = numpy.array(data).astype('double')\r\nValueError: setting an array element with a sequence.\r\n````\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. 
import PlotItem\n\nimport numpy \ntry:\n import h5py\n HAVE_HDF5 = True\nexcept ImportError:\n HAVE_HDF5 = False\n \n__all__ = ['HDF5Exporter']\n\n \nclass HDF5Exporter(Exporter):\n Name = \"HDF5 Export: plot (x,y)\"\n windows = []\n allowCopy = False\n\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'Name', 'type': 'str', 'value': 'Export',},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n if not HAVE_HDF5:\n raise RuntimeError(\"This exporter requires the h5py package, \"\n \"but it was not importable.\")\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for HDF5 export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.h5\", \"*.hdf\", \"*.hd5\"])\n return\n dsname = self.params['Name']\n fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to \"delete/overwrite\"\n data = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n #print dir(self.item.curves[0])\n tlen = 0\n for i, c in enumerate(self.item.curves):\n d = c.getData()\n if i > 0 and len(d[0]) != tlen:\n raise ValueError (\"HDF5 Export requires all curves in plot to have same length\")\n if appendAllX or i == 0:\n data.append(d[0])\n tlen = len(d[0])\n data.append(d[1])\n\n\n fdata = numpy.array(data).astype('double')\n dset = fd.create_dataset(dsname, data=fdata)\n fd.close()\n\nif HAVE_HDF5:\n HDF5Exporter.register()\n", "path": "pyqtgraph/exporters/HDF5Exporter.py"}], "after_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. import PlotItem\n\nimport numpy \ntry:\n import h5py\n HAVE_HDF5 = True\nexcept ImportError:\n HAVE_HDF5 = False\n \n__all__ = ['HDF5Exporter']\n\n \nclass HDF5Exporter(Exporter):\n Name = \"HDF5 Export: plot (x,y)\"\n windows = []\n allowCopy = False\n\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'Name', 'type': 'str', 'value': 'Export',},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']},\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n if not HAVE_HDF5:\n raise RuntimeError(\"This exporter requires the h5py package, \"\n \"but it was not importable.\")\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for HDF5 export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.h5\", \"*.hdf\", \"*.hd5\"])\n return\n dsname = self.params['Name']\n fd = h5py.File(fileName, 'a') # forces append to file... 
'w' doesn't seem to \"delete/overwrite\"\n data = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n # Check if the arrays are ragged\n len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None\n ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)\n\n if ragged:\n dgroup = fd.create_group(dsname)\n for i, c in enumerate(self.item.curves):\n d = c.getData()\n fdata = numpy.array([d[0], d[1]]).astype('double')\n cname = c.name() if c.name() is not None else str(i)\n dset = dgroup.create_dataset(cname, data=fdata)\n else:\n for i, c in enumerate(self.item.curves):\n d = c.getData()\n if appendAllX or i == 0:\n data.append(d[0])\n data.append(d[1])\n\n fdata = numpy.array(data).astype('double')\n dset = fd.create_dataset(dsname, data=fdata)\n\n fd.close()\n\nif HAVE_HDF5:\n HDF5Exporter.register()\n", "path": "pyqtgraph/exporters/HDF5Exporter.py"}]} | 1,120 | 480 |
gh_patches_debug_25737 | rasdani/github-patches | git_diff | pytorch__vision-1301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
allow utils.save_image to write to a file object
Currently, the `torchvision.utils.save_image` function only allows saving to a file on disk.
https://github.com/pytorch/vision/blob/26c9630bc7add88685400e54457203816c06e750/torchvision/utils.py#L90-L105
It cannot write to file objects, as that requires the [`format` parameter](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save) to be passed, which `torchvision.utils.save_image` does not yet support.
Can you add this parameter to `torchvision.utils.save_image`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/utils.py`
Content:
```
1 import torch
2 import math
3 irange = range
4
5
6 def make_grid(tensor, nrow=8, padding=2,
7 normalize=False, range=None, scale_each=False, pad_value=0):
8 """Make a grid of images.
9
10 Args:
11 tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
12 or a list of images all of the same size.
13 nrow (int, optional): Number of images displayed in each row of the grid.
14 The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
15 padding (int, optional): amount of padding. Default: ``2``.
16 normalize (bool, optional): If True, shift the image to the range (0, 1),
17 by the min and max values specified by :attr:`range`. Default: ``False``.
18 range (tuple, optional): tuple (min, max) where min and max are numbers,
19 then these numbers are used to normalize the image. By default, min and max
20 are computed from the tensor.
21 scale_each (bool, optional): If ``True``, scale each image in the batch of
22 images separately rather than the (min, max) over all images. Default: ``False``.
23 pad_value (float, optional): Value for the padded pixels. Default: ``0``.
24
25 Example:
26 See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
27
28 """
29 if not (torch.is_tensor(tensor) or
30 (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
31 raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
32
33 # if list of tensors, convert to a 4D mini-batch Tensor
34 if isinstance(tensor, list):
35 tensor = torch.stack(tensor, dim=0)
36
37 if tensor.dim() == 2: # single image H x W
38 tensor = tensor.unsqueeze(0)
39 if tensor.dim() == 3: # single image
40 if tensor.size(0) == 1: # if single-channel, convert to 3-channel
41 tensor = torch.cat((tensor, tensor, tensor), 0)
42 tensor = tensor.unsqueeze(0)
43
44 if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
45 tensor = torch.cat((tensor, tensor, tensor), 1)
46
47 if normalize is True:
48 tensor = tensor.clone() # avoid modifying tensor in-place
49 if range is not None:
50 assert isinstance(range, tuple), \
51 "range has to be a tuple (min, max) if specified. min and max are numbers"
52
53 def norm_ip(img, min, max):
54 img.clamp_(min=min, max=max)
55 img.add_(-min).div_(max - min + 1e-5)
56
57 def norm_range(t, range):
58 if range is not None:
59 norm_ip(t, range[0], range[1])
60 else:
61 norm_ip(t, float(t.min()), float(t.max()))
62
63 if scale_each is True:
64 for t in tensor: # loop over mini-batch dimension
65 norm_range(t, range)
66 else:
67 norm_range(tensor, range)
68
69 if tensor.size(0) == 1:
70 return tensor.squeeze(0)
71
72 # make the mini-batch of images into a grid
73 nmaps = tensor.size(0)
74 xmaps = min(nrow, nmaps)
75 ymaps = int(math.ceil(float(nmaps) / xmaps))
76 height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
77 grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)
78 k = 0
79 for y in irange(ymaps):
80 for x in irange(xmaps):
81 if k >= nmaps:
82 break
83 grid.narrow(1, y * height + padding, height - padding)\
84 .narrow(2, x * width + padding, width - padding)\
85 .copy_(tensor[k])
86 k = k + 1
87 return grid
88
89
90 def save_image(tensor, filename, nrow=8, padding=2,
91 normalize=False, range=None, scale_each=False, pad_value=0):
92 """Save a given Tensor into an image file.
93
94 Args:
95 tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
96 saves the tensor as a grid of images by calling ``make_grid``.
97 **kwargs: Other arguments are documented in ``make_grid``.
98 """
99 from PIL import Image
100 grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
101 normalize=normalize, range=range, scale_each=scale_each)
102 # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
103 ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
104 im = Image.fromarray(ndarr)
105 im.save(filename)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/utils.py b/torchvision/utils.py
--- a/torchvision/utils.py
+++ b/torchvision/utils.py
@@ -87,13 +87,16 @@
return grid
-def save_image(tensor, filename, nrow=8, padding=2,
- normalize=False, range=None, scale_each=False, pad_value=0):
+def save_image(tensor, fp, nrow=8, padding=2,
+ normalize=False, range=None, scale_each=False, pad_value=0, format=None):
"""Save a given Tensor into an image file.
Args:
tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
saves the tensor as a grid of images by calling ``make_grid``.
+ fp - A filename(string) or file object
+ format(Optional): If omitted, the format to use is determined from the filename extension.
+ If a file object was used instead of a filename, this parameter should always be used.
**kwargs: Other arguments are documented in ``make_grid``.
"""
from PIL import Image
@@ -102,4 +105,4 @@
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
- im.save(filename)
+ im.save(fp, format=format)
| {"golden_diff": "diff --git a/torchvision/utils.py b/torchvision/utils.py\n--- a/torchvision/utils.py\n+++ b/torchvision/utils.py\n@@ -87,13 +87,16 @@\n return grid\n \n \n-def save_image(tensor, filename, nrow=8, padding=2,\n- normalize=False, range=None, scale_each=False, pad_value=0):\n+def save_image(tensor, fp, nrow=8, padding=2,\n+ normalize=False, range=None, scale_each=False, pad_value=0, format=None):\n \"\"\"Save a given Tensor into an image file.\n \n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n+ fp - A filename(string) or file object\n+ format(Optional): If omitted, the format to use is determined from the filename extension.\n+ If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n@@ -102,4 +105,4 @@\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n- im.save(filename)\n+ im.save(fp, format=format)\n", "issue": "allow utils.save_image to write to a file object\nCurrently, the `torchvision.utils.save_image` function only allows to save to a file on disk.\r\nhttps://github.com/pytorch/vision/blob/26c9630bc7add88685400e54457203816c06e750/torchvision/utils.py#L90-L105\r\n\r\nIt cannot write to file objects as that requires the [`format` parameter](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.save) to be passed which `torchvision.utils.save_image` does not support as of yet.\r\n\r\nCan you add this parameter to `torchvision.utils.save_image`?\n", "before_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. Default: ``False``.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. 
Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min + 1e-5)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, filename, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)\n", "path": "torchvision/utils.py"}], "after_files": [{"content": "import torch\nimport math\nirange = range\n\n\ndef make_grid(tensor, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0):\n \"\"\"Make a grid of images.\n\n Args:\n tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)\n or a list of images all of the same size.\n nrow (int, optional): Number of images displayed in each row of the grid.\n The final grid size is ``(B / nrow, nrow)``. Default: ``8``.\n padding (int, optional): amount of padding. Default: ``2``.\n normalize (bool, optional): If True, shift the image to the range (0, 1),\n by the min and max values specified by :attr:`range`. 
Default: ``False``.\n range (tuple, optional): tuple (min, max) where min and max are numbers,\n then these numbers are used to normalize the image. By default, min and max\n are computed from the tensor.\n scale_each (bool, optional): If ``True``, scale each image in the batch of\n images separately rather than the (min, max) over all images. Default: ``False``.\n pad_value (float, optional): Value for the padded pixels. Default: ``0``.\n\n Example:\n See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_\n\n \"\"\"\n if not (torch.is_tensor(tensor) or\n (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = torch.stack(tensor, dim=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.size(0) == 1: # if single-channel, convert to 3-channel\n tensor = torch.cat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n\n if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images\n tensor = torch.cat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n tensor = tensor.clone() # avoid modifying tensor in-place\n if range is not None:\n assert isinstance(range, tuple), \\\n \"range has to be a tuple (min, max) if specified. min and max are numbers\"\n\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min + 1e-5)\n\n def norm_range(t, range):\n if range is not None:\n norm_ip(t, range[0], range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, range)\n else:\n norm_range(tensor, range)\n\n if tensor.size(0) == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.size(0)\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)\n grid = tensor.new_full((3, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in irange(ymaps):\n for x in irange(xmaps):\n if k >= nmaps:\n break\n grid.narrow(1, y * height + padding, height - padding)\\\n .narrow(2, x * width + padding, width - padding)\\\n .copy_(tensor[k])\n k = k + 1\n return grid\n\n\ndef save_image(tensor, fp, nrow=8, padding=2,\n normalize=False, range=None, scale_each=False, pad_value=0, format=None):\n \"\"\"Save a given Tensor into an image file.\n\n Args:\n tensor (Tensor or list): Image to be saved. 
If given a mini-batch tensor,\n saves the tensor as a grid of images by calling ``make_grid``.\n fp - A filename(string) or file object\n format(Optional): If omitted, the format to use is determined from the filename extension.\n If a file object was used instead of a filename, this parameter should always be used.\n **kwargs: Other arguments are documented in ``make_grid``.\n \"\"\"\n from PIL import Image\n grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,\n normalize=normalize, range=range, scale_each=scale_each)\n # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer\n ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n", "path": "torchvision/utils.py"}]} | 1,814 | 357 |
gh_patches_debug_7270 | rasdani/github-patches | git_diff | pantsbuild__pants-18551 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`Failed to write to remote cache` when running `ruff`
**Describe the bug**
When running `pants lint --only=ruff ::`, I see many `WARN` logs like:
```
14:18:48.60 [WARN] Failed to write to remote cache (1 occurrences so far): Declared output directory path RelativePath("src/django_apps/preventive_care/consults/automatic_refer_out/__init__.py") in output digest Digest { hash: Fingerprint<b4c4a7e44c3d23b8eac247bfdd5ed723d054947915bbfb808e0ee16e4fa75430>, size_bytes: 77 } contained a file instead.
```
**Pants version**
`PANTS_SHA=254f69b3f111fb620206bbfe72b262520849484f` (on the 2.16.x branch)
**OS**
MacOS
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/lint/ruff/rules.py`
Content:
```
1 # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from pants.backend.python.lint.ruff.subsystem import Ruff, RuffFieldSet
7 from pants.backend.python.util_rules import pex
8 from pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess
9 from pants.core.goals.fix import FixResult, FixTargetsRequest
10 from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
11 from pants.core.util_rules.partitions import PartitionerType
12 from pants.engine.fs import Digest, MergeDigests
13 from pants.engine.process import FallibleProcessResult
14 from pants.engine.rules import Get, MultiGet, collect_rules, rule
15 from pants.util.logging import LogLevel
16 from pants.util.strutil import pluralize
17
18
19 class RuffRequest(FixTargetsRequest):
20 field_set_type = RuffFieldSet
21 tool_subsystem = Ruff
22 partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION
23
24
25 @rule(desc="Fix with ruff", level=LogLevel.DEBUG)
26 async def ruff_fix(request: RuffRequest.Batch, ruff: Ruff) -> FixResult:
27 ruff_pex_get = Get(VenvPex, PexRequest, ruff.to_pex_request())
28
29 config_files_get = Get(
30 ConfigFiles, ConfigFilesRequest, ruff.config_request(request.snapshot.dirs)
31 )
32
33 ruff_pex, config_files = await MultiGet(ruff_pex_get, config_files_get)
34
35 input_digest = await Get(
36 Digest,
37 MergeDigests((request.snapshot.digest, config_files.snapshot.digest)),
38 )
39
40 conf_args = [f"--config={ruff.config}"] if ruff.config else []
41
42 result = await Get(
43 FallibleProcessResult,
44 VenvPexProcess(
45 ruff_pex,
46 argv=("--fix", *conf_args, *ruff.args, *request.files),
47 input_digest=input_digest,
48 output_directories=request.files,
49 description=f"Run ruff on {pluralize(len(request.elements), 'file')}.",
50 level=LogLevel.DEBUG,
51 ),
52 )
53 return await FixResult.create(request, result, strip_chroot_path=True)
54
55
56 def rules():
57 return [*collect_rules(), *RuffRequest.rules(), *pex.rules()]
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/lint/ruff/rules.py b/src/python/pants/backend/python/lint/ruff/rules.py
--- a/src/python/pants/backend/python/lint/ruff/rules.py
+++ b/src/python/pants/backend/python/lint/ruff/rules.py
@@ -45,7 +45,7 @@
ruff_pex,
argv=("--fix", *conf_args, *ruff.args, *request.files),
input_digest=input_digest,
- output_directories=request.files,
+ output_files=request.files,
description=f"Run ruff on {pluralize(len(request.elements), 'file')}.",
level=LogLevel.DEBUG,
),
| {"golden_diff": "diff --git a/src/python/pants/backend/python/lint/ruff/rules.py b/src/python/pants/backend/python/lint/ruff/rules.py\n--- a/src/python/pants/backend/python/lint/ruff/rules.py\n+++ b/src/python/pants/backend/python/lint/ruff/rules.py\n@@ -45,7 +45,7 @@\n ruff_pex,\n argv=(\"--fix\", *conf_args, *ruff.args, *request.files),\n input_digest=input_digest,\n- output_directories=request.files,\n+ output_files=request.files,\n description=f\"Run ruff on {pluralize(len(request.elements), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n", "issue": "`Failed to write to remote cache` when running `ruff`\n**Describe the bug**\r\n\r\nWhen running `pants lint --only=ruff ::` I see many `WARN` logs like:\r\n```\r\n14:18:48.60 [WARN] Failed to write to remote cache (1 occurrences so far): Declared output directory path RelativePath(\"src/django_apps/preventive_care/consults/automatic_refer_out/__init__.py\") in output digest Digest { hash: Fingerprint<b4c4a7e44c3d23b8eac247bfdd5ed723d054947915bbfb808e0ee16e4fa75430>, size_bytes: 77 } contained a file instead.\r\n```\r\n\r\n**Pants version**\r\n\r\n`PANTS_SHA=254f69b3f111fb620206bbfe72b262520849484f` (on the 2.16.x branch)\r\n\r\n**OS**\r\n\r\nMacOS\n", "before_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom pants.backend.python.lint.ruff.subsystem import Ruff, RuffFieldSet\nfrom pants.backend.python.util_rules import pex\nfrom pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess\nfrom pants.core.goals.fix import FixResult, FixTargetsRequest\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import PartitionerType\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass RuffRequest(FixTargetsRequest):\n field_set_type = RuffFieldSet\n tool_subsystem = Ruff\n partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION\n\n\n@rule(desc=\"Fix with ruff\", level=LogLevel.DEBUG)\nasync def ruff_fix(request: RuffRequest.Batch, ruff: Ruff) -> FixResult:\n ruff_pex_get = Get(VenvPex, PexRequest, ruff.to_pex_request())\n\n config_files_get = Get(\n ConfigFiles, ConfigFilesRequest, ruff.config_request(request.snapshot.dirs)\n )\n\n ruff_pex, config_files = await MultiGet(ruff_pex_get, config_files_get)\n\n input_digest = await Get(\n Digest,\n MergeDigests((request.snapshot.digest, config_files.snapshot.digest)),\n )\n\n conf_args = [f\"--config={ruff.config}\"] if ruff.config else []\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n ruff_pex,\n argv=(\"--fix\", *conf_args, *ruff.args, *request.files),\n input_digest=input_digest,\n output_directories=request.files,\n description=f\"Run ruff on {pluralize(len(request.elements), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n return await FixResult.create(request, result, strip_chroot_path=True)\n\n\ndef rules():\n return [*collect_rules(), *RuffRequest.rules(), *pex.rules()]\n", "path": "src/python/pants/backend/python/lint/ruff/rules.py"}], "after_files": [{"content": "# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import 
annotations\n\nfrom pants.backend.python.lint.ruff.subsystem import Ruff, RuffFieldSet\nfrom pants.backend.python.util_rules import pex\nfrom pants.backend.python.util_rules.pex import PexRequest, VenvPex, VenvPexProcess\nfrom pants.core.goals.fix import FixResult, FixTargetsRequest\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import PartitionerType\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass RuffRequest(FixTargetsRequest):\n field_set_type = RuffFieldSet\n tool_subsystem = Ruff\n partitioner_type = PartitionerType.DEFAULT_SINGLE_PARTITION\n\n\n@rule(desc=\"Fix with ruff\", level=LogLevel.DEBUG)\nasync def ruff_fix(request: RuffRequest.Batch, ruff: Ruff) -> FixResult:\n ruff_pex_get = Get(VenvPex, PexRequest, ruff.to_pex_request())\n\n config_files_get = Get(\n ConfigFiles, ConfigFilesRequest, ruff.config_request(request.snapshot.dirs)\n )\n\n ruff_pex, config_files = await MultiGet(ruff_pex_get, config_files_get)\n\n input_digest = await Get(\n Digest,\n MergeDigests((request.snapshot.digest, config_files.snapshot.digest)),\n )\n\n conf_args = [f\"--config={ruff.config}\"] if ruff.config else []\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n ruff_pex,\n argv=(\"--fix\", *conf_args, *ruff.args, *request.files),\n input_digest=input_digest,\n output_files=request.files,\n description=f\"Run ruff on {pluralize(len(request.elements), 'file')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n return await FixResult.create(request, result, strip_chroot_path=True)\n\n\ndef rules():\n return [*collect_rules(), *RuffRequest.rules(), *pex.rules()]\n", "path": "src/python/pants/backend/python/lint/ruff/rules.py"}]} | 1,130 | 146 |
gh_patches_debug_3369 | rasdani/github-patches | git_diff | svthalia__concrexit-1925 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Annual documents does not show drop-down menu for year
### Describe the bug
When I try to add an annual document to the site, I am not able to select a year; the dropdown menu does not contain any entries.
### Steps to reproduce
1.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/documents/forms.py`
Content:
```
1 """The forms defined by the documents package."""
2 from django import forms
3 from django.contrib import admin
4 from django.forms import widgets
5 from django.utils import timezone
6
7 from documents import models
8 from utils.snippets import datetime_to_lectureyear
9
10
11 class DocumentFileInput(widgets.ClearableFileInput):
12 """Wrapper around Django's :class:`~django.forms.widgets.ClearableFileInput`.
13
14 It overrides the URL of the associated file when it is fetched.
15 """
16
17 template_name = "widgets/clearable_file_input.html"
18
19 def get_context(self, name, value, attrs):
20 context = super().get_context(name, value, attrs)
21 if hasattr(value, "url"):
22 doc = models.Document.objects.get(file=value)
23 context["document_id"] = doc.pk
24 context["language"] = "en"
25 return context
26
27
28 class MinutesForm(forms.ModelForm):
29 """Form that overrides the widgets for the files."""
30
31 class Meta:
32 model = models.Minutes
33 fields = (
34 "file",
35 "members_only",
36 )
37 widgets = {
38 "file": DocumentFileInput,
39 }
40
41
42 class AnnualDocumentForm(forms.ModelForm):
43 """Form that provides custom functionality for annual documents."""
44
45 class Meta:
46 model = models.AnnualDocument
47 fields = "__all__"
48 widgets = {
49 "year": forms.Select,
50 "file": DocumentFileInput,
51 }
52
53 @staticmethod
54 def _current_year():
55 """Get the current lecture year."""
56 return datetime_to_lectureyear(timezone.now())
57
58 @staticmethod
59 def _year_choices():
60 """Get the lecture years."""
61 current = datetime_to_lectureyear(timezone.now())
62 return [
63 (year, "{}-{}".format(year, year + 1))
64 for year in range(current + 1, 1989, -1)
65 ]
66
67
68 class AssociationDocumentForm(forms.ModelForm):
69 """Form that overrides the widgets for the files."""
70
71 class Meta:
72 model = models.AssociationDocument
73 fields = (
74 "name",
75 "file",
76 "members_only",
77 )
78 widgets = {
79 "file": DocumentFileInput,
80 }
81
82
83 class EventDocumentForm(forms.ModelForm):
84 """Form that overrides the widgets for the files."""
85
86 class Meta:
87 model = models.EventDocument
88 fields = (
89 "name",
90 "file",
91 "members_only",
92 "owner",
93 )
94 widgets = {
95 "file": DocumentFileInput,
96 }
97
98
99 class MiscellaneousDocumentForm(forms.ModelForm):
100 """Form that overrides the widgets for the files."""
101
102 class Meta:
103 model = models.MiscellaneousDocument
104 fields = (
105 "name",
106 "file",
107 "members_only",
108 )
109 widgets = {
110 "file": DocumentFileInput,
111 }
112
113
114 class GeneralMeetingForm(forms.ModelForm):
115 """Custom form for general meetings with a custom widget for documents."""
116
117 class Meta:
118 model = models.GeneralMeeting
119 fields = "__all__"
120 widgets = {
121 "documents": admin.widgets.FilteredSelectMultiple(
122 "documents", is_stacked=False
123 )
124 }
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/documents/forms.py b/website/documents/forms.py
--- a/website/documents/forms.py
+++ b/website/documents/forms.py
@@ -64,6 +64,10 @@
for year in range(current + 1, 1989, -1)
]
+ year = forms.TypedChoiceField(
+ coerce=int, choices=_year_choices.__func__, initial=_current_year.__func__
+ )
+
class AssociationDocumentForm(forms.ModelForm):
"""Form that overrides the widgets for the files."""
| {"golden_diff": "diff --git a/website/documents/forms.py b/website/documents/forms.py\n--- a/website/documents/forms.py\n+++ b/website/documents/forms.py\n@@ -64,6 +64,10 @@\n for year in range(current + 1, 1989, -1)\n ]\n \n+ year = forms.TypedChoiceField(\n+ coerce=int, choices=_year_choices.__func__, initial=_current_year.__func__\n+ )\n+\n \n class AssociationDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n", "issue": "Annual documents does not show drop-down menu for year\n### Describe the bug\r\n\r\nWhen I try to add an annual document to the site, I am not able to select a year, the dropdown menu does not contain any entries. \r\n\r\n### Steps to reproduce\r\n\r\n1. \n", "before_files": [{"content": "\"\"\"The forms defined by the documents package.\"\"\"\nfrom django import forms\nfrom django.contrib import admin\nfrom django.forms import widgets\nfrom django.utils import timezone\n\nfrom documents import models\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentFileInput(widgets.ClearableFileInput):\n \"\"\"Wrapper around Django's :class:`~django.forms.widgets.ClearableFileInput`.\n\n It overrides the URL of the associated file when it is fetched.\n \"\"\"\n\n template_name = \"widgets/clearable_file_input.html\"\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if hasattr(value, \"url\"):\n doc = models.Document.objects.get(file=value)\n context[\"document_id\"] = doc.pk\n context[\"language\"] = \"en\"\n return context\n\n\nclass MinutesForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.Minutes\n fields = (\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass AnnualDocumentForm(forms.ModelForm):\n \"\"\"Form that provides custom functionality for annual documents.\"\"\"\n\n class Meta:\n model = models.AnnualDocument\n fields = \"__all__\"\n widgets = {\n \"year\": forms.Select,\n \"file\": DocumentFileInput,\n }\n\n @staticmethod\n def _current_year():\n \"\"\"Get the current lecture year.\"\"\"\n return datetime_to_lectureyear(timezone.now())\n\n @staticmethod\n def _year_choices():\n \"\"\"Get the lecture years.\"\"\"\n current = datetime_to_lectureyear(timezone.now())\n return [\n (year, \"{}-{}\".format(year, year + 1))\n for year in range(current + 1, 1989, -1)\n ]\n\n\nclass AssociationDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.AssociationDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass EventDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.EventDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n \"owner\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass MiscellaneousDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.MiscellaneousDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass GeneralMeetingForm(forms.ModelForm):\n \"\"\"Custom form for general meetings with a custom widget for documents.\"\"\"\n\n class Meta:\n model = models.GeneralMeeting\n fields = \"__all__\"\n widgets = {\n \"documents\": 
admin.widgets.FilteredSelectMultiple(\n \"documents\", is_stacked=False\n )\n }\n", "path": "website/documents/forms.py"}], "after_files": [{"content": "\"\"\"The forms defined by the documents package.\"\"\"\nfrom django import forms\nfrom django.contrib import admin\nfrom django.forms import widgets\nfrom django.utils import timezone\n\nfrom documents import models\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentFileInput(widgets.ClearableFileInput):\n \"\"\"Wrapper around Django's :class:`~django.forms.widgets.ClearableFileInput`.\n\n It overrides the URL of the associated file when it is fetched.\n \"\"\"\n\n template_name = \"widgets/clearable_file_input.html\"\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n if hasattr(value, \"url\"):\n doc = models.Document.objects.get(file=value)\n context[\"document_id\"] = doc.pk\n context[\"language\"] = \"en\"\n return context\n\n\nclass MinutesForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.Minutes\n fields = (\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass AnnualDocumentForm(forms.ModelForm):\n \"\"\"Form that provides custom functionality for annual documents.\"\"\"\n\n class Meta:\n model = models.AnnualDocument\n fields = \"__all__\"\n widgets = {\n \"year\": forms.Select,\n \"file\": DocumentFileInput,\n }\n\n @staticmethod\n def _current_year():\n \"\"\"Get the current lecture year.\"\"\"\n return datetime_to_lectureyear(timezone.now())\n\n @staticmethod\n def _year_choices():\n \"\"\"Get the lecture years.\"\"\"\n current = datetime_to_lectureyear(timezone.now())\n return [\n (year, \"{}-{}\".format(year, year + 1))\n for year in range(current + 1, 1989, -1)\n ]\n\n year = forms.TypedChoiceField(\n coerce=int, choices=_year_choices.__func__, initial=_current_year.__func__\n )\n\n\nclass AssociationDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.AssociationDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass EventDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.EventDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n \"owner\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass MiscellaneousDocumentForm(forms.ModelForm):\n \"\"\"Form that overrides the widgets for the files.\"\"\"\n\n class Meta:\n model = models.MiscellaneousDocument\n fields = (\n \"name\",\n \"file\",\n \"members_only\",\n )\n widgets = {\n \"file\": DocumentFileInput,\n }\n\n\nclass GeneralMeetingForm(forms.ModelForm):\n \"\"\"Custom form for general meetings with a custom widget for documents.\"\"\"\n\n class Meta:\n model = models.GeneralMeeting\n fields = \"__all__\"\n widgets = {\n \"documents\": admin.widgets.FilteredSelectMultiple(\n \"documents\", is_stacked=False\n )\n }\n", "path": "website/documents/forms.py"}]} | 1,250 | 118 |
gh_patches_debug_28224 | rasdani/github-patches | git_diff | Parsl__parsl-435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
confusing error message with simple configs
Using this config:
```
config = Config(executors=[ThreadPoolExecutor()])
```
parsl startup logs this message to the logger:
```
$ ./c.py
2018-07-11 08:04:42 parsl.config:66 [DEBUG] Checkpoint period only has an effect with checkpoint_mode='periodic'
```
This appears to be in config creation, as it happens even without passing that config to create a DataFlowKernel.
This might cause user confusion that something is broken/misconfigured.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/config.py`
Content:
```
1 import logging
2
3 from libsubmit.utils import RepresentationMixin
4 from parsl.executors.threads import ThreadPoolExecutor
5 from parsl.dataflow.error import ConfigurationError
6
7 logger = logging.getLogger(__name__)
8
9
10 class Config(RepresentationMixin):
11 """
12 Specification of Parsl configuration options.
13
14 Parameters
15 ----------
16 executors : list of ParslExecutor, optional
17 List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,
18 :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default
19 is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].
20 app_cache : bool, optional
21 Enable app caching. Default is True.
22 checkpoint_files : list of str, optional
23 List of paths to checkpoint files. Default is None.
24 checkpoint_mode : str, optional
25 Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to
26 `None`, checkpointing will be disabled. Default is None.
27 checkpoint_period : str, optional
28 Time interval (in "HH:MM:SS") at which to checkpoint completed tasks. Only has an effect if
29 `checkpoint_mode='periodic'`.
30 data_management_max_threads : int, optional
31 Maximum number of threads to allocate for the data manager to use for managing input and output transfers.
32 Default is 10.
33 lazy_errors : bool, optional
34 If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will
35 be raised as soon as the task returns. Default is True.
36 retries : int, optional
37 Set the number of retries in case of failure. Default is 0.
38 run_dir : str, optional
39 Path to run directory. Default is 'runinfo'.
40 strategy : str, optional
41 Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic
42 scaling will be disabled. Default is 'simple'.
43 usage_tracking : bool, optional
44 Enable usage tracking. Default is True.
45 """
46 def __init__(self,
47 executors=None,
48 app_cache=True,
49 checkpoint_files=None,
50 checkpoint_mode=None,
51 checkpoint_period="00:30:00",
52 data_management_max_threads=10,
53 lazy_errors=True,
54 retries=0,
55 run_dir='runinfo',
56 strategy='simple',
57 db_logger_config=None,
58 usage_tracking=True):
59 if executors is None:
60 executors = [ThreadPoolExecutor()]
61 self.executors = executors
62 self.app_cache = app_cache
63 self.checkpoint_files = checkpoint_files
64 self.checkpoint_mode = checkpoint_mode
65 if checkpoint_mode is not 'periodic' and checkpoint_period is not None:
66 logger.debug("Checkpoint period only has an effect with checkpoint_mode='periodic'")
67 self.checkpoint_period = checkpoint_period
68 self.data_management_max_threads = data_management_max_threads
69 self.lazy_errors = lazy_errors
70 self.retries = retries
71 self.run_dir = run_dir
72 self.strategy = strategy
73 self.usage_tracking = usage_tracking
74 self.db_logger_config = db_logger_config
75
76 @property
77 def executors(self):
78 return self._executors
79
80 @executors.setter
81 def executors(self, executors):
82 labels = [e.label for e in executors]
83 duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]
84 if len(duplicates) > 0:
85 raise ConfigurationError('Executors must have unique labels ({})'.format(
86 ', '.join(['label={}'.format(repr(d)) for d in duplicates])))
87 self._executors = executors
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/config.py b/parsl/config.py
--- a/parsl/config.py
+++ b/parsl/config.py
@@ -48,7 +48,7 @@
app_cache=True,
checkpoint_files=None,
checkpoint_mode=None,
- checkpoint_period="00:30:00",
+ checkpoint_period=None,
data_management_max_threads=10,
lazy_errors=True,
retries=0,
@@ -62,8 +62,17 @@
self.app_cache = app_cache
self.checkpoint_files = checkpoint_files
self.checkpoint_mode = checkpoint_mode
- if checkpoint_mode is not 'periodic' and checkpoint_period is not None:
- logger.debug("Checkpoint period only has an effect with checkpoint_mode='periodic'")
+ if checkpoint_period is not None:
+ if checkpoint_mode is None:
+ logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(
+ checkpoint_period)
+ )
+ elif checkpoint_mode is not 'periodic':
+ logger.debug("Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'".format(
+ checkpoint_period)
+ )
+ if checkpoint_mode is 'periodic' and checkpoint_period is None:
+ checkpoint_period = "00:30:00"
self.checkpoint_period = checkpoint_period
self.data_management_max_threads = data_management_max_threads
self.lazy_errors = lazy_errors
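To make the behavioural change concrete, here is a standalone sketch of the guard the patch introduces, with the Parsl plumbing stripped away: the warning is only emitted when a period was explicitly supplied, and a default period is filled in only when periodic checkpointing was actually requested. It is illustrative logic, not Parsl's real `Config` class, and it uses `!=` where the patch uses the `is not` identity comparison (equality is the safer test for strings; the observable behaviour here is the same).

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("config-sketch")


def resolve_checkpoint_period(checkpoint_mode=None, checkpoint_period=None):
    """Standalone sketch of the checkpoint_period handling added by the patch."""
    if checkpoint_period is not None:
        if checkpoint_mode is None:
            logger.debug(
                "checkpoint_period=%s has no effect because checkpoint_mode=None",
                checkpoint_period,
            )
        elif checkpoint_mode != "periodic":
            logger.debug(
                "checkpoint_period=%s only has an effect with checkpoint_mode='periodic'",
                checkpoint_period,
            )
    if checkpoint_mode == "periodic" and checkpoint_period is None:
        checkpoint_period = "00:30:00"  # same default the patch falls back to
    return checkpoint_period


print(resolve_checkpoint_period())                              # None, no log output
print(resolve_checkpoint_period(checkpoint_mode="periodic"))    # "00:30:00"
print(resolve_checkpoint_period(checkpoint_period="00:10:00"))  # logs a debug warning
```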
| {"golden_diff": "diff --git a/parsl/config.py b/parsl/config.py\n--- a/parsl/config.py\n+++ b/parsl/config.py\n@@ -48,7 +48,7 @@\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n- checkpoint_period=\"00:30:00\",\n+ checkpoint_period=None,\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n@@ -62,8 +62,17 @@\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n- if checkpoint_mode is not 'periodic' and checkpoint_period is not None:\n- logger.debug(\"Checkpoint period only has an effect with checkpoint_mode='periodic'\")\n+ if checkpoint_period is not None:\n+ if checkpoint_mode is None:\n+ logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(\n+ checkpoint_period)\n+ )\n+ elif checkpoint_mode is not 'periodic':\n+ logger.debug(\"Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'\".format(\n+ checkpoint_period)\n+ )\n+ if checkpoint_mode is 'periodic' and checkpoint_period is None:\n+ checkpoint_period = \"00:30:00\"\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n", "issue": "confusing error message with simple configs\nUsing this config:\r\n\r\n```\r\nconfig = Config(executors=[ThreadPoolExecutor()])\r\n```\r\n\r\nparsl startup logs this message to the logger:\r\n\r\n```\r\n$ ./c.py \r\n2018-07-11 08:04:42 parsl.config:66 [DEBUG] Checkpoint period only has an effect with checkpoint_mode='periodic'\r\n```\r\n\r\nThis appears to be in config creation, as it happens even without passing that config to create a DataFlowKernel.\r\n\r\nThis might cause user confusion that something is broken/misconfigured.\r\n\n", "before_files": [{"content": "import logging\n\nfrom libsubmit.utils import RepresentationMixin\nfrom parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.dataflow.error import ConfigurationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(RepresentationMixin):\n \"\"\"\n Specification of Parsl configuration options.\n\n Parameters\n ----------\n executors : list of ParslExecutor, optional\n List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,\n :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default\n is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].\n app_cache : bool, optional\n Enable app caching. Default is True.\n checkpoint_files : list of str, optional\n List of paths to checkpoint files. Default is None.\n checkpoint_mode : str, optional\n Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to\n `None`, checkpointing will be disabled. Default is None.\n checkpoint_period : str, optional\n Time interval (in \"HH:MM:SS\") at which to checkpoint completed tasks. Only has an effect if\n `checkpoint_mode='periodic'`.\n data_management_max_threads : int, optional\n Maximum number of threads to allocate for the data manager to use for managing input and output transfers.\n Default is 10.\n lazy_errors : bool, optional\n If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will\n be raised as soon as the task returns. Default is True.\n retries : int, optional\n Set the number of retries in case of failure. Default is 0.\n run_dir : str, optional\n Path to run directory. 
Default is 'runinfo'.\n strategy : str, optional\n Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic\n scaling will be disabled. Default is 'simple'.\n usage_tracking : bool, optional\n Enable usage tracking. Default is True.\n \"\"\"\n def __init__(self,\n executors=None,\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n checkpoint_period=\"00:30:00\",\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n run_dir='runinfo',\n strategy='simple',\n db_logger_config=None,\n usage_tracking=True):\n if executors is None:\n executors = [ThreadPoolExecutor()]\n self.executors = executors\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n if checkpoint_mode is not 'periodic' and checkpoint_period is not None:\n logger.debug(\"Checkpoint period only has an effect with checkpoint_mode='periodic'\")\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n self.retries = retries\n self.run_dir = run_dir\n self.strategy = strategy\n self.usage_tracking = usage_tracking\n self.db_logger_config = db_logger_config\n\n @property\n def executors(self):\n return self._executors\n\n @executors.setter\n def executors(self, executors):\n labels = [e.label for e in executors]\n duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]\n if len(duplicates) > 0:\n raise ConfigurationError('Executors must have unique labels ({})'.format(\n ', '.join(['label={}'.format(repr(d)) for d in duplicates])))\n self._executors = executors\n", "path": "parsl/config.py"}], "after_files": [{"content": "import logging\n\nfrom libsubmit.utils import RepresentationMixin\nfrom parsl.executors.threads import ThreadPoolExecutor\nfrom parsl.dataflow.error import ConfigurationError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Config(RepresentationMixin):\n \"\"\"\n Specification of Parsl configuration options.\n\n Parameters\n ----------\n executors : list of ParslExecutor, optional\n List of executor instances to use. Possible executors include :class:`~parsl.executors.threads.ThreadPoolExecutor`,\n :class:`~parsl.executors.ipp.IPyParallelExecutor`, or :class:`~parsl.executors.swift_t.TurbineExecutor`. Default\n is [:class:`~parsl.executors.threads.ThreadPoolExecutor()`].\n app_cache : bool, optional\n Enable app caching. Default is True.\n checkpoint_files : list of str, optional\n List of paths to checkpoint files. Default is None.\n checkpoint_mode : str, optional\n Checkpoint mode to use, can be 'dfk_exit', 'task_exit', or 'periodic'. If set to\n `None`, checkpointing will be disabled. Default is None.\n checkpoint_period : str, optional\n Time interval (in \"HH:MM:SS\") at which to checkpoint completed tasks. Only has an effect if\n `checkpoint_mode='periodic'`.\n data_management_max_threads : int, optional\n Maximum number of threads to allocate for the data manager to use for managing input and output transfers.\n Default is 10.\n lazy_errors : bool, optional\n If True, errors from task failures will not be raised until `future.result()` is called. Otherwise, they will\n be raised as soon as the task returns. Default is True.\n retries : int, optional\n Set the number of retries in case of failure. Default is 0.\n run_dir : str, optional\n Path to run directory. 
Default is 'runinfo'.\n strategy : str, optional\n Strategy to use for scaling resources according to workflow needs. Can be 'simple' or `None`. If `None`, dynamic\n scaling will be disabled. Default is 'simple'.\n usage_tracking : bool, optional\n Enable usage tracking. Default is True.\n \"\"\"\n def __init__(self,\n executors=None,\n app_cache=True,\n checkpoint_files=None,\n checkpoint_mode=None,\n checkpoint_period=None,\n data_management_max_threads=10,\n lazy_errors=True,\n retries=0,\n run_dir='runinfo',\n strategy='simple',\n db_logger_config=None,\n usage_tracking=True):\n if executors is None:\n executors = [ThreadPoolExecutor()]\n self.executors = executors\n self.app_cache = app_cache\n self.checkpoint_files = checkpoint_files\n self.checkpoint_mode = checkpoint_mode\n if checkpoint_period is not None:\n if checkpoint_mode is None:\n logger.debug('The requested `checkpoint_period={}` will have no effect because `checkpoint_mode=None`'.format(\n checkpoint_period)\n )\n elif checkpoint_mode is not 'periodic':\n logger.debug(\"Requested checkpoint period of {} only has an effect with checkpoint_mode='periodic'\".format(\n checkpoint_period)\n )\n if checkpoint_mode is 'periodic' and checkpoint_period is None:\n checkpoint_period = \"00:30:00\"\n self.checkpoint_period = checkpoint_period\n self.data_management_max_threads = data_management_max_threads\n self.lazy_errors = lazy_errors\n self.retries = retries\n self.run_dir = run_dir\n self.strategy = strategy\n self.usage_tracking = usage_tracking\n self.db_logger_config = db_logger_config\n\n @property\n def executors(self):\n return self._executors\n\n @executors.setter\n def executors(self, executors):\n labels = [e.label for e in executors]\n duplicates = [e for n, e in enumerate(labels) if e in labels[:n]]\n if len(duplicates) > 0:\n raise ConfigurationError('Executors must have unique labels ({})'.format(\n ', '.join(['label={}'.format(repr(d)) for d in duplicates])))\n self._executors = executors\n", "path": "parsl/config.py"}]} | 1,374 | 326 |
gh_patches_debug_576 | rasdani/github-patches | git_diff | pex-tool__pex-975 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.10
On the docket:
+ [x] Improve Pex packaging. (#961)
+ [x] Make the interpreter cache deterministic. (#960)
+ [x] Fix deprecation warning for `rU` mode (#956)
+ [x] Fix runtime resolve error message generation. (#955)
+ [x] Kill dead code. (#954)
+ [x] Many Pex tests fail under Python 2.7 in CI #967
+ [x] Add a `--local` mode for packaging the Pex PEX. #971
+ [x] Split Pex resolve API. (#970)
+ [x] Can't run PEX file when a dependency's wheel includes a build tag #964
+ [x] Expose network configuration in pex options. #803
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.9'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.9'
+__version__ = '2.1.10'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.9'\n+__version__ = '2.1.10'\n", "issue": "Release 2.1.10\nOn the docket:\r\n+ [x] Improve Pex packaging. (#961)\r\n+ [x] Make the interpreter cache deterministic. (#960)\r\n+ [x] Fix deprecation warning for `rU` mode (#956)\r\n+ [x] Fix runtime resolve error message generation. (#955)\r\n+ [x] Kill dead code. (#954)\r\n+ [x] Many Pex tests fail under Python 2.7 in CI #967\r\n+ [x] Add a `--local` mode for packaging the Pex PEX. #971\r\n+ [x] Split Pex resolve API. (#970)\r\n+ [x] Can't run PEX file when a dependency's wheel includes a build tag #964\r\n+ [x] Expose network configuration in pex options. #803\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.9'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.10'\n", "path": "pex/version.py"}]} | 501 | 95 |
gh_patches_debug_2746 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3351 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BERT classifier doesn't work under distributed_train
The default tokenization is re, I think it's building the dictionary along the way...
**Logs**
Please paste the command line output:
```
ValueError: Dictionaries should be pre-built before distributed train.
ValueError: Dictionaries should be pre-built before distributed train.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/agents/bert_ranker/bert_dictionary.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 from parlai.core.dict import DictionaryAgent
7 from parlai.zoo.bert.build import download
8 from parlai.utils.misc import warn_once
9
10 try:
11 from pytorch_pretrained_bert import BertTokenizer
12 except ImportError:
13 raise ImportError(
14 'BERT rankers needs pytorch-pretrained-BERT installed. \n '
15 'pip install pytorch-pretrained-bert'
16 )
17 from .helpers import VOCAB_PATH
18
19 import os
20
21
22 class BertDictionaryAgent(DictionaryAgent):
23 """
24 Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.
25 """
26
27 def __init__(self, opt):
28 super().__init__(opt)
29 # initialize from vocab path
30 warn_once(
31 'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'
32 )
33 download(opt['datapath'])
34 vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)
35 self.tokenizer = BertTokenizer.from_pretrained(vocab_path)
36
37 self.start_token = '[CLS]'
38 self.end_token = '[SEP]'
39 self.null_token = '[PAD]'
40 self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[
41 0
42 ] # should be 101
43 self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[
44 0
45 ] # should be 102
46 self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0] # should be 0
47 # set tok2ind for special tokens
48 self.tok2ind[self.start_token] = self.start_idx
49 self.tok2ind[self.end_token] = self.end_idx
50 self.tok2ind[self.null_token] = self.pad_idx
51 # set ind2tok for special tokens
52 self.ind2tok[self.start_idx] = self.start_token
53 self.ind2tok[self.end_idx] = self.end_token
54 self.ind2tok[self.pad_idx] = self.null_token
55
56 def txt2vec(self, text, vec_type=list):
57 tokens = self.tokenizer.tokenize(text)
58 tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)
59 return tokens_id
60
61 def vec2txt(self, vec):
62 if not isinstance(vec, list):
63 # assume tensor
64 idxs = [idx.item() for idx in vec.cpu()]
65 else:
66 idxs = vec
67 toks = self.tokenizer.convert_ids_to_tokens(idxs)
68 return ' '.join(toks)
69
70 def act(self):
71 return {}
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/agents/bert_ranker/bert_dictionary.py b/parlai/agents/bert_ranker/bert_dictionary.py
--- a/parlai/agents/bert_ranker/bert_dictionary.py
+++ b/parlai/agents/bert_ranker/bert_dictionary.py
@@ -24,6 +24,9 @@
Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.
"""
+ def is_prebuit(self):
+ return True
+
def __init__(self, opt):
super().__init__(opt)
# initialize from vocab path
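The patch resolves the failure by having the BERT dictionary report itself as pre-built (the method is spelled `is_prebuit` in the diff above). The sketch below illustrates the general pattern with hypothetical class and function names — it is not ParlAI's actual agent or its real distributed-training check, only an assumption about how such a guard typically looks.

```python
class DictionaryAgentSketch:
    """Hypothetical stand-in for a dictionary agent base class."""

    def is_prebuilt(self):
        # Default: the vocabulary is built by scanning training data,
        # which must not happen once distributed workers are running.
        return False


class BertDictionarySketch(DictionaryAgentSketch):
    def is_prebuilt(self):
        # The wordpiece vocabulary ships with the pretrained BERT model,
        # so there is nothing to build at training time.
        return True


def assert_safe_for_distributed(dictionary):
    # Sketch of the kind of guard that produces
    # "Dictionaries should be pre-built before distributed train."
    if not dictionary.is_prebuilt():
        raise ValueError("Dictionaries should be pre-built before distributed train.")


assert_safe_for_distributed(BertDictionarySketch())  # passes silently
```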
| {"golden_diff": "diff --git a/parlai/agents/bert_ranker/bert_dictionary.py b/parlai/agents/bert_ranker/bert_dictionary.py\n--- a/parlai/agents/bert_ranker/bert_dictionary.py\n+++ b/parlai/agents/bert_ranker/bert_dictionary.py\n@@ -24,6 +24,9 @@\n Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.\n \"\"\"\n \n+ def is_prebuit(self):\n+ return True\n+\n def __init__(self, opt):\n super().__init__(opt)\n # initialize from vocab path\n", "issue": "BERT classifier doesn't work under distributed_train\nThe default tokenization is re, I think it's building the dictionary along the way...\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\nValueError: Dictionaries should be pre-built before distributed train.\r\nValueError: Dictionaries should be pre-built before distributed train.\r\n```\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.zoo.bert.build import download\nfrom parlai.utils.misc import warn_once\n\ntry:\n from pytorch_pretrained_bert import BertTokenizer\nexcept ImportError:\n raise ImportError(\n 'BERT rankers needs pytorch-pretrained-BERT installed. \\n '\n 'pip install pytorch-pretrained-bert'\n )\nfrom .helpers import VOCAB_PATH\n\nimport os\n\n\nclass BertDictionaryAgent(DictionaryAgent):\n \"\"\"\n Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.\n \"\"\"\n\n def __init__(self, opt):\n super().__init__(opt)\n # initialize from vocab path\n warn_once(\n 'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'\n )\n download(opt['datapath'])\n vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)\n self.tokenizer = BertTokenizer.from_pretrained(vocab_path)\n\n self.start_token = '[CLS]'\n self.end_token = '[SEP]'\n self.null_token = '[PAD]'\n self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[\n 0\n ] # should be 101\n self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[\n 0\n ] # should be 102\n self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0] # should be 0\n # set tok2ind for special tokens\n self.tok2ind[self.start_token] = self.start_idx\n self.tok2ind[self.end_token] = self.end_idx\n self.tok2ind[self.null_token] = self.pad_idx\n # set ind2tok for special tokens\n self.ind2tok[self.start_idx] = self.start_token\n self.ind2tok[self.end_idx] = self.end_token\n self.ind2tok[self.pad_idx] = self.null_token\n\n def txt2vec(self, text, vec_type=list):\n tokens = self.tokenizer.tokenize(text)\n tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)\n return tokens_id\n\n def vec2txt(self, vec):\n if not isinstance(vec, list):\n # assume tensor\n idxs = [idx.item() for idx in vec.cpu()]\n else:\n idxs = vec\n toks = self.tokenizer.convert_ids_to_tokens(idxs)\n return ' '.join(toks)\n\n def act(self):\n return {}\n", "path": "parlai/agents/bert_ranker/bert_dictionary.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.zoo.bert.build import download\nfrom parlai.utils.misc import warn_once\n\ntry:\n from pytorch_pretrained_bert import BertTokenizer\nexcept ImportError:\n raise ImportError(\n 'BERT rankers needs pytorch-pretrained-BERT installed. \\n '\n 'pip install pytorch-pretrained-bert'\n )\nfrom .helpers import VOCAB_PATH\n\nimport os\n\n\nclass BertDictionaryAgent(DictionaryAgent):\n \"\"\"\n Allow to use the Torch Agent with the wordpiece dictionary of Hugging Face.\n \"\"\"\n\n def is_prebuit(self):\n return True\n\n def __init__(self, opt):\n super().__init__(opt)\n # initialize from vocab path\n warn_once(\n 'WARNING: BERT uses a Hugging Face tokenizer; ParlAI dictionary args are ignored'\n )\n download(opt['datapath'])\n vocab_path = os.path.join(opt['datapath'], 'models', 'bert_models', VOCAB_PATH)\n self.tokenizer = BertTokenizer.from_pretrained(vocab_path)\n\n self.start_token = '[CLS]'\n self.end_token = '[SEP]'\n self.null_token = '[PAD]'\n self.start_idx = self.tokenizer.convert_tokens_to_ids(['[CLS]'])[\n 0\n ] # should be 101\n self.end_idx = self.tokenizer.convert_tokens_to_ids(['[SEP]'])[\n 0\n ] # should be 102\n self.pad_idx = self.tokenizer.convert_tokens_to_ids(['[PAD]'])[0] # should be 0\n # set tok2ind for special tokens\n self.tok2ind[self.start_token] = self.start_idx\n self.tok2ind[self.end_token] = self.end_idx\n self.tok2ind[self.null_token] = self.pad_idx\n # set ind2tok for special tokens\n self.ind2tok[self.start_idx] = self.start_token\n self.ind2tok[self.end_idx] = self.end_token\n self.ind2tok[self.pad_idx] = self.null_token\n\n def txt2vec(self, text, vec_type=list):\n tokens = self.tokenizer.tokenize(text)\n tokens_id = self.tokenizer.convert_tokens_to_ids(tokens)\n return tokens_id\n\n def vec2txt(self, vec):\n if not isinstance(vec, list):\n # assume tensor\n idxs = [idx.item() for idx in vec.cpu()]\n else:\n idxs = vec\n toks = self.tokenizer.convert_ids_to_tokens(idxs)\n return ' '.join(toks)\n\n def act(self):\n return {}\n", "path": "parlai/agents/bert_ranker/bert_dictionary.py"}]} | 1,079 | 138 |
gh_patches_debug_17982 | rasdani/github-patches | git_diff | apluslms__a-plus-820 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add separate button for Aalto login (aside Haka login)
In a recent change (pull request #804 ) Aalto (or other local organization) login button was replaced by general Haka login, that directs user to organization selector, to allow login also using other Haka organization accounts than Aalto. This was an intermediate step (due to difficulties in shibboleth configuration), and a separate button for local organization login should now be added back, as majority of students would be using it, and usually some additional guidance may need to be added, e.g. for open university students to use local organization account instead of other organization.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aplus/urls.py`
Content:
```
1 from django.conf import settings
2 from django.conf.urls import url, include
3 from django.contrib import admin
4 from django.contrib.sitemaps.views import sitemap
5 from django.urls import path
6
7 import shibboleth_login.urls
8 import social_django.urls
9 import userprofile.urls, userprofile.sitemaps
10 import course.urls, course.long_urls, course.sitemaps
11 import exercise.urls, exercise.sitemaps
12 import edit_course.urls
13 import deviations.urls
14 import notification.urls
15 import external_services.urls
16 import news.urls
17 import diploma.urls
18 import apps.urls
19 import api.urls_v2
20 import redirect_old_urls.urls
21
22
23 admin.autodiscover()
24
25 all_sitemaps = {
26 **course.sitemaps.all_sitemaps,
27 **exercise.sitemaps.all_sitemaps,
28 **userprofile.sitemaps.all_sitemaps,
29 }
30
31 # Pay attention to the order the URL patterns will be matched!
32 urlpatterns = [
33 url(r'^admin/', admin.site.urls),
34 url(r'^shibboleth/', include(shibboleth_login.urls)),
35 url('', include(social_django.urls, namespace='social')),
36 url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md
37 url(r'^accounts/', include(userprofile.urls)),
38 url(r'^diploma/', include(diploma.urls)),
39 url(r'^', include(redirect_old_urls.urls)),
40 url(r'^', include(apps.urls)),
41 url(r'^', include(news.urls)),
42 url(r'^', include(external_services.urls)),
43 url(r'^', include(course.long_urls)),
44 url(r'^', include(deviations.urls)),
45 url(r'^', include(edit_course.urls)),
46 url(r'^', include(notification.urls)),
47 url(r'^', include(exercise.urls)),
48 url(r'^', include(course.urls)),
49 path('sitemap.xml', sitemap, { 'sitemaps': all_sitemaps },
50 name='django.contrib.sitemaps.views.sitemap'),
51 ]
52
53 if settings.DEBUG:
54 import django.views.static
55 urlpatterns.insert(0, url(r'^media/(?P<path>.*)$', django.views.static.serve,
56 { 'document_root': settings.MEDIA_ROOT }))
57
```
Path: `shibboleth_login/urls.py`
Content:
```
1 from django.conf import settings
2 from django.conf.urls import url
3
4 from . import views
5
6
7 urlpatterns = [
8 url(r'^login/$', views.login, name="shibboleth-login"),
9 ]
10
11 if settings.DEBUG:
12 urlpatterns.append(url(r'^debug/$', views.debug))
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aplus/urls.py b/aplus/urls.py
--- a/aplus/urls.py
+++ b/aplus/urls.py
@@ -31,7 +31,7 @@
# Pay attention to the order the URL patterns will be matched!
urlpatterns = [
url(r'^admin/', admin.site.urls),
- url(r'^shibboleth/', include(shibboleth_login.urls)),
+ url(r'^', include(shibboleth_login.urls)),
url('', include(social_django.urls, namespace='social')),
url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md
url(r'^accounts/', include(userprofile.urls)),
diff --git a/shibboleth_login/urls.py b/shibboleth_login/urls.py
--- a/shibboleth_login/urls.py
+++ b/shibboleth_login/urls.py
@@ -5,7 +5,8 @@
urlpatterns = [
- url(r'^login/$', views.login, name="shibboleth-login"),
+ url(r'^shibboleth/login/$', views.login, name="shibboleth-login"),
+ url(r'^Shibboleth.sso/haka_login$', views.login, name="haka-login"),
]
if settings.DEBUG:
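After this patch, the same Shibboleth login view is reachable under two route names, so a login page can offer a dedicated local-organization button next to the generic Haka one. The snippet below is a self-contained sketch of that routing (a stub view, `path()` instead of the regex `url()` used in the project, and minimal settings); it only demonstrates that both names reverse to the intended paths and is not the project's actual configuration.

```python
import django
from django.conf import settings

if not settings.configured:
    settings.configure(ROOT_URLCONF=__name__, ALLOWED_HOSTS=["*"])
    django.setup()

from django.http import HttpResponse
from django.urls import path, reverse


def login(request):
    # Stub for shibboleth_login.views.login; the real view redirects to the IdP.
    return HttpResponse("redirect to identity provider")


urlpatterns = [
    path("shibboleth/login/", login, name="shibboleth-login"),
    path("Shibboleth.sso/haka_login", login, name="haka-login"),
]

print(reverse("shibboleth-login"))  # /shibboleth/login/
print(reverse("haka-login"))        # /Shibboleth.sso/haka_login
```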
| {"golden_diff": "diff --git a/aplus/urls.py b/aplus/urls.py\n--- a/aplus/urls.py\n+++ b/aplus/urls.py\n@@ -31,7 +31,7 @@\n # Pay attention to the order the URL patterns will be matched!\n urlpatterns = [\n url(r'^admin/', admin.site.urls),\n- url(r'^shibboleth/', include(shibboleth_login.urls)),\n+ url(r'^', include(shibboleth_login.urls)),\n url('', include(social_django.urls, namespace='social')),\n url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md\n url(r'^accounts/', include(userprofile.urls)),\ndiff --git a/shibboleth_login/urls.py b/shibboleth_login/urls.py\n--- a/shibboleth_login/urls.py\n+++ b/shibboleth_login/urls.py\n@@ -5,7 +5,8 @@\n \n \n urlpatterns = [\n- url(r'^login/$', views.login, name=\"shibboleth-login\"),\n+ url(r'^shibboleth/login/$', views.login, name=\"shibboleth-login\"),\n+ url(r'^Shibboleth.sso/haka_login$', views.login, name=\"haka-login\"),\n ]\n \n if settings.DEBUG:\n", "issue": "Add separate button for Aalto login (aside Haka login)\nIn a recent change (pull request #804 ) Aalto (or other local organization) login button was replaced by general Haka login, that directs user to organization selector, to allow login also using other Haka organization accounts than Aalto. This was an intermediate step (due to difficulties in shibboleth configuration), and a separate button for local organization login should now be added back, as majority of students would be using it, and usually some additional guidance may need to be added, e.g. for open university students to use local organization account instead of other organization.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import path\n\nimport shibboleth_login.urls\nimport social_django.urls\nimport userprofile.urls, userprofile.sitemaps\nimport course.urls, course.long_urls, course.sitemaps\nimport exercise.urls, exercise.sitemaps\nimport edit_course.urls\nimport deviations.urls\nimport notification.urls\nimport external_services.urls\nimport news.urls\nimport diploma.urls\nimport apps.urls\nimport api.urls_v2\nimport redirect_old_urls.urls\n\n\nadmin.autodiscover()\n\nall_sitemaps = {\n **course.sitemaps.all_sitemaps,\n **exercise.sitemaps.all_sitemaps,\n **userprofile.sitemaps.all_sitemaps,\n}\n\n# Pay attention to the order the URL patterns will be matched!\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^shibboleth/', include(shibboleth_login.urls)),\n url('', include(social_django.urls, namespace='social')),\n url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? 
doc/api_versioning.md\n url(r'^accounts/', include(userprofile.urls)),\n url(r'^diploma/', include(diploma.urls)),\n url(r'^', include(redirect_old_urls.urls)),\n url(r'^', include(apps.urls)),\n url(r'^', include(news.urls)),\n url(r'^', include(external_services.urls)),\n url(r'^', include(course.long_urls)),\n url(r'^', include(deviations.urls)),\n url(r'^', include(edit_course.urls)),\n url(r'^', include(notification.urls)),\n url(r'^', include(exercise.urls)),\n url(r'^', include(course.urls)),\n path('sitemap.xml', sitemap, { 'sitemaps': all_sitemaps },\n name='django.contrib.sitemaps.views.sitemap'),\n]\n\nif settings.DEBUG:\n import django.views.static\n urlpatterns.insert(0, url(r'^media/(?P<path>.*)$', django.views.static.serve,\n { 'document_root': settings.MEDIA_ROOT }))\n", "path": "aplus/urls.py"}, {"content": "from django.conf import settings\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^login/$', views.login, name=\"shibboleth-login\"),\n]\n\nif settings.DEBUG:\n urlpatterns.append(url(r'^debug/$', views.debug))\n", "path": "shibboleth_login/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import path\n\nimport shibboleth_login.urls\nimport social_django.urls\nimport userprofile.urls, userprofile.sitemaps\nimport course.urls, course.long_urls, course.sitemaps\nimport exercise.urls, exercise.sitemaps\nimport edit_course.urls\nimport deviations.urls\nimport notification.urls\nimport external_services.urls\nimport news.urls\nimport diploma.urls\nimport apps.urls\nimport api.urls_v2\nimport redirect_old_urls.urls\n\n\nadmin.autodiscover()\n\nall_sitemaps = {\n **course.sitemaps.all_sitemaps,\n **exercise.sitemaps.all_sitemaps,\n **userprofile.sitemaps.all_sitemaps,\n}\n\n# Pay attention to the order the URL patterns will be matched!\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^', include(shibboleth_login.urls)),\n url('', include(social_django.urls, namespace='social')),\n url(r'^api/v(?P<version>(2))/', include(api.urls_v2)), # why version in url? doc/api_versioning.md\n url(r'^accounts/', include(userprofile.urls)),\n url(r'^diploma/', include(diploma.urls)),\n url(r'^', include(redirect_old_urls.urls)),\n url(r'^', include(apps.urls)),\n url(r'^', include(news.urls)),\n url(r'^', include(external_services.urls)),\n url(r'^', include(course.long_urls)),\n url(r'^', include(deviations.urls)),\n url(r'^', include(edit_course.urls)),\n url(r'^', include(notification.urls)),\n url(r'^', include(exercise.urls)),\n url(r'^', include(course.urls)),\n path('sitemap.xml', sitemap, { 'sitemaps': all_sitemaps },\n name='django.contrib.sitemaps.views.sitemap'),\n]\n\nif settings.DEBUG:\n import django.views.static\n urlpatterns.insert(0, url(r'^media/(?P<path>.*)$', django.views.static.serve,\n { 'document_root': settings.MEDIA_ROOT }))\n", "path": "aplus/urls.py"}, {"content": "from django.conf import settings\nfrom django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^shibboleth/login/$', views.login, name=\"shibboleth-login\"),\n url(r'^Shibboleth.sso/haka_login$', views.login, name=\"haka-login\"),\n]\n\nif settings.DEBUG:\n urlpatterns.append(url(r'^debug/$', views.debug))\n", "path": "shibboleth_login/urls.py"}]} | 1,059 | 288 |
gh_patches_debug_2281 | rasdani/github-patches | git_diff | rasterio__rasterio-438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
polygonize method no longer in use / use shapes instead?
Hi,
just going through the examples folders.
If I ran the python interpreter on `poligonize.py` it complains about a missing attribute:
```
(venv)[] ~/Progetti/pyhton-usergroup/rasterio <master> ✗ python examples/polygonize.py
sys:1: FutureWarning: read_band() is deprecated and will be removed by Rasterio 1.0. Please use read() instead.
Traceback (most recent call last):
File "examples/polygonize.py", line 10, in <module>
list(ftrz.polygonize(image)))
AttributeError: 'module' object has no attribute 'polygonize'
```
But, going trough the commits history w/ @ligthyear it seems like instead of poligonize, one should use shapes.
If I ran it in fact with the new method it works smoothly
```
pprint.pprint(
list(ftrz.shapes(image)))
```
Cheers
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/polygonize.py`
Content:
```
1 import pprint
2
3 import rasterio
4 import rasterio._features as ftrz
5
6 with rasterio.open('box.png') as src:
7 image = src.read_band(1)
8
9 pprint.pprint(
10 list(ftrz.polygonize(image)))
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/polygonize.py b/examples/polygonize.py
--- a/examples/polygonize.py
+++ b/examples/polygonize.py
@@ -1,10 +1,12 @@
import pprint
import rasterio
-import rasterio._features as ftrz
+from rasterio.features import shapes
-with rasterio.open('box.png') as src:
- image = src.read_band(1)
+with rasterio.open('tests/data/shade.tif') as src:
+ image = src.read(1)
+# Print the first two shapes...
pprint.pprint(
- list(ftrz.polygonize(image)))
+ list(shapes(image))[:2]
+)
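Since the fix swaps the removed private helper for the public `rasterio.features.shapes`, a quick illustration of what that generator yields may help. The sketch below uses a small synthetic array instead of the sample image, so it does not depend on the repository's test data (it does assume `rasterio` and `numpy` are installed).

```python
import numpy as np
from rasterio.features import shapes

# A tiny raster: background 0 with a 2x2 block of 1s.
image = np.zeros((4, 4), dtype="uint8")
image[1:3, 1:3] = 1

for geometry, value in shapes(image):
    # Each item is a (GeoJSON-like mapping, pixel value) pair.
    print(value, geometry["type"], geometry["coordinates"][0][:3])
```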
| {"golden_diff": "diff --git a/examples/polygonize.py b/examples/polygonize.py\n--- a/examples/polygonize.py\n+++ b/examples/polygonize.py\n@@ -1,10 +1,12 @@\n import pprint\n \n import rasterio\n-import rasterio._features as ftrz\n+from rasterio.features import shapes\n \n-with rasterio.open('box.png') as src:\n- image = src.read_band(1)\n+with rasterio.open('tests/data/shade.tif') as src:\n+ image = src.read(1)\n \n+# Print the first two shapes...\n pprint.pprint(\n- list(ftrz.polygonize(image)))\n+ list(shapes(image))[:2]\n+)\n", "issue": "polygonize method no longer in use / use shapes instead?\nHi,\n\njust going through the examples folders.\nIf I ran the python interpreter on `poligonize.py` it complains about a missing attribute:\n\n```\n(venv)[] ~/Progetti/pyhton-usergroup/rasterio <master> \u2717 python examples/polygonize.py\nsys:1: FutureWarning: read_band() is deprecated and will be removed by Rasterio 1.0. Please use read() instead.\nTraceback (most recent call last):\n File \"examples/polygonize.py\", line 10, in <module>\n list(ftrz.polygonize(image)))\nAttributeError: 'module' object has no attribute 'polygonize'\n```\n\nBut, going trough the commits history w/ @ligthyear it seems like instead of poligonize, one should use shapes. \nIf I ran it in fact with the new method it works smoothly\n\n```\npprint.pprint(\n list(ftrz.shapes(image)))\n```\n\nCheers\n\n", "before_files": [{"content": "import pprint\n\nimport rasterio\nimport rasterio._features as ftrz\n\nwith rasterio.open('box.png') as src:\n image = src.read_band(1)\n\npprint.pprint(\n list(ftrz.polygonize(image)))\n", "path": "examples/polygonize.py"}], "after_files": [{"content": "import pprint\n\nimport rasterio\nfrom rasterio.features import shapes\n\nwith rasterio.open('tests/data/shade.tif') as src:\n image = src.read(1)\n\n# Print the first two shapes...\npprint.pprint(\n list(shapes(image))[:2]\n)\n", "path": "examples/polygonize.py"}]} | 540 | 149 |
gh_patches_debug_6609 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1249 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Undefined variable issue in 0.77.6
With the latest release of #1213, this [line](https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/types/nodes.py#L40) raises a KeyError when a variable is used in a query, but not defined in the payload under the key `variables`.
This used to work, and, I guess, is ok when the variable/argument is optional.
I do not know what the specs are, meaning if variable definition should be enforced all the way. If it's the way to go, maybe there should be a validation step/graceful way to handle such a case.
If not, `return info.variable_values.get(name)` should do the trick.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/types/nodes.py`
Content:
```
1 """
2 Abstraction layer for graphql-core field nodes.
3
4 Call `convert_sections` on a list of GraphQL `FieldNode`s, such as in `info.field_nodes`.
5
6 If a node has only one useful value, it's value is inlined.
7
8 If a list of nodes have unique names, it's transformed into a mapping.
9 Note Python dicts maintain ordering (for all supported versions).
10 """
11
12 import dataclasses
13 from typing import Any, Dict, Iterable, List, Optional, Union
14
15 from graphql import GraphQLResolveInfo
16 from graphql.language import (
17 ArgumentNode as GQLArgumentNode,
18 DirectiveNode as GQLDirectiveNode,
19 FieldNode as GQLFieldNode,
20 FragmentSpreadNode as GQLFragmentSpreadNode,
21 InlineFragmentNode as GQLInlineFragment,
22 InlineFragmentNode as GQLInlineFragmentNode,
23 ListValueNode as GQLListValueNode,
24 ObjectValueNode as GQLObjectValueNode,
25 ValueNode as GQLValueNode,
26 VariableNode as GQLVariableNode,
27 )
28
29
30 Arguments = Dict[str, Any]
31 Directives = Dict[str, Arguments]
32 Selection = Union["SelectedField", "FragmentSpread", "InlineFragment"]
33
34
35 def convert_value(info: GraphQLResolveInfo, node: GQLValueNode) -> Any:
36 """Return useful value from any node."""
37 if isinstance(node, GQLVariableNode):
38 # Look up variable
39 name = node.name.value
40 return info.variable_values[name]
41 if isinstance(node, GQLListValueNode):
42 return [convert_value(info, value) for value in node.values]
43 if isinstance(node, GQLObjectValueNode):
44 return {
45 field.name.value: convert_value(info, field.value) for field in node.fields
46 }
47 return getattr(node, "value", None)
48
49
50 def convert_arguments(
51 info: GraphQLResolveInfo, nodes: Iterable[GQLArgumentNode]
52 ) -> Arguments:
53 """Return mapping of arguments."""
54 return {node.name.value: convert_value(info, node.value) for node in nodes}
55
56
57 def convert_directives(
58 info: GraphQLResolveInfo, nodes: Iterable[GQLDirectiveNode]
59 ) -> Directives:
60 """Return mapping of directives."""
61 return {node.name.value: convert_arguments(info, node.arguments) for node in nodes}
62
63
64 def convert_selections(
65 info: GraphQLResolveInfo, field_nodes: List[GQLFieldNode]
66 ) -> List[Selection]:
67 """Return typed `Selection` based on node type."""
68 selections: List[Selection] = []
69 for node in field_nodes:
70 if isinstance(node, GQLFieldNode):
71 selections.append(SelectedField.from_node(info, node))
72 elif isinstance(node, GQLInlineFragment):
73 selections.append(InlineFragment.from_node(info, node))
74 elif isinstance(node, GQLFragmentSpreadNode):
75 selections.append(FragmentSpread.from_node(info, node))
76 else:
77 raise TypeError(f"Unknown node type: {node}")
78
79 return selections
80
81
82 @dataclasses.dataclass
83 class FragmentSpread:
84 """Wrapper for a FragmentSpreadNode."""
85
86 name: str
87 type_condition: str
88 directives: Directives
89 selections: List[Selection]
90
91 @classmethod
92 def from_node(cls, info: GraphQLResolveInfo, node: GQLFragmentSpreadNode):
93 # Look up fragment
94 name = node.name.value
95 fragment = info.fragments[name]
96 return cls(
97 name=name,
98 directives=convert_directives(info, node.directives),
99 type_condition=fragment.type_condition.name.value,
100 selections=convert_selections(
101 info, getattr(fragment.selection_set, "selections", [])
102 ),
103 )
104
105
106 @dataclasses.dataclass
107 class InlineFragment:
108 """Wrapper for a InlineFragmentNode."""
109
110 type_condition: str
111 selections: List[Selection]
112 directives: Directives
113
114 @classmethod
115 def from_node(cls, info: GraphQLResolveInfo, node: GQLInlineFragmentNode):
116 return cls(
117 type_condition=node.type_condition.name.value,
118 selections=convert_selections(
119 info, getattr(node.selection_set, "selections", [])
120 ),
121 directives=convert_directives(info, node.directives),
122 )
123
124
125 @dataclasses.dataclass
126 class SelectedField:
127 """Wrapper for a FieldNode."""
128
129 name: str
130 directives: Directives
131 arguments: Arguments
132 selections: List[Selection]
133 alias: Optional[str] = None
134
135 @classmethod
136 def from_node(cls, info: GraphQLResolveInfo, node: GQLFieldNode):
137 return cls(
138 name=node.name.value,
139 directives=convert_directives(info, node.directives),
140 alias=getattr(node.alias, "value", None),
141 arguments=convert_arguments(info, node.arguments),
142 selections=convert_selections(
143 info, getattr(node.selection_set, "selections", [])
144 ),
145 )
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/types/nodes.py b/strawberry/types/nodes.py
--- a/strawberry/types/nodes.py
+++ b/strawberry/types/nodes.py
@@ -37,7 +37,7 @@
if isinstance(node, GQLVariableNode):
# Look up variable
name = node.name.value
- return info.variable_values[name]
+ return info.variable_values.get(name)
if isinstance(node, GQLListValueNode):
return [convert_value(info, value) for value in node.values]
if isinstance(node, GQLObjectValueNode):
| {"golden_diff": "diff --git a/strawberry/types/nodes.py b/strawberry/types/nodes.py\n--- a/strawberry/types/nodes.py\n+++ b/strawberry/types/nodes.py\n@@ -37,7 +37,7 @@\n if isinstance(node, GQLVariableNode):\n # Look up variable\n name = node.name.value\n- return info.variable_values[name]\n+ return info.variable_values.get(name)\n if isinstance(node, GQLListValueNode):\n return [convert_value(info, value) for value in node.values]\n if isinstance(node, GQLObjectValueNode):\n", "issue": "Undefined variable issue in 0.77.6\nWith the latest release of #1213, this [line](https://github.com/strawberry-graphql/strawberry/blob/main/strawberry/types/nodes.py#L40) raises a KeyError when a variable is used in a query, but not defined in the payload under the key `variables`.\r\n\r\nThis used to work, and, I guess, is ok when the variable/argument is optional.\r\n\r\nI do not know what the specs are, meaning if variable definition should be enforced all the way. If it's the way to go, maybe there should be a validation step/graceful way to handle such a case.\r\n\r\nIf not, `return info.variable_values.get(name)` should do the trick.\n", "before_files": [{"content": "\"\"\"\nAbstraction layer for graphql-core field nodes.\n\nCall `convert_sections` on a list of GraphQL `FieldNode`s, such as in `info.field_nodes`.\n\nIf a node has only one useful value, it's value is inlined.\n\nIf a list of nodes have unique names, it's transformed into a mapping.\nNote Python dicts maintain ordering (for all supported versions).\n\"\"\"\n\nimport dataclasses\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nfrom graphql import GraphQLResolveInfo\nfrom graphql.language import (\n ArgumentNode as GQLArgumentNode,\n DirectiveNode as GQLDirectiveNode,\n FieldNode as GQLFieldNode,\n FragmentSpreadNode as GQLFragmentSpreadNode,\n InlineFragmentNode as GQLInlineFragment,\n InlineFragmentNode as GQLInlineFragmentNode,\n ListValueNode as GQLListValueNode,\n ObjectValueNode as GQLObjectValueNode,\n ValueNode as GQLValueNode,\n VariableNode as GQLVariableNode,\n)\n\n\nArguments = Dict[str, Any]\nDirectives = Dict[str, Arguments]\nSelection = Union[\"SelectedField\", \"FragmentSpread\", \"InlineFragment\"]\n\n\ndef convert_value(info: GraphQLResolveInfo, node: GQLValueNode) -> Any:\n \"\"\"Return useful value from any node.\"\"\"\n if isinstance(node, GQLVariableNode):\n # Look up variable\n name = node.name.value\n return info.variable_values[name]\n if isinstance(node, GQLListValueNode):\n return [convert_value(info, value) for value in node.values]\n if isinstance(node, GQLObjectValueNode):\n return {\n field.name.value: convert_value(info, field.value) for field in node.fields\n }\n return getattr(node, \"value\", None)\n\n\ndef convert_arguments(\n info: GraphQLResolveInfo, nodes: Iterable[GQLArgumentNode]\n) -> Arguments:\n \"\"\"Return mapping of arguments.\"\"\"\n return {node.name.value: convert_value(info, node.value) for node in nodes}\n\n\ndef convert_directives(\n info: GraphQLResolveInfo, nodes: Iterable[GQLDirectiveNode]\n) -> Directives:\n \"\"\"Return mapping of directives.\"\"\"\n return {node.name.value: convert_arguments(info, node.arguments) for node in nodes}\n\n\ndef convert_selections(\n info: GraphQLResolveInfo, field_nodes: List[GQLFieldNode]\n) -> List[Selection]:\n \"\"\"Return typed `Selection` based on node type.\"\"\"\n selections: List[Selection] = []\n for node in field_nodes:\n if isinstance(node, GQLFieldNode):\n selections.append(SelectedField.from_node(info, node))\n elif 
isinstance(node, GQLInlineFragment):\n selections.append(InlineFragment.from_node(info, node))\n elif isinstance(node, GQLFragmentSpreadNode):\n selections.append(FragmentSpread.from_node(info, node))\n else:\n raise TypeError(f\"Unknown node type: {node}\")\n\n return selections\n\n\[email protected]\nclass FragmentSpread:\n \"\"\"Wrapper for a FragmentSpreadNode.\"\"\"\n\n name: str\n type_condition: str\n directives: Directives\n selections: List[Selection]\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLFragmentSpreadNode):\n # Look up fragment\n name = node.name.value\n fragment = info.fragments[name]\n return cls(\n name=name,\n directives=convert_directives(info, node.directives),\n type_condition=fragment.type_condition.name.value,\n selections=convert_selections(\n info, getattr(fragment.selection_set, \"selections\", [])\n ),\n )\n\n\[email protected]\nclass InlineFragment:\n \"\"\"Wrapper for a InlineFragmentNode.\"\"\"\n\n type_condition: str\n selections: List[Selection]\n directives: Directives\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLInlineFragmentNode):\n return cls(\n type_condition=node.type_condition.name.value,\n selections=convert_selections(\n info, getattr(node.selection_set, \"selections\", [])\n ),\n directives=convert_directives(info, node.directives),\n )\n\n\[email protected]\nclass SelectedField:\n \"\"\"Wrapper for a FieldNode.\"\"\"\n\n name: str\n directives: Directives\n arguments: Arguments\n selections: List[Selection]\n alias: Optional[str] = None\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLFieldNode):\n return cls(\n name=node.name.value,\n directives=convert_directives(info, node.directives),\n alias=getattr(node.alias, \"value\", None),\n arguments=convert_arguments(info, node.arguments),\n selections=convert_selections(\n info, getattr(node.selection_set, \"selections\", [])\n ),\n )\n", "path": "strawberry/types/nodes.py"}], "after_files": [{"content": "\"\"\"\nAbstraction layer for graphql-core field nodes.\n\nCall `convert_sections` on a list of GraphQL `FieldNode`s, such as in `info.field_nodes`.\n\nIf a node has only one useful value, it's value is inlined.\n\nIf a list of nodes have unique names, it's transformed into a mapping.\nNote Python dicts maintain ordering (for all supported versions).\n\"\"\"\n\nimport dataclasses\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nfrom graphql import GraphQLResolveInfo\nfrom graphql.language import (\n ArgumentNode as GQLArgumentNode,\n DirectiveNode as GQLDirectiveNode,\n FieldNode as GQLFieldNode,\n FragmentSpreadNode as GQLFragmentSpreadNode,\n InlineFragmentNode as GQLInlineFragment,\n InlineFragmentNode as GQLInlineFragmentNode,\n ListValueNode as GQLListValueNode,\n ObjectValueNode as GQLObjectValueNode,\n ValueNode as GQLValueNode,\n VariableNode as GQLVariableNode,\n)\n\n\nArguments = Dict[str, Any]\nDirectives = Dict[str, Arguments]\nSelection = Union[\"SelectedField\", \"FragmentSpread\", \"InlineFragment\"]\n\n\ndef convert_value(info: GraphQLResolveInfo, node: GQLValueNode) -> Any:\n \"\"\"Return useful value from any node.\"\"\"\n if isinstance(node, GQLVariableNode):\n # Look up variable\n name = node.name.value\n return info.variable_values.get(name)\n if isinstance(node, GQLListValueNode):\n return [convert_value(info, value) for value in node.values]\n if isinstance(node, GQLObjectValueNode):\n return {\n field.name.value: convert_value(info, field.value) for field in node.fields\n }\n 
return getattr(node, \"value\", None)\n\n\ndef convert_arguments(\n info: GraphQLResolveInfo, nodes: Iterable[GQLArgumentNode]\n) -> Arguments:\n \"\"\"Return mapping of arguments.\"\"\"\n return {node.name.value: convert_value(info, node.value) for node in nodes}\n\n\ndef convert_directives(\n info: GraphQLResolveInfo, nodes: Iterable[GQLDirectiveNode]\n) -> Directives:\n \"\"\"Return mapping of directives.\"\"\"\n return {node.name.value: convert_arguments(info, node.arguments) for node in nodes}\n\n\ndef convert_selections(\n info: GraphQLResolveInfo, field_nodes: List[GQLFieldNode]\n) -> List[Selection]:\n \"\"\"Return typed `Selection` based on node type.\"\"\"\n selections: List[Selection] = []\n for node in field_nodes:\n if isinstance(node, GQLFieldNode):\n selections.append(SelectedField.from_node(info, node))\n elif isinstance(node, GQLInlineFragment):\n selections.append(InlineFragment.from_node(info, node))\n elif isinstance(node, GQLFragmentSpreadNode):\n selections.append(FragmentSpread.from_node(info, node))\n else:\n raise TypeError(f\"Unknown node type: {node}\")\n\n return selections\n\n\[email protected]\nclass FragmentSpread:\n \"\"\"Wrapper for a FragmentSpreadNode.\"\"\"\n\n name: str\n type_condition: str\n directives: Directives\n selections: List[Selection]\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLFragmentSpreadNode):\n # Look up fragment\n name = node.name.value\n fragment = info.fragments[name]\n return cls(\n name=name,\n directives=convert_directives(info, node.directives),\n type_condition=fragment.type_condition.name.value,\n selections=convert_selections(\n info, getattr(fragment.selection_set, \"selections\", [])\n ),\n )\n\n\[email protected]\nclass InlineFragment:\n \"\"\"Wrapper for a InlineFragmentNode.\"\"\"\n\n type_condition: str\n selections: List[Selection]\n directives: Directives\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLInlineFragmentNode):\n return cls(\n type_condition=node.type_condition.name.value,\n selections=convert_selections(\n info, getattr(node.selection_set, \"selections\", [])\n ),\n directives=convert_directives(info, node.directives),\n )\n\n\[email protected]\nclass SelectedField:\n \"\"\"Wrapper for a FieldNode.\"\"\"\n\n name: str\n directives: Directives\n arguments: Arguments\n selections: List[Selection]\n alias: Optional[str] = None\n\n @classmethod\n def from_node(cls, info: GraphQLResolveInfo, node: GQLFieldNode):\n return cls(\n name=node.name.value,\n directives=convert_directives(info, node.directives),\n alias=getattr(node.alias, \"value\", None),\n arguments=convert_arguments(info, node.arguments),\n selections=convert_selections(\n info, getattr(node.selection_set, \"selections\", [])\n ),\n )\n", "path": "strawberry/types/nodes.py"}]} | 1,781 | 133 |
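The patch in this row replaces a direct dictionary lookup with `.get()`. A minimal sketch of why that matters for optional variables — plain Python, no Strawberry or graphql-core import required; the variable name `flavour` is made up for illustration:

```python
# Payload sent without a value for the optional $flavour variable.
variable_values = {}
name = "flavour"

# Pre-patch lookup: raises KeyError, matching the behaviour reported above.
# value = variable_values[name]

# Post-patch lookup: returns None for an unsupplied optional variable.
value = variable_values.get(name)
print(value)  # None
```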
gh_patches_debug_2475 | rasdani/github-patches | git_diff | Gallopsled__pwntools-597 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A little bug in Buffer class
There is a little bug in the pwnlib.tubes.Buffer class. The class method `unget` has a type error at line 117: it adds a Buffer and a list.
```
Traceback (most recent call last):
File "<input>", line 1, in <module>
a.unget(b)
File "buffer.py", line 117, in unget
self.data = data + self.data
TypeError: unsupported operand type(s) for +: 'Buffer' and 'list'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/tubes/buffer.py`
Content:
```
1 #!/usr/bin/env python2
2
3 class Buffer(Exception):
4 """
5 List of strings with some helper routines.
6
7 Example:
8
9 >>> b = Buffer()
10 >>> b.add("A" * 10)
11 >>> b.add("B" * 10)
12 >>> len(b)
13 20
14 >>> b.get(1)
15 'A'
16 >>> len(b)
17 19
18 >>> b.get(9999)
19 'AAAAAAAAABBBBBBBBBB'
20 >>> len(b)
21 0
22 >>> b.get(1)
23 ''
24
25 Implementation Details:
26
27 Implemented as a list. Strings are added onto the end.
28 The ``0th`` item in the buffer is the oldest item, and
29 will be received first.
30 """
31 def __init__(self):
32 self.data = [] # Buffer
33 self.size = 0 # Length
34
35
36 def __len__(self):
37 """
38 >>> b = Buffer()
39 >>> b.add('lol')
40 >>> len(b) == 3
41 True
42 >>> b.add('foobar')
43 >>> len(b) == 9
44 True
45 """
46 return self.size
47
48 def __nonzero__(self):
49 return len(self) > 0
50
51 def __contains__(self, x):
52 """
53 >>> b = Buffer()
54 >>> b.add('asdf')
55 >>> 'x' in b
56 False
57 >>> b.add('x')
58 >>> 'x' in b
59 True
60 """
61 for b in self.data:
62 if x in b:
63 return True
64 return False
65
66 def index(self, x):
67 """
68 >>> b = Buffer()
69 >>> b.add('asdf')
70 >>> b.add('qwert')
71 >>> b.index('t') == len(b) - 1
72 True
73 """
74 sofar = 0
75 for b in self.data:
76 if x in b:
77 return sofar + b.index(x)
78 sofar += len(b)
79 raise IndexError()
80
81 def add(self, data):
82 """
83 Adds data to the buffer.
84
85 Arguments:
86 data(str,Buffer): Data to add
87 """
88 # Fast path for ''
89 if not data: return
90
91 if isinstance(data, Buffer):
92 self.size += data.size
93 self.data += data.data
94 else:
95 self.size += len(data)
96 self.data.append(data)
97
98 def unget(self, data):
99 """
100 Places data at the front of the buffer.
101
102 Arguments:
103 data(str,Buffer): Data to place at the beginning of the buffer.
104
105 Example:
106
107 >>> b = Buffer()
108 >>> b.add("hello")
109 >>> b.add("world")
110 >>> b.get(5)
111 'hello'
112 >>> b.unget("goodbye")
113 >>> b.get()
114 'goodbyeworld'
115 """
116 if isinstance(data, Buffer):
117 self.data = data + self.data
118 self.size += data.size
119 else:
120 self.data.insert(0, data)
121 self.size += len(data)
122
123 def get(self, want=float('inf')):
124 """
125 Retrieves bytes from the buffer.
126
127 Arguments:
128 want(int): Maximum number of bytes to fetch
129
130 Returns:
131 Data as string
132
133 Example:
134
135 >>> b = Buffer()
136 >>> b.add('hello')
137 >>> b.add('world')
138 >>> b.get(1)
139 'h'
140 >>> b.get()
141 'elloworld'
142 """
143 # Fast path, get all of the data
144 if want >= self.size:
145 data = ''.join(self.data)
146 self.size = 0
147 self.data = []
148 return data
149
150 # Slow path, find the correct-index chunk
151 have = 0
152 i = 0
153 while want >= have:
154 have += len(self.data[i])
155 i += 1
156
157 # Join the chunks, evict from the buffer
158 data = ''.join(self.data[:i])
159 self.data = self.data[i:]
160
161 # If the last chunk puts us over the limit,
162 # stick the extra back at the beginning.
163 if have > want:
164 extra = data[want:]
165 data = data[:want]
166 self.data.insert(0, extra)
167
168 # Size update
169 self.size -= len(data)
170
171 return data
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/tubes/buffer.py b/pwnlib/tubes/buffer.py
--- a/pwnlib/tubes/buffer.py
+++ b/pwnlib/tubes/buffer.py
@@ -114,7 +114,7 @@
'goodbyeworld'
"""
if isinstance(data, Buffer):
- self.data = data + self.data
+ self.data = data.data + self.data
self.size += data.size
else:
self.data.insert(0, data)
| {"golden_diff": "diff --git a/pwnlib/tubes/buffer.py b/pwnlib/tubes/buffer.py\n--- a/pwnlib/tubes/buffer.py\n+++ b/pwnlib/tubes/buffer.py\n@@ -114,7 +114,7 @@\n 'goodbyeworld'\n \"\"\"\n if isinstance(data, Buffer):\n- self.data = data + self.data\n+ self.data = data.data + self.data\n self.size += data.size\n else:\n self.data.insert(0, data)\n", "issue": "A little bug in Buffer class\nThere is a litttle bug in pwnlib.tubes.Buffer class.The class method unget has a type error in line 117.add a buffer and a list\n\n```\nTraceback (most recent call last):\n File \"<input>\", line 1, in <module>\n a.unget(b)\n File \"buffer.py\", line 117, in unget\n self.data = data + self.data\nTypeError: unsupported operand type(s) for +: 'Buffer' and 'list'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python2\n\nclass Buffer(Exception):\n \"\"\"\n List of strings with some helper routines.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"A\" * 10)\n >>> b.add(\"B\" * 10)\n >>> len(b)\n 20\n >>> b.get(1)\n 'A'\n >>> len(b)\n 19\n >>> b.get(9999)\n 'AAAAAAAAABBBBBBBBBB'\n >>> len(b)\n 0\n >>> b.get(1)\n ''\n\n Implementation Details:\n\n Implemented as a list. Strings are added onto the end.\n The ``0th`` item in the buffer is the oldest item, and\n will be received first.\n \"\"\"\n def __init__(self):\n self.data = [] # Buffer\n self.size = 0 # Length\n\n\n def __len__(self):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('lol')\n >>> len(b) == 3\n True\n >>> b.add('foobar')\n >>> len(b) == 9\n True\n \"\"\"\n return self.size\n\n def __nonzero__(self):\n return len(self) > 0\n\n def __contains__(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> 'x' in b\n False\n >>> b.add('x')\n >>> 'x' in b\n True\n \"\"\"\n for b in self.data:\n if x in b:\n return True\n return False\n\n def index(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> b.add('qwert')\n >>> b.index('t') == len(b) - 1\n True\n \"\"\"\n sofar = 0\n for b in self.data:\n if x in b:\n return sofar + b.index(x)\n sofar += len(b)\n raise IndexError()\n\n def add(self, data):\n \"\"\"\n Adds data to the buffer.\n\n Arguments:\n data(str,Buffer): Data to add\n \"\"\"\n # Fast path for ''\n if not data: return\n\n if isinstance(data, Buffer):\n self.size += data.size\n self.data += data.data\n else:\n self.size += len(data)\n self.data.append(data)\n\n def unget(self, data):\n \"\"\"\n Places data at the front of the buffer.\n\n Arguments:\n data(str,Buffer): Data to place at the beginning of the buffer.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"hello\")\n >>> b.add(\"world\")\n >>> b.get(5)\n 'hello'\n >>> b.unget(\"goodbye\")\n >>> b.get()\n 'goodbyeworld'\n \"\"\"\n if isinstance(data, Buffer):\n self.data = data + self.data\n self.size += data.size\n else:\n self.data.insert(0, data)\n self.size += len(data)\n\n def get(self, want=float('inf')):\n \"\"\"\n Retrieves bytes from the buffer.\n\n Arguments:\n want(int): Maximum number of bytes to fetch\n\n Returns:\n Data as string\n\n Example:\n\n >>> b = Buffer()\n >>> b.add('hello')\n >>> b.add('world')\n >>> b.get(1)\n 'h'\n >>> b.get()\n 'elloworld'\n \"\"\"\n # Fast path, get all of the data\n if want >= self.size:\n data = ''.join(self.data)\n self.size = 0\n self.data = []\n return data\n\n # Slow path, find the correct-index chunk\n have = 0\n i = 0\n while want >= have:\n have += len(self.data[i])\n i += 1\n\n # Join the chunks, evict from the buffer\n data = ''.join(self.data[:i])\n self.data = self.data[i:]\n\n # If the last chunk puts us over the 
limit,\n # stick the extra back at the beginning.\n if have > want:\n extra = data[want:]\n data = data[:want]\n self.data.insert(0, extra)\n\n # Size update\n self.size -= len(data)\n\n return data\n", "path": "pwnlib/tubes/buffer.py"}], "after_files": [{"content": "#!/usr/bin/env python2\n\nclass Buffer(Exception):\n \"\"\"\n List of strings with some helper routines.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"A\" * 10)\n >>> b.add(\"B\" * 10)\n >>> len(b)\n 20\n >>> b.get(1)\n 'A'\n >>> len(b)\n 19\n >>> b.get(9999)\n 'AAAAAAAAABBBBBBBBBB'\n >>> len(b)\n 0\n >>> b.get(1)\n ''\n\n Implementation Details:\n\n Implemented as a list. Strings are added onto the end.\n The ``0th`` item in the buffer is the oldest item, and\n will be received first.\n \"\"\"\n def __init__(self):\n self.data = [] # Buffer\n self.size = 0 # Length\n\n\n def __len__(self):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('lol')\n >>> len(b) == 3\n True\n >>> b.add('foobar')\n >>> len(b) == 9\n True\n \"\"\"\n return self.size\n\n def __nonzero__(self):\n return len(self) > 0\n\n def __contains__(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> 'x' in b\n False\n >>> b.add('x')\n >>> 'x' in b\n True\n \"\"\"\n for b in self.data:\n if x in b:\n return True\n return False\n\n def index(self, x):\n \"\"\"\n >>> b = Buffer()\n >>> b.add('asdf')\n >>> b.add('qwert')\n >>> b.index('t') == len(b) - 1\n True\n \"\"\"\n sofar = 0\n for b in self.data:\n if x in b:\n return sofar + b.index(x)\n sofar += len(b)\n raise IndexError()\n\n def add(self, data):\n \"\"\"\n Adds data to the buffer.\n\n Arguments:\n data(str,Buffer): Data to add\n \"\"\"\n # Fast path for ''\n if not data: return\n\n if isinstance(data, Buffer):\n self.size += data.size\n self.data += data.data\n else:\n self.size += len(data)\n self.data.append(data)\n\n def unget(self, data):\n \"\"\"\n Places data at the front of the buffer.\n\n Arguments:\n data(str,Buffer): Data to place at the beginning of the buffer.\n\n Example:\n\n >>> b = Buffer()\n >>> b.add(\"hello\")\n >>> b.add(\"world\")\n >>> b.get(5)\n 'hello'\n >>> b.unget(\"goodbye\")\n >>> b.get()\n 'goodbyeworld'\n \"\"\"\n if isinstance(data, Buffer):\n self.data = data.data + self.data\n self.size += data.size\n else:\n self.data.insert(0, data)\n self.size += len(data)\n\n def get(self, want=float('inf')):\n \"\"\"\n Retrieves bytes from the buffer.\n\n Arguments:\n want(int): Maximum number of bytes to fetch\n\n Returns:\n Data as string\n\n Example:\n\n >>> b = Buffer()\n >>> b.add('hello')\n >>> b.add('world')\n >>> b.get(1)\n 'h'\n >>> b.get()\n 'elloworld'\n \"\"\"\n # Fast path, get all of the data\n if want >= self.size:\n data = ''.join(self.data)\n self.size = 0\n self.data = []\n return data\n\n # Slow path, find the correct-index chunk\n have = 0\n i = 0\n while want >= have:\n have += len(self.data[i])\n i += 1\n\n # Join the chunks, evict from the buffer\n data = ''.join(self.data[:i])\n self.data = self.data[i:]\n\n # If the last chunk puts us over the limit,\n # stick the extra back at the beginning.\n if have > want:\n extra = data[want:]\n data = data[:want]\n self.data.insert(0, extra)\n\n # Size update\n self.size -= len(data)\n\n return data\n", "path": "pwnlib/tubes/buffer.py"}]} | 1,739 | 113 |
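A small usage sketch of the patched `unget()` from this row, assuming the module path `pwnlib.tubes.buffer` shown in the file listing; before the fix, the same call raised the `TypeError` quoted in the issue:

```python
from pwnlib.tubes.buffer import Buffer

a = Buffer()
a.add("hello")
b = Buffer()
b.add("world")

# Pre-patch:  self.data = data + self.data       -> TypeError ('Buffer' + 'list')
# Post-patch: self.data = data.data + self.data  -> chunks are prepended correctly
a.unget(b)
print(a.get())  # 'worldhello'
```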
gh_patches_debug_11967 | rasdani/github-patches | git_diff | translate__pootle-6574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configurable logo url and favicon
It would be nice to have a configurable logo in the config file, like you did for the name of the Pootle site (`POOTLE_TITLE`), instead of creating a custom template or editing the CSS of the default one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_misc/context_processors.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.conf import settings
10
11 from pootle.core.markup import get_markup_filter_name
12 from pootle_project.models import Project
13 from staticpages.models import LegalPage
14
15
16 def _agreement_context(request):
17 """Returns whether the agreement box should be displayed or not."""
18 request_path = request.META['PATH_INFO']
19 nocheck = filter(lambda x: request_path.startswith(x),
20 settings.POOTLE_LEGALPAGE_NOCHECK_PREFIXES)
21
22 if (request.user.is_authenticated and not nocheck and
23 LegalPage.objects.has_pending_agreement(request.user)):
24 return True
25
26 return False
27
28
29 def _get_social_auth_providers(request):
30 if 'allauth.socialaccount' not in settings.INSTALLED_APPS:
31 return []
32
33 from allauth.socialaccount import providers
34 return [{'name': provider.name, 'url': provider.get_login_url(request)}
35 for provider in providers.registry.get_list()]
36
37
38 def pootle_context(request):
39 """Exposes settings to templates."""
40 # FIXME: maybe we should expose relevant settings only?
41
42 return {
43 'settings': {
44 'POOTLE_CUSTOM_LOGO': getattr(settings, "POOTLE_CUSTOM_LOGO", ""),
45 'POOTLE_TITLE': settings.POOTLE_TITLE,
46 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
47 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and
48 settings.POOTLE_CONTACT_EMAIL),
49 'POOTLE_MARKUP_FILTER': get_markup_filter_name(),
50 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED,
51 'SCRIPT_NAME': settings.SCRIPT_NAME,
52 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,
53 'DEBUG': settings.DEBUG,
54 },
55 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT,
56 'ALL_PROJECTS': Project.objects.cached_dict(request.user),
57 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request),
58 'display_agreement': _agreement_context(request),
59 }
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py
--- a/pootle/apps/pootle_misc/context_processors.py
+++ b/pootle/apps/pootle_misc/context_processors.py
@@ -43,6 +43,7 @@
'settings': {
'POOTLE_CUSTOM_LOGO': getattr(settings, "POOTLE_CUSTOM_LOGO", ""),
'POOTLE_TITLE': settings.POOTLE_TITLE,
+ 'POOTLE_FAVICONS_PATH': settings.POOTLE_FAVICONS_PATH,
'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and
settings.POOTLE_CONTACT_EMAIL),
| {"golden_diff": "diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py\n--- a/pootle/apps/pootle_misc/context_processors.py\n+++ b/pootle/apps/pootle_misc/context_processors.py\n@@ -43,6 +43,7 @@\n 'settings': {\n 'POOTLE_CUSTOM_LOGO': getattr(settings, \"POOTLE_CUSTOM_LOGO\", \"\"),\n 'POOTLE_TITLE': settings.POOTLE_TITLE,\n+ 'POOTLE_FAVICONS_PATH': settings.POOTLE_FAVICONS_PATH,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and\n settings.POOTLE_CONTACT_EMAIL),\n", "issue": "Configurable logo url and favicon\nIt would be nice to have a configurable logo in the config file like you did for the name of pootle site (`POOTLE_TITLE`) instead of creating a custom template or editing the css of the default one.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\n\nfrom pootle.core.markup import get_markup_filter_name\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.POOTLE_LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated and not nocheck and\n LegalPage.objects.has_pending_agreement(request.user)):\n return True\n\n return False\n\n\ndef _get_social_auth_providers(request):\n if 'allauth.socialaccount' not in settings.INSTALLED_APPS:\n return []\n\n from allauth.socialaccount import providers\n return [{'name': provider.name, 'url': provider.get_login_url(request)}\n for provider in providers.registry.get_list()]\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n # FIXME: maybe we should expose relevant settings only?\n\n return {\n 'settings': {\n 'POOTLE_CUSTOM_LOGO': getattr(settings, \"POOTLE_CUSTOM_LOGO\", \"\"),\n 'POOTLE_TITLE': settings.POOTLE_TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and\n settings.POOTLE_CONTACT_EMAIL),\n 'POOTLE_MARKUP_FILTER': get_markup_filter_name(),\n 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\n\nfrom pootle.core.markup import get_markup_filter_name\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.POOTLE_LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated and not nocheck and\n LegalPage.objects.has_pending_agreement(request.user)):\n return True\n\n return False\n\n\ndef _get_social_auth_providers(request):\n if 'allauth.socialaccount' not in settings.INSTALLED_APPS:\n return []\n\n from allauth.socialaccount import providers\n return [{'name': provider.name, 'url': provider.get_login_url(request)}\n for provider in providers.registry.get_list()]\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n # FIXME: maybe we should expose relevant settings only?\n\n return {\n 'settings': {\n 'POOTLE_CUSTOM_LOGO': getattr(settings, \"POOTLE_CUSTOM_LOGO\", \"\"),\n 'POOTLE_TITLE': settings.POOTLE_TITLE,\n 'POOTLE_FAVICONS_PATH': settings.POOTLE_FAVICONS_PATH,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'POOTLE_CONTACT_ENABLED': (settings.POOTLE_CONTACT_ENABLED and\n settings.POOTLE_CONTACT_EMAIL),\n 'POOTLE_MARKUP_FILTER': get_markup_filter_name(),\n 'POOTLE_SIGNUP_ENABLED': settings.POOTLE_SIGNUP_ENABLED,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.POOTLE_CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'SOCIAL_AUTH_PROVIDERS': _get_social_auth_providers(request),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}]} | 928 | 169 |
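A small sketch of what the patched context processor hands to templates; the favicon path value is a placeholder — only the `POOTLE_FAVICONS_PATH` key comes from the diff above:

```python
# Shape of the context returned by pootle_context() after the patch
# (unrelated keys omitted).
context = {
    "settings": {
        "POOTLE_TITLE": "My Pootle",
        "POOTLE_FAVICONS_PATH": "images/favicons",  # placeholder value
    },
}

# Django templates can then reference it as settings.POOTLE_FAVICONS_PATH.
print(context["settings"]["POOTLE_FAVICONS_PATH"])
```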
gh_patches_debug_6623 | rasdani/github-patches | git_diff | netbox-community__netbox-14901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing event rule action type messes up Conditions field
### Deployment Type
Self-hosted
### NetBox Version
v3.7.1
### Python Version
3.11
### Steps to Reproduce
1. Go to **Operations - Event Rules - Add**
2. Set Conditions = `{ "and": [{"attr": "status.value", "value": "deprecated"}]}`
3. Select Action type = Script
### Expected Behavior
Conditions field stays as-is
### Observed Behavior
Conditions field is rewritten as invalid JSON:
`"{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}"`
The quoting is added with every change of the Action type.
This also happens if editing an existing event rule and changing the Action type.
### Workaround
- Copy the Conditions field somewhere
- Change the Action type
- Paste the Conditions field back
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/forms/fields/fields.py`
Content:
```
1 import json
2
3 from django import forms
4 from django.db.models import Count
5 from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
6 from django.templatetags.static import static
7 from django.utils.translation import gettext_lazy as _
8 from netaddr import AddrFormatError, EUI
9
10 from utilities.forms import widgets
11 from utilities.validators import EnhancedURLValidator
12
13 __all__ = (
14 'ColorField',
15 'CommentField',
16 'JSONField',
17 'LaxURLField',
18 'MACAddressField',
19 'SlugField',
20 'TagFilterField',
21 )
22
23
24 class CommentField(forms.CharField):
25 """
26 A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.
27 """
28 widget = widgets.MarkdownWidget
29 label = _('Comments')
30 help_text = _(
31 '<i class="mdi mdi-information-outline"></i> '
32 '<a href="{url}" target="_blank" tabindex="-1">Markdown</a> syntax is supported'
33 ).format(url=static('docs/reference/markdown/'))
34
35 def __init__(self, *, label=label, help_text=help_text, required=False, **kwargs):
36 super().__init__(label=label, help_text=help_text, required=required, **kwargs)
37
38
39 class SlugField(forms.SlugField):
40 """
41 Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.
42
43 Parameters:
44 slug_source: Name of the form field from which the slug value will be derived
45 """
46 widget = widgets.SlugWidget
47 label = _('Slug')
48 help_text = _("URL-friendly unique shorthand")
49
50 def __init__(self, *, slug_source='name', label=label, help_text=help_text, **kwargs):
51 super().__init__(label=label, help_text=help_text, **kwargs)
52
53 self.widget.attrs['slug-source'] = slug_source
54
55
56 class ColorField(forms.CharField):
57 """
58 A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to
59 render choices.
60 """
61 widget = widgets.ColorSelect
62
63
64 class TagFilterField(forms.MultipleChoiceField):
65 """
66 A filter field for the tags of a model. Only the tags used by a model are displayed.
67
68 :param model: The model of the filter
69 """
70
71 def __init__(self, model, *args, **kwargs):
72 def get_choices():
73 tags = model.tags.annotate(
74 count=Count('extras_taggeditem_items')
75 ).order_by('name')
76 return [
77 (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags
78 ]
79
80 # Choices are fetched each time the form is initialized
81 super().__init__(label=_('Tags'), choices=get_choices, required=False, *args, **kwargs)
82
83
84 class LaxURLField(forms.URLField):
85 """
86 Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names
87 (e.g. http://myserver/ is valid)
88 """
89 default_validators = [EnhancedURLValidator()]
90
91
92 class JSONField(_JSONField):
93 """
94 Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text.
95 """
96 def __init__(self, *args, **kwargs):
97 super().__init__(*args, **kwargs)
98 if not self.help_text:
99 self.help_text = _('Enter context data in <a href="https://json.org/">JSON</a> format.')
100 self.widget.attrs['placeholder'] = ''
101 self.widget.attrs['class'] = 'font-monospace'
102
103 def prepare_value(self, value):
104 if isinstance(value, InvalidJSONInput):
105 return value
106 if value in ('', None):
107 return ''
108 return json.dumps(value, sort_keys=True, indent=4)
109
110
111 class MACAddressField(forms.Field):
112 """
113 Validates a 48-bit MAC address.
114 """
115 widget = forms.CharField
116 default_error_messages = {
117 'invalid': _('MAC address must be in EUI-48 format'),
118 }
119
120 def to_python(self, value):
121 value = super().to_python(value)
122
123 # Validate MAC address format
124 try:
125 value = EUI(value.strip())
126 except AddrFormatError:
127 raise forms.ValidationError(self.error_messages['invalid'], code='invalid')
128
129 return value
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py
--- a/netbox/utilities/forms/fields/fields.py
+++ b/netbox/utilities/forms/fields/fields.py
@@ -105,7 +105,12 @@
return value
if value in ('', None):
return ''
- return json.dumps(value, sort_keys=True, indent=4)
+ if type(value) is str:
+ try:
+ value = json.loads(value, cls=self.decoder)
+ except json.decoder.JSONDecodeError:
+ return value
+ return json.dumps(value, sort_keys=True, indent=4, ensure_ascii=False, cls=self.encoder)
class MACAddressField(forms.Field):
| {"golden_diff": "diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py\n--- a/netbox/utilities/forms/fields/fields.py\n+++ b/netbox/utilities/forms/fields/fields.py\n@@ -105,7 +105,12 @@\n return value\n if value in ('', None):\n return ''\n- return json.dumps(value, sort_keys=True, indent=4)\n+ if type(value) is str:\n+ try:\n+ value = json.loads(value, cls=self.decoder)\n+ except json.decoder.JSONDecodeError:\n+ return value\n+ return json.dumps(value, sort_keys=True, indent=4, ensure_ascii=False, cls=self.encoder)\n \n \n class MACAddressField(forms.Field):\n", "issue": "Changing event rule action type messes up Conditions field\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.1\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to **Operations - Event Rules - Add**\r\n2. Set Conditions = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}`\r\n3. Select Action type = Script\r\n\r\n### Expected Behavior\r\n\r\nConditions field stays as-is\r\n\r\n### Observed Behavior\r\n\r\nConditions field is rewritten as invalid JSON:\r\n\r\n`\"{ \\\"and\\\": [{\\\"attr\\\": \\\"status.value\\\", \\\"value\\\": \\\"deprecated\\\"}]}\"`\r\n\r\nThe quoting is added with every change of the Action type.\r\n\r\nThis also happens if editing an existing event rule and changing the Action type.\r\n\r\n### Workaround\r\n\r\n- Copy the Conditions field somewhere\r\n- Change the Action type\r\n- Paste the Conditions field back\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom django.templatetags.static import static\nfrom django.utils.translation import gettext_lazy as _\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = widgets.MarkdownWidget\n label = _('Comments')\n help_text = _(\n '<i class=\"mdi mdi-information-outline\"></i> '\n '<a href=\"{url}\" target=\"_blank\" tabindex=\"-1\">Markdown</a> syntax is supported'\n ).format(url=static('docs/reference/markdown/'))\n\n def __init__(self, *, label=label, help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n label = _('Slug')\n help_text = _(\"URL-friendly unique shorthand\")\n\n def __init__(self, *, slug_source='name', label=label, help_text=help_text, **kwargs):\n super().__init__(label=label, help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. 
Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label=_('Tags'), choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = _('Enter context data in <a href=\"https://json.org/\">JSON</a> format.')\n self.widget.attrs['placeholder'] = ''\n self.widget.attrs['class'] = 'font-monospace'\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value in ('', None):\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': _('MAC address must be in EUI-48 format'),\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n", "path": "netbox/utilities/forms/fields/fields.py"}], "after_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom django.templatetags.static import static\nfrom django.utils.translation import gettext_lazy as _\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. 
Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = widgets.MarkdownWidget\n label = _('Comments')\n help_text = _(\n '<i class=\"mdi mdi-information-outline\"></i> '\n '<a href=\"{url}\" target=\"_blank\" tabindex=\"-1\">Markdown</a> syntax is supported'\n ).format(url=static('docs/reference/markdown/'))\n\n def __init__(self, *, label=label, help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n label = _('Slug')\n help_text = _(\"URL-friendly unique shorthand\")\n\n def __init__(self, *, slug_source='name', label=label, help_text=help_text, **kwargs):\n super().__init__(label=label, help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label=_('Tags'), choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = _('Enter context data in <a href=\"https://json.org/\">JSON</a> format.')\n self.widget.attrs['placeholder'] = ''\n self.widget.attrs['class'] = 'font-monospace'\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value in ('', None):\n return ''\n if type(value) is str:\n try:\n value = json.loads(value, cls=self.decoder)\n except json.decoder.JSONDecodeError:\n return value\n return json.dumps(value, sort_keys=True, indent=4, ensure_ascii=False, cls=self.encoder)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': _('MAC address must be in EUI-48 format'),\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n", "path": "netbox/utilities/forms/fields/fields.py"}]} | 1,685 | 167 |
gh_patches_debug_23338 | rasdani/github-patches | git_diff | python-pillow__Pillow-3950 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageGrab fails with multiple monitors
When calling ImageGrab.grab() passing in a bounding box that is outside the area of my primary monitor, I just get black.
For example, my primary monitor is 1920x1200, flanked on either side by monitors running at 1600x1200, making my total desktop size 5120x1200. Also, because my primary monitor is in the middle, the horizontal coordinates for the full virtual desktop go from -1600 to 3519, where 0 is the left-most pixel of my primary monitor. If I try to capture my rightmost monitor using the following code, all I get is a black image:
``` Python
from PIL import ImageGrab
img = ImageGrab.grab([1920, 0, 3519, 1199])
img.save("test.jpg")
```
Poking around the code, it looks like `ImageGrab.grab()` calls into `Image.core.grabscreen` which is an alias for [`PyImaging_GrabScreenWin32()`](https://github.com/python-pillow/Pillow/blob/2be12dec2b231d31400f44bfa855966484997c16/display.c#L323) in `display.c`. That function does retrieve a DC handle to the entire desktop, but the subsequent calls to `GetDeviceCaps` with `HORZRES` and `VERTRES` only return the x/y size of the primary monitor, not the entire desktop.
``` C
screen = CreateDC("DISPLAY", NULL, NULL, NULL);
// ...
width = GetDeviceCaps(screen, HORZRES);
height = GetDeviceCaps(screen, VERTRES);
// ...
if (!BitBlt(screen_copy, 0, 0, width, height, screen, 0, 0, SRCCOPY))
goto error;
```
Another problem with the above code is that monitors to the left of or above the primary display have negative coordinates in the `screen` DC. So, for example, capturing the monitor to the left of my primary display (which has a resolution of 1600x1200) would need to call BitBlt with the following coordinates:
``` C
left = -1600
top = 0
width = 1600
height = 1200
BitBlt(screen_copy, 0, 0, width, height, screen, left, top, SRCCOPY)
```
Similarly, if I was trying to capture a monitor above my primary display, then `top` would be negative. Because of the negative coordinates issue, I don't see any way of fixing this without passing in `left, top, width, height` from the calling python code, which could be calculated easily from the `bbox` parameter. Then it's simply up to the caller to know the coordinates of the monitor they want to capture. If no `bbox` is provided, then the coordinates would default to the primary display (0, 0, HORZRES, VERTRES), keeping the current functionality unchanged so as not to break existing code that uses `ImageGrab.grab()`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/ImageGrab.py`
Content:
```
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (macOS and Windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 import sys
19
20 from . import Image
21
22 if sys.platform == "win32":
23 grabber = Image.core.grabscreen
24 elif sys.platform == "darwin":
25 import os
26 import tempfile
27 import subprocess
28 else:
29 raise ImportError("ImageGrab is macOS and Windows only")
30
31
32 def grab(bbox=None, include_layered_windows=False):
33 if sys.platform == "darwin":
34 fh, filepath = tempfile.mkstemp(".png")
35 os.close(fh)
36 subprocess.call(["screencapture", "-x", filepath])
37 im = Image.open(filepath)
38 im.load()
39 os.unlink(filepath)
40 else:
41 size, data = grabber(include_layered_windows)
42 im = Image.frombytes(
43 "RGB",
44 size,
45 data,
46 # RGB, 32-bit line padding, origin lower left corner
47 "raw",
48 "BGR",
49 (size[0] * 3 + 3) & -4,
50 -1,
51 )
52 if bbox:
53 im = im.crop(bbox)
54 return im
55
56
57 def grabclipboard():
58 if sys.platform == "darwin":
59 fh, filepath = tempfile.mkstemp(".jpg")
60 os.close(fh)
61 commands = [
62 'set theFile to (open for access POSIX file "'
63 + filepath
64 + '" with write permission)',
65 "try",
66 " write (the clipboard as JPEG picture) to theFile",
67 "end try",
68 "close access theFile",
69 ]
70 script = ["osascript"]
71 for command in commands:
72 script += ["-e", command]
73 subprocess.call(script)
74
75 im = None
76 if os.stat(filepath).st_size != 0:
77 im = Image.open(filepath)
78 im.load()
79 os.unlink(filepath)
80 return im
81 else:
82 data = Image.core.grabclipboard()
83 if isinstance(data, bytes):
84 from . import BmpImagePlugin
85 import io
86
87 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
88 return data
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/ImageGrab.py b/src/PIL/ImageGrab.py
--- a/src/PIL/ImageGrab.py
+++ b/src/PIL/ImageGrab.py
@@ -29,7 +29,7 @@
raise ImportError("ImageGrab is macOS and Windows only")
-def grab(bbox=None, include_layered_windows=False):
+def grab(bbox=None, include_layered_windows=False, all_screens=False):
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp(".png")
os.close(fh)
@@ -37,8 +37,10 @@
im = Image.open(filepath)
im.load()
os.unlink(filepath)
+ if bbox:
+ im = im.crop(bbox)
else:
- size, data = grabber(include_layered_windows)
+ offset, size, data = grabber(include_layered_windows, all_screens)
im = Image.frombytes(
"RGB",
size,
@@ -49,8 +51,10 @@
(size[0] * 3 + 3) & -4,
-1,
)
- if bbox:
- im = im.crop(bbox)
+ if bbox:
+ x0, y0 = offset
+ left, top, right, bottom = bbox
+ im = im.crop((left - x0, top - y0, right - x0, bottom - y0))
return im
| {"golden_diff": "diff --git a/src/PIL/ImageGrab.py b/src/PIL/ImageGrab.py\n--- a/src/PIL/ImageGrab.py\n+++ b/src/PIL/ImageGrab.py\n@@ -29,7 +29,7 @@\n raise ImportError(\"ImageGrab is macOS and Windows only\")\n \n \n-def grab(bbox=None, include_layered_windows=False):\n+def grab(bbox=None, include_layered_windows=False, all_screens=False):\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n@@ -37,8 +37,10 @@\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n+ if bbox:\n+ im = im.crop(bbox)\n else:\n- size, data = grabber(include_layered_windows)\n+ offset, size, data = grabber(include_layered_windows, all_screens)\n im = Image.frombytes(\n \"RGB\",\n size,\n@@ -49,8 +51,10 @@\n (size[0] * 3 + 3) & -4,\n -1,\n )\n- if bbox:\n- im = im.crop(bbox)\n+ if bbox:\n+ x0, y0 = offset\n+ left, top, right, bottom = bbox\n+ im = im.crop((left - x0, top - y0, right - x0, bottom - y0))\n return im\n", "issue": "ImageGrab fails with multiple monitors\nWhen calling ImageGrab.grab() passing in a bounding box that is outside the area of my primary monitor, I just get black.\n\nFor example, my primary monitor is 1920x1200, flanked on either side by monitors running at 1600x1200, making my total desktop size 5120x1200. Also, because my primary monitor is in the middle, the horizontal coordinates for the full virtual desktop go from -1600 to 3519, where 0 is the left-most pixel of my primary monitor. If I try to capture my rightmost monitor using the following code, all I get is a black image:\n\n``` Python\nfrom PIL import ImageGrab\nimg = ImageGrab.grab([1920, 0, 3519, 1199])\nimg.save(\"test.jpg\")\n```\n\nPoking around the code, it looks like `ImageGrab.grab()` calls into `Image.core.grabscreen` which is an alias for [`PyImaging_GrabScreenWin32()`](https://github.com/python-pillow/Pillow/blob/2be12dec2b231d31400f44bfa855966484997c16/display.c#L323) in `display.c`. That function does retrieve a DC handle to the entire desktop, but the subsequent calls to `GetDeviceCaps` with `HORZRES` and `VERTRES` only return the x/y size of the primary monitor, not the entire desktop.\n\n``` C\nscreen = CreateDC(\"DISPLAY\", NULL, NULL, NULL);\n// ...\nwidth = GetDeviceCaps(screen, HORZRES);\nheight = GetDeviceCaps(screen, VERTRES);\n// ...\nif (!BitBlt(screen_copy, 0, 0, width, height, screen, 0, 0, SRCCOPY))\n goto error;\n```\n\nAnother problem with the above code is that monitors to the left of or above the primary display have negative coordinates in the `screen` DC. So, for example, capturing the monitor to the left of my primary display (which has a resolution of 1600x1200) would need to call BitBlt with the following coordinates:\n\n``` C\nleft = -1600\ntop = 0\nwidth = 1600\nheight = 1200\nBitBlt(screen_copy, 0, 0, width, height, screen, left, top, SRCCOPY)\n```\n\nSimilarly, if I was trying to capture a monitor above my primary display, then `top` would be negative. Because of the negative coordinates issue, I don't see any way of fixing this without passing in `left, top, width, height` from the calling python code, which could be calculated easily from the `bbox` parameter. Then it's simply up to the caller to know the coordinates of the monitor they want to capture. 
If no `bbox` is provided, then the coordinates would default to the primary display (0, 0, HORZRES, VERTRES), keeping the current functionality unchanged so as not to break existing code that uses `ImageGrab.grab()`.\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (macOS and Windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\n\nfrom . import Image\n\nif sys.platform == \"win32\":\n grabber = Image.core.grabscreen\nelif sys.platform == \"darwin\":\n import os\n import tempfile\n import subprocess\nelse:\n raise ImportError(\"ImageGrab is macOS and Windows only\")\n\n\ndef grab(bbox=None, include_layered_windows=False):\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n subprocess.call([\"screencapture\", \"-x\", filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n else:\n size, data = grabber(include_layered_windows)\n im = Image.frombytes(\n \"RGB\",\n size,\n data,\n # RGB, 32-bit line padding, origin lower left corner\n \"raw\",\n \"BGR\",\n (size[0] * 3 + 3) & -4,\n -1,\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".jpg\")\n os.close(fh)\n commands = [\n 'set theFile to (open for access POSIX file \"'\n + filepath\n + '\" with write permission)',\n \"try\",\n \" write (the clipboard as JPEG picture) to theFile\",\n \"end try\",\n \"close access theFile\",\n ]\n script = [\"osascript\"]\n for command in commands:\n script += [\"-e\", command]\n subprocess.call(script)\n\n im = None\n if os.stat(filepath).st_size != 0:\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n return im\n else:\n data = Image.core.grabclipboard()\n if isinstance(data, bytes):\n from . import BmpImagePlugin\n import io\n\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "src/PIL/ImageGrab.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (macOS and Windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport sys\n\nfrom . 
import Image\n\nif sys.platform == \"win32\":\n grabber = Image.core.grabscreen\nelif sys.platform == \"darwin\":\n import os\n import tempfile\n import subprocess\nelse:\n raise ImportError(\"ImageGrab is macOS and Windows only\")\n\n\ndef grab(bbox=None, include_layered_windows=False, all_screens=False):\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".png\")\n os.close(fh)\n subprocess.call([\"screencapture\", \"-x\", filepath])\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n if bbox:\n im = im.crop(bbox)\n else:\n offset, size, data = grabber(include_layered_windows, all_screens)\n im = Image.frombytes(\n \"RGB\",\n size,\n data,\n # RGB, 32-bit line padding, origin lower left corner\n \"raw\",\n \"BGR\",\n (size[0] * 3 + 3) & -4,\n -1,\n )\n if bbox:\n x0, y0 = offset\n left, top, right, bottom = bbox\n im = im.crop((left - x0, top - y0, right - x0, bottom - y0))\n return im\n\n\ndef grabclipboard():\n if sys.platform == \"darwin\":\n fh, filepath = tempfile.mkstemp(\".jpg\")\n os.close(fh)\n commands = [\n 'set theFile to (open for access POSIX file \"'\n + filepath\n + '\" with write permission)',\n \"try\",\n \" write (the clipboard as JPEG picture) to theFile\",\n \"end try\",\n \"close access theFile\",\n ]\n script = [\"osascript\"]\n for command in commands:\n script += [\"-e\", command]\n subprocess.call(script)\n\n im = None\n if os.stat(filepath).st_size != 0:\n im = Image.open(filepath)\n im.load()\n os.unlink(filepath)\n return im\n else:\n data = Image.core.grabclipboard()\n if isinstance(data, bytes):\n from . import BmpImagePlugin\n import io\n\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "src/PIL/ImageGrab.py"}]} | 1,706 | 312 |
gh_patches_debug_20985 | rasdani/github-patches | git_diff | facebookresearch__fairscale-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] pip package 0.1.3 fails to install
Both `python3.8 -m pip install fairscale --verbose` and `python3.8 -m pip install fairscale --no-build-isolation --verbose` trigger
```
ninja: error: '/tmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp', needed by '/t
mp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/build/temp.linux-x86_64-3.8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.o', missin
g and no known rule to make it
```
Cloning the repository and running `python3.8 -m pip install . --verbose` from within the directory works fine.
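The ninja error above points at a packaging problem rather than a build-system bug: the 0.1.3 sdist on PyPI apparently does not contain `fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp`, so building from the tarball fails while building from a git checkout succeeds. As a hedged illustration of how to confirm that diagnosis (script name and paths are assumptions for this sketch, not part of the upstream fix):

```python
# Sketch: check whether a downloaded sdist actually ships the C++/CUDA sources
# that setup.py's CUDAExtension expects. Paths mirror the setup.py shown below.
import tarfile

REQUIRED = [
    "fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp",
    "fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu",
]


def missing_sources(sdist_path: str) -> list:
    with tarfile.open(sdist_path, "r:gz") as tar:
        names = tar.getnames()  # members are prefixed with "fairscale-<version>/"
    return [req for req in REQUIRED if not any(n.endswith(req) for n in names)]


if __name__ == "__main__":
    print(missing_sources("dist/fairscale-0.1.3.tar.gz"))
```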
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fairscale/__init__.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 __version__ = "0.1.3"
7
8 ################################################################################
9 # Import most common subpackages
10 ################################################################################
11
12 from . import nn
13
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4
5 import os
6 import re
7 import warnings
8
9 import setuptools
10 import torch
11 from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
12
13 this_dir = os.path.dirname(os.path.abspath(__file__))
14
15
16 def fetch_requirements():
17 with open("requirements.txt") as f:
18 reqs = f.read().strip().split("\n")
19 return reqs
20
21
22 # https://packaging.python.org/guides/single-sourcing-package-version/
23 def find_version(version_file_path):
24 with open(version_file_path) as version_file:
25 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M)
26 if version_match:
27 return version_match.group(1)
28 raise RuntimeError("Unable to find version string.")
29
30
31 extensions = []
32 cmdclass = {}
33
34 force_cuda = os.getenv("FORCE_CUDA", "0") == "1"
35 if (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:
36 extensions.extend(
37 [
38 CUDAExtension(
39 name="fairscale.fused_adam_cuda",
40 include_dirs=[os.path.join(this_dir, "fairscale/clib/fused_adam_cuda")],
41 sources=[
42 "fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp",
43 "fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu",
44 ],
45 extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]},
46 )
47 ]
48 )
49
50 cmdclass["build_ext"] = BuildExtension
51 else:
52 warnings.warn("Cannot install FusedAdam cuda.")
53
54
55 if __name__ == "__main__":
56 setuptools.setup(
57 name="fairscale",
58 description="fairscale: A PyTorch library for large-scale and high-performance training.",
59 version=find_version("fairscale/__init__.py"),
60 install_requires=fetch_requirements(),
61 include_package_data=True,
62 packages=setuptools.find_packages(exclude=("tests", "tests.*")),
63 ext_modules=extensions,
64 cmdclass=cmdclass,
65 python_requires=">=3.6",
66 author="Facebook AI Research",
67 author_email="[email protected]",
68 classifiers=[
69 "Programming Language :: Python :: 3.6",
70 "Programming Language :: Python :: 3.7",
71 "Programming Language :: Python :: 3.8",
72 "License :: OSI Approved :: BSD License",
73 "Topic :: Scientific/Engineering :: Artificial Intelligence",
74 "Operating System :: OS Independent",
75 ],
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fairscale/__init__.py b/fairscale/__init__.py
--- a/fairscale/__init__.py
+++ b/fairscale/__init__.py
@@ -3,7 +3,7 @@
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
-__version__ = "0.1.3"
+__version__ = "0.1.4"
################################################################################
# Import most common subpackages
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -65,6 +65,8 @@
python_requires=">=3.6",
author="Facebook AI Research",
author_email="[email protected]",
+ long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.",
+ long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
| {"golden_diff": "diff --git a/fairscale/__init__.py b/fairscale/__init__.py\n--- a/fairscale/__init__.py\n+++ b/fairscale/__init__.py\n@@ -3,7 +3,7 @@\n # This source code is licensed under the BSD license found in the\n # LICENSE file in the root directory of this source tree.\n \n-__version__ = \"0.1.3\"\n+__version__ = \"0.1.4\"\n \n ################################################################################\n # Import most common subpackages\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,6 +65,8 @@\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n+ long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.\",\n+ long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n", "issue": "[bug] pip package 0.1.3 fails to install\nBoth `python3.8 -m pip install fairscale --verbose` and `python3.8 -m pip install fairscale --no-build-isolation --verbose` trigger\r\n\r\n```\r\nninja: error: '/tmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp', needed by '/t\r\nmp/pip-install-zqe21k7a/fairscale_4066f1fa225242299ead5fd852fd2ce8/build/temp.linux-x86_64-3.8/fairscale/clib/fused_adam_cuda/fused_adam_cuda.o', missin\r\ng and no known rule to make it\r\n```\r\n\r\nCloning the repository and running `python3.8 -m pip install . --verbose` from within the directory works fine.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = \"0.1.3\"\n\n################################################################################\n# Import most common subpackages\n################################################################################\n\nfrom . import nn\n", "path": "fairscale/__init__.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport os\nimport re\nimport warnings\n\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef fetch_requirements():\n with open(\"requirements.txt\") as f:\n reqs = f.read().strip().split(\"\\n\")\n return reqs\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\ndef find_version(version_file_path):\n with open(version_file_path) as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nextensions = []\ncmdclass = {}\n\nforce_cuda = os.getenv(\"FORCE_CUDA\", \"0\") == \"1\"\nif (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:\n extensions.extend(\n [\n CUDAExtension(\n name=\"fairscale.fused_adam_cuda\",\n include_dirs=[os.path.join(this_dir, \"fairscale/clib/fused_adam_cuda\")],\n sources=[\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp\",\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu\",\n ],\n extra_compile_args={\"cxx\": [\"-O3\"], \"nvcc\": [\"-O3\", \"--use_fast_math\"]},\n )\n ]\n )\n\n cmdclass[\"build_ext\"] = BuildExtension\nelse:\n warnings.warn(\"Cannot install FusedAdam cuda.\")\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"fairscale\",\n description=\"fairscale: A PyTorch library for large-scale and high-performance training.\",\n version=find_version(\"fairscale/__init__.py\"),\n install_requires=fetch_requirements(),\n include_package_data=True,\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Operating System :: OS Independent\",\n ],\n )\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = \"0.1.4\"\n\n################################################################################\n# Import most common subpackages\n################################################################################\n\nfrom . import nn\n", "path": "fairscale/__init__.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved.\n\nimport os\nimport re\nimport warnings\n\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef fetch_requirements():\n with open(\"requirements.txt\") as f:\n reqs = f.read().strip().split(\"\\n\")\n return reqs\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\ndef find_version(version_file_path):\n with open(version_file_path) as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nextensions = []\ncmdclass = {}\n\nforce_cuda = os.getenv(\"FORCE_CUDA\", \"0\") == \"1\"\nif (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:\n extensions.extend(\n [\n CUDAExtension(\n name=\"fairscale.fused_adam_cuda\",\n include_dirs=[os.path.join(this_dir, \"fairscale/clib/fused_adam_cuda\")],\n sources=[\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp\",\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu\",\n ],\n extra_compile_args={\"cxx\": [\"-O3\"], \"nvcc\": [\"-O3\", \"--use_fast_math\"]},\n )\n ]\n )\n\n cmdclass[\"build_ext\"] = BuildExtension\nelse:\n warnings.warn(\"Cannot install FusedAdam cuda.\")\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"fairscale\",\n description=\"fairscale: A PyTorch library for large-scale and high-performance training.\",\n version=find_version(\"fairscale/__init__.py\"),\n install_requires=fetch_requirements(),\n include_package_data=True,\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.\",\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Operating System :: OS Independent\",\n ],\n )\n", "path": "setup.py"}]} | 1,323 | 249 |
gh_patches_debug_26947 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-4194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Detecting and alerting of duplicate keys/components/entries in YAML files
### Is your feature request related to a problem? Please describe
It was found in release 1.3.11 that a PR to update the [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) contained duplicated component names.
This wastes CI resources rebuilding the duplicated components.
### Describe the solution you'd like
We want to have a check that detects duplicate entries based on keys/components/names and probably fails the GitHub check
### Describe alternatives you've considered
Manually check for duplicate values
### Acceptance Criteria
* The manifest check should fail at CI level for components with duplicate components.name values in opensearch and opensearch-dashboard as well as test manifests. See [what manifests are](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests)
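A minimal sketch of the kind of check described above, assuming a manifest with a top-level `components` list of `name` entries as in this repository's manifests (function and variable names here are illustrative, not the exact upstream implementation):

```python
# Sketch: flag duplicate component names in a build/test manifest.
from collections import Counter

import yaml


def find_duplicate_component_names(manifest_text: str) -> list:
    data = yaml.safe_load(manifest_text)
    names = [component["name"] for component in data.get("components", [])]
    return [name for name, count in Counter(names).items() if count > 1]


example = """
components:
  - name: OpenSearch
  - name: common-utils
  - name: OpenSearch
"""
print(find_duplicate_component_names(example))  # ['OpenSearch']
```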
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ci_workflow/ci_manifests.py`
Content:
```
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8
9 import re
10 from collections import Counter
11 from io import TextIOWrapper
12 from typing import Type, Union
13
14 import yaml
15
16 from ci_workflow.ci_args import CiArgs
17 from ci_workflow.ci_input_manifest import CiInputManifest
18 from ci_workflow.ci_test_manifest import CiTestManifest
19
20
21 class CiManifests:
22 @staticmethod
23 def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:
24 if re.search("-test.yml$", filename):
25 return CiTestManifest
26 else:
27 return CiInputManifest
28
29 @staticmethod
30 def __get_duplicate_component_names(count_component_names: Counter) -> list:
31 duplicate_component_names = []
32 for component_name, count in count_component_names.items():
33 if count > 1:
34 duplicate_component_names.append(component_name)
35 return duplicate_component_names
36
37 @staticmethod
38 def __check_duplicate_component_names(file: TextIOWrapper) -> None:
39 yaml_dict = yaml.safe_load(file)
40 component_names = []
41 for component in yaml_dict['components']:
42 component_names.append(component['name'])
43 count_component_names = Counter(component_names)
44
45 if set(count_component_names.values()) != set([1]):
46 duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)
47 duplicate_component_names_string = ', '.join(duplicate_component_names)
48 raise ValueError(f"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. ")
49 file.seek(0)
50
51 @classmethod
52 def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:
53 cls.__check_duplicate_component_names(file)
54 return cls.__klass(file.name)(file, args)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py
--- a/src/ci_workflow/ci_manifests.py
+++ b/src/ci_workflow/ci_manifests.py
@@ -26,25 +26,16 @@
else:
return CiInputManifest
- @staticmethod
- def __get_duplicate_component_names(count_component_names: Counter) -> list:
- duplicate_component_names = []
- for component_name, count in count_component_names.items():
- if count > 1:
- duplicate_component_names.append(component_name)
- return duplicate_component_names
-
@staticmethod
def __check_duplicate_component_names(file: TextIOWrapper) -> None:
yaml_dict = yaml.safe_load(file)
component_names = []
for component in yaml_dict['components']:
component_names.append(component['name'])
- count_component_names = Counter(component_names)
- if set(count_component_names.values()) != set([1]):
- duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)
- duplicate_component_names_string = ', '.join(duplicate_component_names)
+ duplicate_component_names = [comp for comp, count in Counter(component_names).items() if count > 1]
+ duplicate_component_names_string = ', '.join(duplicate_component_names)
+ if len(duplicate_component_names) > 0:
raise ValueError(f"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. ")
file.seek(0)
| {"golden_diff": "diff --git a/src/ci_workflow/ci_manifests.py b/src/ci_workflow/ci_manifests.py\n--- a/src/ci_workflow/ci_manifests.py\n+++ b/src/ci_workflow/ci_manifests.py\n@@ -26,25 +26,16 @@\n else:\n return CiInputManifest\n \n- @staticmethod\n- def __get_duplicate_component_names(count_component_names: Counter) -> list:\n- duplicate_component_names = []\n- for component_name, count in count_component_names.items():\n- if count > 1:\n- duplicate_component_names.append(component_name)\n- return duplicate_component_names\n-\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n- count_component_names = Counter(component_names)\n \n- if set(count_component_names.values()) != set([1]):\n- duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n- duplicate_component_names_string = ', '.join(duplicate_component_names)\n+ duplicate_component_names = [comp for comp, count in Counter(component_names).items() if count > 1]\n+ duplicate_component_names_string = ', '.join(duplicate_component_names)\n+ if len(duplicate_component_names) > 0:\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n file.seek(0)\n", "issue": "Detecting and alerting of duplication keys/components/entries in YAML file\n### Is your feature request related to a problem? Please describe\r\n\r\nit was found in release 1.3.11 , a PR to update [manifest](https://github.com/opensearch-project/opensearch-build/blob/main/manifests/1.3.11/opensearch-1.3.11.yml) has duplicated components name.\r\nIt would cause the resource wasted on CI to rebuild the duplicated components \r\n\r\n### Describe the solution you'd like\r\n\r\nWe want to have a check to detect if there is any duplication entries based on keys/components/names and probably fail the GitHub check\r\n\r\n### Describe alternatives you've considered\r\n\r\nManually check for duplicate values\r\n\r\n### Acceptance Criteria\r\n* The manifest check should fail at CI level for components with duplicate components.name values in opensearch and opensearch-dashboard as well as test manifests. 
See what are [manifests](https://github.com/opensearch-project/opensearch-build/wiki/Building-an-OpenSearch-and-OpenSearch-Dashboards-Distribution#what-are-manifests)\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\n\nimport re\nfrom collections import Counter\nfrom io import TextIOWrapper\nfrom typing import Type, Union\n\nimport yaml\n\nfrom ci_workflow.ci_args import CiArgs\nfrom ci_workflow.ci_input_manifest import CiInputManifest\nfrom ci_workflow.ci_test_manifest import CiTestManifest\n\n\nclass CiManifests:\n @staticmethod\n def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:\n if re.search(\"-test.yml$\", filename):\n return CiTestManifest\n else:\n return CiInputManifest\n\n @staticmethod\n def __get_duplicate_component_names(count_component_names: Counter) -> list:\n duplicate_component_names = []\n for component_name, count in count_component_names.items():\n if count > 1:\n duplicate_component_names.append(component_name)\n return duplicate_component_names\n\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n count_component_names = Counter(component_names)\n\n if set(count_component_names.values()) != set([1]):\n duplicate_component_names = CiManifests.__get_duplicate_component_names(count_component_names)\n duplicate_component_names_string = ', '.join(duplicate_component_names)\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. \")\n file.seek(0)\n\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n cls.__check_duplicate_component_names(file)\n return cls.__klass(file.name)(file, args)\n", "path": "src/ci_workflow/ci_manifests.py"}], "after_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\n\nimport re\nfrom collections import Counter\nfrom io import TextIOWrapper\nfrom typing import Type, Union\n\nimport yaml\n\nfrom ci_workflow.ci_args import CiArgs\nfrom ci_workflow.ci_input_manifest import CiInputManifest\nfrom ci_workflow.ci_test_manifest import CiTestManifest\n\n\nclass CiManifests:\n @staticmethod\n def __klass(filename: str) -> Union[Type[CiTestManifest], Type[CiInputManifest]]:\n if re.search(\"-test.yml$\", filename):\n return CiTestManifest\n else:\n return CiInputManifest\n\n @staticmethod\n def __check_duplicate_component_names(file: TextIOWrapper) -> None:\n yaml_dict = yaml.safe_load(file)\n component_names = []\n for component in yaml_dict['components']:\n component_names.append(component['name'])\n\n duplicate_component_names = [comp for comp, count in Counter(component_names).items() if count > 1]\n duplicate_component_names_string = ', '.join(duplicate_component_names)\n if len(duplicate_component_names) > 0:\n raise ValueError(f\"Found {duplicate_component_names_string} as a duplicate component(s) in manifest {file.name}. 
\")\n file.seek(0)\n\n @classmethod\n def from_file(cls, file: TextIOWrapper, args: CiArgs) -> Union[CiTestManifest, CiInputManifest]:\n cls.__check_duplicate_component_names(file)\n return cls.__klass(file.name)(file, args)\n", "path": "src/ci_workflow/ci_manifests.py"}]} | 1,018 | 332 |
gh_patches_debug_62586 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-907 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
locale.Error: unsupported locale setting exception when glibc locale files are not present
**Information**
- Solaar version: 1.0.3
- Distribution: Fedora
- Kernel version (ex. `uname -srmo`): `Linux 5.7.11-200.fc32.x86_64 x86_64 GNU/Linux`
- Output of `solaar show`: N/A
**Describe the bug**
Any Solaar invocation fails with a traceback when the locale.setlocale() call fails, e.g. due to missing glibc locale files for the currently set locale.
**To Reproduce**
Steps to reproduce the behavior:
```
$ sudo dnf remove glibc-langpack-de
$ export LC_ALL=de_CH.UTF-8
$ export LANG=de_CH.UTF-8
$ solaar --help
Traceback (most recent call last):
File "/usr/bin/solaar", line 59, in <module>
import solaar.gtk
File "/usr/lib/python3.8/site-packages/solaar/gtk.py", line 29, in <module>
import solaar.i18n as _i18n
File "/usr/lib/python3.8/site-packages/solaar/i18n.py", line 50, in <module>
locale.setlocale(locale.LC_ALL, '')
File "/usr/lib64/python3.8/locale.py", line 608, in setlocale
return _setlocale(category, locale)
locale.Error: unsupported locale setting
$
```
**Additional context**
Looks like #190 is still unfixed. Downstream bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1811313 .
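A minimal sketch of the defensive pattern that avoids the crash, assuming the desired behaviour is to fall back to the default "C" locale when the configured locale has no glibc data installed:

```python
# Sketch: do not let a missing locale definition abort program start-up.
import locale

try:
    locale.setlocale(locale.LC_ALL, "")  # honour LC_ALL / LANG if possible
except locale.Error:
    # e.g. LC_ALL=de_CH.UTF-8 without glibc-langpack-de installed
    locale.setlocale(locale.LC_ALL, "C")

language, encoding = locale.getlocale()
print(language, encoding)
```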
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/solaar/i18n.py`
Content:
```
1 # -*- python-mode -*-
2 # -*- coding: UTF-8 -*-
3
4 ## Copyright (C) 2012-2013 Daniel Pavel
5 ##
6 ## This program is free software; you can redistribute it and/or modify
7 ## it under the terms of the GNU General Public License as published by
8 ## the Free Software Foundation; either version 2 of the License, or
9 ## (at your option) any later version.
10 ##
11 ## This program is distributed in the hope that it will be useful,
12 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
13 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 ## GNU General Public License for more details.
15 ##
16 ## You should have received a copy of the GNU General Public License along
17 ## with this program; if not, write to the Free Software Foundation, Inc.,
18 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19
20 from __future__ import absolute_import, division, print_function, unicode_literals
21
22 import gettext as _gettext
23 import locale
24
25 from solaar import NAME as _NAME
26
27 #
28 #
29 #
30
31
32 def _find_locale_path(lc_domain):
33 import os.path as _path
34
35 import sys as _sys
36 prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))
37 src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))
38 del _sys
39
40 from glob import glob as _glob
41
42 for location in prefix_share, src_share:
43 mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))
44 if mo_files:
45 return _path.join(location, 'locale')
46
47 # del _path
48
49
50 locale.setlocale(locale.LC_ALL, '')
51 language, encoding = locale.getlocale()
52 del locale
53
54 _LOCALE_DOMAIN = _NAME.lower()
55 path = _find_locale_path(_LOCALE_DOMAIN)
56
57 _gettext.bindtextdomain(_LOCALE_DOMAIN, path)
58 _gettext.textdomain(_LOCALE_DOMAIN)
59 _gettext.install(_LOCALE_DOMAIN)
60
61 try:
62 unicode # noqa: F821
63 _ = lambda x: _gettext.gettext(x).decode('UTF-8')
64 ngettext = lambda *x: _gettext.ngettext(*x).decode('UTF-8')
65 except Exception:
66 _ = _gettext.gettext
67 ngettext = _gettext.ngettext
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/solaar/i18n.py b/lib/solaar/i18n.py
--- a/lib/solaar/i18n.py
+++ b/lib/solaar/i18n.py
@@ -47,7 +47,11 @@
# del _path
-locale.setlocale(locale.LC_ALL, '')
+try:
+ locale.setlocale(locale.LC_ALL, '')
+except Exception:
+ pass
+
language, encoding = locale.getlocale()
del locale
| {"golden_diff": "diff --git a/lib/solaar/i18n.py b/lib/solaar/i18n.py\n--- a/lib/solaar/i18n.py\n+++ b/lib/solaar/i18n.py\n@@ -47,7 +47,11 @@\n # del _path\n \n \n-locale.setlocale(locale.LC_ALL, '')\n+try:\n+ locale.setlocale(locale.LC_ALL, '')\n+except Exception:\n+ pass\n+\n language, encoding = locale.getlocale()\n del locale\n", "issue": "locale.Error: unsupported locale setting exception when glibc locale files are not present\n**Information**\r\n- Solaar version: 1.0.3\r\n- Distribution: Fedora\r\n- Kernel version (ex. `uname -srmo`): `Linux 5.7.11-200.fc32.x86_64 x86_64 GNU/Linux`\r\n- Output of `solaar show`: N/A\r\n\r\n**Describe the bug**\r\nAny solaar invocation is failing with a traceback when locale.setlocale() call fails, e.g. due to missing glibc locale files for the currently set locale.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\n$ sudo dnf remove glibc-langpack-de\r\n$ export LC_ALL=de_CH.UTF-8\r\n$ export LANG=de_CH.UTF-8\r\n$ solaar --help\r\nTraceback (most recent call last):\r\n File \"/usr/bin/solaar\", line 59, in <module>\r\n import solaar.gtk\r\n File \"/usr/lib/python3.8/site-packages/solaar/gtk.py\", line 29, in <module>\r\n import solaar.i18n as _i18n\r\n File \"/usr/lib/python3.8/site-packages/solaar/i18n.py\", line 50, in <module>\r\n locale.setlocale(locale.LC_ALL, '')\r\n File \"/usr/lib64/python3.8/locale.py\", line 608, in setlocale\r\n return _setlocale(category, locale)\r\nlocale.Error: unsupported locale setting\r\n$ \r\n```\r\n\r\n**Additional context**\r\nLooks like #190 is still unfixed. Downstream bug report: https://bugzilla.redhat.com/show_bug.cgi?id=1811313 .\n", "before_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport gettext as _gettext\nimport locale\n\nfrom solaar import NAME as _NAME\n\n#\n#\n#\n\n\ndef _find_locale_path(lc_domain):\n import os.path as _path\n\n import sys as _sys\n prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))\n src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))\n del _sys\n\n from glob import glob as _glob\n\n for location in prefix_share, src_share:\n mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))\n if mo_files:\n return _path.join(location, 'locale')\n\n # del _path\n\n\nlocale.setlocale(locale.LC_ALL, '')\nlanguage, encoding = locale.getlocale()\ndel locale\n\n_LOCALE_DOMAIN = _NAME.lower()\npath = _find_locale_path(_LOCALE_DOMAIN)\n\n_gettext.bindtextdomain(_LOCALE_DOMAIN, path)\n_gettext.textdomain(_LOCALE_DOMAIN)\n_gettext.install(_LOCALE_DOMAIN)\n\ntry:\n unicode # noqa: F821\n _ = lambda x: _gettext.gettext(x).decode('UTF-8')\n ngettext = lambda *x: _gettext.ngettext(*x).decode('UTF-8')\nexcept Exception:\n _ = _gettext.gettext\n ngettext = _gettext.ngettext\n", "path": "lib/solaar/i18n.py"}], "after_files": [{"content": "# -*- python-mode -*-\n# -*- coding: UTF-8 -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport gettext as _gettext\nimport locale\n\nfrom solaar import NAME as _NAME\n\n#\n#\n#\n\n\ndef _find_locale_path(lc_domain):\n import os.path as _path\n\n import sys as _sys\n prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))\n src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))\n del _sys\n\n from glob import glob as _glob\n\n for location in prefix_share, src_share:\n mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))\n if mo_files:\n return _path.join(location, 'locale')\n\n # del _path\n\n\ntry:\n locale.setlocale(locale.LC_ALL, '')\nexcept Exception:\n pass\n\nlanguage, encoding = locale.getlocale()\ndel locale\n\n_LOCALE_DOMAIN = _NAME.lower()\npath = _find_locale_path(_LOCALE_DOMAIN)\n\n_gettext.bindtextdomain(_LOCALE_DOMAIN, path)\n_gettext.textdomain(_LOCALE_DOMAIN)\n_gettext.install(_LOCALE_DOMAIN)\n\ntry:\n unicode # noqa: F821\n _ = lambda x: _gettext.gettext(x).decode('UTF-8')\n ngettext = lambda *x: _gettext.ngettext(*x).decode('UTF-8')\nexcept Exception:\n _ = _gettext.gettext\n ngettext = _gettext.ngettext\n", "path": "lib/solaar/i18n.py"}]} | 1,303 | 112 |
gh_patches_debug_32615 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve documentation of view decorators
Explain what our view decorators are doing and add docstrings of the following format:
```
"""
[Summary]
:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]
:type [ParamName]: [ParamType](, optional)
...
:raises [ErrorType]: [ErrorDescription]
...
:return: [ReturnDescription]
:rtype: [ReturnType]
"""
```
Improve documentation of view decorators
Explain what our view decorators are doing and add docstrings of the following format:
```
"""
[Summary]
:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]
:type [ParamName]: [ParamType](, optional)
...
:raises [ErrorType]: [ErrorDescription]
...
:return: [ReturnDescription]
:rtype: [ReturnType]
"""
```
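Applied to one of the decorators in this module, the requested format could look roughly like the sketch below (a simplified `staff_required` is used here for illustration; the final wording of the docstrings is up to the project):

```python
# Sketch: the requested reST docstring style on a simplified view decorator.
from functools import wraps

from django.core.exceptions import PermissionDenied


def staff_required(function):
    """
    Restrict a view to staff members and superusers.

    :param function: The view function which should be protected
    :type function: ~collections.abc.Callable

    :raises ~django.core.exceptions.PermissionDenied: If the user is neither staff nor superuser

    :return: The decorated view function
    :rtype: ~collections.abc.Callable
    """

    @wraps(function)
    def wrap(request, *args, **kwargs):
        if request.user.is_superuser or request.user.is_staff:
            return function(request, *args, **kwargs)
        raise PermissionDenied

    return wrap
```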
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/decorators.py`
Content:
```
1 import time
2 from functools import wraps
3
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import redirect
6
7 from .models import Region
8
9
10 def staff_required(function):
11 @wraps(function)
12 def wrap(request, *args, **kwargs):
13 user = request.user
14 # superusers and staff have access to this areas
15 if user.is_superuser or user.is_staff:
16 return function(request, *args, **kwargs)
17 raise PermissionDenied
18 return wrap
19
20
21 def region_permission_required(function):
22 @wraps(function)
23 def wrap(request, *args, **kwargs):
24 user = request.user
25 # superusers and staff have permissions for all regions
26 if user.is_superuser or user.is_staff:
27 return function(request, *args, **kwargs)
28 region = Region.get_current_region(request)
29 if region in user.profile.regions.all():
30 return function(request, *args, **kwargs)
31 raise PermissionDenied
32 return wrap
33
34 def modify_mfa_authenticated(function):
35 @wraps(function)
36 def wrap(request, *args, **kwargs):
37 if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):
38 request.session['mfa_redirect_url'] = request.path
39 return redirect('user_settings_auth_modify_mfa')
40 return function(request, *args, **kwargs)
41 return wrap
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/decorators.py b/src/cms/decorators.py
--- a/src/cms/decorators.py
+++ b/src/cms/decorators.py
@@ -1,3 +1,8 @@
+"""
+Django view decorators can be used to restrict the execution of a view function on certain conditions.
+
+For more information, see :doc:`topics/http/decorators`.
+"""
import time
from functools import wraps
@@ -8,6 +13,15 @@
def staff_required(function):
+ """
+ This decorator can be used to make sure a view can only be retrieved by users who are either staff or superusers.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
user = request.user
@@ -19,6 +33,15 @@
def region_permission_required(function):
+ """
+ This decorator can be used to make sure a view can only be retrieved by users of the requested region.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
user = request.user
@@ -32,6 +55,15 @@
return wrap
def modify_mfa_authenticated(function):
+ """
+ This decorator can be used to make sure a user can only modify his 2FA settings when he has a valid 2FA session.
+
+ :param function: The view function which should be protected
+ :type function: ~collections.abc.Callable
+
+ :return: The decorated function
+ :rtype: ~collections.abc.Callable
+ """
@wraps(function)
def wrap(request, *args, **kwargs):
if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):
| {"golden_diff": "diff --git a/src/cms/decorators.py b/src/cms/decorators.py\n--- a/src/cms/decorators.py\n+++ b/src/cms/decorators.py\n@@ -1,3 +1,8 @@\n+\"\"\"\n+Django view decorators can be used to restrict the execution of a view function on certain conditions.\n+\n+For more information, see :doc:`topics/http/decorators`.\n+\"\"\"\n import time\n from functools import wraps\n \n@@ -8,6 +13,15 @@\n \n \n def staff_required(function):\n+ \"\"\"\n+ This decorator can be used to make sure a view can only be retrieved by users who are either staff or superusers.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n@@ -19,6 +33,15 @@\n \n \n def region_permission_required(function):\n+ \"\"\"\n+ This decorator can be used to make sure a view can only be retrieved by users of the requested region.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n@@ -32,6 +55,15 @@\n return wrap\n \n def modify_mfa_authenticated(function):\n+ \"\"\"\n+ This decorator can be used to make sure a user can only modify his 2FA settings when he has a valid 2FA session.\n+\n+ :param function: The view function which should be protected\n+ :type function: ~collections.abc.Callable\n+\n+ :return: The decorated function\n+ :rtype: ~collections.abc.Callable\n+ \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):\n", "issue": "Improve documentation of view decorators\nExplain what our view decorators are doing and add docstrings of the following format:\r\n```\r\n\"\"\"\r\n[Summary]\r\n\r\n:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]\r\n:type [ParamName]: [ParamType](, optional)\r\n...\r\n:raises [ErrorType]: [ErrorDescription]\r\n...\r\n:return: [ReturnDescription]\r\n:rtype: [ReturnType]\r\n\"\"\"\r\n```\nImprove documentation of view decorators\nExplain what our view decorators are doing and add docstrings of the following format:\r\n```\r\n\"\"\"\r\n[Summary]\r\n\r\n:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]\r\n:type [ParamName]: [ParamType](, optional)\r\n...\r\n:raises [ErrorType]: [ErrorDescription]\r\n...\r\n:return: [ReturnDescription]\r\n:rtype: [ReturnType]\r\n\"\"\"\r\n```\n", "before_files": [{"content": "import time\nfrom functools import wraps\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\n\nfrom .models import Region\n\n\ndef staff_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have access to this areas\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\n\ndef region_permission_required(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have permissions for all regions\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n region = Region.get_current_region(request)\n if 
region in user.profile.regions.all():\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\ndef modify_mfa_authenticated(function):\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):\n request.session['mfa_redirect_url'] = request.path\n return redirect('user_settings_auth_modify_mfa')\n return function(request, *args, **kwargs)\n return wrap\n", "path": "src/cms/decorators.py"}], "after_files": [{"content": "\"\"\"\nDjango view decorators can be used to restrict the execution of a view function on certain conditions.\n\nFor more information, see :doc:`topics/http/decorators`.\n\"\"\"\nimport time\nfrom functools import wraps\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\n\nfrom .models import Region\n\n\ndef staff_required(function):\n \"\"\"\n This decorator can be used to make sure a view can only be retrieved by users who are either staff or superusers.\n\n :param function: The view function which should be protected\n :type function: ~collections.abc.Callable\n\n :return: The decorated function\n :rtype: ~collections.abc.Callable\n \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have access to this areas\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\n\ndef region_permission_required(function):\n \"\"\"\n This decorator can be used to make sure a view can only be retrieved by users of the requested region.\n\n :param function: The view function which should be protected\n :type function: ~collections.abc.Callable\n\n :return: The decorated function\n :rtype: ~collections.abc.Callable\n \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n user = request.user\n # superusers and staff have permissions for all regions\n if user.is_superuser or user.is_staff:\n return function(request, *args, **kwargs)\n region = Region.get_current_region(request)\n if region in user.profile.regions.all():\n return function(request, *args, **kwargs)\n raise PermissionDenied\n return wrap\n\ndef modify_mfa_authenticated(function):\n \"\"\"\n This decorator can be used to make sure a user can only modify his 2FA settings when he has a valid 2FA session.\n\n :param function: The view function which should be protected\n :type function: ~collections.abc.Callable\n\n :return: The decorated function\n :rtype: ~collections.abc.Callable\n \"\"\"\n @wraps(function)\n def wrap(request, *args, **kwargs):\n if not 'modify_mfa_authentication_time' in request.session or request.session['modify_mfa_authentication_time'] < (time.time() - 5 * 60):\n request.session['mfa_redirect_url'] = request.path\n return redirect('user_settings_auth_modify_mfa')\n return function(request, *args, **kwargs)\n return wrap\n", "path": "src/cms/decorators.py"}]} | 810 | 502 |
gh_patches_debug_15148 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1977 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FIX #565 (Import for apollo tracing extension requires telemetry)
## Description
FIX #565
## Types of Changes
<!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. -->
- [ ] Core
- [x] Bugfix
- [ ] New feature
- [ ] Enhancement/optimization
- [ ] Documentation
## Issues Fixed or Closed by This PR
* #565
## Checklist
<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [x] My code follows the code style of this project.
- [ ] My change requires a change to the documentation.
- [ ] I have updated the documentation accordingly.
- [x] I have read the CONTRIBUTING document.
- [ ] I have added tests to cover my changes.
- [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage).
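The change makes the tracing extensions import lazily, so `strawberry.extensions.tracing` no longer pulls in the opentelemetry dependency unless one of those classes is actually requested. A stand-alone sketch of that module-level `__getattr__` pattern (PEP 562), with hypothetical package and class names:

```python
# Sketch: contents of an illustrative mypackage/__init__.py using lazy imports.
# Accessing mypackage.HeavyThing only imports mypackage.heavy_module on demand.
import importlib

__all__ = ["HeavyThing"]

_LAZY = {"HeavyThing": ".heavy_module"}  # attribute name -> submodule


def __getattr__(name: str):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```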
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/extensions/tracing/__init__.py`
Content:
```
1 from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
2 from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/extensions/tracing/__init__.py b/strawberry/extensions/tracing/__init__.py
--- a/strawberry/extensions/tracing/__init__.py
+++ b/strawberry/extensions/tracing/__init__.py
@@ -1,2 +1,27 @@
-from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
-from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa
+import importlib
+from typing import TYPE_CHECKING
+
+
+if TYPE_CHECKING:
+ from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa
+ from .opentelemetry import ( # noqa
+ OpenTelemetryExtension,
+ OpenTelemetryExtensionSync,
+ )
+
+__all__ = [
+ "ApolloTracingExtension",
+ "ApolloTracingExtensionSync",
+ "OpenTelemetryExtension",
+ "OpenTelemetryExtensionSync",
+]
+
+
+def __getattr__(name: str):
+ if name in {"ApolloTracingExtension", "ApolloTracingExtensionSync"}:
+ return getattr(importlib.import_module(".apollo", __name__), name)
+
+ if name in {"OpenTelemetryExtension", "OpenTelemetryExtensionSync"}:
+ return getattr(importlib.import_module(".opentelemetry", __name__), name)
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| {"golden_diff": "diff --git a/strawberry/extensions/tracing/__init__.py b/strawberry/extensions/tracing/__init__.py\n--- a/strawberry/extensions/tracing/__init__.py\n+++ b/strawberry/extensions/tracing/__init__.py\n@@ -1,2 +1,27 @@\n-from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\n-from .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa\n+import importlib\n+from typing import TYPE_CHECKING\n+\n+\n+if TYPE_CHECKING:\n+ from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\n+ from .opentelemetry import ( # noqa\n+ OpenTelemetryExtension,\n+ OpenTelemetryExtensionSync,\n+ )\n+\n+__all__ = [\n+ \"ApolloTracingExtension\",\n+ \"ApolloTracingExtensionSync\",\n+ \"OpenTelemetryExtension\",\n+ \"OpenTelemetryExtensionSync\",\n+]\n+\n+\n+def __getattr__(name: str):\n+ if name in {\"ApolloTracingExtension\", \"ApolloTracingExtensionSync\"}:\n+ return getattr(importlib.import_module(\".apollo\", __name__), name)\n+\n+ if name in {\"OpenTelemetryExtension\", \"OpenTelemetryExtensionSync\"}:\n+ return getattr(importlib.import_module(\".opentelemetry\", __name__), name)\n+\n+ raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "issue": "FIX #565 (Import for apollo tracing extension requires telemetry)\n## Description\r\n\r\nFIX #565 \r\n\r\n## Types of Changes\r\n\r\n<!--- What types of changes does your pull request introduce? Put an `x` in all the boxes that apply. -->\r\n- [ ] Core\r\n- [x] Bugfix\r\n- [ ] New feature\r\n- [ ] Enhancement/optimization\r\n- [ ] Documentation\r\n\r\n## Issues Fixed or Closed by This PR\r\n\r\n* #565 \r\n\r\n## Checklist\r\n\r\n<!--- Go over all the following points, and put an `x` in all the boxes that apply. -->\r\n<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->\r\n- [x] My code follows the code style of this project.\r\n- [ ] My change requires a change to the documentation.\r\n- [ ] I have updated the documentation accordingly.\r\n- [x] I have read the CONTRIBUTING document.\r\n- [ ] I have added tests to cover my changes.\r\n- [x] I have tested the changes and verified that they work and don't break anything (as well as I can manage).\r\n\n", "before_files": [{"content": "from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\nfrom .opentelemetry import OpenTelemetryExtension, OpenTelemetryExtensionSync # noqa\n", "path": "strawberry/extensions/tracing/__init__.py"}], "after_files": [{"content": "import importlib\nfrom typing import TYPE_CHECKING\n\n\nif TYPE_CHECKING:\n from .apollo import ApolloTracingExtension, ApolloTracingExtensionSync # noqa\n from .opentelemetry import ( # noqa\n OpenTelemetryExtension,\n OpenTelemetryExtensionSync,\n )\n\n__all__ = [\n \"ApolloTracingExtension\",\n \"ApolloTracingExtensionSync\",\n \"OpenTelemetryExtension\",\n \"OpenTelemetryExtensionSync\",\n]\n\n\ndef __getattr__(name: str):\n if name in {\"ApolloTracingExtension\", \"ApolloTracingExtensionSync\"}:\n return getattr(importlib.import_module(\".apollo\", __name__), name)\n\n if name in {\"OpenTelemetryExtension\", \"OpenTelemetryExtensionSync\"}:\n return getattr(importlib.import_module(\".opentelemetry\", __name__), name)\n\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n", "path": "strawberry/extensions/tracing/__init__.py"}]} | 544 | 327 |
gh_patches_debug_35676 | rasdani/github-patches | git_diff | pytorch__examples-228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VAE loss
According to the expression in line 95, the KL-divergence term is calculated from
`0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)`
but I think the code in line 96-97 represents
`0.5 * sum(1 + log(sigma^2) - mu^2 - sigma)`
This might not be essential, because whether the last term is squared or not, the descending behavior of the loss stays unchanged.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vae/main.py`
Content:
```
1 from __future__ import print_function
2 import argparse
3 import torch
4 import torch.utils.data
5 from torch import nn, optim
6 from torch.autograd import Variable
7 from torchvision import datasets, transforms
8 from torchvision.utils import save_image
9
10
11 parser = argparse.ArgumentParser(description='VAE MNIST Example')
12 parser.add_argument('--batch-size', type=int, default=128, metavar='N',
13 help='input batch size for training (default: 128)')
14 parser.add_argument('--epochs', type=int, default=10, metavar='N',
15 help='number of epochs to train (default: 10)')
16 parser.add_argument('--no-cuda', action='store_true', default=False,
17 help='enables CUDA training')
18 parser.add_argument('--seed', type=int, default=1, metavar='S',
19 help='random seed (default: 1)')
20 parser.add_argument('--log-interval', type=int, default=10, metavar='N',
21 help='how many batches to wait before logging training status')
22 args = parser.parse_args()
23 args.cuda = not args.no_cuda and torch.cuda.is_available()
24
25
26 torch.manual_seed(args.seed)
27 if args.cuda:
28 torch.cuda.manual_seed(args.seed)
29
30
31 kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
32 train_loader = torch.utils.data.DataLoader(
33 datasets.MNIST('../data', train=True, download=True,
34 transform=transforms.ToTensor()),
35 batch_size=args.batch_size, shuffle=True, **kwargs)
36 test_loader = torch.utils.data.DataLoader(
37 datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),
38 batch_size=args.batch_size, shuffle=True, **kwargs)
39
40
41 class VAE(nn.Module):
42 def __init__(self):
43 super(VAE, self).__init__()
44
45 self.fc1 = nn.Linear(784, 400)
46 self.fc21 = nn.Linear(400, 20)
47 self.fc22 = nn.Linear(400, 20)
48 self.fc3 = nn.Linear(20, 400)
49 self.fc4 = nn.Linear(400, 784)
50
51 self.relu = nn.ReLU()
52 self.sigmoid = nn.Sigmoid()
53
54 def encode(self, x):
55 h1 = self.relu(self.fc1(x))
56 return self.fc21(h1), self.fc22(h1)
57
58 def reparameterize(self, mu, logvar):
59 if self.training:
60 std = logvar.mul(0.5).exp_()
61 eps = Variable(std.data.new(std.size()).normal_())
62 return eps.mul(std).add_(mu)
63 else:
64 return mu
65
66 def decode(self, z):
67 h3 = self.relu(self.fc3(z))
68 return self.sigmoid(self.fc4(h3))
69
70 def forward(self, x):
71 mu, logvar = self.encode(x.view(-1, 784))
72 z = self.reparameterize(mu, logvar)
73 return self.decode(z), mu, logvar
74
75
76 model = VAE()
77 if args.cuda:
78 model.cuda()
79
80 reconstruction_function = nn.BCELoss()
81
82
83 def loss_function(recon_x, x, mu, logvar):
84 BCE = reconstruction_function(recon_x, x.view(-1, 784))
85
86 # see Appendix B from VAE paper:
87 # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
88 # https://arxiv.org/abs/1312.6114
89 # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
90 KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
91 KLD = torch.sum(KLD_element).mul_(-0.5)
92 # Normalise by same number of elements as in reconstruction
93 KLD /= args.batch_size * 784
94
95 return BCE + KLD
96
97
98 optimizer = optim.Adam(model.parameters(), lr=1e-3)
99
100
101 def train(epoch):
102 model.train()
103 train_loss = 0
104 for batch_idx, (data, _) in enumerate(train_loader):
105 data = Variable(data)
106 if args.cuda:
107 data = data.cuda()
108 optimizer.zero_grad()
109 recon_batch, mu, logvar = model(data)
110 loss = loss_function(recon_batch, data, mu, logvar)
111 loss.backward()
112 train_loss += loss.data[0]
113 optimizer.step()
114 if batch_idx % args.log_interval == 0:
115 print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
116 epoch, batch_idx * len(data), len(train_loader.dataset),
117 100. * batch_idx / len(train_loader),
118 loss.data[0] / len(data)))
119
120 print('====> Epoch: {} Average loss: {:.4f}'.format(
121 epoch, train_loss / len(train_loader.dataset)))
122
123
124 def test(epoch):
125 model.eval()
126 test_loss = 0
127 for i, (data, _) in enumerate(test_loader):
128 if args.cuda:
129 data = data.cuda()
130 data = Variable(data, volatile=True)
131 recon_batch, mu, logvar = model(data)
132 test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
133 if i == 0:
134 save_image(recon_batch.data.cpu().view(args.batch_size, 1, 28, 28),
135 'reconstruction_' + str(epoch) + '.png')
136
137 test_loss /= len(test_loader.dataset)
138 print('====> Test set loss: {:.4f}'.format(test_loss))
139
140
141 for epoch in range(1, args.epochs + 1):
142 train(epoch)
143 test(epoch)
144 sample = Variable(torch.randn(64, 20))
145 if args.cuda:
146 sample = sample.cuda()
147 sample = model.decode(sample).cpu()
148 save_image(sample.data.view(64, 1, 28, 28), 'sample_' + str(epoch) + '.png')
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vae/main.py b/vae/main.py
--- a/vae/main.py
+++ b/vae/main.py
@@ -4,6 +4,7 @@
import torch.utils.data
from torch import nn, optim
from torch.autograd import Variable
+from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
@@ -77,18 +78,15 @@
if args.cuda:
model.cuda()
-reconstruction_function = nn.BCELoss()
-
def loss_function(recon_x, x, mu, logvar):
- BCE = reconstruction_function(recon_x, x.view(-1, 784))
+ BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784))
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
- KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
- KLD = torch.sum(KLD_element).mul_(-0.5)
+ KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
# Normalise by same number of elements as in reconstruction
KLD /= args.batch_size * 784
@@ -131,8 +129,11 @@
recon_batch, mu, logvar = model(data)
test_loss += loss_function(recon_batch, data, mu, logvar).data[0]
if i == 0:
- save_image(recon_batch.data.cpu().view(args.batch_size, 1, 28, 28),
- 'reconstruction_' + str(epoch) + '.png')
+ n = min(data.size(0), 8)
+ comparison = torch.cat([data[:n],
+ recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
+ save_image(comparison.data.cpu(),
+ 'results/reconstruction_' + str(epoch) + '.png', nrow=n)
test_loss /= len(test_loader.dataset)
print('====> Test set loss: {:.4f}'.format(test_loss))
@@ -145,4 +146,5 @@
if args.cuda:
sample = sample.cuda()
sample = model.decode(sample).cpu()
- save_image(sample.data.view(64, 1, 28, 28), 'sample_' + str(epoch) + '.png')
+ save_image(sample.data.view(64, 1, 28, 28),
+ 'results/sample_' + str(epoch) + '.png')
| {"golden_diff": "diff --git a/vae/main.py b/vae/main.py\n--- a/vae/main.py\n+++ b/vae/main.py\n@@ -4,6 +4,7 @@\n import torch.utils.data\n from torch import nn, optim\n from torch.autograd import Variable\n+from torch.nn import functional as F\n from torchvision import datasets, transforms\n from torchvision.utils import save_image\n \n@@ -77,18 +78,15 @@\n if args.cuda:\n model.cuda()\n \n-reconstruction_function = nn.BCELoss()\n-\n \n def loss_function(recon_x, x, mu, logvar):\n- BCE = reconstruction_function(recon_x, x.view(-1, 784))\n+ BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784))\n \n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n- KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n- KLD = torch.sum(KLD_element).mul_(-0.5)\n+ KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n # Normalise by same number of elements as in reconstruction\n KLD /= args.batch_size * 784\n \n@@ -131,8 +129,11 @@\n recon_batch, mu, logvar = model(data)\n test_loss += loss_function(recon_batch, data, mu, logvar).data[0]\n if i == 0:\n- save_image(recon_batch.data.cpu().view(args.batch_size, 1, 28, 28),\n- 'reconstruction_' + str(epoch) + '.png')\n+ n = min(data.size(0), 8)\n+ comparison = torch.cat([data[:n],\n+ recon_batch.view(args.batch_size, 1, 28, 28)[:n]])\n+ save_image(comparison.data.cpu(),\n+ 'results/reconstruction_' + str(epoch) + '.png', nrow=n)\n \n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.4f}'.format(test_loss))\n@@ -145,4 +146,5 @@\n if args.cuda:\n sample = sample.cuda()\n sample = model.decode(sample).cpu()\n- save_image(sample.data.view(64, 1, 28, 28), 'sample_' + str(epoch) + '.png')\n+ save_image(sample.data.view(64, 1, 28, 28),\n+ 'results/sample_' + str(epoch) + '.png')\n", "issue": "VAE loss\nAccording to the expression in line 95, the KL-divergence term is calculated from\r\n`0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)`\r\nbut I think the code in line 96-97 represents\r\n`0.5 * sum(1 + log(sigma^2) - mu^2 - sigma)`\r\n\r\nThis might not be essential because whether the last term is squared or not, the loss descending behavior stays unchanged.\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\n\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n 
datasets.MNIST('../data', train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.fc1 = nn.Linear(784, 400)\n self.fc21 = nn.Linear(400, 20)\n self.fc22 = nn.Linear(400, 20)\n self.fc3 = nn.Linear(20, 400)\n self.fc4 = nn.Linear(400, 784)\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def encode(self, x):\n h1 = self.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = Variable(std.data.new(std.size()).normal_())\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n h3 = self.relu(self.fc3(z))\n return self.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x.view(-1, 784))\n z = self.reparameterize(mu, logvar)\n return self.decode(z), mu, logvar\n\n\nmodel = VAE()\nif args.cuda:\n model.cuda()\n\nreconstruction_function = nn.BCELoss()\n\n\ndef loss_function(recon_x, x, mu, logvar):\n BCE = reconstruction_function(recon_x, x.view(-1, 784))\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n KLD = torch.sum(KLD_element).mul_(-0.5)\n # Normalise by same number of elements as in reconstruction\n KLD /= args.batch_size * 784\n\n return BCE + KLD\n\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n\ndef train(epoch):\n model.train()\n train_loss = 0\n for batch_idx, (data, _) in enumerate(train_loader):\n data = Variable(data)\n if args.cuda:\n data = data.cuda()\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.data[0]\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader),\n loss.data[0] / len(data)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n for i, (data, _) in enumerate(test_loader):\n if args.cuda:\n data = data.cuda()\n data = Variable(data, volatile=True)\n recon_batch, mu, logvar = model(data)\n test_loss += loss_function(recon_batch, data, mu, logvar).data[0]\n if i == 0:\n save_image(recon_batch.data.cpu().view(args.batch_size, 1, 28, 28),\n 'reconstruction_' + str(epoch) + '.png')\n\n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.4f}'.format(test_loss))\n\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n test(epoch)\n sample = Variable(torch.randn(64, 20))\n if args.cuda:\n sample = sample.cuda()\n sample = model.decode(sample).cpu()\n save_image(sample.data.view(64, 1, 28, 28), 'sample_' + str(epoch) + '.png')\n", "path": "vae/main.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\n\n\nparser = argparse.ArgumentParser(description='VAE MNIST Example')\nparser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.fc1 = nn.Linear(784, 400)\n self.fc21 = nn.Linear(400, 20)\n self.fc22 = nn.Linear(400, 20)\n self.fc3 = nn.Linear(20, 400)\n self.fc4 = nn.Linear(400, 784)\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def encode(self, x):\n h1 = self.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = logvar.mul(0.5).exp_()\n eps = Variable(std.data.new(std.size()).normal_())\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n h3 = self.relu(self.fc3(z))\n return self.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x.view(-1, 784))\n z = self.reparameterize(mu, logvar)\n return self.decode(z), mu, logvar\n\n\nmodel = VAE()\nif args.cuda:\n model.cuda()\n\n\ndef loss_function(recon_x, x, mu, logvar):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784))\n\n # see 
Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n # Normalise by same number of elements as in reconstruction\n KLD /= args.batch_size * 784\n\n return BCE + KLD\n\n\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n\ndef train(epoch):\n model.train()\n train_loss = 0\n for batch_idx, (data, _) in enumerate(train_loader):\n data = Variable(data)\n if args.cuda:\n data = data.cuda()\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.data[0]\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader),\n loss.data[0] / len(data)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n for i, (data, _) in enumerate(test_loader):\n if args.cuda:\n data = data.cuda()\n data = Variable(data, volatile=True)\n recon_batch, mu, logvar = model(data)\n test_loss += loss_function(recon_batch, data, mu, logvar).data[0]\n if i == 0:\n n = min(data.size(0), 8)\n comparison = torch.cat([data[:n],\n recon_batch.view(args.batch_size, 1, 28, 28)[:n]])\n save_image(comparison.data.cpu(),\n 'results/reconstruction_' + str(epoch) + '.png', nrow=n)\n\n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.4f}'.format(test_loss))\n\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n test(epoch)\n sample = Variable(torch.randn(64, 20))\n if args.cuda:\n sample = sample.cuda()\n sample = model.decode(sample).cpu()\n save_image(sample.data.view(64, 1, 28, 28),\n 'results/sample_' + str(epoch) + '.png')\n", "path": "vae/main.py"}]} | 2,044 | 652 |
gh_patches_debug_523 | rasdani/github-patches | git_diff | streamlit__streamlit-2217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Streamlit fails to start without Git executable
# Summary
Streamlit version `0.69.1` fails to start when run inside a Docker container that doesn't have Git installed.
# Steps to reproduce
1. Create a `Dockerfile` with the following contents:
```dockerfile
FROM python:3.8-slim
RUN pip install streamlit
CMD ["streamlit", "hello"]
```
2. Build the image:
```bash
docker build -t demo .
```
3. Run the app:
```bash
docker run -it --rm demo
```
## Expected behavior:
Streamlit starts without issues.
## Actual behavior:
Streamlit fails to start and displays the following error message:
```bash
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/git/__init__.py", line 83, in <module>
refresh()
File "/usr/local/lib/python3.8/site-packages/git/__init__.py", line 73, in refresh
if not Git.refresh(path=path):
File "/usr/local/lib/python3.8/site-packages/git/cmd.py", line 278, in refresh
raise ImportError(err)
ImportError: Bad git executable.
The git executable must be specified in one of the following ways:
- be included in your $PATH
- be set via $GIT_PYTHON_GIT_EXECUTABLE
- explicitly set via git.refresh()
All git commands will error until this is rectified.
This initial warning can be silenced or aggravated in the future by setting the
$GIT_PYTHON_REFRESH environment variable. Use one of the following values:
- quiet|q|silence|s|none|n|0: for no warning or exception
- warn|w|warning|1: for a printed warning
- error|e|raise|r|2: for a raised exception
Example:
export GIT_PYTHON_REFRESH=quiet
```
## Is this a regression?
**yes** (worked up until at least version `0.67.1`)
# Debug info
- Streamlit version: `0.69.1`
- Python version: `3.8.6`
- Using Conda? PipEnv? PyEnv? Pex? **NO**
- OS version: `4.19.76-linuxkit`
# Additional information
This bug can be worked around by setting `GIT_PYTHON_REFRESH=quiet` environment variable inside the Docker image.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/setup.py`
Content:
```
1 import os
2 import platform
3 import setuptools
4 import subprocess
5 import sys
6
7 from pipenv.project import Project
8 from pipenv.utils import convert_deps_to_pip
9 from setuptools.command.install import install
10
11 VERSION = "0.69.1" # PEP-440
12
13 NAME = "streamlit"
14
15 DESCRIPTION = "The fastest way to build data apps in Python"
16
17 LONG_DESCRIPTION = (
18 "Streamlit's open-source app framework is the easiest way "
19 "for data scientists and machine learning engineers to "
20 "create beautiful, performant apps in only a few hours! "
21 "All in pure Python. All for free."
22 )
23
24 pipfile = Project(chdir=False).parsed_pipfile
25
26 packages = pipfile["packages"].copy()
27 requirements = convert_deps_to_pip(packages, r=False)
28
29 # Check whether xcode tools are available before making watchdog a
30 # dependency (only if the current system is a Mac).
31 if platform.system() == "Darwin":
32 has_xcode = subprocess.call(["xcode-select", "--version"], shell=False) == 0
33 has_gcc = subprocess.call(["gcc", "--version"], shell=False) == 0
34
35 if not (has_xcode and has_gcc):
36 try:
37 requirements.remove("watchdog")
38 except ValueError:
39 pass
40
41
42 class VerifyVersionCommand(install):
43 """Custom command to verify that the git tag matches our version"""
44
45 description = "verify that the git tag matches our version"
46
47 def run(self):
48 tag = os.getenv("CIRCLE_TAG")
49
50 if tag != VERSION:
51 info = "Git tag: {0} does not match the version of this app: {1}".format(
52 tag, VERSION
53 )
54 sys.exit(info)
55
56
57 setuptools.setup(
58 name=NAME,
59 version=VERSION,
60 description=DESCRIPTION,
61 long_description=LONG_DESCRIPTION,
62 url="https://streamlit.io",
63 author="Streamlit Inc",
64 author_email="[email protected]",
65 python_requires=">=3.6",
66 license="Apache 2",
67 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
68 # Requirements
69 install_requires=requirements,
70 zip_safe=False, # install source files not egg
71 include_package_data=True, # copy html and friends
72 entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
73 # For Windows so that streamlit * commands work ie.
74 # - streamlit version
75 # - streamlit hello
76 scripts=["bin/streamlit.cmd"],
77 cmdclass={
78 "verify": VerifyVersionCommand,
79 },
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -8,7 +8,7 @@
from pipenv.utils import convert_deps_to_pip
from setuptools.command.install import install
-VERSION = "0.69.1" # PEP-440
+VERSION = "0.69.2" # PEP-440
NAME = "streamlit"
| {"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -8,7 +8,7 @@\n from pipenv.utils import convert_deps_to_pip\n from setuptools.command.install import install\n \n-VERSION = \"0.69.1\" # PEP-440\n+VERSION = \"0.69.2\" # PEP-440\n \n NAME = \"streamlit\"\n", "issue": "Streamlit fails to start without Git executable\n# Summary\r\n\r\nStreamlit version `0.69.1` fails to start when run inside a Docker container that doesn't have Git installed.\r\n\r\n# Steps to reproduce\r\n\r\n1. Create a `Dockerfile` with the following contents:\r\n```dockerfile\r\nFROM python:3.8-slim\r\nRUN pip install streamlit\r\nCMD [\"streamlit\", \"hello\"]\r\n```\r\n2. Build the image:\r\n```bash\r\ndocker build -t demo .\r\n```\r\n3. Run the app:\r\n```bash\r\ndocker run -it --rm demo\r\n```\r\n\r\n## Expected behavior:\r\n\r\nStreamlit starts without issues.\r\n\r\n## Actual behavior:\r\n\r\nStreamlit fails to start and displays the following error message:\r\n\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/git/__init__.py\", line 83, in <module>\r\n refresh()\r\n File \"/usr/local/lib/python3.8/site-packages/git/__init__.py\", line 73, in refresh\r\n if not Git.refresh(path=path):\r\n File \"/usr/local/lib/python3.8/site-packages/git/cmd.py\", line 278, in refresh\r\n raise ImportError(err)\r\nImportError: Bad git executable.\r\nThe git executable must be specified in one of the following ways:\r\n - be included in your $PATH\r\n - be set via $GIT_PYTHON_GIT_EXECUTABLE\r\n - explicitly set via git.refresh()\r\n\r\nAll git commands will error until this is rectified.\r\n\r\nThis initial warning can be silenced or aggravated in the future by setting the\r\n$GIT_PYTHON_REFRESH environment variable. Use one of the following values:\r\n - quiet|q|silence|s|none|n|0: for no warning or exception\r\n - warn|w|warning|1: for a printed warning\r\n - error|e|raise|r|2: for a raised exception\r\n\r\nExample:\r\n export GIT_PYTHON_REFRESH=quiet\r\n```\r\n\r\n## Is this a regression?\r\n\r\n**yes** (worked up until at least version `0.67.1`)\r\n\r\n# Debug info\r\n\r\n- Streamlit version: `0.69.1`\r\n- Python version: `3.8.6`\r\n- Using Conda? PipEnv? PyEnv? Pex? **NO**\r\n- OS version: `4.19.76-linuxkit`\r\n\r\n# Additional information\r\n\r\nThis bug can be worked around by setting `GIT_PYTHON_REFRESH=quiet` environment variable inside the Docker image.\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport setuptools\nimport subprocess\nimport sys\n\nfrom pipenv.project import Project\nfrom pipenv.utils import convert_deps_to_pip\nfrom setuptools.command.install import install\n\nVERSION = \"0.69.1\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n# Check whether xcode tools are available before making watchdog a\n# dependency (only if the current system is a Mac).\nif platform.system() == \"Darwin\":\n has_xcode = subprocess.call([\"xcode-select\", \"--version\"], shell=False) == 0\n has_gcc = subprocess.call([\"gcc\", \"--version\"], shell=False) == 0\n\n if not (has_xcode and has_gcc):\n try:\n requirements.remove(\"watchdog\")\n except ValueError:\n pass\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}], "after_files": [{"content": "import os\nimport platform\nimport setuptools\nimport subprocess\nimport sys\n\nfrom pipenv.project import Project\nfrom pipenv.utils import convert_deps_to_pip\nfrom setuptools.command.install import install\n\nVERSION = \"0.69.2\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n# Check whether xcode tools are available before making watchdog a\n# dependency (only if the current system is a Mac).\nif platform.system() == \"Darwin\":\n has_xcode = subprocess.call([\"xcode-select\", \"--version\"], shell=False) == 0\n has_gcc = subprocess.call([\"gcc\", \"--version\"], shell=False) == 0\n\n if not (has_xcode and has_gcc):\n try:\n requirements.remove(\"watchdog\")\n except ValueError:\n pass\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]} | 1,507 | 98 |
gh_patches_debug_34311 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive for CKV_AZURE_43: check storage account name
I'm building my Storage Account names like this
```
name = "${local.saname_prefix}diagnostics${module.tf-var-project.random_id}
```
With https://github.com/bridgecrewio/checkov/pull/429 merged I now get a Check failure on the SA name:
```
Check: CKV_AZURE_43: "Ensure the Storage Account naming rules"
FAILED for resource: azurerm_storage_account.diagnostics
File: /az_diag_sa.tf:8-22
8 | resource "azurerm_storage_account" "diagnostics" {
9 | #checkov:skip=CKV_AZURE_35:Public access is allowed
10 | name = "${local.saname_prefix}diagnostics${module.tf-var-project.random_id}"
````
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/azure/StorageAccountName.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
2 from checkov.common.models.enums import CheckResult, CheckCategories
3 import re
4 from typing import List
5
6 STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')
7
8
9 class StorageAccountName(BaseResourceCheck):
10 def __init__(self):
11 name = "Ensure Storage Accounts adhere to the naming rules"
12 id = "CKV_AZURE_43"
13 supported_resources = ['azurerm_storage_account']
14 categories = [CheckCategories.CONVENTION]
15 super().__init__(name=name, id=id, categories=categories,
16 supported_resources=supported_resources)
17
18 def scan_resource_conf(self, conf):
19 """
20 The Storage Account naming reference:
21 https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts
22 :param conf: azurerm_storage_account configuration
23 :return: <CheckResult>
24 """
25 return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED
26
27 def get_evaluated_keys(self) -> List[str]:
28 return ['name']
29
30
31 check = StorageAccountName()
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py
--- a/checkov/terraform/checks/resource/azure/StorageAccountName.py
+++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py
@@ -1,31 +1,41 @@
+import re
+from typing import List, Dict, Any
+
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckResult, CheckCategories
-import re
-from typing import List
-STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')
+STO_NAME_REGEX = re.compile(r"^[a-z0-9]{3,24}$")
+VARIABLE_REFS = ("local.", "module.", "var.")
class StorageAccountName(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure Storage Accounts adhere to the naming rules"
id = "CKV_AZURE_43"
- supported_resources = ['azurerm_storage_account']
+ supported_resources = ["azurerm_storage_account"]
categories = [CheckCategories.CONVENTION]
- super().__init__(name=name, id=id, categories=categories,
- supported_resources=supported_resources)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
+ def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:
"""
The Storage Account naming reference:
https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts
:param conf: azurerm_storage_account configuration
:return: <CheckResult>
"""
- return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED
+ name = conf.get("name")
+ if name:
+ name = name[0]
+ if any(x in name for x in VARIABLE_REFS):
+ # in the case we couldn't evaluate the name, just ignore
+ return CheckResult.UNKNOWN
+ if re.findall(STO_NAME_REGEX, str(conf["name"][0])):
+ return CheckResult.PASSED
+
+ return CheckResult.FAILED
def get_evaluated_keys(self) -> List[str]:
- return ['name']
+ return ["name"]
check = StorageAccountName()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n--- a/checkov/terraform/checks/resource/azure/StorageAccountName.py\n+++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n@@ -1,31 +1,41 @@\n+import re\n+from typing import List, Dict, Any\n+\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckResult, CheckCategories\n-import re\n-from typing import List\n \n-STO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')\n+STO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\n+VARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n \n \n class StorageAccountName(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n- supported_resources = ['azurerm_storage_account']\n+ supported_resources = [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n- super().__init__(name=name, id=id, categories=categories,\n- supported_resources=supported_resources)\n+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n- return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED\n+ name = conf.get(\"name\")\n+ if name:\n+ name = name[0]\n+ if any(x in name for x in VARIABLE_REFS):\n+ # in the case we couldn't evaluate the name, just ignore\n+ return CheckResult.UNKNOWN\n+ if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n+ return CheckResult.PASSED\n+\n+ return CheckResult.FAILED\n \n def get_evaluated_keys(self) -> List[str]:\n- return ['name']\n+ return [\"name\"]\n \n \n check = StorageAccountName()\n", "issue": "False positive for CKV_AZURE_43: check storage account name\nI'm building my Storage Account names like this\r\n```\r\nname = \"${local.saname_prefix}diagnostics${module.tf-var-project.random_id}\r\n```\r\n\r\nWith https://github.com/bridgecrewio/checkov/pull/429 merged I now get a Check failure on the SA name:\r\n\r\n```\r\nCheck: CKV_AZURE_43: \"Ensure the Storage Account naming rules\"\r\n\tFAILED for resource: azurerm_storage_account.diagnostics\r\n\tFile: /az_diag_sa.tf:8-22\r\n\r\n\t\t8 | resource \"azurerm_storage_account\" \"diagnostics\" {\r\n\t\t9 | #checkov:skip=CKV_AZURE_35:Public access is allowed\r\n\t\t10 | name = \"${local.saname_prefix}diagnostics${module.tf-var-project.random_id}\"\r\n\r\n````\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nimport re\nfrom typing import List\n\nSTO_NAME_REGEX = re.compile('^[a-z0-9]{3,24}$')\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources = ['azurerm_storage_account']\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories,\n 
supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n return CheckResult.PASSED if conf.get('name') and re.findall(STO_NAME_REGEX, str(conf['name'][0])) else CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['name']\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}], "after_files": [{"content": "import re\nfrom typing import List, Dict, Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nSTO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\nVARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources = [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n name = conf.get(\"name\")\n if name:\n name = name[0]\n if any(x in name for x in VARIABLE_REFS):\n # in the case we couldn't evaluate the name, just ignore\n return CheckResult.UNKNOWN\n if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"name\"]\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}]} | 792 | 571 |
gh_patches_debug_23360 | rasdani/github-patches | git_diff | allegro__ralph-3159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Doc fixes
Some minor doc fixes with a bit of style change
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ralph/dashboards/management/commands/push_graphs_to_statsd.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import logging
3 import textwrap
4
5 from django.conf import settings
6 from django.core.management.base import BaseCommand
7 from django.utils.text import slugify
8
9 from ralph.dashboards.models import Graph
10 from ralph.lib.metrics import build_statsd_client
11
12 logger = logging.getLogger(__name__)
13 PREFIX = settings.STATSD_GRAPHS_PREFIX
14 STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)
15
16
17 def normalize(s):
18 s = slugify(s)
19 return s.replace('-', '_')
20
21
22 class Command(BaseCommand):
23 """Push to statsd data generated by graphs."""
24 help = textwrap.dedent(__doc__).strip()
25
26 def handle(self, *args, **kwargs):
27 statsd = build_statsd_client(prefix=STATSD_PATH)
28 graphs = Graph.objects.filter(push_to_statsd=True)
29 for graph in graphs:
30 graph_data = graph.get_data()
31 graph_name = normalize(graph.name)
32 for label, value in zip(graph_data['labels'], graph_data['series']):
33 path = STATSD_PATH.format(graph_name, normalize(label))
34 statsd.gauge(path, value)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py
--- a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py
+++ b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py
@@ -10,8 +10,6 @@
from ralph.lib.metrics import build_statsd_client
logger = logging.getLogger(__name__)
-PREFIX = settings.STATSD_GRAPHS_PREFIX
-STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)
def normalize(s):
@@ -24,11 +22,11 @@
help = textwrap.dedent(__doc__).strip()
def handle(self, *args, **kwargs):
- statsd = build_statsd_client(prefix=STATSD_PATH)
+ statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX)
graphs = Graph.objects.filter(push_to_statsd=True)
for graph in graphs:
graph_data = graph.get_data()
graph_name = normalize(graph.name)
for label, value in zip(graph_data['labels'], graph_data['series']):
- path = STATSD_PATH.format(graph_name, normalize(label))
+ path = '.'.join((graph_name, normalize(label)))
statsd.gauge(path, value)
| {"golden_diff": "diff --git a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n--- a/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n+++ b/src/ralph/dashboards/management/commands/push_graphs_to_statsd.py\n@@ -10,8 +10,6 @@\n from ralph.lib.metrics import build_statsd_client\n \n logger = logging.getLogger(__name__)\n-PREFIX = settings.STATSD_GRAPHS_PREFIX\n-STATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)\n \n \n def normalize(s):\n@@ -24,11 +22,11 @@\n help = textwrap.dedent(__doc__).strip()\n \n def handle(self, *args, **kwargs):\n- statsd = build_statsd_client(prefix=STATSD_PATH)\n+ statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n- path = STATSD_PATH.format(graph_name, normalize(label))\n+ path = '.'.join((graph_name, normalize(label)))\n statsd.gauge(path, value)\n", "issue": "Doc fixes\nSome minor doc fixes with a bit of style change\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport textwrap\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom ralph.dashboards.models import Graph\nfrom ralph.lib.metrics import build_statsd_client\n\nlogger = logging.getLogger(__name__)\nPREFIX = settings.STATSD_GRAPHS_PREFIX\nSTATSD_PATH = '{}.{{}}.{{}}'.format(PREFIX)\n\n\ndef normalize(s):\n s = slugify(s)\n return s.replace('-', '_')\n\n\nclass Command(BaseCommand):\n \"\"\"Push to statsd data generated by graphs.\"\"\"\n help = textwrap.dedent(__doc__).strip()\n\n def handle(self, *args, **kwargs):\n statsd = build_statsd_client(prefix=STATSD_PATH)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n path = STATSD_PATH.format(graph_name, normalize(label))\n statsd.gauge(path, value)\n", "path": "src/ralph/dashboards/management/commands/push_graphs_to_statsd.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport logging\nimport textwrap\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom ralph.dashboards.models import Graph\nfrom ralph.lib.metrics import build_statsd_client\n\nlogger = logging.getLogger(__name__)\n\n\ndef normalize(s):\n s = slugify(s)\n return s.replace('-', '_')\n\n\nclass Command(BaseCommand):\n \"\"\"Push to statsd data generated by graphs.\"\"\"\n help = textwrap.dedent(__doc__).strip()\n\n def handle(self, *args, **kwargs):\n statsd = build_statsd_client(prefix=settings.STATSD_GRAPHS_PREFIX)\n graphs = Graph.objects.filter(push_to_statsd=True)\n for graph in graphs:\n graph_data = graph.get_data()\n graph_name = normalize(graph.name)\n for label, value in zip(graph_data['labels'], graph_data['series']):\n path = '.'.join((graph_name, normalize(label)))\n statsd.gauge(path, value)\n", "path": "src/ralph/dashboards/management/commands/push_graphs_to_statsd.py"}]} | 594 | 313 |
gh_patches_debug_23048 | rasdani/github-patches | git_diff | cupy__cupy-5759 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cupy.concatenate()` misses arguments `dtype` and `casting`
Refs:
- NumPy: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html
- CuPy: https://docs.cupy.dev/en/stable/reference/generated/cupy.concatenate.html
The `dtype` argument is needed by the Array API standard (#5698, #4789).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/_manipulation/join.py`
Content:
```
1 import cupy
2 from cupy import _core
3
4
5 def column_stack(tup):
6 """Stacks 1-D and 2-D arrays as columns into a 2-D array.
7
8 A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays
9 are concatenated along the second axis.
10
11 Args:
12 tup (sequence of arrays): 1-D or 2-D arrays to be stacked.
13
14 Returns:
15 cupy.ndarray: A new 2-D array of stacked columns.
16
17 .. seealso:: :func:`numpy.column_stack`
18
19 """
20 if any(not isinstance(a, cupy.ndarray) for a in tup):
21 raise TypeError('Only cupy arrays can be column stacked')
22
23 lst = list(tup)
24 for i, a in enumerate(lst):
25 if a.ndim == 1:
26 a = a[:, cupy.newaxis]
27 lst[i] = a
28 elif a.ndim != 2:
29 raise ValueError(
30 'Only 1 or 2 dimensional arrays can be column stacked')
31
32 return concatenate(lst, axis=1)
33
34
35 def concatenate(tup, axis=0, out=None):
36 """Joins arrays along an axis.
37
38 Args:
39 tup (sequence of arrays): Arrays to be joined. All of these should have
40 same dimensionalities except the specified axis.
41 axis (int or None): The axis to join arrays along.
42 If axis is None, arrays are flattened before use.
43 Default is 0.
44 out (cupy.ndarray): Output array.
45
46 Returns:
47 cupy.ndarray: Joined array.
48
49 .. seealso:: :func:`numpy.concatenate`
50
51 """
52 if axis is None:
53 tup = [m.ravel() for m in tup]
54 axis = 0
55 return _core.concatenate_method(tup, axis, out)
56
57
58 def dstack(tup):
59 """Stacks arrays along the third axis.
60
61 Args:
62 tup (sequence of arrays): Arrays to be stacked. Each array is converted
63 by :func:`cupy.atleast_3d` before stacking.
64
65 Returns:
66 cupy.ndarray: Stacked array.
67
68 .. seealso:: :func:`numpy.dstack`
69
70 """
71 return concatenate([cupy.atleast_3d(m) for m in tup], 2)
72
73
74 def hstack(tup):
75 """Stacks arrays horizontally.
76
77 If an input array has one dimension, then the array is treated as a
78 horizontal vector and stacked along the first axis. Otherwise, the array is
79 stacked along the second axis.
80
81 Args:
82 tup (sequence of arrays): Arrays to be stacked.
83
84 Returns:
85 cupy.ndarray: Stacked array.
86
87 .. seealso:: :func:`numpy.hstack`
88
89 """
90 arrs = [cupy.atleast_1d(a) for a in tup]
91 axis = 1
92 if arrs[0].ndim == 1:
93 axis = 0
94 return concatenate(arrs, axis)
95
96
97 def vstack(tup):
98 """Stacks arrays vertically.
99
100 If an input array has one dimension, then the array is treated as a
101 horizontal vector and stacked along the additional axis at the head.
102 Otherwise, the array is stacked along the first axis.
103
104 Args:
105 tup (sequence of arrays): Arrays to be stacked. Each array is converted
106 by :func:`cupy.atleast_2d` before stacking.
107
108 Returns:
109 cupy.ndarray: Stacked array.
110
111 .. seealso:: :func:`numpy.dstack`
112
113 """
114 return concatenate([cupy.atleast_2d(m) for m in tup], 0)
115
116
117 def stack(tup, axis=0, out=None):
118 """Stacks arrays along a new axis.
119
120 Args:
121 tup (sequence of arrays): Arrays to be stacked.
122 axis (int): Axis along which the arrays are stacked.
123 out (cupy.ndarray): Output array.
124
125 Returns:
126 cupy.ndarray: Stacked array.
127
128 .. seealso:: :func:`numpy.stack`
129 """
130 return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/_manipulation/join.py b/cupy/_manipulation/join.py
--- a/cupy/_manipulation/join.py
+++ b/cupy/_manipulation/join.py
@@ -32,7 +32,7 @@
return concatenate(lst, axis=1)
-def concatenate(tup, axis=0, out=None):
+def concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):
"""Joins arrays along an axis.
Args:
@@ -42,6 +42,11 @@
If axis is None, arrays are flattened before use.
Default is 0.
out (cupy.ndarray): Output array.
+ dtype (str or dtype): If provided, the destination array will have this
+ dtype. Cannot be provided together with ``out``.
+ casting ({‘no’, ‘equiv’, ‘safe’, ‘same_kind’, ‘unsafe’}, optional):
+ Controls what kind of data casting may occur. Defaults to
+ ``'same_kind'``.
Returns:
cupy.ndarray: Joined array.
@@ -52,7 +57,7 @@
if axis is None:
tup = [m.ravel() for m in tup]
axis = 0
- return _core.concatenate_method(tup, axis, out)
+ return _core.concatenate_method(tup, axis, out, dtype, casting)
def dstack(tup):
| {"golden_diff": "diff --git a/cupy/_manipulation/join.py b/cupy/_manipulation/join.py\n--- a/cupy/_manipulation/join.py\n+++ b/cupy/_manipulation/join.py\n@@ -32,7 +32,7 @@\n return concatenate(lst, axis=1)\n \n \n-def concatenate(tup, axis=0, out=None):\n+def concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):\n \"\"\"Joins arrays along an axis.\n \n Args:\n@@ -42,6 +42,11 @@\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n+ dtype (str or dtype): If provided, the destination array will have this\n+ dtype. Cannot be provided together with ``out``.\n+ casting ({\u2018no\u2019, \u2018equiv\u2019, \u2018safe\u2019, \u2018same_kind\u2019, \u2018unsafe\u2019}, optional):\n+ Controls what kind of data casting may occur. Defaults to\n+ ``'same_kind'``.\n \n Returns:\n cupy.ndarray: Joined array.\n@@ -52,7 +57,7 @@\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n- return _core.concatenate_method(tup, axis, out)\n+ return _core.concatenate_method(tup, axis, out, dtype, casting)\n \n \n def dstack(tup):\n", "issue": "`cupy.concatenate()` misses arguments `dtype` and `casting`\nRefs:\r\n- NumPy: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html\r\n- CuPy: https://docs.cupy.dev/en/stable/reference/generated/cupy.concatenate.html\r\n\r\nThe `dtype` argument is needed by the Array API standard (#5698, #4789).\n", "before_files": [{"content": "import cupy\nfrom cupy import _core\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0, out=None):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int or None): The axis to join arrays along.\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n return _core.concatenate_method(tup, axis, out)\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. 
seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0, out=None):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out)\n", "path": "cupy/_manipulation/join.py"}], "after_files": [{"content": "import cupy\nfrom cupy import _core\n\n\ndef column_stack(tup):\n \"\"\"Stacks 1-D and 2-D arrays as columns into a 2-D array.\n\n A 1-D array is first converted to a 2-D column array. Then, the 2-D arrays\n are concatenated along the second axis.\n\n Args:\n tup (sequence of arrays): 1-D or 2-D arrays to be stacked.\n\n Returns:\n cupy.ndarray: A new 2-D array of stacked columns.\n\n .. seealso:: :func:`numpy.column_stack`\n\n \"\"\"\n if any(not isinstance(a, cupy.ndarray) for a in tup):\n raise TypeError('Only cupy arrays can be column stacked')\n\n lst = list(tup)\n for i, a in enumerate(lst):\n if a.ndim == 1:\n a = a[:, cupy.newaxis]\n lst[i] = a\n elif a.ndim != 2:\n raise ValueError(\n 'Only 1 or 2 dimensional arrays can be column stacked')\n\n return concatenate(lst, axis=1)\n\n\ndef concatenate(tup, axis=0, out=None, *, dtype=None, casting='same_kind'):\n \"\"\"Joins arrays along an axis.\n\n Args:\n tup (sequence of arrays): Arrays to be joined. All of these should have\n same dimensionalities except the specified axis.\n axis (int or None): The axis to join arrays along.\n If axis is None, arrays are flattened before use.\n Default is 0.\n out (cupy.ndarray): Output array.\n dtype (str or dtype): If provided, the destination array will have this\n dtype. Cannot be provided together with ``out``.\n casting ({\u2018no\u2019, \u2018equiv\u2019, \u2018safe\u2019, \u2018same_kind\u2019, \u2018unsafe\u2019}, optional):\n Controls what kind of data casting may occur. Defaults to\n ``'same_kind'``.\n\n Returns:\n cupy.ndarray: Joined array.\n\n .. seealso:: :func:`numpy.concatenate`\n\n \"\"\"\n if axis is None:\n tup = [m.ravel() for m in tup]\n axis = 0\n return _core.concatenate_method(tup, axis, out, dtype, casting)\n\n\ndef dstack(tup):\n \"\"\"Stacks arrays along the third axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_3d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_3d(m) for m in tup], 2)\n\n\ndef hstack(tup):\n \"\"\"Stacks arrays horizontally.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the first axis. 
Otherwise, the array is\n stacked along the second axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.hstack`\n\n \"\"\"\n arrs = [cupy.atleast_1d(a) for a in tup]\n axis = 1\n if arrs[0].ndim == 1:\n axis = 0\n return concatenate(arrs, axis)\n\n\ndef vstack(tup):\n \"\"\"Stacks arrays vertically.\n\n If an input array has one dimension, then the array is treated as a\n horizontal vector and stacked along the additional axis at the head.\n Otherwise, the array is stacked along the first axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked. Each array is converted\n by :func:`cupy.atleast_2d` before stacking.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.dstack`\n\n \"\"\"\n return concatenate([cupy.atleast_2d(m) for m in tup], 0)\n\n\ndef stack(tup, axis=0, out=None):\n \"\"\"Stacks arrays along a new axis.\n\n Args:\n tup (sequence of arrays): Arrays to be stacked.\n axis (int): Axis along which the arrays are stacked.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Stacked array.\n\n .. seealso:: :func:`numpy.stack`\n \"\"\"\n return concatenate([cupy.expand_dims(x, axis) for x in tup], axis, out)\n", "path": "cupy/_manipulation/join.py"}]} | 1,538 | 322 |
gh_patches_debug_25497 | rasdani/github-patches | git_diff | tensorflow__addons-2048 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate GELU
Per https://github.com/tensorflow/community/pull/252, once https://github.com/tensorflow/tensorflow/pull/41178 merges we'll need to deprecate our GELU for versions of TensorFlow that include it within core.
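A minimal sketch of what a version-gated deprecation could look like; the `2.4.0` cutoff and the `packaging` dependency are assumptions for illustration, not what the addons codebase actually uses:
```python
# Hedged sketch only: warn when the installed TensorFlow already ships GELU in core.
# The "2.4.0" threshold below is an assumed cutoff, not a confirmed release number.
import warnings

import tensorflow as tf
from packaging import version


def warn_if_gelu_in_core():
    if version.parse(tf.__version__) >= version.parse("2.4.0"):
        warnings.warn(
            "Addons GELU is deprecated; use the implementation bundled with core TensorFlow.",
            DeprecationWarning,
        )
```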
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/layers/gelu.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements GELU activation."""
16
17 import tensorflow as tf
18 from tensorflow_addons.activations import gelu
19 from typeguard import typechecked
20
21
22 @tf.keras.utils.register_keras_serializable(package="Addons")
23 class GELU(tf.keras.layers.Layer):
24 """Gaussian Error Linear Unit.
25
26 A smoother version of ReLU generally used
27 in the BERT or BERT architecture based models.
28 Original paper: https://arxiv.org/abs/1606.08415
29
30 Input shape:
31 Arbitrary. Use the keyword argument `input_shape`
32 (tuple of integers, does not include the samples axis)
33 when using this layer as the first layer in a model.
34
35 Output shape:
36 Same shape as the input.
37 """
38
39 @typechecked
40 def __init__(self, approximate: bool = True, **kwargs):
41 super().__init__(**kwargs)
42 self.approximate = approximate
43 self.supports_masking = True
44
45 def call(self, inputs):
46 return gelu(inputs, approximate=self.approximate)
47
48 def get_config(self):
49 config = {"approximate": self.approximate}
50 base_config = super().get_config()
51 return {**base_config, **config}
52
53 def compute_output_shape(self, input_shape):
54 return input_shape
55
```
Path: `tensorflow_addons/activations/gelu.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16 import tensorflow as tf
17 import math
18 import warnings
19
20 from tensorflow_addons.utils import types
21 from tensorflow_addons.utils.resource_loader import LazySO
22 from tensorflow_addons import options
23
24 _activation_so = LazySO("custom_ops/activations/_activation_ops.so")
25
26
27 @tf.keras.utils.register_keras_serializable(package="Addons")
28 def gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:
29 """Gaussian Error Linear Unit.
30
31 Computes gaussian error linear:
32 `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or
33 `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),
34 depending on whether approximation is enabled.
35
36 See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)
37 and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).
38
39 Args:
40 x: A `Tensor`. Must be one of the following types:
41 `float16`, `float32`, `float64`.
42 approximate: bool, whether to enable approximation.
43 Returns:
44 A `Tensor`. Has the same type as `x`.
45 """
46 x = tf.convert_to_tensor(x)
47
48 if not options.TF_ADDONS_PY_OPS:
49 try:
50 return _gelu_custom_op(x, approximate)
51 except tf.errors.NotFoundError:
52 options.warn_fallback("gelu")
53
54 return _gelu_py(x, approximate)
55
56
57 def _gelu_custom_op(x, approximate):
58 warnings.warn(
59 "The activations custom ops are deprecated and will be removed in TensorFlow Addons "
60 "v0.12.0. \nPlease use the pure python version of Gelu instead by using the "
61 "`TF_ADDONS_PY_OPS` flag. \nFor more info about this flag, see "
62 "https://github.com/tensorflow/addons#gpucpu-custom-ops ",
63 DeprecationWarning,
64 )
65 return _activation_so.ops.addons_gelu(x, approximate)
66
67
68 @tf.RegisterGradient("Addons>Gelu")
69 def _gelu_grad(op, grad):
70 return _activation_so.ops.addons_gelu_grad(
71 grad, op.inputs[0], op.get_attr("approximate")
72 )
73
74
75 def _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:
76 x = tf.convert_to_tensor(x)
77 if approximate:
78 pi = tf.cast(math.pi, x.dtype)
79 coeff = tf.cast(0.044715, x.dtype)
80 return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
81 else:
82 return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/activations/gelu.py b/tensorflow_addons/activations/gelu.py
--- a/tensorflow_addons/activations/gelu.py
+++ b/tensorflow_addons/activations/gelu.py
@@ -43,6 +43,12 @@
Returns:
A `Tensor`. Has the same type as `x`.
"""
+ warnings.warn(
+ "gelu activation has been migrated to core TensorFlow, "
+ "and will be deprecated in Addons 0.12.",
+ DeprecationWarning,
+ )
+
x = tf.convert_to_tensor(x)
if not options.TF_ADDONS_PY_OPS:
diff --git a/tensorflow_addons/layers/gelu.py b/tensorflow_addons/layers/gelu.py
--- a/tensorflow_addons/layers/gelu.py
+++ b/tensorflow_addons/layers/gelu.py
@@ -14,6 +14,8 @@
# ==============================================================================
"""Implements GELU activation."""
+import warnings
+
import tensorflow as tf
from tensorflow_addons.activations import gelu
from typeguard import typechecked
@@ -38,6 +40,12 @@
@typechecked
def __init__(self, approximate: bool = True, **kwargs):
+ warnings.warn(
+ "gelu activation has been migrated to core TensorFlow, "
+ "and will be deprecated in Addons 0.12.",
+ DeprecationWarning,
+ )
+
super().__init__(**kwargs)
self.approximate = approximate
self.supports_masking = True
| {"golden_diff": "diff --git a/tensorflow_addons/activations/gelu.py b/tensorflow_addons/activations/gelu.py\n--- a/tensorflow_addons/activations/gelu.py\n+++ b/tensorflow_addons/activations/gelu.py\n@@ -43,6 +43,12 @@\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n+ warnings.warn(\n+ \"gelu activation has been migrated to core TensorFlow, \"\n+ \"and will be deprecated in Addons 0.12.\",\n+ DeprecationWarning,\n+ )\n+\n x = tf.convert_to_tensor(x)\n \n if not options.TF_ADDONS_PY_OPS:\ndiff --git a/tensorflow_addons/layers/gelu.py b/tensorflow_addons/layers/gelu.py\n--- a/tensorflow_addons/layers/gelu.py\n+++ b/tensorflow_addons/layers/gelu.py\n@@ -14,6 +14,8 @@\n # ==============================================================================\n \"\"\"Implements GELU activation.\"\"\"\n \n+import warnings\n+\n import tensorflow as tf\n from tensorflow_addons.activations import gelu\n from typeguard import typechecked\n@@ -38,6 +40,12 @@\n \n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n+ warnings.warn(\n+ \"gelu activation has been migrated to core TensorFlow, \"\n+ \"and will be deprecated in Addons 0.12.\",\n+ DeprecationWarning,\n+ )\n+\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n", "issue": "Deprecate GELU \nPer https://github.com/tensorflow/community/pull/252, oonce https://github.com/tensorflow/tensorflow/pull/41178 merges we'll need to deprecate our GELU for versions of TensorFlow that include it within core.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements GELU activation.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_addons.activations import gelu\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass GELU(tf.keras.layers.Layer):\n \"\"\"Gaussian Error Linear Unit.\n\n A smoother version of ReLU generally used\n in the BERT or BERT architecture based models.\n Original paper: https://arxiv.org/abs/1606.08415\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n \"\"\"\n\n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n\n def call(self, inputs):\n return gelu(inputs, approximate=self.approximate)\n\n def get_config(self):\n config = {\"approximate\": self.approximate}\n base_config = super().get_config()\n return {**base_config, **config}\n\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "tensorflow_addons/layers/gelu.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport math\nimport warnings\n\nfrom tensorflow_addons.utils import types\nfrom tensorflow_addons.utils.resource_loader import LazySO\nfrom tensorflow_addons import options\n\n_activation_so = LazySO(\"custom_ops/activations/_activation_ops.so\")\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n \"\"\"Gaussian Error Linear Unit.\n\n Computes gaussian error linear:\n `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or\n `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),\n depending on whether approximation is enabled.\n\n See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)\n and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).\n\n Args:\n x: A `Tensor`. Must be one of the following types:\n `float16`, `float32`, `float64`.\n approximate: bool, whether to enable approximation.\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n x = tf.convert_to_tensor(x)\n\n if not options.TF_ADDONS_PY_OPS:\n try:\n return _gelu_custom_op(x, approximate)\n except tf.errors.NotFoundError:\n options.warn_fallback(\"gelu\")\n\n return _gelu_py(x, approximate)\n\n\ndef _gelu_custom_op(x, approximate):\n warnings.warn(\n \"The activations custom ops are deprecated and will be removed in TensorFlow Addons \"\n \"v0.12.0. \\nPlease use the pure python version of Gelu instead by using the \"\n \"`TF_ADDONS_PY_OPS` flag. \\nFor more info about this flag, see \"\n \"https://github.com/tensorflow/addons#gpucpu-custom-ops \",\n DeprecationWarning,\n )\n return _activation_so.ops.addons_gelu(x, approximate)\n\n\[email protected](\"Addons>Gelu\")\ndef _gelu_grad(op, grad):\n return _activation_so.ops.addons_gelu_grad(\n grad, op.inputs[0], op.get_attr(\"approximate\")\n )\n\n\ndef _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n x = tf.convert_to_tensor(x)\n if approximate:\n pi = tf.cast(math.pi, x.dtype)\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))\n", "path": "tensorflow_addons/activations/gelu.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements GELU activation.\"\"\"\n\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow_addons.activations import gelu\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass GELU(tf.keras.layers.Layer):\n \"\"\"Gaussian Error Linear Unit.\n\n A smoother version of ReLU generally used\n in the BERT or BERT architecture based models.\n Original paper: https://arxiv.org/abs/1606.08415\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n \"\"\"\n\n @typechecked\n def __init__(self, approximate: bool = True, **kwargs):\n warnings.warn(\n \"gelu activation has been migrated to core TensorFlow, \"\n \"and will be deprecated in Addons 0.12.\",\n DeprecationWarning,\n )\n\n super().__init__(**kwargs)\n self.approximate = approximate\n self.supports_masking = True\n\n def call(self, inputs):\n return gelu(inputs, approximate=self.approximate)\n\n def get_config(self):\n config = {\"approximate\": self.approximate}\n base_config = super().get_config()\n return {**base_config, **config}\n\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "tensorflow_addons/layers/gelu.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport tensorflow as tf\nimport math\nimport warnings\n\nfrom tensorflow_addons.utils import types\nfrom tensorflow_addons.utils.resource_loader import LazySO\nfrom tensorflow_addons import options\n\n_activation_so = LazySO(\"custom_ops/activations/_activation_ops.so\")\n\n\[email protected]_keras_serializable(package=\"Addons\")\ndef gelu(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n \"\"\"Gaussian Error Linear Unit.\n\n Computes gaussian error linear:\n `0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))` or\n `x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`, where P(X) ~ N(0, 1),\n depending on whether approximation is enabled.\n\n See [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)\n and [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).\n\n Args:\n x: A `Tensor`. Must be one of the following types:\n `float16`, `float32`, `float64`.\n approximate: bool, whether to enable approximation.\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n warnings.warn(\n \"gelu activation has been migrated to core TensorFlow, \"\n \"and will be deprecated in Addons 0.12.\",\n DeprecationWarning,\n )\n\n x = tf.convert_to_tensor(x)\n\n if not options.TF_ADDONS_PY_OPS:\n try:\n return _gelu_custom_op(x, approximate)\n except tf.errors.NotFoundError:\n options.warn_fallback(\"gelu\")\n\n return _gelu_py(x, approximate)\n\n\ndef _gelu_custom_op(x, approximate):\n warnings.warn(\n \"The activations custom ops are deprecated and will be removed in TensorFlow Addons \"\n \"v0.12.0. \\nPlease use the pure python version of Gelu instead by using the \"\n \"`TF_ADDONS_PY_OPS` flag. \\nFor more info about this flag, see \"\n \"https://github.com/tensorflow/addons#gpucpu-custom-ops \",\n DeprecationWarning,\n )\n return _activation_so.ops.addons_gelu(x, approximate)\n\n\[email protected](\"Addons>Gelu\")\ndef _gelu_grad(op, grad):\n return _activation_so.ops.addons_gelu_grad(\n grad, op.inputs[0], op.get_attr(\"approximate\")\n )\n\n\ndef _gelu_py(x: types.TensorLike, approximate: bool = True) -> tf.Tensor:\n x = tf.convert_to_tensor(x)\n if approximate:\n pi = tf.cast(math.pi, x.dtype)\n coeff = tf.cast(0.044715, x.dtype)\n return 0.5 * x * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))\n else:\n return 0.5 * x * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))\n", "path": "tensorflow_addons/activations/gelu.py"}]} | 1,870 | 364 |
gh_patches_debug_7541 | rasdani/github-patches | git_diff | twisted__twisted-12106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 24.1.0 to unbreak users who use the latest PyPy
#12084 is breaking CI for Tahoe-LAFS, so it is probably breaking real-world usage for someone somewhere too. It would be good to have a release sooner rather than later.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/_version.py`
Content:
```
1 """
2 Provides Twisted version information.
3 """
4
5 # This file is auto-generated! Do not edit!
6 # Use `python -m incremental.update Twisted` to change this file.
7
8 from incremental import Version
9
10 __version__ = Version("Twisted", 23, 10, 0, post=0)
11 __all__ = ["__version__"]
12
```
Path: `src/twisted/copyright.py`
Content:
```
1 # Copyright (c) Twisted Matrix Laboratories.
2 # See LICENSE for details.
3
4 """
5 Copyright information for Twisted.
6 """
7
8
9 __all__ = ["copyright", "disclaimer", "longversion", "version"]
10
11 from twisted import __version__ as version, version as _longversion
12
13 longversion = str(_longversion)
14
15 copyright = """\
16 Copyright (c) 2001-2023 Twisted Matrix Laboratories.
17 See LICENSE for details."""
18
19 disclaimer = """
20 Twisted, the Framework of Your Internet
21 {}
22
23 Permission is hereby granted, free of charge, to any person obtaining
24 a copy of this software and associated documentation files (the
25 "Software"), to deal in the Software without restriction, including
26 without limitation the rights to use, copy, modify, merge, publish,
27 distribute, sublicense, and/or sell copies of the Software, and to
28 permit persons to whom the Software is furnished to do so, subject to
29 the following conditions:
30
31 The above copyright notice and this permission notice shall be
32 included in all copies or substantial portions of the Software.
33
34 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
37 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
38 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
39 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41
42 """.format(
43 copyright,
44 )
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/twisted/_version.py b/src/twisted/_version.py
--- a/src/twisted/_version.py
+++ b/src/twisted/_version.py
@@ -7,5 +7,5 @@
from incremental import Version
-__version__ = Version("Twisted", 23, 10, 0, post=0)
+__version__ = Version("Twisted", 24, 3, 0, post=0)
__all__ = ["__version__"]
diff --git a/src/twisted/copyright.py b/src/twisted/copyright.py
--- a/src/twisted/copyright.py
+++ b/src/twisted/copyright.py
@@ -13,7 +13,7 @@
longversion = str(_longversion)
copyright = """\
-Copyright (c) 2001-2023 Twisted Matrix Laboratories.
+Copyright (c) 2001-2024 Twisted Matrix Laboratories.
See LICENSE for details."""
disclaimer = """
| {"golden_diff": "diff --git a/src/twisted/_version.py b/src/twisted/_version.py\n--- a/src/twisted/_version.py\n+++ b/src/twisted/_version.py\n@@ -7,5 +7,5 @@\n \n from incremental import Version\n \n-__version__ = Version(\"Twisted\", 23, 10, 0, post=0)\n+__version__ = Version(\"Twisted\", 24, 3, 0, post=0)\n __all__ = [\"__version__\"]\ndiff --git a/src/twisted/copyright.py b/src/twisted/copyright.py\n--- a/src/twisted/copyright.py\n+++ b/src/twisted/copyright.py\n@@ -13,7 +13,7 @@\n longversion = str(_longversion)\n \n copyright = \"\"\"\\\n-Copyright (c) 2001-2023 Twisted Matrix Laboratories.\n+Copyright (c) 2001-2024 Twisted Matrix Laboratories.\n See LICENSE for details.\"\"\"\n \n disclaimer = \"\"\"\n", "issue": "Release 24.1.0 to unbreak users who use the latest PyPy\n#12084 is breaking CI for Tahoe-LAFS, so probably is breaking real-world usage for someone somewhere too. So it'd be good to have a release sooner rather than later.\n", "before_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 23, 10, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}, {"content": "# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCopyright information for Twisted.\n\"\"\"\n\n\n__all__ = [\"copyright\", \"disclaimer\", \"longversion\", \"version\"]\n\nfrom twisted import __version__ as version, version as _longversion\n\nlongversion = str(_longversion)\n\ncopyright = \"\"\"\\\nCopyright (c) 2001-2023 Twisted Matrix Laboratories.\nSee LICENSE for details.\"\"\"\n\ndisclaimer = \"\"\"\nTwisted, the Framework of Your Internet\n{}\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\".format(\n copyright,\n)\n", "path": "src/twisted/copyright.py"}], "after_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! 
Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 24, 3, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}, {"content": "# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nCopyright information for Twisted.\n\"\"\"\n\n\n__all__ = [\"copyright\", \"disclaimer\", \"longversion\", \"version\"]\n\nfrom twisted import __version__ as version, version as _longversion\n\nlongversion = str(_longversion)\n\ncopyright = \"\"\"\\\nCopyright (c) 2001-2024 Twisted Matrix Laboratories.\nSee LICENSE for details.\"\"\"\n\ndisclaimer = \"\"\"\nTwisted, the Framework of Your Internet\n{}\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\".format(\n copyright,\n)\n", "path": "src/twisted/copyright.py"}]} | 838 | 228 |
gh_patches_debug_6887 | rasdani/github-patches | git_diff | sherlock-project__sherlock-911 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[site_list.py] change numbering to reduce commit size
Letting the markdown renderer do the counting lets us reduce the commit size and avoid possible merge conflicts.
---
```
1.
1.
1.
```
renders to:
1.
1.
1.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `site_list.py`
Content:
```
1 """Sherlock: Supported Site Listing
2 This module generates the listing of supported sites
3 which can be found in sites.md
4 It also organizes all the sites in alphanumeric order
5 """
6 import json
7
8 pool = list()
9
10 with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file:
11 data = json.load(data_file)
12
13 with open("sites.md", "w") as site_file:
14 data_length = len(data)
15 site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\n')
16
17 for social_network in data:
18 url_main = data.get(social_network).get("urlMain")
19 pool.append((social_network, url_main))
20
21 index = 1
22 for social_network, url_main in pool:
23 site_file.write(f'{index}. [{social_network}]({url_main})\n')
24 index = index + 1
25
26
27 sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
28
29 with open("sherlock/resources/data.json", "w") as data_file:
30 data_file.write(sorted_json_data)
31
32 print("Finished updating supported site listing!")
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/site_list.py b/site_list.py
--- a/site_list.py
+++ b/site_list.py
@@ -18,11 +18,8 @@
url_main = data.get(social_network).get("urlMain")
pool.append((social_network, url_main))
- index = 1
for social_network, url_main in pool:
- site_file.write(f'{index}. [{social_network}]({url_main})\n')
- index = index + 1
-
+ site_file.write(f'1. [{social_network}]({url_main})\n')
sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
| {"golden_diff": "diff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -18,11 +18,8 @@\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n \n- index = 1\n for social_network, url_main in pool:\n- site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n- index = index + 1\n-\n+ site_file.write(f'1. [{social_network}]({url_main})\\n')\n \n sorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n", "issue": "[site_list.py] change numbering to reduce commit size\nletting the markdown renderer do the counting lets us reduce commit size and avoide possible merge conflicts.\r\n\r\n---\r\n\r\n```\r\n1.\r\n1.\r\n1.\r\n```\r\nrenders to:\r\n\r\n1.\r\n1.\r\n1.\n", "before_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\nThis module generates the listing of supported sites\nwhich can be found in sites.md\nIt also organizes all the sites in alphanumeric order\n\"\"\"\nimport json\n\npool = list()\n\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n data_length = len(data)\n site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\\n')\n\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n\n index = 1\n for social_network, url_main in pool:\n site_file.write(f'{index}. [{social_network}]({url_main})\\n')\n index = index + 1\n\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}], "after_files": [{"content": "\"\"\"Sherlock: Supported Site Listing\nThis module generates the listing of supported sites\nwhich can be found in sites.md\nIt also organizes all the sites in alphanumeric order\n\"\"\"\nimport json\n\npool = list()\n\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\nwith open(\"sites.md\", \"w\") as site_file:\n data_length = len(data)\n site_file.write(f'## List Of Supported Sites ({data_length} Sites In Total!)\\n')\n\n for social_network in data:\n url_main = data.get(social_network).get(\"urlMain\")\n pool.append((social_network, url_main))\n\n for social_network, url_main in pool:\n site_file.write(f'1. [{social_network}]({url_main})\\n')\n\nsorted_json_data = json.dumps(data, indent=2, sort_keys=True)\n\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n data_file.write(sorted_json_data)\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]} | 616 | 147 |
gh_patches_debug_10290 | rasdani/github-patches | git_diff | goauthentik__authentik-8139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2023.10.6 - "Please select a username" after Azure AD login
**Describe your question/**
Is it now expected behavior in version 2023.10.6 to ask every user for a username after logging in with Azure AD?

In previous versions it simply authenticated without any prompt, using the email address from Azure AD as the username.
Now it expects the user to enter a username (which leads to duplicated accounts, because users whose username is already the email address exist), and if you enter an already existing email address as the username it shows an error:

I think it may be related to this fix:
https://github.com/goauthentik/authentik/pull/7970
Is it possible to set this username automatically, or to revert to using the email address so that old user accounts work again?
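For context, a minimal sketch of how the enroll context turns profile claims into a username; the two claim dictionaries below are assumed shapes of the userinfo and Graph `/me` responses, not captured payloads:
```python
# Assumed claim shapes for illustration only.
userinfo_claims = {"sub": "abc123", "name": "Jane Doe", "email": "[email protected]"}
graph_me_claims = {
    "userPrincipalName": "[email protected]",
    "displayName": "Jane Doe",
    "mail": "[email protected]",
}


def enroll_context(info):
    # Mirrors AzureADOAuthCallback.get_user_enroll_context from the file below.
    mail = info.get("mail", None) or info.get("otherMails", [None])[0]
    return {
        "username": info.get("userPrincipalName"),
        "email": mail,
        "name": info.get("displayName"),
    }


print(enroll_context(userinfo_claims))  # username is None -> the flow prompts for one
print(enroll_context(graph_me_claims))  # username filled automatically from userPrincipalName
```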
**Version and Deployment (please complete the following information):**
- authentik version: 2023.10.6
- Deployment: helm
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/azure_ad.py`
Content:
```
1 """AzureAD OAuth2 Views"""
2 from typing import Any
3
4 from structlog.stdlib import get_logger
5
6 from authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient
7 from authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback
8 from authentik.sources.oauth.types.registry import SourceType, registry
9 from authentik.sources.oauth.views.redirect import OAuthRedirect
10
11 LOGGER = get_logger()
12
13
14 class AzureADOAuthRedirect(OAuthRedirect):
15 """Azure AD OAuth2 Redirect"""
16
17 def get_additional_parameters(self, source): # pragma: no cover
18 return {
19 "scope": ["openid", "https://graph.microsoft.com/User.Read"],
20 }
21
22
23 class AzureADOAuthCallback(OpenIDConnectOAuth2Callback):
24 """AzureAD OAuth2 Callback"""
25
26 client_class = UserprofileHeaderAuthClient
27
28 def get_user_enroll_context(
29 self,
30 info: dict[str, Any],
31 ) -> dict[str, Any]:
32 mail = info.get("mail", None) or info.get("otherMails", [None])[0]
33 return {
34 "username": info.get("userPrincipalName"),
35 "email": mail,
36 "name": info.get("displayName"),
37 }
38
39
40 @registry.register()
41 class AzureADType(SourceType):
42 """Azure AD Type definition"""
43
44 callback_view = AzureADOAuthCallback
45 redirect_view = AzureADOAuthRedirect
46 verbose_name = "Azure AD"
47 name = "azuread"
48
49 urls_customizable = True
50
51 authorization_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
52 access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" # nosec
53 profile_url = "https://login.microsoftonline.com/common/openid/userinfo"
54 oidc_well_known_url = (
55 "https://login.microsoftonline.com/common/.well-known/openid-configuration"
56 )
57 oidc_jwks_url = "https://login.microsoftonline.com/common/discovery/keys"
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py
--- a/authentik/sources/oauth/types/azure_ad.py
+++ b/authentik/sources/oauth/types/azure_ad.py
@@ -50,7 +50,7 @@
authorization_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
access_token_url = "https://login.microsoftonline.com/common/oauth2/v2.0/token" # nosec
- profile_url = "https://login.microsoftonline.com/common/openid/userinfo"
+ profile_url = "https://graph.microsoft.com/v1.0/me"
oidc_well_known_url = (
"https://login.microsoftonline.com/common/.well-known/openid-configuration"
)
| {"golden_diff": "diff --git a/authentik/sources/oauth/types/azure_ad.py b/authentik/sources/oauth/types/azure_ad.py\n--- a/authentik/sources/oauth/types/azure_ad.py\n+++ b/authentik/sources/oauth/types/azure_ad.py\n@@ -50,7 +50,7 @@\n \n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n- profile_url = \"https://login.microsoftonline.com/common/openid/userinfo\"\n+ profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n", "issue": "2023.10.6 - \"Please select a username\" after Azure AD login\n**Describe your question/**\r\n\r\nIs it now a expected behavior in 2023.10.6 version to ask every user for username input after logging in with azure ad?\r\n\r\n\r\nIn previous versions it was simply authenticating without any prompt, using email address from Azure AD as username.\r\n\r\nNow it expects user to input username (and it leads to duplicated accounts, because users with mail as username already exist), and if you enter already existing mail as username it shows error:\r\n\r\n\r\nI think it can be related to this fix:\r\nhttps://github.com/goauthentik/authentik/pull/7970\r\n\r\nIs it possible somehow to set this username automatically, or revert back to using email address so old user accounts will work again?\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.6\r\n- Deployment: helm\r\n\r\n\n", "before_files": [{"content": "\"\"\"AzureAD OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://login.microsoftonline.com/common/openid/userinfo\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}], "after_files": [{"content": "\"\"\"AzureAD 
OAuth2 Views\"\"\"\nfrom typing import Any\n\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import UserprofileHeaderAuthClient\nfrom authentik.sources.oauth.types.oidc import OpenIDConnectOAuth2Callback\nfrom authentik.sources.oauth.types.registry import SourceType, registry\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass AzureADOAuthRedirect(OAuthRedirect):\n \"\"\"Azure AD OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"openid\", \"https://graph.microsoft.com/User.Read\"],\n }\n\n\nclass AzureADOAuthCallback(OpenIDConnectOAuth2Callback):\n \"\"\"AzureAD OAuth2 Callback\"\"\"\n\n client_class = UserprofileHeaderAuthClient\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n mail = info.get(\"mail\", None) or info.get(\"otherMails\", [None])[0]\n return {\n \"username\": info.get(\"userPrincipalName\"),\n \"email\": mail,\n \"name\": info.get(\"displayName\"),\n }\n\n\[email protected]()\nclass AzureADType(SourceType):\n \"\"\"Azure AD Type definition\"\"\"\n\n callback_view = AzureADOAuthCallback\n redirect_view = AzureADOAuthRedirect\n verbose_name = \"Azure AD\"\n name = \"azuread\"\n\n urls_customizable = True\n\n authorization_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/authorize\"\n access_token_url = \"https://login.microsoftonline.com/common/oauth2/v2.0/token\" # nosec\n profile_url = \"https://graph.microsoft.com/v1.0/me\"\n oidc_well_known_url = (\n \"https://login.microsoftonline.com/common/.well-known/openid-configuration\"\n )\n oidc_jwks_url = \"https://login.microsoftonline.com/common/discovery/keys\"\n", "path": "authentik/sources/oauth/types/azure_ad.py"}]} | 1,132 | 175 |
gh_patches_debug_15565 | rasdani/github-patches | git_diff | deepset-ai__haystack-7796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[V2.2.0] ChatPromptBuilder is not exported
**Describe the bug**
v2.2.0 => ChatPromptBuilder is not exported
**Error message**
<img width="1102" alt="image" src="https://github.com/deepset-ai/haystack/assets/15232298/b9372767-42f5-464c-832f-cca38a00cf60">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/builders/__init__.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from haystack.components.builders.answer_builder import AnswerBuilder
6 from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder
7 from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder
8 from haystack.components.builders.prompt_builder import PromptBuilder
9
10 __all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder"]
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/builders/__init__.py b/haystack/components/builders/__init__.py
--- a/haystack/components/builders/__init__.py
+++ b/haystack/components/builders/__init__.py
@@ -3,8 +3,9 @@
# SPDX-License-Identifier: Apache-2.0
from haystack.components.builders.answer_builder import AnswerBuilder
+from haystack.components.builders.chat_prompt_builder import ChatPromptBuilder
from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder
from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder
from haystack.components.builders.prompt_builder import PromptBuilder
-__all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder"]
+__all__ = ["AnswerBuilder", "PromptBuilder", "DynamicPromptBuilder", "DynamicChatPromptBuilder", "ChatPromptBuilder"]
| {"golden_diff": "diff --git a/haystack/components/builders/__init__.py b/haystack/components/builders/__init__.py\n--- a/haystack/components/builders/__init__.py\n+++ b/haystack/components/builders/__init__.py\n@@ -3,8 +3,9 @@\n # SPDX-License-Identifier: Apache-2.0\n \n from haystack.components.builders.answer_builder import AnswerBuilder\n+from haystack.components.builders.chat_prompt_builder import ChatPromptBuilder\n from haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\n from haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\n from haystack.components.builders.prompt_builder import PromptBuilder\n \n-__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\"]\n+__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\", \"ChatPromptBuilder\"]\n", "issue": "[V2.2.0] ChatPromptBuilder is not export\n**Describe the bug**\r\nv2.2.0 => ChatPromptBuilder is not export\r\n\r\n**Error message**\r\n<img width=\"1102\" alt=\"image\" src=\"https://github.com/deepset-ai/haystack/assets/15232298/b9372767-42f5-464c-832f-cca38a00cf60\">\r\n\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom haystack.components.builders.answer_builder import AnswerBuilder\nfrom haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\nfrom haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\nfrom haystack.components.builders.prompt_builder import PromptBuilder\n\n__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\"]\n", "path": "haystack/components/builders/__init__.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom haystack.components.builders.answer_builder import AnswerBuilder\nfrom haystack.components.builders.chat_prompt_builder import ChatPromptBuilder\nfrom haystack.components.builders.dynamic_chat_prompt_builder import DynamicChatPromptBuilder\nfrom haystack.components.builders.dynamic_prompt_builder import DynamicPromptBuilder\nfrom haystack.components.builders.prompt_builder import PromptBuilder\n\n__all__ = [\"AnswerBuilder\", \"PromptBuilder\", \"DynamicPromptBuilder\", \"DynamicChatPromptBuilder\", \"ChatPromptBuilder\"]\n", "path": "haystack/components/builders/__init__.py"}]} | 491 | 185 |
gh_patches_debug_36530 | rasdani/github-patches | git_diff | getsentry__sentry-python-541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.12.0 breaks Django function-based middleware
Similar to #504, but a different stack trace:
AttributeError: 'method-wrapper' object has no attribute '__module__'
File "django/core/handlers/exception.py", line 41, in inner
response = get_response(request)
File "functools.py", line 33, in update_wrapper
setattr(wrapper, attr, getattr(wrapped, attr))
According to sentry (kind-of neat how I get this in this case...), the `get_response` object at that point in time is `<sentry_sdk.integrations.django.middleware.AuditMiddleware object at 0x7f37d64d4450>`.
This problem only occurs in 0.12.0 and newer, and with Django 1.11.x
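A minimal sketch of the failure mode; the traceback above is consistent with Python 2.7's `functools`, where `update_wrapper` copies attributes without guarding against `AttributeError`:
```python
import functools


def function_based_middleware(request):
    # Stand-in for what a Django function-based middleware factory returns.
    return request


bound_call = function_based_middleware.__call__  # a 'method-wrapper', not a function object
print(type(bound_call).__name__)                 # 'method-wrapper'


def wrapper(*args, **kwargs):
    return bound_call(*args, **kwargs)


# On Python 2.7 this raises AttributeError because the method-wrapper has no __module__;
# Python 3's update_wrapper swallows the missing attribute, so a fix has to guard the
# attribute copy explicitly when wrapping self._inner.__call__.
functools.update_wrapper(wrapper, bound_call)
```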
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/middleware.py`
Content:
```
1 """
2 Create spans from Django middleware invocations
3 """
4
5 from functools import wraps
6
7 from django import VERSION as DJANGO_VERSION
8
9 from sentry_sdk import Hub
10 from sentry_sdk.utils import ContextVar, transaction_from_function
11
12 from sentry_sdk._types import MYPY
13
14 if MYPY:
15 from typing import Any
16 from typing import Callable
17 from typing import TypeVar
18
19 F = TypeVar("F", bound=Callable[..., Any])
20
21 _import_string_should_wrap_middleware = ContextVar(
22 "import_string_should_wrap_middleware"
23 )
24
25 if DJANGO_VERSION < (1, 7):
26 import_string_name = "import_by_path"
27 else:
28 import_string_name = "import_string"
29
30
31 def patch_django_middlewares():
32 # type: () -> None
33 from django.core.handlers import base
34
35 old_import_string = getattr(base, import_string_name)
36
37 def sentry_patched_import_string(dotted_path):
38 # type: (str) -> Any
39 rv = old_import_string(dotted_path)
40
41 if _import_string_should_wrap_middleware.get(None):
42 rv = _wrap_middleware(rv, dotted_path)
43
44 return rv
45
46 setattr(base, import_string_name, sentry_patched_import_string)
47
48 old_load_middleware = base.BaseHandler.load_middleware
49
50 def sentry_patched_load_middleware(self):
51 # type: (base.BaseHandler) -> Any
52 _import_string_should_wrap_middleware.set(True)
53 try:
54 return old_load_middleware(self)
55 finally:
56 _import_string_should_wrap_middleware.set(False)
57
58 base.BaseHandler.load_middleware = sentry_patched_load_middleware
59
60
61 def _wrap_middleware(middleware, middleware_name):
62 # type: (Any, str) -> Any
63 from sentry_sdk.integrations.django import DjangoIntegration
64
65 def _get_wrapped_method(old_method):
66 # type: (F) -> F
67 @wraps(old_method)
68 def sentry_wrapped_method(*args, **kwargs):
69 # type: (*Any, **Any) -> Any
70 hub = Hub.current
71 integration = hub.get_integration(DjangoIntegration)
72 if integration is None or not integration.middleware_spans:
73 return old_method(*args, **kwargs)
74
75 function_name = transaction_from_function(old_method)
76
77 description = middleware_name
78 function_basename = getattr(old_method, "__name__", None)
79 if function_basename:
80 description = "{}.{}".format(description, function_basename)
81
82 with hub.start_span(
83 op="django.middleware", description=description
84 ) as span:
85 span.set_tag("django.function_name", function_name)
86 span.set_tag("django.middleware_name", middleware_name)
87 return old_method(*args, **kwargs)
88
89 return sentry_wrapped_method # type: ignore
90
91 class SentryWrappingMiddleware(object):
92 def __init__(self, *args, **kwargs):
93 # type: (*Any, **Any) -> None
94 self._inner = middleware(*args, **kwargs)
95 self._call_method = None
96
97 # We need correct behavior for `hasattr()`, which we can only determine
98 # when we have an instance of the middleware we're wrapping.
99 def __getattr__(self, method_name):
100 # type: (str) -> Any
101 if method_name not in (
102 "process_request",
103 "process_view",
104 "process_template_response",
105 "process_response",
106 "process_exception",
107 ):
108 raise AttributeError()
109
110 old_method = getattr(self._inner, method_name)
111 rv = _get_wrapped_method(old_method)
112 self.__dict__[method_name] = rv
113 return rv
114
115 def __call__(self, *args, **kwargs):
116 # type: (*Any, **Any) -> Any
117 f = self._call_method
118 if f is None:
119 self._call_method = f = _get_wrapped_method(self._inner.__call__)
120 return f(*args, **kwargs)
121
122 if hasattr(middleware, "__name__"):
123 SentryWrappingMiddleware.__name__ = middleware.__name__
124
125 return SentryWrappingMiddleware
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py
--- a/sentry_sdk/integrations/django/middleware.py
+++ b/sentry_sdk/integrations/django/middleware.py
@@ -7,7 +7,11 @@
from django import VERSION as DJANGO_VERSION
from sentry_sdk import Hub
-from sentry_sdk.utils import ContextVar, transaction_from_function
+from sentry_sdk.utils import (
+ ContextVar,
+ transaction_from_function,
+ capture_internal_exceptions,
+)
from sentry_sdk._types import MYPY
@@ -64,29 +68,36 @@
def _get_wrapped_method(old_method):
# type: (F) -> F
- @wraps(old_method)
- def sentry_wrapped_method(*args, **kwargs):
- # type: (*Any, **Any) -> Any
- hub = Hub.current
- integration = hub.get_integration(DjangoIntegration)
- if integration is None or not integration.middleware_spans:
- return old_method(*args, **kwargs)
-
- function_name = transaction_from_function(old_method)
-
- description = middleware_name
- function_basename = getattr(old_method, "__name__", None)
- if function_basename:
- description = "{}.{}".format(description, function_basename)
-
- with hub.start_span(
- op="django.middleware", description=description
- ) as span:
- span.set_tag("django.function_name", function_name)
- span.set_tag("django.middleware_name", middleware_name)
- return old_method(*args, **kwargs)
-
- return sentry_wrapped_method # type: ignore
+ with capture_internal_exceptions():
+
+ def sentry_wrapped_method(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(DjangoIntegration)
+ if integration is None or not integration.middleware_spans:
+ return old_method(*args, **kwargs)
+
+ function_name = transaction_from_function(old_method)
+
+ description = middleware_name
+ function_basename = getattr(old_method, "__name__", None)
+ if function_basename:
+ description = "{}.{}".format(description, function_basename)
+
+ with hub.start_span(
+ op="django.middleware", description=description
+ ) as span:
+ span.set_tag("django.function_name", function_name)
+ span.set_tag("django.middleware_name", middleware_name)
+ return old_method(*args, **kwargs)
+
+ try:
+ # fails for __call__ of function on Python 2 (see py2.7-django-1.11)
+ return wraps(old_method)(sentry_wrapped_method) # type: ignore
+ except Exception:
+ return sentry_wrapped_method # type: ignore
+
+ return old_method
class SentryWrappingMiddleware(object):
def __init__(self, *args, **kwargs):
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py\n--- a/sentry_sdk/integrations/django/middleware.py\n+++ b/sentry_sdk/integrations/django/middleware.py\n@@ -7,7 +7,11 @@\n from django import VERSION as DJANGO_VERSION\n \n from sentry_sdk import Hub\n-from sentry_sdk.utils import ContextVar, transaction_from_function\n+from sentry_sdk.utils import (\n+ ContextVar,\n+ transaction_from_function,\n+ capture_internal_exceptions,\n+)\n \n from sentry_sdk._types import MYPY\n \n@@ -64,29 +68,36 @@\n \n def _get_wrapped_method(old_method):\n # type: (F) -> F\n- @wraps(old_method)\n- def sentry_wrapped_method(*args, **kwargs):\n- # type: (*Any, **Any) -> Any\n- hub = Hub.current\n- integration = hub.get_integration(DjangoIntegration)\n- if integration is None or not integration.middleware_spans:\n- return old_method(*args, **kwargs)\n-\n- function_name = transaction_from_function(old_method)\n-\n- description = middleware_name\n- function_basename = getattr(old_method, \"__name__\", None)\n- if function_basename:\n- description = \"{}.{}\".format(description, function_basename)\n-\n- with hub.start_span(\n- op=\"django.middleware\", description=description\n- ) as span:\n- span.set_tag(\"django.function_name\", function_name)\n- span.set_tag(\"django.middleware_name\", middleware_name)\n- return old_method(*args, **kwargs)\n-\n- return sentry_wrapped_method # type: ignore\n+ with capture_internal_exceptions():\n+\n+ def sentry_wrapped_method(*args, **kwargs):\n+ # type: (*Any, **Any) -> Any\n+ hub = Hub.current\n+ integration = hub.get_integration(DjangoIntegration)\n+ if integration is None or not integration.middleware_spans:\n+ return old_method(*args, **kwargs)\n+\n+ function_name = transaction_from_function(old_method)\n+\n+ description = middleware_name\n+ function_basename = getattr(old_method, \"__name__\", None)\n+ if function_basename:\n+ description = \"{}.{}\".format(description, function_basename)\n+\n+ with hub.start_span(\n+ op=\"django.middleware\", description=description\n+ ) as span:\n+ span.set_tag(\"django.function_name\", function_name)\n+ span.set_tag(\"django.middleware_name\", middleware_name)\n+ return old_method(*args, **kwargs)\n+\n+ try:\n+ # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n+ return wraps(old_method)(sentry_wrapped_method) # type: ignore\n+ except Exception:\n+ return sentry_wrapped_method # type: ignore\n+\n+ return old_method\n \n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n", "issue": "0.12.0 breaks Django function-based middleware\nSimilar to #504, but a different stack trace:\r\n\r\n AttributeError: 'method-wrapper' object has no attribute '__module__'\r\n File \"django/core/handlers/exception.py\", line 41, in inner\r\n response = get_response(request)\r\n File \"functools.py\", line 33, in update_wrapper\r\n setattr(wrapper, attr, getattr(wrapped, attr))\r\n\r\nAccording to sentry (kind-of neat how I get this in this case...), the `get_response` object at that point in time is `<sentry_sdk.integrations.django.middleware.AuditMiddleware object at 0x7f37d64d4450>`.\r\n\r\nThis problem only occurs in 0.12.0 and newer, and with Django 1.11.x\n", "before_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom functools import wraps\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk.utils import ContextVar, transaction_from_function\n\nfrom 
sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(self):\n # type: (base.BaseHandler) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(self)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n @wraps(old_method)\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n return sentry_wrapped_method # type: ignore\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}], "after_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom functools import wraps\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom 
sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(self):\n # type: (base.BaseHandler) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(self)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n return wraps(old_method)(sentry_wrapped_method) # type: ignore\n except Exception:\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n 
return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}]} | 1,616 | 672 |
gh_patches_debug_28409 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
recycleapp_be not working for some addresses
when I enter my address into the configuration.yaml I receive this error on restart:
```
fetch failed for source Recycle!: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py",
line 116, in fetch entries = self._source.fetch() File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py",
line 79, in fetch entries.append(Collection(date, item["fraction"]["name"]["en"])) KeyError: 'name'
```
when I use the example address or some other addresses everything works fine. Is it a problem with my city? Because other addresses of this city also don't work, even though those addresses work on [Recycle!](https://recycleapp.be/home).
this is what I have in configuration.yaml
```
waste_collection_schedule:
sources:
- name: recycleapp_be
args:
postcode: 3001
street: Waversebaan
house_number: 276
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py`
Content:
```
1 import logging
2 from datetime import datetime, timedelta
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Recycle!"
8 DESCRIPTION = "Source for RecycleApp.be"
9 URL = "https://www.recycleapp.be"
10 TEST_CASES = {
11 "1140 Evere, Bazellaan 1": {
12 "postcode": 1140,
13 "street": "Bazellaan",
14 "house_number": 1,
15 }
16 }
17
18 _LOGGER = logging.getLogger(__name__)
19
20
21 class Source:
22 def __init__(self, postcode, street, house_number):
23 self._postcode = postcode
24 self._street = street
25 self._house_number = house_number
26
27 def fetch(self):
28 url = "https://recycleapp.be/api/app/v1"
29 headers = {
30 "x-secret": "Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW",
31 "x-consumer": "recycleapp.be",
32 "User-Agent": "",
33 "Authorization": "",
34 }
35 r = requests.get(f"{url}/access-token", headers=headers)
36 headers["Authorization"] = r.json()["accessToken"]
37
38 params = {"q": self._postcode}
39 r = requests.get(f"{url}/zipcodes", params=params, headers=headers)
40 if r.status_code != 200:
41 _LOGGER.error("Get zip code failed")
42 return []
43 zipcodeId = r.json()["items"][0]["id"]
44
45 params = {"q": self._street, "zipcodes": zipcodeId}
46 r = requests.get(f"{url}/streets", params=params, headers=headers)
47 if r.status_code != 200:
48 _LOGGER.error("Get street id failed")
49 return []
50
51 for item in r.json()["items"]:
52 if item["name"] == self._street:
53 streetId = item["id"]
54 if streetId is None:
55 streetId = r.json()["items"][0]["id"]
56
57 now = datetime.now()
58 fromDate = now.strftime("%Y-%m-%d")
59 untilDate = (now + timedelta(days=365)).strftime("%Y-%m-%d")
60 params = {
61 "zipcodeId": zipcodeId,
62 "streetId": streetId,
63 "houseNumber": self._house_number,
64 "fromDate": fromDate,
65 "untilDate": untilDate,
66 # "size":100,
67 }
68 r = requests.get(f"{url}/collections", params=params, headers=headers)
69 if r.status_code != 200:
70 _LOGGER.error("Get data failed")
71 return []
72
73 entries = []
74 for item in r.json()["items"]:
75 if "exception" in item and "replacedBy" in item["exception"]:
76 continue
77
78 date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date()
79 entries.append(Collection(date, item["fraction"]["name"]["en"]))
80 return entries
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
@@ -12,17 +12,29 @@
"postcode": 1140,
"street": "Bazellaan",
"house_number": 1,
- }
+ },
+ "3001, Waversebaan 276 with events": {
+ "postcode": 3001,
+ "street": "Waversebaan",
+ "house_number": 276,
+ },
+ "3001, Waversebaan 276 without events": {
+ "postcode": 3001,
+ "street": "Waversebaan",
+ "house_number": 276,
+ "add_events": False,
+ },
}
_LOGGER = logging.getLogger(__name__)
class Source:
- def __init__(self, postcode, street, house_number):
+ def __init__(self, postcode, street, house_number, add_events=True):
self._postcode = postcode
self._street = street
self._house_number = house_number
+ self._add_events = add_events
def fetch(self):
url = "https://recycleapp.be/api/app/v1"
@@ -76,5 +88,9 @@
continue
date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date()
- entries.append(Collection(date, item["fraction"]["name"]["en"]))
+ if item["type"] == "collection":
+ entries.append(Collection(date, item["fraction"]["name"]["en"]))
+ elif item["type"] == "event" and self._add_events:
+ entries.append(Collection(date, item["event"]["title"]["en"]))
+
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n@@ -12,17 +12,29 @@\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n- }\n+ },\n+ \"3001, Waversebaan 276 with events\": {\n+ \"postcode\": 3001,\n+ \"street\": \"Waversebaan\",\n+ \"house_number\": 276,\n+ },\n+ \"3001, Waversebaan 276 without events\": {\n+ \"postcode\": 3001,\n+ \"street\": \"Waversebaan\",\n+ \"house_number\": 276,\n+ \"add_events\": False,\n+ },\n }\n \n _LOGGER = logging.getLogger(__name__)\n \n \n class Source:\n- def __init__(self, postcode, street, house_number):\n+ def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n+ self._add_events = add_events\n \n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n@@ -76,5 +88,9 @@\n continue\n \n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n- entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n+ if item[\"type\"] == \"collection\":\n+ entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n+ elif item[\"type\"] == \"event\" and self._add_events:\n+ entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n+\n return entries\n", "issue": "recycleapp_be not working for some addresses\nwhen I enter my address into the configuration.yaml I receive this error on restart:\r\n```\r\nfetch failed for source Recycle!: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", \r\nline 116, in fetch entries = self._source.fetch() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\", \r\nline 79, in fetch entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"])) KeyError: 'name'\r\n```\r\nwhen I use the example address or some other addresses everything works fine. Is it a problem with my city? 
Because other addresses of this city also don't work, even though those addresses work on [Recycle!](https://recycleapp.be/home).\r\nthis is what I have in configuration.yaml\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: recycleapp_be\r\n args:\r\n postcode: 3001\r\n street: Waversebaan\r\n house_number: 276\r\n```\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n }\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n\n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return []\n\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": 
{\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return []\n\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}]} | 1,434 | 466 |
gh_patches_debug_27458 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-4889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Py3.6: Unable to find .../site-packages/importlib_resources/version.txt"
Hello,
On latest version of pyinstaller, the hook for importlib_resource seems to look for a non existing version.txt file. It is not provided by the latest version 1.2.0 of the backport: https://gitlab.com/python-devs/importlib_resources
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-importlib_resources.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2019-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11 """
12 `importlib_resources` is a backport of the 3.7+ module `importlib.resources`
13 """
14
15 import os
16 from PyInstaller.compat import is_py37
17 from PyInstaller.utils.hooks import get_module_file_attribute
18
19 # Include the version.txt file, used to set __version__
20 res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))
21 datas = [
22 (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),
23 ]
24
25 # Replicate the module's version checks to exclude unused modules.
26 if is_py37:
27 # Stdlib now has the implmentation of this, so the backports
28 # aren't used at all
29 excludedmodules = [
30 'importlib_resources._py2',
31 'importlib_resources._py3',
32 ]
33 else:
34 excludedmodules = ['importlib_resources._py2']
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-importlib_resources.py b/PyInstaller/hooks/hook-importlib_resources.py
--- a/PyInstaller/hooks/hook-importlib_resources.py
+++ b/PyInstaller/hooks/hook-importlib_resources.py
@@ -9,26 +9,25 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
-`importlib_resources` is a backport of the 3.7+ module `importlib.resources`
+`importlib_resources` is a backport of the 3.9+ module `importlib.resources`
"""
import os
-from PyInstaller.compat import is_py37
-from PyInstaller.utils.hooks import get_module_file_attribute
+from PyInstaller.utils.hooks import get_module_file_attribute, \
+ is_module_satisfies, copy_metadata
-# Include the version.txt file, used to set __version__
-res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))
-datas = [
- (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),
-]
-
-# Replicate the module's version checks to exclude unused modules.
-if is_py37:
- # Stdlib now has the implmentation of this, so the backports
- # aren't used at all
- excludedmodules = [
- 'importlib_resources._py2',
- 'importlib_resources._py3',
- ]
+if is_module_satisfies("importlib_resources >= 1.2.0"):
+ # since 1.2.0 importlib.metadata is used
+ datas = copy_metadata('importlib_resources')
else:
- excludedmodules = ['importlib_resources._py2']
+ # include the version.txt file, used to set __version__
+ res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))
+ datas = [
+ (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),
+ ]
+
+if is_module_satisfies("importlib_resources >= 1.3.1"):
+ hiddenimports = ['importlib_resources.trees']
+
+# this is only required for python2 support
+excludedimports = ['importlib_resources._py2']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-importlib_resources.py b/PyInstaller/hooks/hook-importlib_resources.py\n--- a/PyInstaller/hooks/hook-importlib_resources.py\n+++ b/PyInstaller/hooks/hook-importlib_resources.py\n@@ -9,26 +9,25 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \"\"\"\n-`importlib_resources` is a backport of the 3.7+ module `importlib.resources`\n+`importlib_resources` is a backport of the 3.9+ module `importlib.resources`\n \"\"\"\n \n import os\n-from PyInstaller.compat import is_py37\n-from PyInstaller.utils.hooks import get_module_file_attribute\n+from PyInstaller.utils.hooks import get_module_file_attribute, \\\n+ is_module_satisfies, copy_metadata\n \n-# Include the version.txt file, used to set __version__\n-res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n-datas = [\n- (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n-]\n-\n-# Replicate the module's version checks to exclude unused modules.\n-if is_py37:\n- # Stdlib now has the implmentation of this, so the backports\n- # aren't used at all\n- excludedmodules = [\n- 'importlib_resources._py2',\n- 'importlib_resources._py3',\n- ]\n+if is_module_satisfies(\"importlib_resources >= 1.2.0\"):\n+ # since 1.2.0 importlib.metadata is used\n+ datas = copy_metadata('importlib_resources')\n else:\n- excludedmodules = ['importlib_resources._py2']\n+ # include the version.txt file, used to set __version__\n+ res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n+ datas = [\n+ (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n+ ]\n+\n+if is_module_satisfies(\"importlib_resources >= 1.3.1\"):\n+ hiddenimports = ['importlib_resources.trees']\n+\n+# this is only required for python2 support\n+excludedimports = ['importlib_resources._py2']\n", "issue": "Py3.6: Unable to find .../site-packages/importlib_resources/version.txt\"\nHello,\r\n\r\nOn latest version of pyinstaller, the hook for importlib_resource seems to look for a non existing version.txt file. 
It is not provided by the latest version 1.2.0 of the backport: https://gitlab.com/python-devs/importlib_resources\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2019-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\n`importlib_resources` is a backport of the 3.7+ module `importlib.resources`\n\"\"\"\n\nimport os\nfrom PyInstaller.compat import is_py37\nfrom PyInstaller.utils.hooks import get_module_file_attribute\n\n# Include the version.txt file, used to set __version__\nres_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\ndatas = [\n (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n]\n\n# Replicate the module's version checks to exclude unused modules.\nif is_py37:\n # Stdlib now has the implmentation of this, so the backports\n # aren't used at all\n excludedmodules = [\n 'importlib_resources._py2',\n 'importlib_resources._py3',\n ]\nelse:\n excludedmodules = ['importlib_resources._py2']\n", "path": "PyInstaller/hooks/hook-importlib_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2019-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\"\"\"\n`importlib_resources` is a backport of the 3.9+ module `importlib.resources`\n\"\"\"\n\nimport os\nfrom PyInstaller.utils.hooks import get_module_file_attribute, \\\n is_module_satisfies, copy_metadata\n\nif is_module_satisfies(\"importlib_resources >= 1.2.0\"):\n # since 1.2.0 importlib.metadata is used\n datas = copy_metadata('importlib_resources')\nelse:\n # include the version.txt file, used to set __version__\n res_loc = os.path.dirname(get_module_file_attribute('importlib_resources'))\n datas = [\n (os.path.join(res_loc, 'version.txt'), 'importlib_resources'),\n ]\n\nif is_module_satisfies(\"importlib_resources >= 1.3.1\"):\n hiddenimports = ['importlib_resources.trees']\n\n# this is only required for python2 support\nexcludedimports = ['importlib_resources._py2']\n", "path": "PyInstaller/hooks/hook-importlib_resources.py"}]} | 676 | 491 |
gh_patches_debug_27546 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10822 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/binomial_coefficient.py`
Content:
```
1 def binomial_coefficient(n: int, r: int) -> int:
2 """
3 Find binomial coefficient using pascals triangle.
4
5 >>> binomial_coefficient(10, 5)
6 252
7 """
8 c = [0 for i in range(r + 1)]
9 # nc0 = 1
10 c[0] = 1
11 for i in range(1, n + 1):
12 # to compute current row from previous row.
13 j = min(i, r)
14 while j > 0:
15 c[j] += c[j - 1]
16 j -= 1
17 return c[r]
18
19
20 print(binomial_coefficient(n=10, r=5))
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py
--- a/maths/binomial_coefficient.py
+++ b/maths/binomial_coefficient.py
@@ -1,10 +1,48 @@
def binomial_coefficient(n: int, r: int) -> int:
"""
- Find binomial coefficient using pascals triangle.
+ Find binomial coefficient using Pascal's triangle.
+
+ Calculate C(n, r) using Pascal's triangle.
+
+ :param n: The total number of items.
+ :param r: The number of items to choose.
+ :return: The binomial coefficient C(n, r).
>>> binomial_coefficient(10, 5)
252
+ >>> binomial_coefficient(10, 0)
+ 1
+ >>> binomial_coefficient(0, 10)
+ 1
+ >>> binomial_coefficient(10, 10)
+ 1
+ >>> binomial_coefficient(5, 2)
+ 10
+ >>> binomial_coefficient(5, 6)
+ 0
+ >>> binomial_coefficient(3, 5)
+ 0
+ >>> binomial_coefficient(-2, 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: n and r must be non-negative integers
+ >>> binomial_coefficient(5, -1)
+ Traceback (most recent call last):
+ ...
+ ValueError: n and r must be non-negative integers
+ >>> binomial_coefficient(10.1, 5)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
+ >>> binomial_coefficient(10, 5.1)
+ Traceback (most recent call last):
+ ...
+ TypeError: 'float' object cannot be interpreted as an integer
"""
+ if n < 0 or r < 0:
+ raise ValueError("n and r must be non-negative integers")
+ if 0 in (n, r):
+ return 1
c = [0 for i in range(r + 1)]
# nc0 = 1
c[0] = 1
@@ -17,4 +55,8 @@
return c[r]
-print(binomial_coefficient(n=10, r=5))
+if __name__ == "__main__":
+ from doctest import testmod
+
+ testmod()
+ print(binomial_coefficient(n=10, r=5))
| {"golden_diff": "diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py\n--- a/maths/binomial_coefficient.py\n+++ b/maths/binomial_coefficient.py\n@@ -1,10 +1,48 @@\n def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n- Find binomial coefficient using pascals triangle.\n+ Find binomial coefficient using Pascal's triangle.\n+\n+ Calculate C(n, r) using Pascal's triangle.\n+\n+ :param n: The total number of items.\n+ :param r: The number of items to choose.\n+ :return: The binomial coefficient C(n, r).\n \n >>> binomial_coefficient(10, 5)\n 252\n+ >>> binomial_coefficient(10, 0)\n+ 1\n+ >>> binomial_coefficient(0, 10)\n+ 1\n+ >>> binomial_coefficient(10, 10)\n+ 1\n+ >>> binomial_coefficient(5, 2)\n+ 10\n+ >>> binomial_coefficient(5, 6)\n+ 0\n+ >>> binomial_coefficient(3, 5)\n+ 0\n+ >>> binomial_coefficient(-2, 3)\n+ Traceback (most recent call last):\n+ ...\n+ ValueError: n and r must be non-negative integers\n+ >>> binomial_coefficient(5, -1)\n+ Traceback (most recent call last):\n+ ...\n+ ValueError: n and r must be non-negative integers\n+ >>> binomial_coefficient(10.1, 5)\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'float' object cannot be interpreted as an integer\n+ >>> binomial_coefficient(10, 5.1)\n+ Traceback (most recent call last):\n+ ...\n+ TypeError: 'float' object cannot be interpreted as an integer\n \"\"\"\n+ if n < 0 or r < 0:\n+ raise ValueError(\"n and r must be non-negative integers\")\n+ if 0 in (n, r):\n+ return 1\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n@@ -17,4 +55,8 @@\n return c[r]\n \n \n-print(binomial_coefficient(n=10, r=5))\n+if __name__ == \"__main__\":\n+ from doctest import testmod\n+\n+ testmod()\n+ print(binomial_coefficient(n=10, r=5))\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. 
Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n Find binomial coefficient using pascals triangle.\n\n >>> binomial_coefficient(10, 5)\n 252\n \"\"\"\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n for i in range(1, n + 1):\n # to compute current row from previous row.\n j = min(i, r)\n while j > 0:\n c[j] += c[j - 1]\n j -= 1\n return c[r]\n\n\nprint(binomial_coefficient(n=10, r=5))\n", "path": "maths/binomial_coefficient.py"}], "after_files": [{"content": "def binomial_coefficient(n: int, r: int) -> int:\n \"\"\"\n Find binomial coefficient using Pascal's triangle.\n\n Calculate C(n, r) using Pascal's triangle.\n\n :param n: The total number of items.\n :param r: The number of items to choose.\n :return: The binomial coefficient C(n, r).\n\n >>> binomial_coefficient(10, 5)\n 252\n >>> binomial_coefficient(10, 0)\n 1\n >>> binomial_coefficient(0, 10)\n 1\n >>> binomial_coefficient(10, 10)\n 1\n >>> binomial_coefficient(5, 2)\n 10\n >>> binomial_coefficient(5, 6)\n 0\n >>> binomial_coefficient(3, 5)\n 0\n >>> binomial_coefficient(-2, 3)\n Traceback (most recent call last):\n ...\n ValueError: n and r must be non-negative integers\n >>> binomial_coefficient(5, -1)\n Traceback (most recent call last):\n ...\n ValueError: n and r must be non-negative integers\n >>> binomial_coefficient(10.1, 5)\n Traceback (most recent call last):\n ...\n TypeError: 'float' object cannot be interpreted as an integer\n >>> binomial_coefficient(10, 5.1)\n Traceback (most recent call last):\n ...\n TypeError: 'float' object cannot be interpreted as an integer\n \"\"\"\n if n < 0 or r < 0:\n raise ValueError(\"n and r must be 
non-negative integers\")\n if 0 in (n, r):\n return 1\n c = [0 for i in range(r + 1)]\n # nc0 = 1\n c[0] = 1\n for i in range(1, n + 1):\n # to compute current row from previous row.\n j = min(i, r)\n while j > 0:\n c[j] += c[j - 1]\n j -= 1\n return c[r]\n\n\nif __name__ == \"__main__\":\n from doctest import testmod\n\n testmod()\n print(binomial_coefficient(n=10, r=5))\n", "path": "maths/binomial_coefficient.py"}]} | 1,293 | 595 |
gh_patches_debug_16578 | rasdani/github-patches | git_diff | doccano__doccano-1668 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pagination of the project list
When fetching projects in the project list page, is it intentional that all projects are fetched at once even though there is pagination?
Endpoint of project list fetching: `/v1/projects`
When there are a lot of projects, it takes a long time to display them.
Your Environment
---------
doccano v1.5.5
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/api/views/project.py`
Content:
```
1 from django.conf import settings
2 from rest_framework import generics, status
3 from rest_framework.permissions import IsAdminUser, IsAuthenticated
4 from rest_framework.response import Response
5
6 from members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
7
8 from ..models import Project
9 from ..serializers import ProjectPolymorphicSerializer
10
11
12 class ProjectList(generics.ListCreateAPIView):
13 serializer_class = ProjectPolymorphicSerializer
14 pagination_class = None
15
16 def get_permissions(self):
17 if self.request.method == 'GET':
18 self.permission_classes = [IsAuthenticated, ]
19 else:
20 self.permission_classes = [IsAuthenticated & IsAdminUser]
21 return super().get_permissions()
22
23 def get_queryset(self):
24 return Project.objects.filter(role_mappings__user=self.request.user)
25
26 def perform_create(self, serializer):
27 serializer.save(created_by=self.request.user)
28
29 def delete(self, request, *args, **kwargs):
30 delete_ids = request.data['ids']
31 projects = Project.objects.filter(
32 role_mappings__user=self.request.user,
33 role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,
34 pk__in=delete_ids
35 )
36 # Todo: I want to use bulk delete.
37 # But it causes the constraint error.
38 # See https://github.com/django-polymorphic/django-polymorphic/issues/229
39 for project in projects:
40 project.delete()
41 return Response(status=status.HTTP_204_NO_CONTENT)
42
43
44 class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
45 queryset = Project.objects.all()
46 serializer_class = ProjectPolymorphicSerializer
47 lookup_url_kwarg = 'project_id'
48 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/api/views/project.py b/backend/api/views/project.py
--- a/backend/api/views/project.py
+++ b/backend/api/views/project.py
@@ -1,5 +1,6 @@
from django.conf import settings
-from rest_framework import generics, status
+from django_filters.rest_framework import DjangoFilterBackend
+from rest_framework import filters, generics, status
from rest_framework.permissions import IsAdminUser, IsAuthenticated
from rest_framework.response import Response
@@ -11,7 +12,8 @@
class ProjectList(generics.ListCreateAPIView):
serializer_class = ProjectPolymorphicSerializer
- pagination_class = None
+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
+ search_fields = ('name', 'description')
def get_permissions(self):
if self.request.method == 'GET':
| {"golden_diff": "diff --git a/backend/api/views/project.py b/backend/api/views/project.py\n--- a/backend/api/views/project.py\n+++ b/backend/api/views/project.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n-from rest_framework import generics, status\n+from django_filters.rest_framework import DjangoFilterBackend\n+from rest_framework import filters, generics, status\n from rest_framework.permissions import IsAdminUser, IsAuthenticated\n from rest_framework.response import Response\n \n@@ -11,7 +12,8 @@\n \n class ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n- pagination_class = None\n+ filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n+ search_fields = ('name', 'description')\n \n def get_permissions(self):\n if self.request.method == 'GET':\n", "issue": "Pagination of the project list\nWhen fetching projects in the project list page, is it intentional that all projects are fetched at once even though there is pagination?\r\n\r\nEndpoint of project list fetching: `/v1/projects`\r\n\r\nWhen there are a lot of projects, it takes a long time to display them.\r\n\r\nYour Environment\r\n---------\r\ndoccano v1.5.5\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\nfrom ..models import Project\nfrom ..serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n pagination_class = None\n\n def get_permissions(self):\n if self.request.method == 'GET':\n self.permission_classes = [IsAuthenticated, ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data['ids']\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = 'project_id'\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/api/views/project.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom members.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\nfrom ..models import Project\nfrom ..serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = 
(DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('name', 'description')\n\n def get_permissions(self):\n if self.request.method == 'GET':\n self.permission_classes = [IsAuthenticated, ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data['ids']\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = 'project_id'\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/api/views/project.py"}]} | 792 | 185 |
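A rough sketch of how a DRF list view combines pagination with filter backends, for readers unfamiliar with the API used in the patch above. The app imports, class names, and page size are assumptions for illustration, and the snippet presumes a configured Django/DRF project with `django-filter` installed:

```python
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics
from rest_framework.pagination import PageNumberPagination

from .models import Project                # hypothetical app imports
from .serializers import ProjectSerializer


class ProjectPagination(PageNumberPagination):
    page_size = 20  # assumed default; a real project would usually read this from settings


class ProjectListSketch(generics.ListCreateAPIView):
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    # Explicit per-view pagination; the upstream fix instead drops `pagination_class = None`
    # so the project-wide default pagination applies.
    pagination_class = ProjectPagination
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    search_fields = ('name', 'description')  # enables ?search=<term> on the list endpoint
```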
gh_patches_debug_20614 | rasdani/github-patches | git_diff | pytorch__examples-1189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `save_model` arg to `mnist_hogwild` example
Currently the example doesn't support the `--save_model` argument like the other examples
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mnist_hogwild/main.py`
Content:
```
1 from __future__ import print_function
2 import argparse
3 import torch
4 import torch.nn as nn
5 import torch.nn.functional as F
6 import torch.multiprocessing as mp
7 from torch.utils.data.sampler import Sampler
8 from torchvision import datasets, transforms
9
10 from train import train, test
11
12 # Training settings
13 parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
14 parser.add_argument('--batch-size', type=int, default=64, metavar='N',
15 help='input batch size for training (default: 64)')
16 parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
17 help='input batch size for testing (default: 1000)')
18 parser.add_argument('--epochs', type=int, default=10, metavar='N',
19 help='number of epochs to train (default: 10)')
20 parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
21 help='learning rate (default: 0.01)')
22 parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
23 help='SGD momentum (default: 0.5)')
24 parser.add_argument('--seed', type=int, default=1, metavar='S',
25 help='random seed (default: 1)')
26 parser.add_argument('--log-interval', type=int, default=10, metavar='N',
27 help='how many batches to wait before logging training status')
28 parser.add_argument('--num-processes', type=int, default=2, metavar='N',
29 help='how many training processes to use (default: 2)')
30 parser.add_argument('--cuda', action='store_true', default=False,
31 help='enables CUDA training')
32 parser.add_argument('--mps', action='store_true', default=False,
33 help='enables macOS GPU training')
34 parser.add_argument('--dry-run', action='store_true', default=False,
35 help='quickly check a single pass')
36
37 class Net(nn.Module):
38 def __init__(self):
39 super(Net, self).__init__()
40 self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
41 self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
42 self.conv2_drop = nn.Dropout2d()
43 self.fc1 = nn.Linear(320, 50)
44 self.fc2 = nn.Linear(50, 10)
45
46 def forward(self, x):
47 x = F.relu(F.max_pool2d(self.conv1(x), 2))
48 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
49 x = x.view(-1, 320)
50 x = F.relu(self.fc1(x))
51 x = F.dropout(x, training=self.training)
52 x = self.fc2(x)
53 return F.log_softmax(x, dim=1)
54
55
56 if __name__ == '__main__':
57 args = parser.parse_args()
58
59 use_cuda = args.cuda and torch.cuda.is_available()
60 use_mps = args.mps and torch.backends.mps.is_available()
61 if use_cuda:
62 device = torch.device("cuda")
63 elif use_mps:
64 device = torch.device("mps")
65 else:
66 device = torch.device("cpu")
67
68 transform=transforms.Compose([
69 transforms.ToTensor(),
70 transforms.Normalize((0.1307,), (0.3081,))
71 ])
72 dataset1 = datasets.MNIST('../data', train=True, download=True,
73 transform=transform)
74 dataset2 = datasets.MNIST('../data', train=False,
75 transform=transform)
76 kwargs = {'batch_size': args.batch_size,
77 'shuffle': True}
78 if use_cuda:
79 kwargs.update({'num_workers': 1,
80 'pin_memory': True,
81 })
82
83 torch.manual_seed(args.seed)
84 mp.set_start_method('spawn', force=True)
85
86 model = Net().to(device)
87 model.share_memory() # gradients are allocated lazily, so they are not shared here
88
89 processes = []
90 for rank in range(args.num_processes):
91 p = mp.Process(target=train, args=(rank, args, model, device,
92 dataset1, kwargs))
93 # We first train the model across `num_processes` processes
94 p.start()
95 processes.append(p)
96 for p in processes:
97 p.join()
98
99 # Once training is complete, we can test the model
100 test(args, model, device, dataset2, kwargs)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py
--- a/mnist_hogwild/main.py
+++ b/mnist_hogwild/main.py
@@ -30,7 +30,9 @@
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--mps', action='store_true', default=False,
- help='enables macOS GPU training')
+ help='enables macOS GPU training')
+parser.add_argument('--save_model', action='store_true', default=False,
+ help='save the trained model to state_dict')
parser.add_argument('--dry-run', action='store_true', default=False,
help='quickly check a single pass')
@@ -96,5 +98,8 @@
for p in processes:
p.join()
+ if args.save_model:
+ torch.save(model.state_dict(), "MNIST_hogwild.pt")
+
# Once training is complete, we can test the model
test(args, model, device, dataset2, kwargs)
| {"golden_diff": "diff --git a/mnist_hogwild/main.py b/mnist_hogwild/main.py\n--- a/mnist_hogwild/main.py\n+++ b/mnist_hogwild/main.py\n@@ -30,7 +30,9 @@\n parser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\n parser.add_argument('--mps', action='store_true', default=False,\n- help='enables macOS GPU training')\n+ help='enables macOS GPU training')\n+parser.add_argument('--save_model', action='store_true', default=False,\n+ help='save the trained model to state_dict')\n parser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n \n@@ -96,5 +98,8 @@\n for p in processes:\n p.join()\n \n+ if args.save_model:\n+ torch.save(model.state_dict(), \"MNIST_hogwild.pt\")\n+\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "issue": "Add `save_model` arg to `mnist_hogwild` example\nCurrently the example doesn't support the `--save_model` argument like the other examples\r\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision import datasets, transforms\n\nfrom train import train, test\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--num-processes', type=int, default=2, metavar='N',\n help='how many training processes to use (default: 2)')\nparser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--mps', action='store_true', default=False,\n help='enables macOS GPU training')\nparser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n use_cuda = args.cuda and torch.cuda.is_available()\n use_mps = args.mps and torch.backends.mps.is_available()\n if use_cuda:\n device = torch.device(\"cuda\")\n elif use_mps:\n device = torch.device(\"mps\")\n else:\n device 
= torch.device(\"cpu\")\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n kwargs = {'batch_size': args.batch_size,\n 'shuffle': True}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n })\n\n torch.manual_seed(args.seed)\n mp.set_start_method('spawn', force=True)\n\n model = Net().to(device)\n model.share_memory() # gradients are allocated lazily, so they are not shared here\n\n processes = []\n for rank in range(args.num_processes):\n p = mp.Process(target=train, args=(rank, args, model, device,\n dataset1, kwargs))\n # We first train the model across `num_processes` processes\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "path": "mnist_hogwild/main.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision import datasets, transforms\n\nfrom train import train, test\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--num-processes', type=int, default=2, metavar='N',\n help='how many training processes to use (default: 2)')\nparser.add_argument('--cuda', action='store_true', default=False,\n help='enables CUDA training')\nparser.add_argument('--mps', action='store_true', default=False,\n help='enables macOS GPU training')\nparser.add_argument('--save_model', action='store_true', default=False,\n help='save the trained model to state_dict')\nparser.add_argument('--dry-run', action='store_true', default=False,\n help='quickly check a single pass')\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n use_cuda = args.cuda and torch.cuda.is_available()\n use_mps = args.mps and 
torch.backends.mps.is_available()\n if use_cuda:\n device = torch.device(\"cuda\")\n elif use_mps:\n device = torch.device(\"mps\")\n else:\n device = torch.device(\"cpu\")\n\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n dataset1 = datasets.MNIST('../data', train=True, download=True,\n transform=transform)\n dataset2 = datasets.MNIST('../data', train=False,\n transform=transform)\n kwargs = {'batch_size': args.batch_size,\n 'shuffle': True}\n if use_cuda:\n kwargs.update({'num_workers': 1,\n 'pin_memory': True,\n })\n\n torch.manual_seed(args.seed)\n mp.set_start_method('spawn', force=True)\n\n model = Net().to(device)\n model.share_memory() # gradients are allocated lazily, so they are not shared here\n\n processes = []\n for rank in range(args.num_processes):\n p = mp.Process(target=train, args=(rank, args, model, device,\n dataset1, kwargs))\n # We first train the model across `num_processes` processes\n p.start()\n processes.append(p)\n for p in processes:\n p.join()\n\n if args.save_model:\n torch.save(model.state_dict(), \"MNIST_hogwild.pt\")\n\n # Once training is complete, we can test the model\n test(args, model, device, dataset2, kwargs)\n", "path": "mnist_hogwild/main.py"}]} | 1,460 | 235 |
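The change above boils down to a standard argparse flag plus a conditional `torch.save`. A stripped-down sketch follows; the tiny model and the output file name are placeholders rather than the example's real `Net`:

```python
import argparse

import torch
import torch.nn as nn

parser = argparse.ArgumentParser(description='save_model flag sketch')
parser.add_argument('--save_model', action='store_true', default=False,
                    help='save the trained model to state_dict')
args = parser.parse_args(['--save_model'])   # parse a fixed list so the sketch runs without a CLI

model = nn.Linear(4, 2)                      # stand-in for the trained network
if args.save_model:
    torch.save(model.state_dict(), 'model_sketch.pt')
    print('state_dict written to model_sketch.pt')
```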
gh_patches_debug_5067 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-1111 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Passwords beginning or ending with a whitespace are not supported
Due to POST argument stripping, passwords with a beginning or ending whitespace are not allowed.
**How to reproduce the issue**
Set up a user password with an ending or beginning whitespace.
**What you expected to happen**
The user should be allowed to log in with the password, given that a password may be any sequence of characters the user can reproduce.
**What actually happens**
The user is denied access, because the LoginHandler will strip all posted values before considering the password for authentication (line 81, get_argument has a default "strip=True")
**Share what version of JupyterHub you are using**
HEAD (006488fc749923851df97d47d8850bdf5fd157cf)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/handlers/login.py`
Content:
```
1 """HTTP Handlers for the hub server"""
2
3 # Copyright (c) Jupyter Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 from urllib.parse import urlparse
7
8 from tornado.escape import url_escape
9 from tornado import gen
10 from tornado.httputil import url_concat
11
12 from .base import BaseHandler
13
14
15 class LogoutHandler(BaseHandler):
16 """Log a user out by clearing their login cookie."""
17 def get(self):
18 user = self.get_current_user()
19 if user:
20 self.log.info("User logged out: %s", user.name)
21 self.clear_login_cookie()
22 self.statsd.incr('logout')
23 if self.authenticator.auto_login:
24 self.render('logout.html')
25 else:
26 self.redirect(self.settings['login_url'], permanent=False)
27
28
29 class LoginHandler(BaseHandler):
30 """Render the login page."""
31
32 def _render(self, login_error=None, username=None):
33 return self.render_template('login.html',
34 next=url_escape(self.get_argument('next', default='')),
35 username=username,
36 login_error=login_error,
37 custom_html=self.authenticator.custom_html,
38 login_url=self.settings['login_url'],
39 authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url),
40 )
41
42 def get(self):
43 self.statsd.incr('login.request')
44 next_url = self.get_argument('next', '')
45 if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)):
46 # treat absolute URLs for our host as absolute paths:
47 next_url = urlparse(next_url).path
48 elif not next_url.startswith('/'):
49 # disallow non-absolute next URLs (e.g. full URLs to other hosts)
50 next_url = ''
51 user = self.get_current_user()
52 if user:
53 if not next_url:
54 if user.running:
55 next_url = user.url
56 else:
57 next_url = self.hub.server.base_url
58 # set new login cookie
59 # because single-user cookie may have been cleared or incorrect
60 self.set_login_cookie(self.get_current_user())
61 self.redirect(next_url, permanent=False)
62 else:
63 if self.authenticator.auto_login:
64 auto_login_url = self.authenticator.login_url(self.hub.server.base_url)
65 if auto_login_url == self.settings['login_url']:
66 self.authenticator.auto_login = False
67 self.log.warning("Authenticator.auto_login cannot be used without a custom login_url")
68 else:
69 if next_url:
70 auto_login_url = url_concat(auto_login_url, {'next': next_url})
71 self.redirect(auto_login_url)
72 return
73 username = self.get_argument('username', default='')
74 self.finish(self._render(username=username))
75
76 @gen.coroutine
77 def post(self):
78 # parse the arguments dict
79 data = {}
80 for arg in self.request.arguments:
81 data[arg] = self.get_argument(arg)
82
83 auth_timer = self.statsd.timer('login.authenticate').start()
84 username = yield self.authenticate(data)
85 auth_timer.stop(send=False)
86
87 if username:
88 self.statsd.incr('login.success')
89 self.statsd.timing('login.authenticate.success', auth_timer.ms)
90 user = self.user_from_username(username)
91 already_running = False
92 if user.spawner:
93 status = yield user.spawner.poll()
94 already_running = (status == None)
95 if not already_running and not user.spawner.options_form:
96 yield self.spawn_single_user(user)
97 self.set_login_cookie(user)
98 next_url = self.get_argument('next', default='')
99 if not next_url.startswith('/'):
100 next_url = ''
101 next_url = next_url or self.hub.server.base_url
102 self.redirect(next_url)
103 self.log.info("User logged in: %s", username)
104 else:
105 self.statsd.incr('login.failure')
106 self.statsd.timing('login.authenticate.failure', auth_timer.ms)
107 self.log.debug("Failed login for %s", data.get('username', 'unknown user'))
108 html = self._render(
109 login_error='Invalid username or password',
110 username=username,
111 )
112 self.finish(html)
113
114
115 # /login renders the login page or the "Login with..." link,
116 # so it should always be registered.
117 # /logout clears cookies.
118 default_handlers = [
119 (r"/login", LoginHandler),
120 (r"/logout", LogoutHandler),
121 ]
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/handlers/login.py b/jupyterhub/handlers/login.py
--- a/jupyterhub/handlers/login.py
+++ b/jupyterhub/handlers/login.py
@@ -78,7 +78,7 @@
# parse the arguments dict
data = {}
for arg in self.request.arguments:
- data[arg] = self.get_argument(arg)
+ data[arg] = self.get_argument(arg, strip=False)
auth_timer = self.statsd.timer('login.authenticate').start()
username = yield self.authenticate(data)
| {"golden_diff": "diff --git a/jupyterhub/handlers/login.py b/jupyterhub/handlers/login.py\n--- a/jupyterhub/handlers/login.py\n+++ b/jupyterhub/handlers/login.py\n@@ -78,7 +78,7 @@\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n- data[arg] = self.get_argument(arg)\n+ data[arg] = self.get_argument(arg, strip=False)\n \n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n", "issue": "Passwords beginning or ending with a whitespace are not supported\nDue to POST argument stripping, passwords with a beginning or ending whitespace are not allowed.\r\n\r\n**How to reproduce the issue**\r\nSet up a user password with an ending or beginning whitespace.\r\n\r\n**What you expected to happen**\r\nThe user should be allowed to login with the password, given that the password should be any complicated sequence of characters the user can reproduce.\r\n\r\n**What actually happens**\r\nThe user is denied access, because the LoginHandler will strip all posted values before considering the password for authentication (line 81, get_argument has a default \"strip=True\")\r\n\r\n**Share what version of JupyterHub you are using**\r\nHEAD (006488fc749923851df97d47d8850bdf5fd157cf)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"HTTP Handlers for the hub server\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom urllib.parse import urlparse\n\nfrom tornado.escape import url_escape\nfrom tornado import gen\nfrom tornado.httputil import url_concat\n\nfrom .base import BaseHandler\n\n\nclass LogoutHandler(BaseHandler):\n \"\"\"Log a user out by clearing their login cookie.\"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n self.log.info(\"User logged out: %s\", user.name)\n self.clear_login_cookie()\n self.statsd.incr('logout')\n if self.authenticator.auto_login:\n self.render('logout.html')\n else:\n self.redirect(self.settings['login_url'], permanent=False)\n\n\nclass LoginHandler(BaseHandler):\n \"\"\"Render the login page.\"\"\"\n\n def _render(self, login_error=None, username=None):\n return self.render_template('login.html',\n next=url_escape(self.get_argument('next', default='')),\n username=username,\n login_error=login_error,\n custom_html=self.authenticator.custom_html,\n login_url=self.settings['login_url'],\n authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url),\n )\n\n def get(self):\n self.statsd.incr('login.request')\n next_url = self.get_argument('next', '')\n if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)):\n # treat absolute URLs for our host as absolute paths:\n next_url = urlparse(next_url).path\n elif not next_url.startswith('/'):\n # disallow non-absolute next URLs (e.g. 
full URLs to other hosts)\n next_url = ''\n user = self.get_current_user()\n if user:\n if not next_url:\n if user.running:\n next_url = user.url\n else:\n next_url = self.hub.server.base_url\n # set new login cookie\n # because single-user cookie may have been cleared or incorrect\n self.set_login_cookie(self.get_current_user())\n self.redirect(next_url, permanent=False)\n else:\n if self.authenticator.auto_login:\n auto_login_url = self.authenticator.login_url(self.hub.server.base_url)\n if auto_login_url == self.settings['login_url']:\n self.authenticator.auto_login = False\n self.log.warning(\"Authenticator.auto_login cannot be used without a custom login_url\")\n else:\n if next_url:\n auto_login_url = url_concat(auto_login_url, {'next': next_url})\n self.redirect(auto_login_url)\n return\n username = self.get_argument('username', default='')\n self.finish(self._render(username=username))\n\n @gen.coroutine\n def post(self):\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n data[arg] = self.get_argument(arg)\n\n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n auth_timer.stop(send=False)\n\n if username:\n self.statsd.incr('login.success')\n self.statsd.timing('login.authenticate.success', auth_timer.ms)\n user = self.user_from_username(username)\n already_running = False\n if user.spawner:\n status = yield user.spawner.poll()\n already_running = (status == None)\n if not already_running and not user.spawner.options_form:\n yield self.spawn_single_user(user)\n self.set_login_cookie(user)\n next_url = self.get_argument('next', default='')\n if not next_url.startswith('/'):\n next_url = ''\n next_url = next_url or self.hub.server.base_url\n self.redirect(next_url)\n self.log.info(\"User logged in: %s\", username)\n else:\n self.statsd.incr('login.failure')\n self.statsd.timing('login.authenticate.failure', auth_timer.ms)\n self.log.debug(\"Failed login for %s\", data.get('username', 'unknown user'))\n html = self._render(\n login_error='Invalid username or password',\n username=username,\n )\n self.finish(html)\n\n\n# /login renders the login page or the \"Login with...\" link,\n# so it should always be registered.\n# /logout clears cookies.\ndefault_handlers = [\n (r\"/login\", LoginHandler),\n (r\"/logout\", LogoutHandler),\n]\n", "path": "jupyterhub/handlers/login.py"}], "after_files": [{"content": "\"\"\"HTTP Handlers for the hub server\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom urllib.parse import urlparse\n\nfrom tornado.escape import url_escape\nfrom tornado import gen\nfrom tornado.httputil import url_concat\n\nfrom .base import BaseHandler\n\n\nclass LogoutHandler(BaseHandler):\n \"\"\"Log a user out by clearing their login cookie.\"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n self.log.info(\"User logged out: %s\", user.name)\n self.clear_login_cookie()\n self.statsd.incr('logout')\n if self.authenticator.auto_login:\n self.render('logout.html')\n else:\n self.redirect(self.settings['login_url'], permanent=False)\n\n\nclass LoginHandler(BaseHandler):\n \"\"\"Render the login page.\"\"\"\n\n def _render(self, login_error=None, username=None):\n return self.render_template('login.html',\n next=url_escape(self.get_argument('next', default='')),\n username=username,\n login_error=login_error,\n custom_html=self.authenticator.custom_html,\n login_url=self.settings['login_url'],\n 
authenticator_login_url=self.authenticator.login_url(self.hub.server.base_url),\n )\n\n def get(self):\n self.statsd.incr('login.request')\n next_url = self.get_argument('next', '')\n if (next_url + '/').startswith('%s://%s/' % (self.request.protocol, self.request.host)):\n # treat absolute URLs for our host as absolute paths:\n next_url = urlparse(next_url).path\n elif not next_url.startswith('/'):\n # disallow non-absolute next URLs (e.g. full URLs to other hosts)\n next_url = ''\n user = self.get_current_user()\n if user:\n if not next_url:\n if user.running:\n next_url = user.url\n else:\n next_url = self.hub.server.base_url\n # set new login cookie\n # because single-user cookie may have been cleared or incorrect\n self.set_login_cookie(self.get_current_user())\n self.redirect(next_url, permanent=False)\n else:\n if self.authenticator.auto_login:\n auto_login_url = self.authenticator.login_url(self.hub.server.base_url)\n if auto_login_url == self.settings['login_url']:\n self.authenticator.auto_login = False\n self.log.warning(\"Authenticator.auto_login cannot be used without a custom login_url\")\n else:\n if next_url:\n auto_login_url = url_concat(auto_login_url, {'next': next_url})\n self.redirect(auto_login_url)\n return\n username = self.get_argument('username', default='')\n self.finish(self._render(username=username))\n\n @gen.coroutine\n def post(self):\n # parse the arguments dict\n data = {}\n for arg in self.request.arguments:\n data[arg] = self.get_argument(arg, strip=False)\n\n auth_timer = self.statsd.timer('login.authenticate').start()\n username = yield self.authenticate(data)\n auth_timer.stop(send=False)\n\n if username:\n self.statsd.incr('login.success')\n self.statsd.timing('login.authenticate.success', auth_timer.ms)\n user = self.user_from_username(username)\n already_running = False\n if user.spawner:\n status = yield user.spawner.poll()\n already_running = (status == None)\n if not already_running and not user.spawner.options_form:\n yield self.spawn_single_user(user)\n self.set_login_cookie(user)\n next_url = self.get_argument('next', default='')\n if not next_url.startswith('/'):\n next_url = ''\n next_url = next_url or self.hub.server.base_url\n self.redirect(next_url)\n self.log.info(\"User logged in: %s\", username)\n else:\n self.statsd.incr('login.failure')\n self.statsd.timing('login.authenticate.failure', auth_timer.ms)\n self.log.debug(\"Failed login for %s\", data.get('username', 'unknown user'))\n html = self._render(\n login_error='Invalid username or password',\n username=username,\n )\n self.finish(html)\n\n\n# /login renders the login page or the \"Login with...\" link,\n# so it should always be registered.\n# /logout clears cookies.\ndefault_handlers = [\n (r\"/login\", LoginHandler),\n (r\"/logout\", LogoutHandler),\n]\n", "path": "jupyterhub/handlers/login.py"}]} | 1,622 | 123 |
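The fix hinges on Tornado's `get_argument` stripping whitespace by default. A minimal handler illustrating the difference is sketched below; the handler name is invented, and actually serving it would require wiring it into a Tornado `Application`:

```python
from tornado.web import RequestHandler


class LoginDemoHandler(RequestHandler):
    def post(self):
        stripped = self.get_argument("password")                 # strip=True is the default
        preserved = self.get_argument("password", strip=False)   # keeps leading/trailing whitespace
        self.write({"stripped_equals_preserved": stripped == preserved})
```

For a password such as `"secret "`, `stripped` becomes `"secret"` while `preserved` keeps the trailing space, which is why the patch passes `strip=False` when collecting the login form data.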
gh_patches_debug_34606 | rasdani/github-patches | git_diff | ansible__awx-8016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add insights_credential parameter to tower_inventory
<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:
- http://webchat.freenode.net/?channels=ansible-awx
- https://groups.google.com/forum/#!forum/awx-project
We have to limit this because of limited volunteer time to respond to issues! -->
##### ISSUE TYPE
- Feature Idea
##### SUMMARY
<!-- Briefly describe the problem or desired enhancement. -->
Per PR #7963 tower_inventory is missing support for the insights_credential API parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx_collection/plugins/modules/tower_inventory.py`
Content:
```
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 # (c) 2017, Wayne Witzel III <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10
11 ANSIBLE_METADATA = {'metadata_version': '1.1',
12 'status': ['preview'],
13 'supported_by': 'community'}
14
15
16 DOCUMENTATION = '''
17 ---
18 module: tower_inventory
19 author: "Wayne Witzel III (@wwitzel3)"
20 short_description: create, update, or destroy Ansible Tower inventory.
21 description:
22 - Create, update, or destroy Ansible Tower inventories. See
23 U(https://www.ansible.com/tower) for an overview.
24 options:
25 name:
26 description:
27 - The name to use for the inventory.
28 required: True
29 type: str
30 description:
31 description:
32 - The description to use for the inventory.
33 type: str
34 organization:
35 description:
36 - Organization the inventory belongs to.
37 required: True
38 type: str
39 variables:
40 description:
41 - Inventory variables.
42 type: dict
43 kind:
44 description:
45 - The kind field. Cannot be modified after created.
46 default: ""
47 choices: ["", "smart"]
48 type: str
49 host_filter:
50 description:
51 - The host_filter field. Only useful when C(kind=smart).
52 type: str
53 state:
54 description:
55 - Desired state of the resource.
56 default: "present"
57 choices: ["present", "absent"]
58 type: str
59 extends_documentation_fragment: awx.awx.auth
60 '''
61
62
63 EXAMPLES = '''
64 - name: Add tower inventory
65 tower_inventory:
66 name: "Foo Inventory"
67 description: "Our Foo Cloud Servers"
68 organization: "Bar Org"
69 state: present
70 tower_config_file: "~/tower_cli.cfg"
71 '''
72
73
74 from ..module_utils.tower_api import TowerAPIModule
75 import json
76
77
78 def main():
79 # Any additional arguments that are not fields of the item can be added here
80 argument_spec = dict(
81 name=dict(required=True),
82 description=dict(),
83 organization=dict(required=True),
84 variables=dict(type='dict'),
85 kind=dict(choices=['', 'smart'], default=''),
86 host_filter=dict(),
87 state=dict(choices=['present', 'absent'], default='present'),
88 )
89
90 # Create a module for ourselves
91 module = TowerAPIModule(argument_spec=argument_spec)
92
93 # Extract our parameters
94 name = module.params.get('name')
95 description = module.params.get('description')
96 organization = module.params.get('organization')
97 variables = module.params.get('variables')
98 state = module.params.get('state')
99 kind = module.params.get('kind')
100 host_filter = module.params.get('host_filter')
101
102 # Attempt to look up the related items the user specified (these will fail the module if not found)
103 org_id = module.resolve_name_to_id('organizations', organization)
104
105 # Attempt to look up inventory based on the provided name and org ID
106 inventory = module.get_one('inventories', **{
107 'data': {
108 'name': name,
109 'organization': org_id
110 }
111 })
112
113 if state == 'absent':
114 # If the state was absent we can let the module delete it if needed, the module will handle exiting from this
115 module.delete_if_needed(inventory)
116
117 # Create the data that gets sent for create and update
118 inventory_fields = {
119 'name': name,
120 'organization': org_id,
121 'kind': kind,
122 'host_filter': host_filter,
123 }
124 if description is not None:
125 inventory_fields['description'] = description
126 if variables is not None:
127 inventory_fields['variables'] = json.dumps(variables)
128
129 # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.
130 if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':
131 module.fail_json(msg='You cannot turn a regular inventory into a "smart" inventory.')
132
133 # If the state was present and we can let the module build or update the existing inventory, this will return on its own
134 module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory')
135
136
137 if __name__ == '__main__':
138 main()
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awx_collection/plugins/modules/tower_inventory.py b/awx_collection/plugins/modules/tower_inventory.py
--- a/awx_collection/plugins/modules/tower_inventory.py
+++ b/awx_collection/plugins/modules/tower_inventory.py
@@ -48,7 +48,11 @@
type: str
host_filter:
description:
- - The host_filter field. Only useful when C(kind=smart).
+ - The host_filter field. Only useful when C(kind=smart).
+ type: str
+ insights_credential:
+ description:
+ - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.
type: str
state:
description:
@@ -84,6 +88,7 @@
variables=dict(type='dict'),
kind=dict(choices=['', 'smart'], default=''),
host_filter=dict(),
+ insights_credential=dict(),
state=dict(choices=['present', 'absent'], default='present'),
)
@@ -98,6 +103,7 @@
state = module.params.get('state')
kind = module.params.get('kind')
host_filter = module.params.get('host_filter')
+ insights_credential = module.params.get('insights_credential')
# Attempt to look up the related items the user specified (these will fail the module if not found)
org_id = module.resolve_name_to_id('organizations', organization)
@@ -125,6 +131,8 @@
inventory_fields['description'] = description
if variables is not None:
inventory_fields['variables'] = json.dumps(variables)
+ if insights_credential is not None:
+ inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential)
# We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.
if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':
| {"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_inventory.py b/awx_collection/plugins/modules/tower_inventory.py\n--- a/awx_collection/plugins/modules/tower_inventory.py\n+++ b/awx_collection/plugins/modules/tower_inventory.py\n@@ -48,7 +48,11 @@\n type: str\n host_filter:\n description:\n- - The host_filter field. Only useful when C(kind=smart).\n+ - The host_filter field. Only useful when C(kind=smart).\n+ type: str\n+ insights_credential:\n+ description:\n+ - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.\n type: str\n state:\n description:\n@@ -84,6 +88,7 @@\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n+ insights_credential=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n \n@@ -98,6 +103,7 @@\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n+ insights_credential = module.params.get('insights_credential')\n \n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n@@ -125,6 +131,8 @@\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n+ if insights_credential is not None:\n+ inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential)\n \n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n", "issue": "Add insignts_credential paramter to tower_inventory\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Feature Idea\r\n\r\n##### SUMMARY\r\n<!-- Briefly describe the problem or desired enhancement. -->\r\nPer PR #7963 tower_inventory is missing support for the insights_credential API parameter.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_inventory\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Ansible Tower inventory.\ndescription:\n - Create, update, or destroy Ansible Tower inventories. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the inventory.\n required: True\n type: str\n description:\n description:\n - The description to use for the inventory.\n type: str\n organization:\n description:\n - Organization the inventory belongs to.\n required: True\n type: str\n variables:\n description:\n - Inventory variables.\n type: dict\n kind:\n description:\n - The kind field. 
Cannot be modified after created.\n default: \"\"\n choices: [\"\", \"smart\"]\n type: str\n host_filter:\n description:\n - The host_filter field. Only useful when C(kind=smart).\n type: str\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\"]\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add tower inventory\n tower_inventory:\n name: \"Foo Inventory\"\n description: \"Our Foo Cloud Servers\"\n organization: \"Bar Org\"\n state: present\n tower_config_file: \"~/tower_cli.cfg\"\n'''\n\n\nfrom ..module_utils.tower_api import TowerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n description=dict(),\n organization=dict(required=True),\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n description = module.params.get('description')\n organization = module.params.get('organization')\n variables = module.params.get('variables')\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n\n # Attempt to look up inventory based on the provided name and org ID\n inventory = module.get_one('inventories', **{\n 'data': {\n 'name': name,\n 'organization': org_id\n }\n })\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(inventory)\n\n # Create the data that gets sent for create and update\n inventory_fields = {\n 'name': name,\n 'organization': org_id,\n 'kind': kind,\n 'host_filter': host_filter,\n }\n if description is not None:\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n\n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n module.fail_json(msg='You cannot turn a regular inventory into a \"smart\" inventory.')\n\n # If the state was present and we can let the module build or update the existing inventory, this will return on its own\n module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory')\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_inventory.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2017, Wayne Witzel III <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_inventory\nauthor: \"Wayne Witzel III (@wwitzel3)\"\nshort_description: create, update, or destroy Ansible Tower 
inventory.\ndescription:\n - Create, update, or destroy Ansible Tower inventories. See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - The name to use for the inventory.\n required: True\n type: str\n description:\n description:\n - The description to use for the inventory.\n type: str\n organization:\n description:\n - Organization the inventory belongs to.\n required: True\n type: str\n variables:\n description:\n - Inventory variables.\n type: dict\n kind:\n description:\n - The kind field. Cannot be modified after created.\n default: \"\"\n choices: [\"\", \"smart\"]\n type: str\n host_filter:\n description:\n - The host_filter field. Only useful when C(kind=smart).\n type: str\n insights_credential:\n description:\n - Credentials to be used by hosts belonging to this inventory when accessing Red Hat Insights API.\n type: str\n state:\n description:\n - Desired state of the resource.\n default: \"present\"\n choices: [\"present\", \"absent\"]\n type: str\nextends_documentation_fragment: awx.awx.auth\n'''\n\n\nEXAMPLES = '''\n- name: Add tower inventory\n tower_inventory:\n name: \"Foo Inventory\"\n description: \"Our Foo Cloud Servers\"\n organization: \"Bar Org\"\n state: present\n tower_config_file: \"~/tower_cli.cfg\"\n'''\n\n\nfrom ..module_utils.tower_api import TowerAPIModule\nimport json\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(required=True),\n description=dict(),\n organization=dict(required=True),\n variables=dict(type='dict'),\n kind=dict(choices=['', 'smart'], default=''),\n host_filter=dict(),\n insights_credential=dict(),\n state=dict(choices=['present', 'absent'], default='present'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(argument_spec=argument_spec)\n\n # Extract our parameters\n name = module.params.get('name')\n description = module.params.get('description')\n organization = module.params.get('organization')\n variables = module.params.get('variables')\n state = module.params.get('state')\n kind = module.params.get('kind')\n host_filter = module.params.get('host_filter')\n insights_credential = module.params.get('insights_credential')\n\n # Attempt to look up the related items the user specified (these will fail the module if not found)\n org_id = module.resolve_name_to_id('organizations', organization)\n\n # Attempt to look up inventory based on the provided name and org ID\n inventory = module.get_one('inventories', **{\n 'data': {\n 'name': name,\n 'organization': org_id\n }\n })\n\n if state == 'absent':\n # If the state was absent we can let the module delete it if needed, the module will handle exiting from this\n module.delete_if_needed(inventory)\n\n # Create the data that gets sent for create and update\n inventory_fields = {\n 'name': name,\n 'organization': org_id,\n 'kind': kind,\n 'host_filter': host_filter,\n }\n if description is not None:\n inventory_fields['description'] = description\n if variables is not None:\n inventory_fields['variables'] = json.dumps(variables)\n if insights_credential is not None:\n inventory_fields['insights_credential'] = module.resolve_name_to_id('credentials', insights_credential)\n\n # We need to perform a check to make sure you are not trying to convert a regular inventory into a smart one.\n if inventory and inventory['kind'] == '' and inventory_fields['kind'] == 'smart':\n module.fail_json(msg='You cannot turn a regular inventory into a \"smart\" inventory.')\n\n 
# If the state was present and we can let the module build or update the existing inventory, this will return on its own\n module.create_or_update_if_needed(inventory, inventory_fields, endpoint='inventories', item_type='inventory')\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_inventory.py"}]} | 1,692 | 433 |
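The pattern in the patch above — declare an optional module parameter, then add it to the request payload only when supplied — can be shown without the AWX-specific `TowerAPIModule`. The dictionaries and the fake lookup below are illustrative stand-ins, not the real Tower API:

```python
# Hypothetical stand-ins for module.params and the Tower credential lookup.
params = {'name': 'Foo Inventory', 'insights_credential': 'My Insights Cred'}


def resolve_name_to_id(endpoint: str, name: str):
    # The real module queries the Tower API; this sketch just fakes an id.
    return {'My Insights Cred': 42}.get(name)


inventory_fields = {'name': params['name']}
insights_credential = params.get('insights_credential')
if insights_credential is not None:
    inventory_fields['insights_credential'] = resolve_name_to_id('credentials', insights_credential)

print(inventory_fields)   # {'name': 'Foo Inventory', 'insights_credential': 42}
```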
gh_patches_debug_12719 | rasdani/github-patches | git_diff | microsoft__playwright-python-593 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Async API - asyncio error
After updating to version 1.9.2, I get the following error with the **Async API**:
Exception ignored in: <function BaseSubprocessTransport.__del__ at 0x0000000002E1E9D0>
Traceback (most recent call last):
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_subprocess.py", line 126, in __del__
self.close()
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_subprocess.py", line 104, in close
proto.pipe.close()
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 108, in close
self._loop.call_soon(self._call_connection_lost, None)
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 746, in call_soon
self._check_closed()
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 510, in _check_closed
raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
Exception ignored in: <function _ProactorBasePipeTransport.__del__ at 0x0000000002E4A280>
Traceback (most recent call last):
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 116, in __del__
self.close()
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\proactor_events.py", line 108, in close
self._loop.call_soon(self._call_connection_lost, None)
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 746, in call_soon
self._check_closed()
File "C:\Users\Matthew\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py", line 510, in _check_closed
raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
Versions:
playwright=1.9.2
asyncio=3.4.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/_impl/_transport.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import io
17 import json
18 import os
19 import sys
20 from pathlib import Path
21 from typing import Dict, Optional
22
23
24 # Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77
25 def _get_stderr_fileno() -> Optional[int]:
26 try:
27 return sys.stderr.fileno()
28 except (AttributeError, io.UnsupportedOperation):
29 # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
30 # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
31 # This is potentially dangerous, but the best we can do.
32 if not hasattr(sys, "__stderr__") or not sys.__stderr__:
33 return None
34 return sys.__stderr__.fileno()
35
36
37 class Transport:
38 def __init__(self, driver_executable: Path) -> None:
39 super().__init__()
40 self.on_message = lambda _: None
41 self._stopped = False
42 self._driver_executable = driver_executable
43 self._loop: asyncio.AbstractEventLoop
44
45 def stop(self) -> None:
46 self._stopped = True
47 self._output.close()
48
49 async def wait_until_stopped(self) -> None:
50 await self._stopped_future
51
52 async def run(self) -> None:
53 self._loop = asyncio.get_running_loop()
54 self._stopped_future: asyncio.Future = asyncio.Future()
55
56 proc = await asyncio.create_subprocess_exec(
57 str(self._driver_executable),
58 "run-driver",
59 stdin=asyncio.subprocess.PIPE,
60 stdout=asyncio.subprocess.PIPE,
61 stderr=_get_stderr_fileno(),
62 limit=32768,
63 )
64 assert proc.stdout
65 assert proc.stdin
66 self._output = proc.stdin
67
68 while not self._stopped:
69 try:
70 buffer = await proc.stdout.readexactly(4)
71 length = int.from_bytes(buffer, byteorder="little", signed=False)
72 buffer = bytes(0)
73 while length:
74 to_read = min(length, 32768)
75 data = await proc.stdout.readexactly(to_read)
76 length -= to_read
77 if len(buffer):
78 buffer = buffer + data
79 else:
80 buffer = data
81 obj = json.loads(buffer)
82
83 if "DEBUGP" in os.environ: # pragma: no cover
84 print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2))
85 self.on_message(obj)
86 except asyncio.IncompleteReadError:
87 break
88 await asyncio.sleep(0)
89 self._stopped_future.set_result(None)
90
91 def send(self, message: Dict) -> None:
92 msg = json.dumps(message)
93 if "DEBUGP" in os.environ: # pragma: no cover
94 print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2))
95 data = msg.encode()
96 self._output.write(
97 len(data).to_bytes(4, byteorder="little", signed=False) + data
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py
--- a/playwright/_impl/_transport.py
+++ b/playwright/_impl/_transport.py
@@ -48,12 +48,13 @@
async def wait_until_stopped(self) -> None:
await self._stopped_future
+ await self._proc.wait()
async def run(self) -> None:
self._loop = asyncio.get_running_loop()
self._stopped_future: asyncio.Future = asyncio.Future()
- proc = await asyncio.create_subprocess_exec(
+ self._proc = proc = await asyncio.create_subprocess_exec(
str(self._driver_executable),
"run-driver",
stdin=asyncio.subprocess.PIPE,
| {"golden_diff": "diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py\n--- a/playwright/_impl/_transport.py\n+++ b/playwright/_impl/_transport.py\n@@ -48,12 +48,13 @@\n \n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n+ await self._proc.wait()\n \n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n \n- proc = await asyncio.create_subprocess_exec(\n+ self._proc = proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n", "issue": "Async API - asyncio error\nAfter updating to the 1.9.2 version, for **Async API** I get the following error:\r\n\r\nException ignored in: <function BaseSubprocessTransport.__del__ at 0x0000000002E1E9D0>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_subprocess.py\", line 126, in __del__\r\n self.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_subprocess.py\", line 104, in close \r\n proto.pipe.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 108, in close \r\n self._loop.call_soon(self._call_connection_lost, None)\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 746, in call_soon\r\n self._check_closed()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 510, in _check_closed\r\n raise RuntimeError('Event loop is closed')\r\nRuntimeError: Event loop is closed\r\nException ignored in: <function _ProactorBasePipeTransport.__del__ at 0x0000000002E4A280>\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 116, in __del__\r\n self.close()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\proactor_events.py\", line 108, in close\r\n self._loop.call_soon(self._call_connection_lost, None)\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 746, in call_soon\r\n self._check_closed()\r\n File \"C:\\Users\\Matthew\\AppData\\Local\\Programs\\Python\\Python39\\lib\\asyncio\\base_events.py\", line 510, in _check_closed\r\n raise RuntimeError('Event loop is closed')\r\nRuntimeError: Event loop is closed\r\n\r\nVersions:\r\nplaywright=1.9.2\r\nasyncio=3.4.3\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\n\n# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\ndef 
_get_stderr_fileno() -> Optional[int]:\n try:\n return sys.stderr.fileno()\n except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n if not hasattr(sys, \"__stderr__\") or not sys.__stderr__:\n return None\n return sys.__stderr__.fileno()\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n\n proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n self._stopped_future.set_result(None)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\n\n# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\ndef _get_stderr_fileno() -> Optional[int]:\n try:\n return sys.stderr.fileno()\n except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n if not hasattr(sys, \"__stderr__\") or not 
sys.__stderr__:\n return None\n return sys.__stderr__.fileno()\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def wait_until_stopped(self) -> None:\n await self._stopped_future\n await self._proc.wait()\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n self._stopped_future: asyncio.Future = asyncio.Future()\n\n self._proc = proc = await asyncio.create_subprocess_exec(\n str(self._driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n self._stopped_future.set_result(None)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}]} | 1,873 | 163 |
gh_patches_debug_47861 | rasdani/github-patches | git_diff | saleor__saleor-4008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate choices in shipping address
### What I'm trying to achieve
I'm trying to set a shipping address for a Russian user, and there are duplicate values in the "Oblast" selector.
### Steps to reproduce the problem
1. Create new shipping address
2. Country --> Russia
3. There are duplicated values in "Oblast" selector
### What I expected to happen
There are no duplicated values in "Oblast" selector
### Screenshots
What happens now
<img src="https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png" width="300">
What I expect to see
<img src="https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png" width="300">
**System information**
Operating system: Manjaro Linux
Browser: Google Chrome 72.0.3626.81
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/account/forms.py`
Content:
```
1 from captcha.fields import ReCaptchaField
2 from django import forms
3 from django.conf import settings
4 from django.contrib.auth import forms as django_forms, update_session_auth_hash
5 from django.utils.translation import pgettext, pgettext_lazy
6 from phonenumbers.phonenumberutil import country_code_for_region
7
8 from ..account.models import User
9 from . import emails
10 from .i18n import AddressMetaForm, get_address_form_class
11
12
13 class FormWithReCaptcha(forms.BaseForm):
14 def __new__(cls, *args, **kwargs):
15 if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:
16 # insert a Google reCaptcha field inside the form
17 # note: label is empty, the reCaptcha is self-explanatory making
18 # the form simpler for the user.
19 cls.base_fields['_captcha'] = ReCaptchaField(label='')
20 return super(FormWithReCaptcha, cls).__new__(cls)
21
22
23 def get_address_form(
24 data, country_code, initial=None, instance=None, **kwargs):
25 country_form = AddressMetaForm(data, initial=initial)
26 preview = False
27 if country_form.is_valid():
28 country_code = country_form.cleaned_data['country']
29 preview = country_form.cleaned_data['preview']
30
31 if initial is None and country_code:
32 initial = {}
33 if country_code:
34 initial['phone'] = '+{}'.format(country_code_for_region(country_code))
35
36 address_form_class = get_address_form_class(country_code)
37
38 if not preview and instance is not None:
39 address_form_class = get_address_form_class(instance.country.code)
40 address_form = address_form_class(data, instance=instance, **kwargs)
41 else:
42 initial_address = (
43 initial if not preview
44 else data.dict() if data is not None else data)
45 address_form = address_form_class(
46 not preview and data or None,
47 initial=initial_address,
48 **kwargs)
49 return address_form, preview
50
51
52 class ChangePasswordForm(django_forms.PasswordChangeForm):
53 def __init__(self, *args, **kwargs):
54 super().__init__(*args, **kwargs)
55 self.fields['new_password1'].user = self.user
56 self.fields['old_password'].widget.attrs['placeholder'] = ''
57 self.fields['new_password1'].widget.attrs['placeholder'] = ''
58 del self.fields['new_password2']
59
60
61 def logout_on_password_change(request, user):
62 if (update_session_auth_hash is not None and
63 not settings.LOGOUT_ON_PASSWORD_CHANGE):
64 update_session_auth_hash(request, user)
65
66
67 class LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):
68 username = forms.EmailField(
69 label=pgettext('Form field', 'Email'), max_length=75)
70
71 def __init__(self, request=None, *args, **kwargs):
72 super().__init__(request=request, *args, **kwargs)
73 if request:
74 email = request.GET.get('email')
75 if email:
76 self.fields['username'].initial = email
77
78
79 class SignupForm(forms.ModelForm, FormWithReCaptcha):
80 password = forms.CharField(
81 widget=forms.PasswordInput,
82 label=pgettext('Password', 'Password'))
83 email = forms.EmailField(
84 label=pgettext('Email', 'Email'),
85 error_messages={
86 'unique': pgettext_lazy(
87 'Registration error',
88 'This email has already been registered.')})
89
90 class Meta:
91 model = User
92 fields = ('email',)
93
94 def __init__(self, *args, **kwargs):
95 super().__init__(*args, **kwargs)
96 if self._meta.model.USERNAME_FIELD in self.fields:
97 self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
98 {'autofocus': ''})
99
100 def save(self, request=None, commit=True):
101 user = super().save(commit=False)
102 password = self.cleaned_data['password']
103 user.set_password(password)
104 if commit:
105 user.save()
106 return user
107
108
109 class PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):
110 """Allow resetting passwords.
111
112 This subclass overrides sending emails to use templated email.
113 """
114
115 def get_users(self, email):
116 active_users = User.objects.filter(email__iexact=email, is_active=True)
117 return active_users
118
119 def send_mail(
120 self, subject_template_name, email_template_name, context,
121 from_email, to_email, html_email_template_name=None):
122 # Passing the user object to the Celery task throws an
123 # error "'User' is not JSON serializable". Since it's not used in our
124 # template, we remove it from the context.
125 del context['user']
126 emails.send_password_reset_email.delay(context, to_email)
127
128
129 class NameForm(forms.ModelForm):
130 class Meta:
131 model = User
132 fields = ['first_name', 'last_name']
133 labels = {
134 'first_name': pgettext_lazy(
135 'Customer form: Given name field', 'Given name'),
136 'last_name': pgettext_lazy(
137 'Customer form: Family name field', 'Family name')}
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/account/forms.py b/saleor/account/forms.py
--- a/saleor/account/forms.py
+++ b/saleor/account/forms.py
@@ -46,6 +46,11 @@
not preview and data or None,
initial=initial_address,
**kwargs)
+
+ if hasattr(address_form.fields['country_area'], 'choices'):
+ choices = address_form.fields['country_area'].choices
+ choices = [(choice[1], choice[1]) for choice in choices]
+ address_form.fields['country_area'].choices = choices
return address_form, preview
| {"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -46,6 +46,11 @@\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n+\n+ if hasattr(address_form.fields['country_area'], 'choices'):\n+ choices = address_form.fields['country_area'].choices\n+ choices = [(choice[1], choice[1]) for choice in choices]\n+ address_form.fields['country_area'].choices = choices\n return address_form, preview\n", "issue": "Duplicate choices in shipping address\n### What I'm trying to achieve\r\nI'm trying to set shipping user for Russian user and there are duplicate values in \"Oblast\" selector.\r\n\r\n### Steps to reproduce the problem\r\n1. Create new shipping address\r\n2. Country --> Russia\r\n3. There are duplicated values in \"Oblast\" selector\r\n\r\n### What I expected to happen\r\nThere are no duplicated values in \"Oblast\" selector\r\n\r\n### Screenshots\r\nWhat happens now\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255369-8a239600-36d6-11e9-84a6-24a10b96a321.png\" width=\"300\">\r\n\r\n\r\nWhat I expect to see\r\n<img src=\"https://user-images.githubusercontent.com/13136992/53255400-99a2df00-36d6-11e9-8913-ecaec174487a.png\" width=\"300\">\r\n\r\n\r\n**System information**\r\nOperating system: Manjaro Linux\r\nBrowser: Google Chrome 72.0.3626.81\r\n\n", "before_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . 
import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, 
to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}], "after_files": [{"content": "from captcha.fields import ReCaptchaField\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom . import emails\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\nclass FormWithReCaptcha(forms.BaseForm):\n def __new__(cls, *args, **kwargs):\n if settings.RECAPTCHA_PUBLIC_KEY and settings.RECAPTCHA_PRIVATE_KEY:\n # insert a Google reCaptcha field inside the form\n # note: label is empty, the reCaptcha is self-explanatory making\n # the form simpler for the user.\n cls.base_fields['_captcha'] = ReCaptchaField(label='')\n return super(FormWithReCaptcha, cls).__new__(cls)\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n\n if hasattr(address_form.fields['country_area'], 'choices'):\n choices = address_form.fields['country_area'].choices\n choices = [(choice[1], choice[1]) for choice in choices]\n address_form.fields['country_area'].choices = choices\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm, FormWithReCaptcha):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n 
self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm, FormWithReCaptcha):\n password = forms.CharField(\n widget=forms.PasswordInput,\n label=pgettext('Password', 'Password'))\n email = forms.EmailField(\n label=pgettext('Email', 'Email'),\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm, FormWithReCaptcha):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n # Passing the user object to the Celery task throws an\n # error \"'User' is not JSON serializable\". Since it's not used in our\n # template, we remove it from the context.\n del context['user']\n emails.send_password_reset_email.delay(context, to_email)\n\n\nclass NameForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': pgettext_lazy(\n 'Customer form: Given name field', 'Given name'),\n 'last_name': pgettext_lazy(\n 'Customer form: Family name field', 'Family name')}\n", "path": "saleor/account/forms.py"}]} | 1,929 | 133 |
gh_patches_debug_8100 | rasdani/github-patches | git_diff | WeblateOrg__weblate-11568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Time to use `build` from `setuptools` instead of `distutils`?
### Describe the problem
The following feature in setuptools has been deprecated for almost 2 years and is about to be removed:
https://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27
It might be a good idea to import `build` directly from setuptools for the following code:
https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9
https://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58
(`build` is available directly from setuptools, starting on version v62.4.0)
### Describe the solution you would like
Whenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable).
### Describe alternatives you have considered
_No response_
### Screenshots
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright © Michal Čihař <[email protected]>
4 #
5 # SPDX-License-Identifier: GPL-3.0-or-later
6
7 import os
8 from distutils import log
9 from distutils.command.build import build
10 from distutils.core import Command
11 from glob import glob
12 from itertools import chain
13
14 from setuptools import setup
15 from setuptools.command.build_py import build_py
16 from setuptools.modified import newer
17 from translate.tools.pocompile import convertmo
18
19 LOCALE_MASKS = [
20 "weblate/locale/*/LC_MESSAGES/*.po",
21 ]
22
23
24 class WeblateBuildPy(build_py):
25 def find_package_modules(self, package, package_dir):
26 """Filter settings.py from built module."""
27 result = super().find_package_modules(package, package_dir)
28 return [item for item in result if item[2] != "weblate/settings.py"]
29
30
31 class BuildMo(Command):
32 description = "update MO files to match PO"
33 user_options = []
34
35 def initialize_options(self) -> None:
36 self.build_base = None
37
38 def finalize_options(self) -> None:
39 self.set_undefined_options("build", ("build_base", "build_base"))
40
41 def run(self) -> None:
42 for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):
43 output = os.path.splitext(name)[0] + ".mo"
44 if not newer(name, output):
45 continue
46 self.announce(f"compiling {name} -> {output}", level=log.INFO)
47 with open(name, "rb") as pofile, open(output, "wb") as mofile:
48 convertmo(pofile, mofile, None)
49
50
51 class WeblateBuild(build):
52 """Override the default build with new subcommands."""
53
54 # The build_mo has to be before build_data
55 sub_commands = [
56 ("build_mo", lambda self: True), # noqa: ARG005
57 *build.sub_commands,
58 ]
59
60
61 setup(
62 cmdclass={"build_py": WeblateBuildPy, "build_mo": BuildMo, "build": WeblateBuild},
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,12 +6,12 @@
import os
from distutils import log
-from distutils.command.build import build
from distutils.core import Command
from glob import glob
from itertools import chain
from setuptools import setup
+from setuptools.command.build import build
from setuptools.command.build_py import build_py
from setuptools.modified import newer
from translate.tools.pocompile import convertmo
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -6,12 +6,12 @@\n \n import os\n from distutils import log\n-from distutils.command.build import build\n from distutils.core import Command\n from glob import glob\n from itertools import chain\n \n from setuptools import setup\n+from setuptools.command.build import build\n from setuptools.command.build_py import build_py\n from setuptools.modified import newer\n from translate.tools.pocompile import convertmo\n", "issue": "Time to use `build` from `setuptools` instead of `distutils`?\n### Describe the problem\n\nThe following feature in setuptools has been deprecated for almost 2 years and is about to be removed:\r\n\r\nhttps://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27\r\n\r\nIt might be a good idea to import `build` directly from setuptools for the following code:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58\r\n\r\n(`build` is available directly from setuptools, starting on version v62.4.0)\n\n### Describe the solution you would like\n\nWhenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable).\n\n### Describe alternatives you have considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\nTime to use `build` from `setuptools` instead of `distutils`?\n### Describe the problem\n\nThe following feature in setuptools has been deprecated for almost 2 years and is about to be removed:\r\n\r\nhttps://github.com/pypa/setuptools/blob/1ed759173983656734c3606e9c97a348895e5e0c/setuptools/command/build.py#L13-L27\r\n\r\nIt might be a good idea to import `build` directly from setuptools for the following code:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L9\r\nhttps://github.com/WeblateOrg/weblate/blob/47f9f2870c4ed9fd5429eebfacc61d2267a5bb31/setup.py#L51-L58\r\n\r\n(`build` is available directly from setuptools, starting on version v62.4.0)\n\n### Describe the solution you would like\n\nWhenever possible, it might be a good idea to import from setuptools (and minimise imports to `distutils` to the minimum viable).\n\n### Describe alternatives you have considered\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport os\nfrom distutils import log\nfrom distutils.command.build import build\nfrom distutils.core import Command\nfrom glob import glob\nfrom itertools import chain\n\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.modified import newer\nfrom translate.tools.pocompile import convertmo\n\nLOCALE_MASKS = [\n \"weblate/locale/*/LC_MESSAGES/*.po\",\n]\n\n\nclass WeblateBuildPy(build_py):\n def find_package_modules(self, package, package_dir):\n \"\"\"Filter settings.py from built module.\"\"\"\n result = super().find_package_modules(package, package_dir)\n return [item for item in result if item[2] != \"weblate/settings.py\"]\n\n\nclass BuildMo(Command):\n description = \"update MO files to match PO\"\n user_options = []\n\n def 
initialize_options(self) -> None:\n self.build_base = None\n\n def finalize_options(self) -> None:\n self.set_undefined_options(\"build\", (\"build_base\", \"build_base\"))\n\n def run(self) -> None:\n for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):\n output = os.path.splitext(name)[0] + \".mo\"\n if not newer(name, output):\n continue\n self.announce(f\"compiling {name} -> {output}\", level=log.INFO)\n with open(name, \"rb\") as pofile, open(output, \"wb\") as mofile:\n convertmo(pofile, mofile, None)\n\n\nclass WeblateBuild(build):\n \"\"\"Override the default build with new subcommands.\"\"\"\n\n # The build_mo has to be before build_data\n sub_commands = [\n (\"build_mo\", lambda self: True), # noqa: ARG005\n *build.sub_commands,\n ]\n\n\nsetup(\n cmdclass={\"build_py\": WeblateBuildPy, \"build_mo\": BuildMo, \"build\": WeblateBuild},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nimport os\nfrom distutils import log\nfrom distutils.core import Command\nfrom glob import glob\nfrom itertools import chain\n\nfrom setuptools import setup\nfrom setuptools.command.build import build\nfrom setuptools.command.build_py import build_py\nfrom setuptools.modified import newer\nfrom translate.tools.pocompile import convertmo\n\nLOCALE_MASKS = [\n \"weblate/locale/*/LC_MESSAGES/*.po\",\n]\n\n\nclass WeblateBuildPy(build_py):\n def find_package_modules(self, package, package_dir):\n \"\"\"Filter settings.py from built module.\"\"\"\n result = super().find_package_modules(package, package_dir)\n return [item for item in result if item[2] != \"weblate/settings.py\"]\n\n\nclass BuildMo(Command):\n description = \"update MO files to match PO\"\n user_options = []\n\n def initialize_options(self) -> None:\n self.build_base = None\n\n def finalize_options(self) -> None:\n self.set_undefined_options(\"build\", (\"build_base\", \"build_base\"))\n\n def run(self) -> None:\n for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):\n output = os.path.splitext(name)[0] + \".mo\"\n if not newer(name, output):\n continue\n self.announce(f\"compiling {name} -> {output}\", level=log.INFO)\n with open(name, \"rb\") as pofile, open(output, \"wb\") as mofile:\n convertmo(pofile, mofile, None)\n\n\nclass WeblateBuild(build):\n \"\"\"Override the default build with new subcommands.\"\"\"\n\n # The build_mo has to be before build_data\n sub_commands = [\n (\"build_mo\", lambda self: True), # noqa: ARG005\n *build.sub_commands,\n ]\n\n\nsetup(\n cmdclass={\"build_py\": WeblateBuildPy, \"build_mo\": BuildMo, \"build\": WeblateBuild},\n)\n", "path": "setup.py"}]} | 1,470 | 105 |
gh_patches_debug_8391 | rasdani/github-patches | git_diff | scrapy__scrapy-3377 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrapy check exit code on exception
When I run `scrapy check` and a contract raises an unhandled exception, the command exits with a successful exit code `0`.
```
$ scrapy check $SPIDER_NAME
... some stuff ...
Ran 0 contracts in 0.000s
OK
$ echo $?
0
```
Is this intended, or should I fix it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/contracts/__init__.py`
Content:
```
1 import sys
2 import re
3 from functools import wraps
4 from inspect import getmembers
5 from unittest import TestCase
6
7 from scrapy.http import Request
8 from scrapy.utils.spider import iterate_spider_output
9 from scrapy.utils.python import get_spec
10
11
12 class ContractsManager(object):
13 contracts = {}
14
15 def __init__(self, contracts):
16 for contract in contracts:
17 self.contracts[contract.name] = contract
18
19 def tested_methods_from_spidercls(self, spidercls):
20 methods = []
21 for key, value in getmembers(spidercls):
22 if (callable(value) and value.__doc__ and
23 re.search(r'^\s*@', value.__doc__, re.MULTILINE)):
24 methods.append(key)
25
26 return methods
27
28 def extract_contracts(self, method):
29 contracts = []
30 for line in method.__doc__.split('\n'):
31 line = line.strip()
32
33 if line.startswith('@'):
34 name, args = re.match(r'@(\w+)\s*(.*)', line).groups()
35 args = re.split(r'\s+', args)
36
37 contracts.append(self.contracts[name](method, *args))
38
39 return contracts
40
41 def from_spider(self, spider, results):
42 requests = []
43 for method in self.tested_methods_from_spidercls(type(spider)):
44 bound_method = spider.__getattribute__(method)
45 requests.append(self.from_method(bound_method, results))
46
47 return requests
48
49 def from_method(self, method, results):
50 contracts = self.extract_contracts(method)
51 if contracts:
52 request_cls = Request
53 for contract in contracts:
54 if contract.request_cls is not None:
55 request_cls = contract.request_cls
56
57 # calculate request args
58 args, kwargs = get_spec(request_cls.__init__)
59 kwargs['callback'] = method
60 for contract in contracts:
61 kwargs = contract.adjust_request_args(kwargs)
62
63 args.remove('self')
64
65 # check if all positional arguments are defined in kwargs
66 if set(args).issubset(set(kwargs)):
67 request = request_cls(**kwargs)
68
69 # execute pre and post hooks in order
70 for contract in reversed(contracts):
71 request = contract.add_pre_hook(request, results)
72 for contract in contracts:
73 request = contract.add_post_hook(request, results)
74
75 self._clean_req(request, method, results)
76 return request
77
78 def _clean_req(self, request, method, results):
79 """ stop the request from returning objects and records any errors """
80
81 cb = request.callback
82
83 @wraps(cb)
84 def cb_wrapper(response):
85 try:
86 output = cb(response)
87 output = list(iterate_spider_output(output))
88 except:
89 case = _create_testcase(method, 'callback')
90 results.addError(case, sys.exc_info())
91
92 def eb_wrapper(failure):
93 case = _create_testcase(method, 'errback')
94 exc_info = failure.type, failure.value, failure.getTracebackObject()
95 results.addError(case, exc_info)
96
97 request.callback = cb_wrapper
98 request.errback = eb_wrapper
99
100
101 class Contract(object):
102 """ Abstract class for contracts """
103 request_cls = None
104
105 def __init__(self, method, *args):
106 self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
107 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)
108 self.args = args
109
110 def add_pre_hook(self, request, results):
111 if hasattr(self, 'pre_process'):
112 cb = request.callback
113
114 @wraps(cb)
115 def wrapper(response):
116 try:
117 results.startTest(self.testcase_pre)
118 self.pre_process(response)
119 results.stopTest(self.testcase_pre)
120 except AssertionError:
121 results.addFailure(self.testcase_pre, sys.exc_info())
122 except Exception:
123 results.addError(self.testcase_pre, sys.exc_info())
124 else:
125 results.addSuccess(self.testcase_pre)
126 finally:
127 return list(iterate_spider_output(cb(response)))
128
129 request.callback = wrapper
130
131 return request
132
133 def add_post_hook(self, request, results):
134 if hasattr(self, 'post_process'):
135 cb = request.callback
136
137 @wraps(cb)
138 def wrapper(response):
139 output = list(iterate_spider_output(cb(response)))
140 try:
141 results.startTest(self.testcase_post)
142 self.post_process(output)
143 results.stopTest(self.testcase_post)
144 except AssertionError:
145 results.addFailure(self.testcase_post, sys.exc_info())
146 except Exception:
147 results.addError(self.testcase_post, sys.exc_info())
148 else:
149 results.addSuccess(self.testcase_post)
150 finally:
151 return output
152
153 request.callback = wrapper
154
155 return request
156
157 def adjust_request_args(self, args):
158 return args
159
160
161 def _create_testcase(method, desc):
162 spider = method.__self__.name
163
164 class ContractTestCase(TestCase):
165 def __str__(_self):
166 return "[%s] %s (%s)" % (spider, method.__name__, desc)
167
168 name = '%s_%s' % (spider, method.__name__)
169 setattr(ContractTestCase, name, lambda x: x)
170 return ContractTestCase(name)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py
--- a/scrapy/contracts/__init__.py
+++ b/scrapy/contracts/__init__.py
@@ -42,7 +42,11 @@
requests = []
for method in self.tested_methods_from_spidercls(type(spider)):
bound_method = spider.__getattribute__(method)
- requests.append(self.from_method(bound_method, results))
+ try:
+ requests.append(self.from_method(bound_method, results))
+ except Exception:
+ case = _create_testcase(bound_method, 'contract')
+ results.addError(case, sys.exc_info())
return requests
| {"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -42,7 +42,11 @@\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n- requests.append(self.from_method(bound_method, results))\n+ try:\n+ requests.append(self.from_method(bound_method, results))\n+ except Exception:\n+ case = _create_testcase(bound_method, 'contract')\n+ results.addError(case, sys.exc_info())\n \n return requests\n", "issue": "scrapy check exit code on exception\nWhen I run `scrapy check` and a contract raises unhandled exception, the command exits with successful code `0`.\r\n\r\n```\r\n$ scrapy check $SPIDER_NAME\r\n... some stuff ...\r\nRan 0 contracts in 0.000s\r\nOK\r\n$ echo $?\r\n0\r\n```\r\n\r\nIs this intended, or should I fix it?\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n request_cls = Request\n for contract in contracts:\n if contract.request_cls is not None:\n request_cls = contract.request_cls\n\n # calculate request args\n args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n args.remove('self')\n\n # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n request = request_cls(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, 
failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n request_cls = None\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}], "after_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n try:\n requests.append(self.from_method(bound_method, results))\n except Exception:\n case = _create_testcase(bound_method, 'contract')\n results.addError(case, sys.exc_info())\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n request_cls = Request\n for contract 
in contracts:\n if contract.request_cls is not None:\n request_cls = contract.request_cls\n\n # calculate request args\n args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n args.remove('self')\n\n # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n request = request_cls(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n request_cls = None\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]} | 1,876 | 154 |
gh_patches_debug_5037 | rasdani/github-patches | git_diff | facebookresearch__hydra-793 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] MISSING for Object Conf cls argument
# 🐛 Bug
OmegaConf cls argument should not be a mandatory value if target is defined. Can we change this to be an optional value with None being the default?
** Stack trace/error message **
```
omegaconf.errors.MissingMandatoryValue: Missing mandatory value: scheduler.cls
full_key: scheduler.cls
reference_type=ObjectConf
object_type=ObjectConf
```
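For illustration, the change being requested could look roughly like the sketch below. The `Optional` typing and the `None` default are my suggestion only, not a confirmed design for Hydra:

```python
from dataclasses import dataclass, field
from typing import Any, Dict, Optional

from omegaconf import MISSING


@dataclass
class ObjectConf(Dict[str, Any]):
    # class, class method or function name
    target: str = MISSING

    # parameters to pass to the target when calling it
    params: Any = field(default_factory=dict)

    # deprecated alias for target: optional with a None default instead of MISSING
    cls: Optional[str] = None
```

With a default of `None`, a config that only sets `target` would no longer trigger the `MissingMandatoryValue` error shown above.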
## System information
- **Hydra Version** : 1.0.0rc2
- **Python version** : 3.7.7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/types.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass, field
3 from enum import Enum
4 from typing import Any, Callable, Dict
5
6 from omegaconf import MISSING
7
8 TaskFunction = Callable[[Any], Any]
9
10
11 @dataclass
12 # This extends Dict[str, Any] to allow for the deprecated "class" field.
13 # Once support for class field removed this can stop extending Dict.
14 class ObjectConf(Dict[str, Any]):
15 # class, class method or function name
16 target: str = MISSING
17
18 # parameters to pass to cls when calling it
19 params: Any = field(default_factory=dict)
20
21 # cls is deprecated, use target, cls will be removed in Hydra 1.1
22 cls: str = MISSING
23
24 # class is deprecated, use target, class will be removed in Hydra 1.1
25 # (class is Python keyword and is only supported through DictConfig)
26 # class: str = MISSING
27
28
29 class RunMode(Enum):
30 RUN = 1
31 MULTIRUN = 2
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/types.py b/hydra/types.py
--- a/hydra/types.py
+++ b/hydra/types.py
@@ -18,13 +18,6 @@
# parameters to pass to cls when calling it
params: Any = field(default_factory=dict)
- # cls is deprecated, use target, cls will be removed in Hydra 1.1
- cls: str = MISSING
-
- # class is deprecated, use target, class will be removed in Hydra 1.1
- # (class is Python keyword and is only supported through DictConfig)
- # class: str = MISSING
-
class RunMode(Enum):
RUN = 1
| {"golden_diff": "diff --git a/hydra/types.py b/hydra/types.py\n--- a/hydra/types.py\n+++ b/hydra/types.py\n@@ -18,13 +18,6 @@\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n \n- # cls is deprecated, use target, cls will be removed in Hydra 1.1\n- cls: str = MISSING\n-\n- # class is deprecated, use target, class will be removed in Hydra 1.1\n- # (class is Python keyword and is only supported through DictConfig)\n- # class: str = MISSING\n-\n \n class RunMode(Enum):\n RUN = 1\n", "issue": "[Bug] MISSING for Object Conf cls argument\n# \ud83d\udc1b Bug\r\n\r\nOmegaConf cls argument should not be a mandatory value if target is defined. Can we change this to be an optional value with None being the default?\r\n \r\n** Stack trace/error message **\r\n```\r\nomegaconf.errors.MissingMandatoryValue: Missing mandatory value: scheduler.cls\r\n full_key: scheduler.cls\r\n reference_type=ObjectConf\r\n object_type=ObjectConf\r\n```\r\n\r\n\r\n## System information\r\n- **Hydra Version** : 1.0.0rc2\r\n- **Python version** : 3.7.7\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Callable, Dict\n\nfrom omegaconf import MISSING\n\nTaskFunction = Callable[[Any], Any]\n\n\n@dataclass\n# This extends Dict[str, Any] to allow for the deprecated \"class\" field.\n# Once support for class field removed this can stop extending Dict.\nclass ObjectConf(Dict[str, Any]):\n # class, class method or function name\n target: str = MISSING\n\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n\n # cls is deprecated, use target, cls will be removed in Hydra 1.1\n cls: str = MISSING\n\n # class is deprecated, use target, class will be removed in Hydra 1.1\n # (class is Python keyword and is only supported through DictConfig)\n # class: str = MISSING\n\n\nclass RunMode(Enum):\n RUN = 1\n MULTIRUN = 2\n", "path": "hydra/types.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Callable, Dict\n\nfrom omegaconf import MISSING\n\nTaskFunction = Callable[[Any], Any]\n\n\n@dataclass\n# This extends Dict[str, Any] to allow for the deprecated \"class\" field.\n# Once support for class field removed this can stop extending Dict.\nclass ObjectConf(Dict[str, Any]):\n # class, class method or function name\n target: str = MISSING\n\n # parameters to pass to cls when calling it\n params: Any = field(default_factory=dict)\n\n\nclass RunMode(Enum):\n RUN = 1\n MULTIRUN = 2\n", "path": "hydra/types.py"}]} | 678 | 156 |
gh_patches_debug_9165 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-1921 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] No module named 'fcntl' while importing the package
Hey,
Working on Windows 11 with Python 3.7, I tried importing the package.
I got the error in the title. Is there a way around it, since `fcntl` is exclusive to Linux?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/utils/debug.py`
Content:
```
1 """ debug utils """
2
3 import fcntl
4
5 # for debug purposes map module and param objects to their fully qualified names
6 module_names = {}
7 param_names = {}
8
9
10 def debug_extract_module_and_param_names(model):
11 # extract the fully qualified names as soon as the model is acquired
12 global module_names
13 global param_names
14 # XXX: can probably make a map of param2module and vice-versa
15 module_names = {module: name for name, module in model.named_modules()}
16 param_names = {param: name for name, param in model.named_parameters()}
17
18
19 def debug_module2name(module):
20 if module in module_names:
21 return module_names[module]
22 else:
23 return "unknown"
24
25
26 def debug_module2name_id(module):
27 return f"name={debug_module2name(module)} id={module.id}"
28
29
30 def debug_module2name_class(module):
31 return f"name={debug_module2name(module)} {module.__class__.__name__}"
32
33
34 def debug_param2name(param):
35 if param in param_names:
36 return param_names[param]
37 else:
38 return "unknown"
39
40
41 def debug_param2name_id(param):
42 return f"name={debug_param2name(param)} id={param.ds_id}"
43
44
45 def debug_param2name_id_shape(param):
46 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}"
47
48
49 def debug_param2name_id_shape_device(param):
50 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}"
51
52
53 def debug_param2name_id_numel(param):
54 return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}"
55
56
57 def debug_param2name_id_shape_status(param):
58 return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}"
59
60
61 def printflock(*msgs):
62 """
63
64 For printing messages for all concurrent gpus w/o getting interleaved text.
65
66 This is useful when debugging issues where multi-gpus don't sync.
67
68 1. Enable the force debug in say partitioning and zero3 files
69 2. Override the usual versions with ::
70
71 def print_rank_0(message, debug=False, force=False):
72 rank = torch.distributed.get_rank()
73 printflock(f"[{rank}] {message}")
74 3. run the program and you get both logs non-interleaved
75
76 But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper
77 function might be more useful, as it's easier to send each log stream into a separate file and
78 then compare those.
79
80 """
81
82 with open(__file__, "r") as fh:
83 fcntl.flock(fh, fcntl.LOCK_EX)
84 try:
85 print(*msgs)
86 finally:
87 fcntl.flock(fh, fcntl.LOCK_UN)
88
89
90 fh = None
91
92
93 def log_rank_file(rank, *msgs):
94 """
95 Print to a log file of the given rank
96
97 This is useful for debugging hanging in sync processes. Here is a possible workflow:
98
99 1. Enable the force debug in say partitioning and zero3 files
100 2. Override the usual versions of print_rank_0 in those files with ::
101
102 def print_rank_0(message, debug=False, force=False):
103 rank = torch.distributed.get_rank()
104 log_rank_file(rank, message)
105
106 3. run the program
107 4. fix up the expected differences, e.g. different cuda numbers ::
108
109 perl -pi -e 's|cuda:1|cuda:0|' log_rank_*
110
111 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same
112 work (e.g. when some layers get conditionally skipped on one gpu but not all)
113
114 diff -u log_rank_0.txt log_rank_1.txt | less
115
116 """
117 global fh
118 if fh is None:
119 fh = open(f"log_rank_{rank}.txt", "w")
120 for m in msgs:
121 fh.write(f"{m}\n")
122 fh.flush()
123
124
125 def print_backward_tensors(tensor):
126 def _print_bwd_tensors(grad_fn):
127 print(f"Backward tensors in {grad_fn}")
128 for funcs in grad_fn.next_functions:
129 if funcs[0]:
130 try:
131 tensor = getattr(funcs[0], 'variable')
132 print(funcs[0])
133 print(
134 f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}"
135 )
136 except AttributeError as e:
137 _print_bwd_tensors(funcs[0])
138
139 if hasattr(tensor, 'grad_fn'):
140 _print_bwd_tensors(tensor.grad_fn)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py
--- a/deepspeed/utils/debug.py
+++ b/deepspeed/utils/debug.py
@@ -1,6 +1,7 @@
""" debug utils """
-import fcntl
+# For lazy import with printflock()
+fcntl = None
# for debug purposes map module and param objects to their fully qualified names
module_names = {}
@@ -78,6 +79,9 @@
then compare those.
"""
+ global fcntl
+ if fcntl == None:
+ import fcntl
with open(__file__, "r") as fh:
fcntl.flock(fh, fcntl.LOCK_EX)
| {"golden_diff": "diff --git a/deepspeed/utils/debug.py b/deepspeed/utils/debug.py\n--- a/deepspeed/utils/debug.py\n+++ b/deepspeed/utils/debug.py\n@@ -1,6 +1,7 @@\n \"\"\" debug utils \"\"\"\n \n-import fcntl\n+# For lazy import with printflock()\n+fcntl = None\n \n # for debug purposes map module and param objects to their fully qualified names\n module_names = {}\n@@ -78,6 +79,9 @@\n then compare those.\n \n \"\"\"\n+ global fcntl\n+ if fcntl == None:\n+ import fcntl\n \n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n", "issue": "[BUG] No module named 'fcntl' while importing the package\nHey,\r\nWorking on windows 11, Python 3.7 and tried importing the package.\r\nGot the error in the title, is there a way around it since it's exclusive to Linux?\r\n\r\n\r\n \n", "before_files": [{"content": "\"\"\" debug utils \"\"\"\n\nimport fcntl\n\n# for debug purposes map module and param objects to their fully qualified names\nmodule_names = {}\nparam_names = {}\n\n\ndef debug_extract_module_and_param_names(model):\n # extract the fully qualified names as soon as the model is acquired\n global module_names\n global param_names\n # XXX: can probably make a map of param2module and vice-versa\n module_names = {module: name for name, module in model.named_modules()}\n param_names = {param: name for name, param in model.named_parameters()}\n\n\ndef debug_module2name(module):\n if module in module_names:\n return module_names[module]\n else:\n return \"unknown\"\n\n\ndef debug_module2name_id(module):\n return f\"name={debug_module2name(module)} id={module.id}\"\n\n\ndef debug_module2name_class(module):\n return f\"name={debug_module2name(module)} {module.__class__.__name__}\"\n\n\ndef debug_param2name(param):\n if param in param_names:\n return param_names[param]\n else:\n return \"unknown\"\n\n\ndef debug_param2name_id(param):\n return f\"name={debug_param2name(param)} id={param.ds_id}\"\n\n\ndef debug_param2name_id_shape(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}\"\n\n\ndef debug_param2name_id_shape_device(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}\"\n\n\ndef debug_param2name_id_numel(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}\"\n\n\ndef debug_param2name_id_shape_status(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}\"\n\n\ndef printflock(*msgs):\n \"\"\"\n\n For printing messages for all concurrent gpus w/o getting interleaved text.\n\n This is useful when debugging issues where multi-gpus don't sync.\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n printflock(f\"[{rank}] {message}\")\n 3. run the program and you get both logs non-interleaved\n\n But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper\n function might be more useful, as it's easier to send each log stream into a separate file and\n then compare those.\n\n \"\"\"\n\n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n try:\n print(*msgs)\n finally:\n fcntl.flock(fh, fcntl.LOCK_UN)\n\n\nfh = None\n\n\ndef log_rank_file(rank, *msgs):\n \"\"\"\n Print to a log file of the given rank\n\n This is useful for debugging hanging in sync processes. 
Here is a possible workflow:\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions of print_rank_0 in those files with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n log_rank_file(rank, message)\n\n 3. run the program\n 4. fix up the expected differences, e.g. different cuda numbers ::\n\n perl -pi -e 's|cuda:1|cuda:0|' log_rank_*\n\n 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same\n work (e.g. when some layers get conditionally skipped on one gpu but not all)\n\n diff -u log_rank_0.txt log_rank_1.txt | less\n\n \"\"\"\n global fh\n if fh is None:\n fh = open(f\"log_rank_{rank}.txt\", \"w\")\n for m in msgs:\n fh.write(f\"{m}\\n\")\n fh.flush()\n\n\ndef print_backward_tensors(tensor):\n def _print_bwd_tensors(grad_fn):\n print(f\"Backward tensors in {grad_fn}\")\n for funcs in grad_fn.next_functions:\n if funcs[0]:\n try:\n tensor = getattr(funcs[0], 'variable')\n print(funcs[0])\n print(\n f\"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}\"\n )\n except AttributeError as e:\n _print_bwd_tensors(funcs[0])\n\n if hasattr(tensor, 'grad_fn'):\n _print_bwd_tensors(tensor.grad_fn)\n", "path": "deepspeed/utils/debug.py"}], "after_files": [{"content": "\"\"\" debug utils \"\"\"\n\n# For lazy import with printflock()\nfcntl = None\n\n# for debug purposes map module and param objects to their fully qualified names\nmodule_names = {}\nparam_names = {}\n\n\ndef debug_extract_module_and_param_names(model):\n # extract the fully qualified names as soon as the model is acquired\n global module_names\n global param_names\n # XXX: can probably make a map of param2module and vice-versa\n module_names = {module: name for name, module in model.named_modules()}\n param_names = {param: name for name, param in model.named_parameters()}\n\n\ndef debug_module2name(module):\n if module in module_names:\n return module_names[module]\n else:\n return \"unknown\"\n\n\ndef debug_module2name_id(module):\n return f\"name={debug_module2name(module)} id={module.id}\"\n\n\ndef debug_module2name_class(module):\n return f\"name={debug_module2name(module)} {module.__class__.__name__}\"\n\n\ndef debug_param2name(param):\n if param in param_names:\n return param_names[param]\n else:\n return \"unknown\"\n\n\ndef debug_param2name_id(param):\n return f\"name={debug_param2name(param)} id={param.ds_id}\"\n\n\ndef debug_param2name_id_shape(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}\"\n\n\ndef debug_param2name_id_shape_device(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}\"\n\n\ndef debug_param2name_id_numel(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}\"\n\n\ndef debug_param2name_id_shape_status(param):\n return f\"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}\"\n\n\ndef printflock(*msgs):\n \"\"\"\n\n For printing messages for all concurrent gpus w/o getting interleaved text.\n\n This is useful when debugging issues where multi-gpus don't sync.\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n printflock(f\"[{rank}] {message}\")\n 3. 
run the program and you get both logs non-interleaved\n\n But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper\n function might be more useful, as it's easier to send each log stream into a separate file and\n then compare those.\n\n \"\"\"\n global fcntl\n if fcntl == None:\n import fcntl\n\n with open(__file__, \"r\") as fh:\n fcntl.flock(fh, fcntl.LOCK_EX)\n try:\n print(*msgs)\n finally:\n fcntl.flock(fh, fcntl.LOCK_UN)\n\n\nfh = None\n\n\ndef log_rank_file(rank, *msgs):\n \"\"\"\n Print to a log file of the given rank\n\n This is useful for debugging hanging in sync processes. Here is a possible workflow:\n\n 1. Enable the force debug in say partitioning and zero3 files\n 2. Override the usual versions of print_rank_0 in those files with ::\n\n def print_rank_0(message, debug=False, force=False):\n rank = torch.distributed.get_rank()\n log_rank_file(rank, message)\n\n 3. run the program\n 4. fix up the expected differences, e.g. different cuda numbers ::\n\n perl -pi -e 's|cuda:1|cuda:0|' log_rank_*\n\n 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same\n work (e.g. when some layers get conditionally skipped on one gpu but not all)\n\n diff -u log_rank_0.txt log_rank_1.txt | less\n\n \"\"\"\n global fh\n if fh is None:\n fh = open(f\"log_rank_{rank}.txt\", \"w\")\n for m in msgs:\n fh.write(f\"{m}\\n\")\n fh.flush()\n\n\ndef print_backward_tensors(tensor):\n def _print_bwd_tensors(grad_fn):\n print(f\"Backward tensors in {grad_fn}\")\n for funcs in grad_fn.next_functions:\n if funcs[0]:\n try:\n tensor = getattr(funcs[0], 'variable')\n print(funcs[0])\n print(\n f\"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}\"\n )\n except AttributeError as e:\n _print_bwd_tensors(funcs[0])\n\n if hasattr(tensor, 'grad_fn'):\n _print_bwd_tensors(tensor.grad_fn)\n", "path": "deepspeed/utils/debug.py"}]} | 1,753 | 152 |
gh_patches_debug_31923 | rasdani/github-patches | git_diff | alpa-projects__alpa-511 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding the `pjit` in the comparison
Some people are more familiar with using model parallelism via [`pjit`](https://github.com/google/jax/blob/main/jax/experimental/pjit.py). What about adding one more row [here](https://github.com/alpa-projects/alpa/blob/main/docs/gallery/tutorials/alpa_vs_pmap.py#L46-L52)?
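As a sketch, the extra row could look something like the following (the yes/no entries are my rough understanding of `pjit` and would need checking by the maintainers):

```python
# ================ ================ ==================== ==================== =========
# Transformation   Data Parallelism Operator Parallelism Pipeline Parallelism Automated
# ================ ================ ==================== ==================== =========
# alpa.parallelize yes              yes                  yes                  yes
# pjit             yes              yes                  no                   no
# ================ ================ ==================== ==================== =========
```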
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/gallery/tutorials/alpa_vs_pmap.py`
Content:
```
1 """
2 Differences between alpa.parallelize and jax.pmap
3 =================================================
4
5 The most common tool for parallelization or distributed computing in jax is
6 `pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_.
7 With several lines of code change, we can use ``pmap`` for data parallel
8 training. However, we cannot use ``pmap`` for model parallel training,
9 which is required for training large models with billions of parameters.
10
11 On the contrary, ``alpa.parallelize`` supports both data parallelism and
12 model parallelism in an automatic way. ``alpa.parallelize`` analyzes the
13 jax computational graph and picks the best strategy.
14 If data parallelism is more suitable, ``alpa.parallelize`` achieves the same
15 performance as ``pmap`` but with less code change.
16 If model parallelism is more suitable, ``alpa.parallelize`` achieves better performance
17 and uses less memory than ``pmap``.
18
19 In this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two
20 workloads. A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap``
21 is also attached at the end of the article.
22 """
23
24 ################################################################################
25 # When data parallelism is prefered
26 # ---------------------------------
27
28 # TODO
29
30 ################################################################################
31 # When model parallelism is prefered
32 # ----------------------------------
33
34 # TODO
35
36 ################################################################################
37 # Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``
38 # ------------------------------------------------------
39 # Besides ``pmap``, jax also provides
40 # `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_
41 # for more advanced parallelization.
42 # The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.
43 # In summary, ``alpa.parallelize`` supports more parallelism techniques in a
44 # more automatic way.
45 #
46 # ================ ================ ==================== ==================== =========
47 # Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated
48 # ================ ================ ==================== ==================== =========
49 # alpa.parallelize yes yes yes yes
50 # pmap yes no no no
51 # xmap yes yes no no
52 # ================ ================ ==================== ==================== =========
53 #
54 # .. note::
55 # Operator parallelism and pipeline parallelism are two forms of model parallelism.
56 # Operator parallelism partitions the work in a single operator and assigns them
57 # to different devices. Pipeline parallelism partitions the computational
58 # graphs and assigns different operators to different devices.
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/gallery/tutorials/alpa_vs_pmap.py b/docs/gallery/tutorials/alpa_vs_pmap.py
--- a/docs/gallery/tutorials/alpa_vs_pmap.py
+++ b/docs/gallery/tutorials/alpa_vs_pmap.py
@@ -34,14 +34,15 @@
# TODO
################################################################################
-# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``
-# ------------------------------------------------------
+# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit``
+# -----------------------------------------------------------------
# Besides ``pmap``, jax also provides
-# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_
+# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and
+# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_
# for more advanced parallelization.
-# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.
-# In summary, ``alpa.parallelize`` supports more parallelism techniques in a
-# more automatic way.
+# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap``
+# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism
+# techniques in a more automatic way.
#
# ================ ================ ==================== ==================== =========
# Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated
@@ -49,6 +50,7 @@
# alpa.parallelize yes yes yes yes
# pmap yes no no no
# xmap yes yes no no
+# pjit yes yes no no
# ================ ================ ==================== ==================== =========
#
# .. note::
| {"golden_diff": "diff --git a/docs/gallery/tutorials/alpa_vs_pmap.py b/docs/gallery/tutorials/alpa_vs_pmap.py\n--- a/docs/gallery/tutorials/alpa_vs_pmap.py\n+++ b/docs/gallery/tutorials/alpa_vs_pmap.py\n@@ -34,14 +34,15 @@\n # TODO\n \n ################################################################################\n-# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``\n-# ------------------------------------------------------\n+# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit``\n+# -----------------------------------------------------------------\n # Besides ``pmap``, jax also provides\n-# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_\n+# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and \n+# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_\n # for more advanced parallelization.\n-# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.\n-# In summary, ``alpa.parallelize`` supports more parallelism techniques in a\n-# more automatic way.\n+# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap`` \n+# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism \n+# techniques in a more automatic way.\n #\n # ================ ================ ==================== ==================== =========\n # Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n@@ -49,6 +50,7 @@\n # alpa.parallelize yes yes yes yes\n # pmap yes no no no\n # xmap yes yes no no\n+# pjit yes yes no no\n # ================ ================ ==================== ==================== =========\n #\n # .. note::\n", "issue": "Adding the `pjit` in the comparison\nSome people are more familiar with using model parallel via [`pjit`](https://github.com/google/jax/blob/main/jax/experimental/pjit.py). What about adding one more rows [here](https://github.com/alpa-projects/alpa/blob/main/docs/gallery/tutorials/alpa_vs_pmap.py#L46-L52)?\n", "before_files": [{"content": "\"\"\"\nDifferences between alpa.parallelize and jax.pmap\n=================================================\n\nThe most common tool for parallelization or distributed computing in jax is\n`pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_.\nWith several lines of code change, we can use ``pmap`` for data parallel\ntraining. However, we cannot use ``pmap`` for model parallel training,\nwhich is required for training large models with billions of parameters.\n\nOn the contrary, ``alpa.parallelize`` supports both data parallelism and\nmodel parallelism in an automatic way. ``alpa.parallelize`` analyzes the\njax computational graph and picks the best strategy.\nIf data parallelism is more suitable, ``alpa.parallelize`` achieves the same\nperformance as ``pmap`` but with less code change.\nIf model parallelism is more suitable, ``alpa.parallelize`` achieves better performance\nand uses less memory than ``pmap``.\n\nIn this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two\nworkloads. 
A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap``\nis also attached at the end of the article.\n\"\"\"\n\n################################################################################\n# When data parallelism is prefered\n# ---------------------------------\n\n# TODO\n\n################################################################################\n# When model parallelism is prefered\n# ----------------------------------\n\n# TODO\n\n################################################################################\n# Comparing ``alpa.parallelize``, ``pmap``, and ``xmap``\n# ------------------------------------------------------\n# Besides ``pmap``, jax also provides\n# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_\n# for more advanced parallelization.\n# The table below compares the features of ``alpa.parallelize``, ``pmap``, and ``xmap``.\n# In summary, ``alpa.parallelize`` supports more parallelism techniques in a\n# more automatic way.\n#\n# ================ ================ ==================== ==================== =========\n# Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n# ================ ================ ==================== ==================== =========\n# alpa.parallelize yes yes yes yes\n# pmap yes no no no\n# xmap yes yes no no\n# ================ ================ ==================== ==================== =========\n#\n# .. note::\n# Operator parallelism and pipeline parallelism are two forms of model parallelism.\n# Operator parallelism partitions the work in a single operator and assigns them\n# to different devices. Pipeline parallelism partitions the computational\n# graphs and assigns different operators to different devices.\n", "path": "docs/gallery/tutorials/alpa_vs_pmap.py"}], "after_files": [{"content": "\"\"\"\nDifferences between alpa.parallelize and jax.pmap\n=================================================\n\nThe most common tool for parallelization or distributed computing in jax is\n`pmap <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`_.\nWith several lines of code change, we can use ``pmap`` for data parallel\ntraining. However, we cannot use ``pmap`` for model parallel training,\nwhich is required for training large models with billions of parameters.\n\nOn the contrary, ``alpa.parallelize`` supports both data parallelism and\nmodel parallelism in an automatic way. ``alpa.parallelize`` analyzes the\njax computational graph and picks the best strategy.\nIf data parallelism is more suitable, ``alpa.parallelize`` achieves the same\nperformance as ``pmap`` but with less code change.\nIf model parallelism is more suitable, ``alpa.parallelize`` achieves better performance\nand uses less memory than ``pmap``.\n\nIn this tutorial, we are going to compare ``alpa.parallelize`` and ``pmap`` on two\nworkloads. 
A more detailed comparison among ``alpa.parallelize``, ``pmap``, and ``xmap``\nis also attached at the end of the article.\n\"\"\"\n\n################################################################################\n# When data parallelism is prefered\n# ---------------------------------\n\n# TODO\n\n################################################################################\n# When model parallelism is prefered\n# ----------------------------------\n\n# TODO\n\n################################################################################\n# Comparing ``alpa.parallelize``, ``pmap``, ``xmap``, and ``pjit``\n# -----------------------------------------------------------------\n# Besides ``pmap``, jax also provides\n# `xmap <https://jax.readthedocs.io/en/latest/notebooks/xmap_tutorial.html>`_ and \n# `pjit <https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html>`_\n# for more advanced parallelization.\n# The table below compares the features of ``alpa.parallelize``, ``pmap``, ``xmap`` \n# and ``pjit``. In summary, ``alpa.parallelize`` supports more parallelism \n# techniques in a more automatic way.\n#\n# ================ ================ ==================== ==================== =========\n# Transformation Data Parallelism Operator Parallelism Pipeline Parallelism Automated\n# ================ ================ ==================== ==================== =========\n# alpa.parallelize yes yes yes yes\n# pmap yes no no no\n# xmap yes yes no no\n# pjit yes yes no no\n# ================ ================ ==================== ==================== =========\n#\n# .. note::\n# Operator parallelism and pipeline parallelism are two forms of model parallelism.\n# Operator parallelism partitions the work in a single operator and assigns them\n# to different devices. Pipeline parallelism partitions the computational\n# graphs and assigns different operators to different devices.\n", "path": "docs/gallery/tutorials/alpa_vs_pmap.py"}]} | 1,017 | 424 |
gh_patches_debug_19670 | rasdani/github-patches | git_diff | google__flax-2842 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lm1b sampler update is out of bound?
Hi,
_Disclaimer: I'm not confident if this is some intended behavior._
I think in the lm1b example, the prediction sampler could write to indices out of bound of the result.
The particular offending line is: https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L109.
Since `i <= max_decode_len` according to https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L75, and since `prompt_inputs.shape[1] == max_decode_len`, the last iteration (or two iterations?) of the while loop would overwrite `prompt_inputs[..., max_decode_len - 1]` (due to XLA semantics).
It's either a subtle bug (it won't raise or cause an error), or there's something I'm not understanding about the sampling procedure.
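To spell out my reading (assumed shapes, not a verified trace):

```python
# Suppose max_decode_len == prompt_inputs.shape[1] == 8.
# The loop condition `i < max_decode_len` lets the final iteration run with i == 7.
# That iteration writes the sampled token with
#     lax.dynamic_update_slice(sequences, next_token_or_endpad, (0, i + 1))
# i.e. at column i + 1 == 8, one past the last valid column index (7).
# dynamic_update_slice clamps out-of-bounds start indices, so the write silently
# lands at column 7 instead, overwriting whatever token was already there.
```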
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/lm1b/temperature_sampler.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Fast decoding routines for inference from a trained language model."""
16
17 from jax import lax
18 from jax import random
19 import jax.numpy as jnp
20
21
22 # Constants
23 # The default End-of-Sentence token id is 2 (SentencePiece).
24 EOS_ID = 2
25
26
27 def temperature_sample(prompt_inputs,
28 init_cache,
29 tokens_to_logits,
30 prng_key,
31 temperature=1.0,
32 topk=20,
33 eos_token=EOS_ID):
34 """Temperature sampling for language model generation.
35
36 Args:
37 prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.
38 init_cache: flax attention cache.
39 tokens_to_logits: fast autoregressive decoder function taking single token
40 slices and cache and returning next-token logits and updated cache.
41 prng_key: JAX PRNGKey.
42 temperature: float: sampling temperature factor. As it approaches
43 zero this becomes equivalent to greedy sampling.
44 topk: integer: if nonzero only use the top-k logits to sample next token,
45 if zero don't use any cutoff and sample from full logits over vocabulary.
46 eos_token: int: end-of-sentence token for target vocabulary.
47
48 Returns:
49 Array of sampled sequences: [batch_size, max_decode_len]
50 """
51 batch_size = prompt_inputs.shape[0]
52 max_decode_len = prompt_inputs.shape[1]
53 end_marker = jnp.array(eos_token)
54 temperature = jnp.array(temperature)
55
56 # Initialize sampling loop state.
57 # initial loop PRNGKey
58 rng0 = prng_key
59 # loop position counter.
60 i0 = jnp.array(0)
61 # per batch-item holding current token in loop.
62 token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)
63 # per batch-item state bit indicating if sentence has finished.
64 ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)
65 # (batch, length) array containing prefix prompt tokens for sampling loop
66 # as well as the generated output of newly sampled tokens.
67 sequences0 = prompt_inputs
68 # Sampling loop state is stored in a simple tuple.
69 sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)
70
71 def sampling_loop_cond_fn(state):
72 """Sampling loop termination condition."""
73 (i, _, _, _, ended, _) = state
74 # Have we reached max decoding length?
75 not_at_end = (i < max_decode_len)
76 # Have all sampled sequences reached an end marker?
77 all_sequences_ended = jnp.all(ended)
78 return not_at_end & (~all_sequences_ended)
79
80 def sampling_loop_body_fn(state):
81 """Sampling loop state update."""
82 i, sequences, cache, cur_token, ended, rng = state
83 # Split RNG for sampling.
84 rng1, rng2 = random.split(rng)
85 # Call fast-decoder model on current tokens to get next-position logits.
86 logits, new_cache = tokens_to_logits(cur_token, cache)
87 # Sample next token from logits.
88 # TODO(levskaya): add top-p "nucleus" sampling option.
89 if topk:
90 # Get top-k logits and their indices, sample within these top-k tokens.
91 topk_logits, topk_idxs = lax.top_k(logits, topk)
92 topk_token = jnp.expand_dims(random.categorical(
93 rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)
94 # Return the original indices corresponding to the sampled top-k tokens.
95 next_token = jnp.squeeze(
96 jnp.take_along_axis(topk_idxs, topk_token, axis=-1), axis=-1)
97 else:
98 next_token = random.categorical(
99 rng1, logits / temperature).astype(jnp.int32)
100 # Only use sampled tokens if we're past provided prefix tokens.
101 out_of_prompt = (sequences[:, i+1] == 0)
102 next_token = (next_token * out_of_prompt +
103 sequences[:, i+1] * ~out_of_prompt)
104 # If end-marker reached for batch item, only emit padding tokens.
105 next_token_or_endpad = (next_token[None] * ~ended)
106 ended |= (next_token_or_endpad == end_marker)
107 # Add current sampled tokens to recorded sequences.
108 new_sequences = lax.dynamic_update_slice(
109 sequences, next_token_or_endpad, (0, i+1))
110 return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)
111
112 # Run sampling loop and collect final state.
113 final_state = lax.while_loop(sampling_loop_cond_fn,
114 sampling_loop_body_fn,
115 sampling_loop_init_state)
116
117 # Pick part of the state corresponding to the sampled sequences.
118 final_sequences = final_state[1]
119 return final_sequences
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/lm1b/temperature_sampler.py b/examples/lm1b/temperature_sampler.py
--- a/examples/lm1b/temperature_sampler.py
+++ b/examples/lm1b/temperature_sampler.py
@@ -57,7 +57,7 @@
# initial loop PRNGKey
rng0 = prng_key
# loop position counter.
- i0 = jnp.array(0)
+ i0 = jnp.array(-1)
# per batch-item holding current token in loop.
token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)
# per batch-item state bit indicating if sentence has finished.
@@ -72,7 +72,7 @@
"""Sampling loop termination condition."""
(i, _, _, _, ended, _) = state
# Have we reached max decoding length?
- not_at_end = (i < max_decode_len)
+ not_at_end = (i < max_decode_len - 1)
# Have all sampled sequences reached an end marker?
all_sequences_ended = jnp.all(ended)
return not_at_end & (~all_sequences_ended)
| {"golden_diff": "diff --git a/examples/lm1b/temperature_sampler.py b/examples/lm1b/temperature_sampler.py\n--- a/examples/lm1b/temperature_sampler.py\n+++ b/examples/lm1b/temperature_sampler.py\n@@ -57,7 +57,7 @@\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n- i0 = jnp.array(0)\n+ i0 = jnp.array(-1)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n@@ -72,7 +72,7 @@\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n- not_at_end = (i < max_decode_len)\n+ not_at_end = (i < max_decode_len - 1)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n", "issue": "lm1b sampler update is out of bound?\nHi,\r\n\r\n_Disclaimer: I'm not confident if this is some intended behavior._\r\n\r\nI think in the lm1b example, the prediction sampler could write to indices out of bound of the result.\r\nThe particular offending line is: https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L109.\r\n\r\nSince `i <= max_decode_len` according to https://github.com/google/flax/blob/master/examples/lm1b/temperature_sampler.py#L75, and since `prompt_inputs.shape[1] == max_decode_len`, the last iteration (or two iterations?) of while loop would overwrite to `prompt_inputs[..., max_decode_len - 1]` (due to XLA semantics). \r\n\r\nIt's either a subtle bug (it won't raise cause error), or there's something I'm not understanding about the sampling procedure.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Fast decoding routines for inference from a trained language model.\"\"\"\n\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\n# Constants\n# The default End-of-Sentence token id is 2 (SentencePiece).\nEOS_ID = 2\n\n\ndef temperature_sample(prompt_inputs,\n init_cache,\n tokens_to_logits,\n prng_key,\n temperature=1.0,\n topk=20,\n eos_token=EOS_ID):\n \"\"\"Temperature sampling for language model generation.\n\n Args:\n prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.\n init_cache: flax attention cache.\n tokens_to_logits: fast autoregressive decoder function taking single token\n slices and cache and returning next-token logits and updated cache.\n prng_key: JAX PRNGKey.\n temperature: float: sampling temperature factor. 
As it approaches\n zero this becomes equivalent to greedy sampling.\n topk: integer: if nonzero only use the top-k logits to sample next token,\n if zero don't use any cutoff and sample from full logits over vocabulary.\n eos_token: int: end-of-sentence token for target vocabulary.\n\n Returns:\n Array of sampled sequences: [batch_size, max_decode_len]\n \"\"\"\n batch_size = prompt_inputs.shape[0]\n max_decode_len = prompt_inputs.shape[1]\n end_marker = jnp.array(eos_token)\n temperature = jnp.array(temperature)\n\n # Initialize sampling loop state.\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n i0 = jnp.array(0)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)\n # (batch, length) array containing prefix prompt tokens for sampling loop\n # as well as the generated output of newly sampled tokens.\n sequences0 = prompt_inputs\n # Sampling loop state is stored in a simple tuple.\n sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)\n\n def sampling_loop_cond_fn(state):\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n not_at_end = (i < max_decode_len)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n\n def sampling_loop_body_fn(state):\n \"\"\"Sampling loop state update.\"\"\"\n i, sequences, cache, cur_token, ended, rng = state\n # Split RNG for sampling.\n rng1, rng2 = random.split(rng)\n # Call fast-decoder model on current tokens to get next-position logits.\n logits, new_cache = tokens_to_logits(cur_token, cache)\n # Sample next token from logits.\n # TODO(levskaya): add top-p \"nucleus\" sampling option.\n if topk:\n # Get top-k logits and their indices, sample within these top-k tokens.\n topk_logits, topk_idxs = lax.top_k(logits, topk)\n topk_token = jnp.expand_dims(random.categorical(\n rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)\n # Return the original indices corresponding to the sampled top-k tokens.\n next_token = jnp.squeeze(\n jnp.take_along_axis(topk_idxs, topk_token, axis=-1), axis=-1)\n else:\n next_token = random.categorical(\n rng1, logits / temperature).astype(jnp.int32)\n # Only use sampled tokens if we're past provided prefix tokens.\n out_of_prompt = (sequences[:, i+1] == 0)\n next_token = (next_token * out_of_prompt +\n sequences[:, i+1] * ~out_of_prompt)\n # If end-marker reached for batch item, only emit padding tokens.\n next_token_or_endpad = (next_token[None] * ~ended)\n ended |= (next_token_or_endpad == end_marker)\n # Add current sampled tokens to recorded sequences.\n new_sequences = lax.dynamic_update_slice(\n sequences, next_token_or_endpad, (0, i+1))\n return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)\n\n # Run sampling loop and collect final state.\n final_state = lax.while_loop(sampling_loop_cond_fn,\n sampling_loop_body_fn,\n sampling_loop_init_state)\n\n # Pick part of the state corresponding to the sampled sequences.\n final_sequences = final_state[1]\n return final_sequences\n", "path": "examples/lm1b/temperature_sampler.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Fast decoding routines for inference from a trained language model.\"\"\"\n\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\n# Constants\n# The default End-of-Sentence token id is 2 (SentencePiece).\nEOS_ID = 2\n\n\ndef temperature_sample(prompt_inputs,\n init_cache,\n tokens_to_logits,\n prng_key,\n temperature=1.0,\n topk=20,\n eos_token=EOS_ID):\n \"\"\"Temperature sampling for language model generation.\n\n Args:\n prompt_inputs: array: [batch_size, max_decode_len] int32 sequence of tokens.\n init_cache: flax attention cache.\n tokens_to_logits: fast autoregressive decoder function taking single token\n slices and cache and returning next-token logits and updated cache.\n prng_key: JAX PRNGKey.\n temperature: float: sampling temperature factor. As it approaches\n zero this becomes equivalent to greedy sampling.\n topk: integer: if nonzero only use the top-k logits to sample next token,\n if zero don't use any cutoff and sample from full logits over vocabulary.\n eos_token: int: end-of-sentence token for target vocabulary.\n\n Returns:\n Array of sampled sequences: [batch_size, max_decode_len]\n \"\"\"\n batch_size = prompt_inputs.shape[0]\n max_decode_len = prompt_inputs.shape[1]\n end_marker = jnp.array(eos_token)\n temperature = jnp.array(temperature)\n\n # Initialize sampling loop state.\n # initial loop PRNGKey\n rng0 = prng_key\n # loop position counter.\n i0 = jnp.array(-1)\n # per batch-item holding current token in loop.\n token0 = jnp.zeros((batch_size, 1), dtype=jnp.int32)\n # per batch-item state bit indicating if sentence has finished.\n ended0 = jnp.zeros((batch_size, 1), dtype=jnp.bool_)\n # (batch, length) array containing prefix prompt tokens for sampling loop\n # as well as the generated output of newly sampled tokens.\n sequences0 = prompt_inputs\n # Sampling loop state is stored in a simple tuple.\n sampling_loop_init_state = (i0, sequences0, init_cache, token0, ended0, rng0)\n\n def sampling_loop_cond_fn(state):\n \"\"\"Sampling loop termination condition.\"\"\"\n (i, _, _, _, ended, _) = state\n # Have we reached max decoding length?\n not_at_end = (i < max_decode_len - 1)\n # Have all sampled sequences reached an end marker?\n all_sequences_ended = jnp.all(ended)\n return not_at_end & (~all_sequences_ended)\n\n def sampling_loop_body_fn(state):\n \"\"\"Sampling loop state update.\"\"\"\n i, sequences, cache, cur_token, ended, rng = state\n # Split RNG for sampling.\n rng1, rng2 = random.split(rng)\n # Call fast-decoder model on current tokens to get next-position logits.\n logits, new_cache = tokens_to_logits(cur_token, cache)\n # Sample next token from logits.\n # TODO(levskaya): add top-p \"nucleus\" sampling option.\n if topk:\n # Get top-k logits and their indices, sample within these top-k tokens.\n topk_logits, topk_idxs = lax.top_k(logits, topk)\n topk_token = jnp.expand_dims(random.categorical(\n rng1, topk_logits / temperature).astype(jnp.int32), axis=-1)\n # Return the original indices corresponding to the sampled top-k tokens.\n next_token = jnp.squeeze(\n jnp.take_along_axis(topk_idxs, topk_token, 
axis=-1), axis=-1)\n else:\n next_token = random.categorical(\n rng1, logits / temperature).astype(jnp.int32)\n # Only use sampled tokens if we're past provided prefix tokens.\n out_of_prompt = (sequences[:, i+1] == 0)\n next_token = (next_token * out_of_prompt +\n sequences[:, i+1] * ~out_of_prompt)\n # If end-marker reached for batch item, only emit padding tokens.\n next_token_or_endpad = (next_token[None] * ~ended)\n ended |= (next_token_or_endpad == end_marker)\n # Add current sampled tokens to recorded sequences.\n new_sequences = lax.dynamic_update_slice(\n sequences, next_token_or_endpad, (0, i+1))\n return (i+1, new_sequences, new_cache, next_token_or_endpad, ended, rng2)\n\n # Run sampling loop and collect final state.\n final_state = lax.while_loop(sampling_loop_cond_fn,\n sampling_loop_body_fn,\n sampling_loop_init_state)\n\n # Pick part of the state corresponding to the sampled sequences.\n final_sequences = final_state[1]\n return final_sequences\n", "path": "examples/lm1b/temperature_sampler.py"}]} | 1,892 | 254 |
gh_patches_debug_4479 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[functional testing] Fix staging CI job on tbb-0.9.0
We removed the application/functional test run from the staging environment in #3697. We should also update the testinfra test references and remove the application test run from CI, otherwise we get a few testinfra test failures due to pip deps, and an error when we attempt to run the application tests in CI:
```
TASK [Run application tests] ***************************************************
Friday 10 August 2018 19:28:17 +0000 (0:00:00.037) 0:01:08.223 *********
fatal: [app-staging]: FAILED! => {"changed": true, "msg": "non-zero return code", "rc": 127, "stderr": "Shared connection to 52.36.194.59 closed.\r\n", "stdout": "/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\r\n", "stdout_lines": ["/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found"]}
...ignoring
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/create-dev-data.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import datetime
5 import os
6 import sys
7 import argparse
8 from sqlalchemy.exc import IntegrityError
9
10 os.environ["SECUREDROP_ENV"] = "dev" # noqa
11 import journalist_app
12 from sdconfig import config
13 from db import db
14 from models import Journalist, Source, Submission
15
16
17 def add_test_user(username, password, otp_secret, is_admin=False):
18 context = journalist_app.create_app(config).app_context()
19 context.push()
20
21 try:
22 user = Journalist(username=username,
23 password=password,
24 is_admin=is_admin)
25 user.otp_secret = otp_secret
26 db.session.add(user)
27 db.session.commit()
28 print('Test user successfully added: '
29 'username={}, password={}, otp_secret={}, is_admin={}'
30 ''.format(username, password, otp_secret, is_admin))
31 except IntegrityError:
32 print("Test user already added")
33 db.session.rollback()
34
35 context.pop()
36
37
38 def create_source_and_submissions(num_submissions=2):
39 app = journalist_app.create_app(config)
40
41 with app.app_context():
42 # Store source in database
43 codename = app.crypto_util.genrandomid()
44 filesystem_id = app.crypto_util.hash_codename(codename)
45 journalist_designation = app.crypto_util.display_id()
46 source = Source(filesystem_id, journalist_designation)
47 source.pending = False
48 db.session.add(source)
49 db.session.commit()
50
51 # Generate submissions directory and generate source key
52 os.mkdir(app.storage.path(source.filesystem_id))
53 app.crypto_util.genkeypair(source.filesystem_id, codename)
54
55 # Generate some test submissions
56 for _ in range(num_submissions):
57 source.interaction_count += 1
58 fpath = app.storage.save_message_submission(
59 source.filesystem_id,
60 source.interaction_count,
61 source.journalist_filename,
62 'test submission!'
63 )
64 source.last_updated = datetime.datetime.utcnow()
65 submission = Submission(source, fpath)
66 db.session.add(submission)
67
68 db.session.commit()
69 print("Test source '{}' added with {} submissions".format(
70 journalist_designation, num_submissions)
71 )
72
73
74 if __name__ == "__main__": # pragma: no cover
75 # Add two test users
76 test_password = "correct horse battery staple profanity oil chewy"
77 test_otp_secret = "JHCOGO7VCER3EJ4L"
78
79 parser = argparse.ArgumentParser()
80 parser.add_argument("--staging", help="Adding user for staging tests.",
81 action="store_true")
82 args = parser.parse_args()
83 add_test_user("journalist",
84 test_password,
85 test_otp_secret,
86 is_admin=True)
87
88 # If staging, we only need the journalist user (admin)
89 if args.staging:
90 sys.exit(0)
91
92 add_test_user("dellsberg",
93 test_password,
94 test_otp_secret,
95 is_admin=False)
96
97 # Add test sources and submissions
98 num_sources = 2
99 for _ in range(num_sources):
100 create_source_and_submissions()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/create-dev-data.py b/securedrop/create-dev-data.py
--- a/securedrop/create-dev-data.py
+++ b/securedrop/create-dev-data.py
@@ -78,7 +78,7 @@
parser = argparse.ArgumentParser()
parser.add_argument("--staging", help="Adding user for staging tests.",
- action="store_true")
+ action="store_true")
args = parser.parse_args()
add_test_user("journalist",
test_password,
| {"golden_diff": "diff --git a/securedrop/create-dev-data.py b/securedrop/create-dev-data.py\n--- a/securedrop/create-dev-data.py\n+++ b/securedrop/create-dev-data.py\n@@ -78,7 +78,7 @@\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n- action=\"store_true\")\n+ action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n", "issue": "[functional testing] Fix staging CI job on tbb-0.9.0\nWe removed the application/functional test run from the staging environment in #3697. We should also update the testinfra test references and remove the application test run from CI, otherwise we get a few testinfra test failures due to pip deps, and an error when we attempt to run the application tests in CI: \r\n\r\n```\r\nTASK [Run application tests] ***************************************************\r\n Friday 10 August 2018 19:28:17 +0000 (0:00:00.037) 0:01:08.223 *********\r\n fatal: [app-staging]: FAILED! => {\"changed\": true, \"msg\": \"non-zero return code\", \"rc\": 127, \"stderr\": \"Shared connection to 52.36.194.59 closed.\\r\\n\", \"stdout\": \"/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\\r\\n\", \"stdout_lines\": [\"/home/sdrop/.ansible/tmp/ansible-tmp-1533929297.62-93522333058246/app-tests.sh: line 13: pytest: command not found\"]}\r\n ...ignoring\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\nimport sys\nimport argparse\nfrom sqlalchemy.exc import IntegrityError\n\nos.environ[\"SECUREDROP_ENV\"] = \"dev\" # noqa\nimport journalist_app\nfrom sdconfig import config\nfrom db import db\nfrom models import Journalist, Source, Submission\n\n\ndef add_test_user(username, password, otp_secret, is_admin=False):\n context = journalist_app.create_app(config).app_context()\n context.push()\n\n try:\n user = Journalist(username=username,\n password=password,\n is_admin=is_admin)\n user.otp_secret = otp_secret\n db.session.add(user)\n db.session.commit()\n print('Test user successfully added: '\n 'username={}, password={}, otp_secret={}, is_admin={}'\n ''.format(username, password, otp_secret, is_admin))\n except IntegrityError:\n print(\"Test user already added\")\n db.session.rollback()\n\n context.pop()\n\n\ndef create_source_and_submissions(num_submissions=2):\n app = journalist_app.create_app(config)\n\n with app.app_context():\n # Store source in database\n codename = app.crypto_util.genrandomid()\n filesystem_id = app.crypto_util.hash_codename(codename)\n journalist_designation = app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n source.pending = False\n db.session.add(source)\n db.session.commit()\n\n # Generate submissions directory and generate source key\n os.mkdir(app.storage.path(source.filesystem_id))\n app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n # Generate some test submissions\n for _ in range(num_submissions):\n source.interaction_count += 1\n fpath = app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n 'test submission!'\n )\n source.last_updated = datetime.datetime.utcnow()\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n db.session.commit()\n print(\"Test source '{}' added with {} submissions\".format(\n journalist_designation, num_submissions)\n )\n\n\nif __name__ == \"__main__\": # 
pragma: no cover\n # Add two test users\n test_password = \"correct horse battery staple profanity oil chewy\"\n test_otp_secret = \"JHCOGO7VCER3EJ4L\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n test_otp_secret,\n is_admin=True)\n\n # If staging, we only need the journalist user (admin)\n if args.staging:\n sys.exit(0)\n\n add_test_user(\"dellsberg\",\n test_password,\n test_otp_secret,\n is_admin=False)\n\n # Add test sources and submissions\n num_sources = 2\n for _ in range(num_sources):\n create_source_and_submissions()\n", "path": "securedrop/create-dev-data.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport datetime\nimport os\nimport sys\nimport argparse\nfrom sqlalchemy.exc import IntegrityError\n\nos.environ[\"SECUREDROP_ENV\"] = \"dev\" # noqa\nimport journalist_app\nfrom sdconfig import config\nfrom db import db\nfrom models import Journalist, Source, Submission\n\n\ndef add_test_user(username, password, otp_secret, is_admin=False):\n context = journalist_app.create_app(config).app_context()\n context.push()\n\n try:\n user = Journalist(username=username,\n password=password,\n is_admin=is_admin)\n user.otp_secret = otp_secret\n db.session.add(user)\n db.session.commit()\n print('Test user successfully added: '\n 'username={}, password={}, otp_secret={}, is_admin={}'\n ''.format(username, password, otp_secret, is_admin))\n except IntegrityError:\n print(\"Test user already added\")\n db.session.rollback()\n\n context.pop()\n\n\ndef create_source_and_submissions(num_submissions=2):\n app = journalist_app.create_app(config)\n\n with app.app_context():\n # Store source in database\n codename = app.crypto_util.genrandomid()\n filesystem_id = app.crypto_util.hash_codename(codename)\n journalist_designation = app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n source.pending = False\n db.session.add(source)\n db.session.commit()\n\n # Generate submissions directory and generate source key\n os.mkdir(app.storage.path(source.filesystem_id))\n app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n # Generate some test submissions\n for _ in range(num_submissions):\n source.interaction_count += 1\n fpath = app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n 'test submission!'\n )\n source.last_updated = datetime.datetime.utcnow()\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n db.session.commit()\n print(\"Test source '{}' added with {} submissions\".format(\n journalist_designation, num_submissions)\n )\n\n\nif __name__ == \"__main__\": # pragma: no cover\n # Add two test users\n test_password = \"correct horse battery staple profanity oil chewy\"\n test_otp_secret = \"JHCOGO7VCER3EJ4L\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--staging\", help=\"Adding user for staging tests.\",\n action=\"store_true\")\n args = parser.parse_args()\n add_test_user(\"journalist\",\n test_password,\n test_otp_secret,\n is_admin=True)\n\n # If staging, we only need the journalist user (admin)\n if args.staging:\n sys.exit(0)\n\n add_test_user(\"dellsberg\",\n test_password,\n test_otp_secret,\n is_admin=False)\n\n # Add test sources and submissions\n num_sources = 2\n for _ in range(num_sources):\n 
create_source_and_submissions()\n", "path": "securedrop/create-dev-data.py"}]} | 1,458 | 108 |
gh_patches_debug_57973 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[INF/CI] Add `--cov-append` for `pytest`
<!-- Thank you for your PR!
BEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.
The abbreviations can be:
- [DOC]: Documentation fixes.
- [ENH]: Code contributions and new features.
- [TST]: Test-related contributions.
- [INF]: Infrastructure-related contributions.
Also, do not forget to tag the relevant issue here as well.
Finally, as commits come in, don't forget to regularly rebase!
-->
# PR Description
Please describe the changes proposed in the pull request:
> Another reason code coverage failed is that pytest doesn't add `--cov-append` option.
`--cov-append` can get a sum coverage. I'll add this option in the next PR.
First let us merge `codecov.yml` into `tests.yml`. Keep the same test logic for the dev branch or a PR.
_Originally posted by @Zeroto521 in https://github.com/pyjanitor-devs/pyjanitor/issues/1185#issuecomment-1296479926_
<!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. -->
<!-- Please also identify below which issue that has been raised that you are going to close. -->
<!-- As you go down the PR template, please feel free to delete sections that are irrelevant. -->
# PR Checklist
<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with
the project, please feel free to delete this section. -->
Please ensure that you have done the following:
1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.
<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->
2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.
<!-- We'd like to acknowledge your contributions! -->
3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is "on deck") describing the contribution.
- Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.
# Automatic checks
There will be automatic checks run on the PR. These include:
- Building a preview of the docs on Netlify
- Automatically linting the code
- Making sure the code is documented
- Making sure that all tests are passed
- Making sure that code coverage doesn't go down.
# Relevant Reviewers
<!-- Finally, please tag relevant maintainers to review. -->
Please tag maintainers to review.
- @ericmjl
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/accessors/__init__.py`
Content:
```
1 """Miscellaneous mathematical operators.
2
3 Lazy loading used here to speed up imports.
4 """
5
6 import warnings
7 from typing import Tuple
8
9
10 import lazy_loader as lazy
11
12 scipy_special = lazy.load("scipy.special")
13 ss = lazy.load("scipy.stats")
14 pf = lazy.load("pandas_flavor")
15 pd = lazy.load("pandas")
16 np = lazy.load("numpy")
17 pdtypes = lazy.load("pandas.api.types")
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/accessors/__init__.py b/janitor/accessors/__init__.py
--- a/janitor/accessors/__init__.py
+++ b/janitor/accessors/__init__.py
@@ -1,17 +1,3 @@
-"""Miscellaneous mathematical operators.
+"""Miscellaneous mathematical operators."""
-Lazy loading used here to speed up imports.
-"""
-
-import warnings
-from typing import Tuple
-
-
-import lazy_loader as lazy
-
-scipy_special = lazy.load("scipy.special")
-ss = lazy.load("scipy.stats")
-pf = lazy.load("pandas_flavor")
-pd = lazy.load("pandas")
-np = lazy.load("numpy")
-pdtypes = lazy.load("pandas.api.types")
+from janitor.accessors.data_description import DataDescription # noqa: F401
| {"golden_diff": "diff --git a/janitor/accessors/__init__.py b/janitor/accessors/__init__.py\n--- a/janitor/accessors/__init__.py\n+++ b/janitor/accessors/__init__.py\n@@ -1,17 +1,3 @@\n-\"\"\"Miscellaneous mathematical operators.\n+\"\"\"Miscellaneous mathematical operators.\"\"\"\n \n-Lazy loading used here to speed up imports.\n-\"\"\"\n-\n-import warnings\n-from typing import Tuple\n-\n-\n-import lazy_loader as lazy\n-\n-scipy_special = lazy.load(\"scipy.special\")\n-ss = lazy.load(\"scipy.stats\")\n-pf = lazy.load(\"pandas_flavor\")\n-pd = lazy.load(\"pandas\")\n-np = lazy.load(\"numpy\")\n-pdtypes = lazy.load(\"pandas.api.types\")\n+from janitor.accessors.data_description import DataDescription # noqa: F401\n", "issue": "[INF/CI] Add `--cov-append` for `pytest`\n<!-- Thank you for your PR!\r\n\r\nBEFORE YOU CONTINUE! Please add the appropriate three-letter abbreviation to your title.\r\n\r\nThe abbreviations can be:\r\n- [DOC]: Documentation fixes.\r\n- [ENH]: Code contributions and new features.\r\n- [TST]: Test-related contributions.\r\n- [INF]: Infrastructure-related contributions.\r\n\r\nAlso, do not forget to tag the relevant issue here as well.\r\n\r\nFinally, as commits come in, don't forget to regularly rebase!\r\n-->\r\n\r\n# PR Description\r\n\r\nPlease describe the changes proposed in the pull request:\r\n\r\n> Another reason code coverage failed is that pytest doesn't add `--cov-append` option.\r\n`--cov-append` can get a sum coverage. I'll add this option in the next PR.\r\nFirst let us merge `codecov.yml` into `tests.yml`. Keep the same test logic for the dev branch or a PR.\r\n\r\n_Originally posted by @Zeroto521 in https://github.com/pyjanitor-devs/pyjanitor/issues/1185#issuecomment-1296479926_\r\n\r\n<!-- Doing so provides maintainers with context on what the PR is, and can help us more effectively review your PR. -->\r\n\r\n<!-- Please also identify below which issue that has been raised that you are going to close. -->\r\n\r\n<!-- As you go down the PR template, please feel free to delete sections that are irrelevant. -->\r\n\r\n# PR Checklist\r\n\r\n<!-- This checklist exists for newcomers who are not yet familiar with our requirements. If you are experienced with\r\nthe project, please feel free to delete this section. -->\r\n\r\nPlease ensure that you have done the following:\r\n\r\n1. [x] PR in from a fork off your branch. Do not PR from `<your_username>`:`dev`, but rather from `<your_username>`:`<feature-branch_name>`.\r\n<!-- Doing this helps us keep the commit history much cleaner than it would otherwise be. -->\r\n2. [x] If you're not on the contributors list, add yourself to `AUTHORS.md`.\r\n<!-- We'd like to acknowledge your contributions! -->\r\n3. [x] Add a line to `CHANGELOG.md` under the latest version header (i.e. the one that is \"on deck\") describing the contribution.\r\n - Do use some discretion here; if there are multiple PRs that are related, keep them in a single line.\r\n\r\n# Automatic checks\r\n\r\nThere will be automatic checks run on the PR. These include:\r\n\r\n- Building a preview of the docs on Netlify\r\n- Automatically linting the code\r\n- Making sure the code is documented\r\n- Making sure that all tests are passed\r\n- Making sure that code coverage doesn't go down.\r\n\r\n# Relevant Reviewers\r\n\r\n<!-- Finally, please tag relevant maintainers to review. 
-->\r\n\r\nPlease tag maintainers to review.\r\n\r\n- @ericmjl\r\n\n", "before_files": [{"content": "\"\"\"Miscellaneous mathematical operators.\n\nLazy loading used here to speed up imports.\n\"\"\"\n\nimport warnings\nfrom typing import Tuple\n\n\nimport lazy_loader as lazy\n\nscipy_special = lazy.load(\"scipy.special\")\nss = lazy.load(\"scipy.stats\")\npf = lazy.load(\"pandas_flavor\")\npd = lazy.load(\"pandas\")\nnp = lazy.load(\"numpy\")\npdtypes = lazy.load(\"pandas.api.types\")\n", "path": "janitor/accessors/__init__.py"}], "after_files": [{"content": "\"\"\"Miscellaneous mathematical operators.\"\"\"\n\nfrom janitor.accessors.data_description import DataDescription # noqa: F401\n", "path": "janitor/accessors/__init__.py"}]} | 972 | 185 |
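The pyjanitor record above centres on pytest-cov's `--cov-append` flag, which makes a later pytest run add to the existing `.coverage` data file instead of overwriting it, so coverage from several invocations can be summed before upload. Below is a minimal sketch of that pattern, driven from Python for concreteness; the package name and test paths are placeholders, not pyjanitor's actual layout or CI commands.

```python
# Two pytest passes whose coverage accumulates into one .coverage file.
# Requires the pytest-cov plugin; "mypackage", tests/unit and
# tests/functional are illustrative names only.
import pytest

# First pass writes a fresh .coverage data file.
pytest.main(["tests/unit", "--cov=mypackage", "--cov-report="])

# Second pass appends to it thanks to --cov-append, then emits one
# combined XML report suitable for a coverage-upload step.
pytest.main(["tests/functional", "--cov=mypackage", "--cov-append",
             "--cov-report=xml"])
```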
gh_patches_debug_6944 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1805 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logger is not passed to translator
When building the translator, there is a logger created but not passed to the translator:
https://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L18
This results in a log file that only contains a single entry:
https://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L24
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/bin/translate.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import unicode_literals
5
6 from onmt.utils.logging import init_logger
7 from onmt.utils.misc import split_corpus
8 from onmt.translate.translator import build_translator
9
10 import onmt.opts as opts
11 from onmt.utils.parse import ArgumentParser
12
13
14 def translate(opt):
15 ArgumentParser.validate_translate_opts(opt)
16 logger = init_logger(opt.log_file)
17
18 translator = build_translator(opt, report_score=True)
19 src_shards = split_corpus(opt.src, opt.shard_size)
20 tgt_shards = split_corpus(opt.tgt, opt.shard_size)
21 shard_pairs = zip(src_shards, tgt_shards)
22
23 for i, (src_shard, tgt_shard) in enumerate(shard_pairs):
24 logger.info("Translating shard %d." % i)
25 translator.translate(
26 src=src_shard,
27 tgt=tgt_shard,
28 src_dir=opt.src_dir,
29 batch_size=opt.batch_size,
30 batch_type=opt.batch_type,
31 attn_debug=opt.attn_debug,
32 align_debug=opt.align_debug
33 )
34
35
36 def _get_parser():
37 parser = ArgumentParser(description='translate.py')
38
39 opts.config_opts(parser)
40 opts.translate_opts(parser)
41 return parser
42
43
44 def main():
45 parser = _get_parser()
46
47 opt = parser.parse_args()
48 translate(opt)
49
50
51 if __name__ == "__main__":
52 main()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/bin/translate.py b/onmt/bin/translate.py
--- a/onmt/bin/translate.py
+++ b/onmt/bin/translate.py
@@ -15,7 +15,7 @@
ArgumentParser.validate_translate_opts(opt)
logger = init_logger(opt.log_file)
- translator = build_translator(opt, report_score=True)
+ translator = build_translator(opt, logger=logger, report_score=True)
src_shards = split_corpus(opt.src, opt.shard_size)
tgt_shards = split_corpus(opt.tgt, opt.shard_size)
shard_pairs = zip(src_shards, tgt_shards)
| {"golden_diff": "diff --git a/onmt/bin/translate.py b/onmt/bin/translate.py\n--- a/onmt/bin/translate.py\n+++ b/onmt/bin/translate.py\n@@ -15,7 +15,7 @@\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n \n- translator = build_translator(opt, report_score=True)\n+ translator = build_translator(opt, logger=logger, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n", "issue": "Logger is not passed to translator\nWhen building the translator, there is a logger created but not passed to the translator:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L18\r\nThis results in a log file that only contains a single entry:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/35cf4f0ae774a4aa500318879a1a4d53408ac129/onmt/bin/translate.py#L24\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\nfrom onmt.translate.translator import build_translator\n\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef translate(opt):\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n\n translator = build_translator(opt, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n\n for i, (src_shard, tgt_shard) in enumerate(shard_pairs):\n logger.info(\"Translating shard %d.\" % i)\n translator.translate(\n src=src_shard,\n tgt=tgt_shard,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n batch_type=opt.batch_type,\n attn_debug=opt.attn_debug,\n align_debug=opt.align_debug\n )\n\n\ndef _get_parser():\n parser = ArgumentParser(description='translate.py')\n\n opts.config_opts(parser)\n opts.translate_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n translate(opt)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "onmt/bin/translate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import split_corpus\nfrom onmt.translate.translator import build_translator\n\nimport onmt.opts as opts\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef translate(opt):\n ArgumentParser.validate_translate_opts(opt)\n logger = init_logger(opt.log_file)\n\n translator = build_translator(opt, logger=logger, report_score=True)\n src_shards = split_corpus(opt.src, opt.shard_size)\n tgt_shards = split_corpus(opt.tgt, opt.shard_size)\n shard_pairs = zip(src_shards, tgt_shards)\n\n for i, (src_shard, tgt_shard) in enumerate(shard_pairs):\n logger.info(\"Translating shard %d.\" % i)\n translator.translate(\n src=src_shard,\n tgt=tgt_shard,\n src_dir=opt.src_dir,\n batch_size=opt.batch_size,\n batch_type=opt.batch_type,\n attn_debug=opt.attn_debug,\n align_debug=opt.align_debug\n )\n\n\ndef _get_parser():\n parser = ArgumentParser(description='translate.py')\n\n opts.config_opts(parser)\n opts.translate_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n translate(opt)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "onmt/bin/translate.py"}]} | 833 | 140 |
gh_patches_debug_17769 | rasdani/github-patches | git_diff | kivy__python-for-android-2340 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Webview app crashes when trying to request permissions
### Checklist
- [X] the issue is indeed a bug and not a support request
- [X] issue doesn't already exist: https://github.com/kivy/python-for-android/issues
- [X] I have a short, runnable example that reproduces the issue
- [X] I reproduced the problem with the latest development version (`p4a.branch = develop`)
- [X] I used the grave accent (aka backticks) to format code or logs when appropriated
### Versions
- Python: 3.6.6
- OS: Ubuntu 20.04
- python-for-android: 2020.6.2
### Description
I'm building with:
```
HERE=$(pwd)
cd app
export ANDROIDSDK="$HERE/android_sdks"
# Have also tried with
# export ANDROIDNDK="$HERE/android_sdks/android-ndk-r19c"
export ANDROIDNDK="$HERE/android_sdks/android-ndk-r21d"
export ANDROIDAPI="27" # Target API version of your application
export NDKAPI="21" # Minimum supported API version of your application
p4a apk --private "." --package=org.anarres.pdfreader --name "pdf reader" --version 0.1 --bootstrap=webview --requirements=python3,flask --port=5000 --orientation=sensor --blacklist="../blacklist.txt" --permission READ_EXTERNAL_STORAGE --permission WRITE_EXTERNAL_STORAGE
```
My python code is just:
```
from android.permissions import request_permissions, Permission
request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(port=5000)
```
If I comment out:
```
from android.permissions import request_permissions, Permission
request_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])
```
I don't see the error, but then of course I can't access the external storage.
### Logs
```
ImportError: dlopen failed: cannot locate symbol "SDL_ANDROID_GetJNIEnv" referenced by "/data/data/org.anarres.pdfreader/files/app/_python_bundle/site-packages/android/_android.so"...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/android/src/setup.py`
Content:
```
1 from distutils.core import setup, Extension
2 import os
3
4 library_dirs = ['libs/' + os.environ['ARCH']]
5 lib_dict = {
6 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']
7 }
8 sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], [])
9
10 modules = [Extension('android._android',
11 ['android/_android.c', 'android/_android_jni.c'],
12 libraries=sdl_libs + ['log'],
13 library_dirs=library_dirs),
14 Extension('android._android_billing',
15 ['android/_android_billing.c', 'android/_android_billing_jni.c'],
16 libraries=['log'],
17 library_dirs=library_dirs)]
18
19 setup(name='android',
20 version='1.0',
21 packages=['android'],
22 package_dir={'android': 'android'},
23 ext_modules=modules
24 )
25
```
Path: `pythonforandroid/recipes/android/__init__.py`
Content:
```
1 from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour
2 from pythonforandroid.util import current_directory
3 from pythonforandroid import logger
4
5 from os.path import join
6
7
8 class AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):
9 # name = 'android'
10 version = None
11 url = None
12
13 src_filename = 'src'
14
15 depends = [('sdl2', 'genericndkbuild'), 'pyjnius']
16
17 config_env = {}
18
19 def get_recipe_env(self, arch):
20 env = super().get_recipe_env(arch)
21 env.update(self.config_env)
22 return env
23
24 def prebuild_arch(self, arch):
25 super().prebuild_arch(arch)
26 ctx_bootstrap = self.ctx.bootstrap.name
27
28 # define macros for Cython, C, Python
29 tpxi = 'DEF {} = {}\n'
30 th = '#define {} {}\n'
31 tpy = '{} = {}\n'
32
33 # make sure bootstrap name is in unicode
34 if isinstance(ctx_bootstrap, bytes):
35 ctx_bootstrap = ctx_bootstrap.decode('utf-8')
36 bootstrap = bootstrap_name = ctx_bootstrap
37 is_sdl2 = (bootstrap_name == "sdl2")
38 if bootstrap_name in ["sdl2", "webview", "service_only", "service_library"]:
39 java_ns = u'org.kivy.android'
40 jni_ns = u'org/kivy/android'
41 else:
42 logger.error((
43 'unsupported bootstrap for android recipe: {}'
44 ''.format(bootstrap_name)
45 ))
46 exit(1)
47
48 config = {
49 'BOOTSTRAP': bootstrap,
50 'IS_SDL2': int(is_sdl2),
51 'PY2': 0,
52 'JAVA_NAMESPACE': java_ns,
53 'JNI_NAMESPACE': jni_ns,
54 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name,
55 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'),
56 }
57
58 # create config files for Cython, C and Python
59 with (
60 current_directory(self.get_build_dir(arch.arch))), (
61 open(join('android', 'config.pxi'), 'w')) as fpxi, (
62 open(join('android', 'config.h'), 'w')) as fh, (
63 open(join('android', 'config.py'), 'w')) as fpy:
64
65 for key, value in config.items():
66 fpxi.write(tpxi.format(key, repr(value)))
67 fpy.write(tpy.format(key, repr(value)))
68
69 fh.write(th.format(
70 key,
71 value if isinstance(value, int) else '"{}"'.format(value)
72 ))
73 self.config_env[key] = str(value)
74
75 if is_sdl2:
76 fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\n')
77 fh.write(
78 '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\n'
79 )
80
81
82 recipe = AndroidRecipe()
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/android/__init__.py b/pythonforandroid/recipes/android/__init__.py
--- a/pythonforandroid/recipes/android/__init__.py
+++ b/pythonforandroid/recipes/android/__init__.py
@@ -77,6 +77,11 @@
fh.write(
'#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\n'
)
+ else:
+ fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\n')
+ fh.write(
+ '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\n'
+ )
recipe = AndroidRecipe()
diff --git a/pythonforandroid/recipes/android/src/setup.py b/pythonforandroid/recipes/android/src/setup.py
--- a/pythonforandroid/recipes/android/src/setup.py
+++ b/pythonforandroid/recipes/android/src/setup.py
@@ -5,7 +5,7 @@
lib_dict = {
'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']
}
-sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], [])
+sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main'])
modules = [Extension('android._android',
['android/_android.c', 'android/_android_jni.c'],
| {"golden_diff": "diff --git a/pythonforandroid/recipes/android/__init__.py b/pythonforandroid/recipes/android/__init__.py\n--- a/pythonforandroid/recipes/android/__init__.py\n+++ b/pythonforandroid/recipes/android/__init__.py\n@@ -77,6 +77,11 @@\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n+ else:\n+ fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\\n')\n+ fh.write(\n+ '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\\n'\n+ )\n \n \n recipe = AndroidRecipe()\ndiff --git a/pythonforandroid/recipes/android/src/setup.py b/pythonforandroid/recipes/android/src/setup.py\n--- a/pythonforandroid/recipes/android/src/setup.py\n+++ b/pythonforandroid/recipes/android/src/setup.py\n@@ -5,7 +5,7 @@\n lib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n }\n-sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], [])\n+sdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main'])\n \n modules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n", "issue": "Webview app crashes when trying to request permissions\n### Checklist\r\n\r\n- [X] the issue is indeed a bug and not a support request\r\n- [X] issue doesn't already exist: https://github.com/kivy/python-for-android/issues\r\n- [X] I have a short, runnable example that reproduces the issue\r\n- [X] I reproduced the problem with the latest development version (`p4a.branch = develop`)\r\n- [X] I used the grave accent (aka backticks) to format code or logs when appropriated\r\n\r\n### Versions\r\n\r\n- Python: 3.6.6\r\n- OS: Ubuntu 20.04\r\n- python-for-android: 2020.6.2\r\n\r\n### Description\r\n\r\nI'm building with:\r\n\r\n```\r\nHERE=$(pwd)\r\ncd app\r\n\r\nexport ANDROIDSDK=\"$HERE/android_sdks\"\r\n# Have also tried with\r\n# export ANDROIDNDK=\"$HERE/android_sdks/android-ndk-r19c\"\r\nexport ANDROIDNDK=\"$HERE/android_sdks/android-ndk-r21d\"\r\nexport ANDROIDAPI=\"27\" # Target API version of your application\r\nexport NDKAPI=\"21\" # Minimum supported API version of your application\r\n\r\np4a apk --private \".\" --package=org.anarres.pdfreader --name \"pdf reader\" --version 0.1 --bootstrap=webview --requirements=python3,flask --port=5000 --orientation=sensor --blacklist=\"../blacklist.txt\" --permission READ_EXTERNAL_STORAGE --permission WRITE_EXTERNAL_STORAGE\r\n```\r\n\r\nMy python code is just:\r\n\r\n```\r\nfrom android.permissions import request_permissions, Permission\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])\r\n\r\nfrom flask import Flask, render_template\r\n\r\napp = Flask(__name__)\r\n\r\[email protected]('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\nif __name__ == '__main__':\r\n app.run(port=5000)\r\n```\r\n\r\nIf I comment out:\r\n\r\n```\r\nfrom android.permissions import request_permissions, Permission\r\nrequest_permissions([Permission.READ_EXTERNAL_STORAGE, Permission.WRITE_EXTERNAL_STORAGE])\r\n```\r\n\r\nI don't see the error, but then of course I can't access the external storage.\r\n\r\n### Logs\r\n\r\n```\r\nImportError: dlopen failed: cannot locate symbol \"SDL_ANDROID_GetJNIEnv\" referenced by \"/data/data/org.anarres.pdfreader/files/app/_python_bundle/site-packages/android/_android.so\"...\r\n```\r\n\n", "before_files": [{"content": "from distutils.core import setup, Extension\nimport os\n\nlibrary_dirs = ['libs/' + os.environ['ARCH']]\nlib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n}\nsdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], 
[])\n\nmodules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n libraries=sdl_libs + ['log'],\n library_dirs=library_dirs),\n Extension('android._android_billing',\n ['android/_android_billing.c', 'android/_android_billing_jni.c'],\n libraries=['log'],\n library_dirs=library_dirs)]\n\nsetup(name='android',\n version='1.0',\n packages=['android'],\n package_dir={'android': 'android'},\n ext_modules=modules\n )\n", "path": "pythonforandroid/recipes/android/src/setup.py"}, {"content": "from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour\nfrom pythonforandroid.util import current_directory\nfrom pythonforandroid import logger\n\nfrom os.path import join\n\n\nclass AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):\n # name = 'android'\n version = None\n url = None\n\n src_filename = 'src'\n\n depends = [('sdl2', 'genericndkbuild'), 'pyjnius']\n\n config_env = {}\n\n def get_recipe_env(self, arch):\n env = super().get_recipe_env(arch)\n env.update(self.config_env)\n return env\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n ctx_bootstrap = self.ctx.bootstrap.name\n\n # define macros for Cython, C, Python\n tpxi = 'DEF {} = {}\\n'\n th = '#define {} {}\\n'\n tpy = '{} = {}\\n'\n\n # make sure bootstrap name is in unicode\n if isinstance(ctx_bootstrap, bytes):\n ctx_bootstrap = ctx_bootstrap.decode('utf-8')\n bootstrap = bootstrap_name = ctx_bootstrap\n is_sdl2 = (bootstrap_name == \"sdl2\")\n if bootstrap_name in [\"sdl2\", \"webview\", \"service_only\", \"service_library\"]:\n java_ns = u'org.kivy.android'\n jni_ns = u'org/kivy/android'\n else:\n logger.error((\n 'unsupported bootstrap for android recipe: {}'\n ''.format(bootstrap_name)\n ))\n exit(1)\n\n config = {\n 'BOOTSTRAP': bootstrap,\n 'IS_SDL2': int(is_sdl2),\n 'PY2': 0,\n 'JAVA_NAMESPACE': java_ns,\n 'JNI_NAMESPACE': jni_ns,\n 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name,\n 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'),\n }\n\n # create config files for Cython, C and Python\n with (\n current_directory(self.get_build_dir(arch.arch))), (\n open(join('android', 'config.pxi'), 'w')) as fpxi, (\n open(join('android', 'config.h'), 'w')) as fh, (\n open(join('android', 'config.py'), 'w')) as fpy:\n\n for key, value in config.items():\n fpxi.write(tpxi.format(key, repr(value)))\n fpy.write(tpy.format(key, repr(value)))\n\n fh.write(th.format(\n key,\n value if isinstance(value, int) else '\"{}\"'.format(value)\n ))\n self.config_env[key] = str(value)\n\n if is_sdl2:\n fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n\n\nrecipe = AndroidRecipe()\n", "path": "pythonforandroid/recipes/android/__init__.py"}], "after_files": [{"content": "from distutils.core import setup, Extension\nimport os\n\nlibrary_dirs = ['libs/' + os.environ['ARCH']]\nlib_dict = {\n 'sdl2': ['SDL2', 'SDL2_image', 'SDL2_mixer', 'SDL2_ttf']\n}\nsdl_libs = lib_dict.get(os.environ['BOOTSTRAP'], ['main'])\n\nmodules = [Extension('android._android',\n ['android/_android.c', 'android/_android_jni.c'],\n libraries=sdl_libs + ['log'],\n library_dirs=library_dirs),\n Extension('android._android_billing',\n ['android/_android_billing.c', 'android/_android_billing_jni.c'],\n libraries=['log'],\n library_dirs=library_dirs)]\n\nsetup(name='android',\n version='1.0',\n packages=['android'],\n package_dir={'android': 'android'},\n ext_modules=modules\n )\n", "path": 
"pythonforandroid/recipes/android/src/setup.py"}, {"content": "from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour\nfrom pythonforandroid.util import current_directory\nfrom pythonforandroid import logger\n\nfrom os.path import join\n\n\nclass AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):\n # name = 'android'\n version = None\n url = None\n\n src_filename = 'src'\n\n depends = [('sdl2', 'genericndkbuild'), 'pyjnius']\n\n config_env = {}\n\n def get_recipe_env(self, arch):\n env = super().get_recipe_env(arch)\n env.update(self.config_env)\n return env\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n ctx_bootstrap = self.ctx.bootstrap.name\n\n # define macros for Cython, C, Python\n tpxi = 'DEF {} = {}\\n'\n th = '#define {} {}\\n'\n tpy = '{} = {}\\n'\n\n # make sure bootstrap name is in unicode\n if isinstance(ctx_bootstrap, bytes):\n ctx_bootstrap = ctx_bootstrap.decode('utf-8')\n bootstrap = bootstrap_name = ctx_bootstrap\n is_sdl2 = (bootstrap_name == \"sdl2\")\n if bootstrap_name in [\"sdl2\", \"webview\", \"service_only\", \"service_library\"]:\n java_ns = u'org.kivy.android'\n jni_ns = u'org/kivy/android'\n else:\n logger.error((\n 'unsupported bootstrap for android recipe: {}'\n ''.format(bootstrap_name)\n ))\n exit(1)\n\n config = {\n 'BOOTSTRAP': bootstrap,\n 'IS_SDL2': int(is_sdl2),\n 'PY2': 0,\n 'JAVA_NAMESPACE': java_ns,\n 'JNI_NAMESPACE': jni_ns,\n 'ACTIVITY_CLASS_NAME': self.ctx.activity_class_name,\n 'ACTIVITY_CLASS_NAMESPACE': self.ctx.activity_class_name.replace('.', '/'),\n }\n\n # create config files for Cython, C and Python\n with (\n current_directory(self.get_build_dir(arch.arch))), (\n open(join('android', 'config.pxi'), 'w')) as fpxi, (\n open(join('android', 'config.h'), 'w')) as fh, (\n open(join('android', 'config.py'), 'w')) as fpy:\n\n for key, value in config.items():\n fpxi.write(tpxi.format(key, repr(value)))\n fpy.write(tpy.format(key, repr(value)))\n\n fh.write(th.format(\n key,\n value if isinstance(value, int) else '\"{}\"'.format(value)\n ))\n self.config_env[key] = str(value)\n\n if is_sdl2:\n fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\\n'\n )\n else:\n fh.write('JNIEnv *WebView_AndroidGetJNIEnv(void);\\n')\n fh.write(\n '#define SDL_ANDROID_GetJNIEnv WebView_AndroidGetJNIEnv\\n'\n )\n\n\nrecipe = AndroidRecipe()\n", "path": "pythonforandroid/recipes/android/__init__.py"}]} | 1,800 | 283 |
gh_patches_debug_19727 | rasdani/github-patches | git_diff | facebookresearch__hydra-1424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade to OmegaConf 2.1
OmegaConf 2.1 is adding many important new features.
For example:
* Powerful interpolation grammar supporting nested interpolations
* Relative interpolations
* And many many bug fixes
Release notes: [omegaconf==2.1.0.rc1](https://github.com/omry/omegaconf/releases/tag/v2.1.0.rc1).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from dataclasses import dataclass
3 from typing import Optional
4
5 from hydra.core.config_store import ConfigStore
6 from omegaconf import II
7
8
9 @dataclass
10 class RedisConf:
11 # host address via REDIS_HOST environment variable, default: localhost
12 host: str = II("env:REDIS_HOST,localhost")
13 # port via REDIS_PORT environment variable, default: 6379
14 port: int = II("env:REDIS_PORT,6379")
15 # database via REDIS_DB environment variable, default: 0
16 db: Optional[str] = II("env:REDIS_DB,0")
17 # password via REDIS_PASSWORD environment variable, default: no password
18 password: str = II("env:REDIS_PASSWORD,")
19 # switch to run without redis server in single thread, for testing purposes only
20 mock: bool = II("env:REDIS_MOCK,False")
21
22
23 @dataclass
24 class EnqueueConf:
25 # maximum runtime of the job before it's killed (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
26 job_timeout: Optional[str] = None
27 # maximum queued time before the job before is discarded (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
28 ttl: Optional[str] = None
29 # how long successful jobs and their results are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
30 result_ttl: Optional[str] = None
31 # specifies how long failed jobs are kept (e.g. "1d" for 1 day, units: d/h/m/s), default: no limit
32 failure_ttl: Optional[str] = None
33 # place job at the front of the queue, instead of the back
34 at_front: bool = False
35 # job id, will be overidden automatically by a uuid unless specified explicitly
36 job_id: Optional[str] = None
37 # description, will be overidden automatically unless specified explicitly
38 description: Optional[str] = None
39
40
41 @dataclass
42 class RQLauncherConf:
43 _target_: str = "hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher"
44 # enqueue configuration
45 enqueue: EnqueueConf = EnqueueConf()
46 # queue name
47 queue: str = "default"
48 # redis configuration
49 redis: RedisConf = RedisConf()
50 # stop after enqueueing by raising custom exception
51 stop_after_enqueue: bool = False
52 # wait time in seconds when polling results
53 wait_polling: float = 1.0
54
55
56 ConfigStore.instance().store(
57 group="hydra/launcher", name="rq", node=RQLauncherConf, provider="rq_launcher"
58 )
59
```
Path: `plugins/hydra_ax_sweeper/setup.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from setuptools import find_namespace_packages, setup
4
5 with open("README.md", "r") as fh:
6 LONG_DESC = fh.read()
7 setup(
8 name="hydra-ax-sweeper",
9 version="1.1.0rc1",
10 author="Omry Yadan, Shagun Sodhani",
11 author_email="[email protected], [email protected]",
12 description="Hydra Ax Sweeper plugin",
13 long_description=LONG_DESC,
14 long_description_content_type="text/markdown",
15 url="https://github.com/facebookresearch/hydra/",
16 packages=find_namespace_packages(include=["hydra_plugins.*"]),
17 classifiers=[
18 "License :: OSI Approved :: MIT License",
19 "Programming Language :: Python :: 3.7",
20 "Programming Language :: Python :: 3.8",
21 "Programming Language :: Python :: 3.9",
22 "Operating System :: POSIX :: Linux",
23 "Operating System :: MacOS",
24 "Development Status :: 4 - Beta",
25 ],
26 install_requires=[
27 "hydra-core>=1.0.0",
28 "ax-platform>=0.1.13",
29 "numpy<1.20.0", # remove once ax is upgraded to support numpy 1.20
30 ],
31 include_package_data=True,
32 )
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py
--- a/plugins/hydra_ax_sweeper/setup.py
+++ b/plugins/hydra_ax_sweeper/setup.py
@@ -25,8 +25,7 @@
],
install_requires=[
"hydra-core>=1.0.0",
- "ax-platform>=0.1.13",
- "numpy<1.20.0", # remove once ax is upgraded to support numpy 1.20
+ "ax-platform>=0.1.20",
],
include_package_data=True,
)
diff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py
@@ -15,7 +15,7 @@
# database via REDIS_DB environment variable, default: 0
db: Optional[str] = II("env:REDIS_DB,0")
# password via REDIS_PASSWORD environment variable, default: no password
- password: str = II("env:REDIS_PASSWORD,")
+ password: str = II("env:REDIS_PASSWORD")
# switch to run without redis server in single thread, for testing purposes only
mock: bool = II("env:REDIS_MOCK,False")
| {"golden_diff": "diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py\n--- a/plugins/hydra_ax_sweeper/setup.py\n+++ b/plugins/hydra_ax_sweeper/setup.py\n@@ -25,8 +25,7 @@\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n- \"ax-platform>=0.1.13\",\n- \"numpy<1.20.0\", # remove once ax is upgraded to support numpy 1.20\n+ \"ax-platform>=0.1.20\",\n ],\n include_package_data=True,\n )\ndiff --git a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n--- a/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n+++ b/plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py\n@@ -15,7 +15,7 @@\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n- password: str = II(\"env:REDIS_PASSWORD,\")\n+ password: str = II(\"env:REDIS_PASSWORD\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n", "issue": "Upgrade to OmegaConf 2.1\nOmegaConf 2.1 is adding many important new features.\r\nFor example:\r\n* Powerful interpolation grammar supporting nested interpolations\r\n* Relative interpolations\r\n* And many many bug fixes\r\n\r\nRelease notes: [omegaconf==2.1.0.rc1](https://github.com/omry/omegaconf/releases/tag/v2.1.0.rc1).\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"env:REDIS_PORT,6379\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: str = II(\"env:REDIS_PASSWORD,\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. 
\"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n \"ax-platform>=0.1.13\",\n \"numpy<1.20.0\", # remove once ax is upgraded to support numpy 1.20\n ],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom dataclasses import dataclass\nfrom typing import Optional\n\nfrom hydra.core.config_store import ConfigStore\nfrom omegaconf import II\n\n\n@dataclass\nclass RedisConf:\n # host address via REDIS_HOST environment variable, default: localhost\n host: str = II(\"env:REDIS_HOST,localhost\")\n # port via REDIS_PORT environment variable, default: 6379\n port: int = II(\"env:REDIS_PORT,6379\")\n # database via REDIS_DB environment variable, default: 0\n db: Optional[str] = II(\"env:REDIS_DB,0\")\n # password via REDIS_PASSWORD environment variable, default: no password\n password: str = II(\"env:REDIS_PASSWORD\")\n # switch to run without redis server in single thread, for testing purposes only\n mock: bool = II(\"env:REDIS_MOCK,False\")\n\n\n@dataclass\nclass EnqueueConf:\n # maximum runtime of the job before it's killed (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n job_timeout: Optional[str] = None\n # maximum queued time before the job before is discarded (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n ttl: Optional[str] = None\n # how long successful jobs and their results are kept (e.g. 
\"1d\" for 1 day, units: d/h/m/s), default: no limit\n result_ttl: Optional[str] = None\n # specifies how long failed jobs are kept (e.g. \"1d\" for 1 day, units: d/h/m/s), default: no limit\n failure_ttl: Optional[str] = None\n # place job at the front of the queue, instead of the back\n at_front: bool = False\n # job id, will be overidden automatically by a uuid unless specified explicitly\n job_id: Optional[str] = None\n # description, will be overidden automatically unless specified explicitly\n description: Optional[str] = None\n\n\n@dataclass\nclass RQLauncherConf:\n _target_: str = \"hydra_plugins.hydra_rq_launcher.rq_launcher.RQLauncher\"\n # enqueue configuration\n enqueue: EnqueueConf = EnqueueConf()\n # queue name\n queue: str = \"default\"\n # redis configuration\n redis: RedisConf = RedisConf()\n # stop after enqueueing by raising custom exception\n stop_after_enqueue: bool = False\n # wait time in seconds when polling results\n wait_polling: float = 1.0\n\n\nConfigStore.instance().store(\n group=\"hydra/launcher\", name=\"rq\", node=RQLauncherConf, provider=\"rq_launcher\"\n)\n", "path": "plugins/hydra_rq_launcher/hydra_plugins/hydra_rq_launcher/config.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\n \"hydra-core>=1.0.0\",\n \"ax-platform>=0.1.20\",\n ],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}]} | 1,471 | 342 |
gh_patches_debug_31077 | rasdani/github-patches | git_diff | sopel-irc__sopel-1441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
imdb module not working anymore
I just noticed that anytime you make a call to imdb now the bot responds:
> [MOVIE] No API key provided.
I know it used to work, not sure how recently. Maybe it can be switched to a different database that doesn't require an API key?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/movie.py`
Content:
```
1 # coding=utf-8
2 """
3 imdb.py - Sopel Movie Information Module
4 Copyright © 2012-2013, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 This module relies on omdbapi.com
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10
11 import requests
12 import sopel.module
13 from sopel.logger import get_logger
14
15 LOGGER = get_logger(__name__)
16
17
18 @sopel.module.commands('movie', 'imdb')
19 @sopel.module.example('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')
20 @sopel.module.example('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')
21 def movie(bot, trigger):
22 """
23 Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
24 """
25 if not trigger.group(2):
26 return
27 word = trigger.group(2).rstrip()
28 uri = "http://www.omdbapi.com/"
29 data = requests.get(uri, params={'t': word}, timeout=30,
30 verify=bot.config.core.verify_ssl).json()
31 if data['Response'] == 'False':
32 if 'Error' in data:
33 message = '[MOVIE] %s' % data['Error']
34 else:
35 LOGGER.warning(
36 'Got an error from the OMDb api, search phrase was %s; data was %s',
37 word, str(data))
38 message = '[MOVIE] Got an error from OMDbapi'
39 else:
40 message = '[MOVIE] Title: ' + data['Title'] + \
41 ' | Year: ' + data['Year'] + \
42 ' | Rating: ' + data['imdbRating'] + \
43 ' | Genre: ' + data['Genre'] + \
44 ' | IMDB Link: http://imdb.com/title/' + data['imdbID']
45 bot.say(message)
46
47
48 if __name__ == "__main__":
49 from sopel.test_tools import run_example_tests
50 run_example_tests(__file__)
51
```
Path: `conftest.py`
Content:
```
1 # This file lists files which should be ignored by pytest
2 collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py", "sopel/modules/movie.py"]
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -1,2 +1,2 @@
# This file lists files which should be ignored by pytest
-collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py", "sopel/modules/movie.py"]
+collect_ignore = ["setup.py", "sopel.py", "sopel/modules/ipython.py"]
diff --git a/sopel/modules/movie.py b/sopel/modules/movie.py
deleted file mode 100644
--- a/sopel/modules/movie.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# coding=utf-8
-"""
-imdb.py - Sopel Movie Information Module
-Copyright © 2012-2013, Elad Alfassa, <[email protected]>
-Licensed under the Eiffel Forum License 2.
-
-This module relies on omdbapi.com
-"""
-from __future__ import unicode_literals, absolute_import, print_function, division
-
-import requests
-import sopel.module
-from sopel.logger import get_logger
-
-LOGGER = get_logger(__name__)
-
-
[email protected]('movie', 'imdb')
[email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')
[email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')
-def movie(bot, trigger):
- """
- Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
- """
- if not trigger.group(2):
- return
- word = trigger.group(2).rstrip()
- uri = "http://www.omdbapi.com/"
- data = requests.get(uri, params={'t': word}, timeout=30,
- verify=bot.config.core.verify_ssl).json()
- if data['Response'] == 'False':
- if 'Error' in data:
- message = '[MOVIE] %s' % data['Error']
- else:
- LOGGER.warning(
- 'Got an error from the OMDb api, search phrase was %s; data was %s',
- word, str(data))
- message = '[MOVIE] Got an error from OMDbapi'
- else:
- message = '[MOVIE] Title: ' + data['Title'] + \
- ' | Year: ' + data['Year'] + \
- ' | Rating: ' + data['imdbRating'] + \
- ' | Genre: ' + data['Genre'] + \
- ' | IMDB Link: http://imdb.com/title/' + data['imdbID']
- bot.say(message)
-
-
-if __name__ == "__main__":
- from sopel.test_tools import run_example_tests
- run_example_tests(__file__)
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -1,2 +1,2 @@\n # This file lists files which should be ignored by pytest\n-collect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\", \"sopel/modules/movie.py\"]\n+collect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\"]\ndiff --git a/sopel/modules/movie.py b/sopel/modules/movie.py\ndeleted file mode 100644\n--- a/sopel/modules/movie.py\n+++ /dev/null\n@@ -1,50 +0,0 @@\n-# coding=utf-8\n-\"\"\"\n-imdb.py - Sopel Movie Information Module\n-Copyright \u00a9 2012-2013, Elad Alfassa, <[email protected]>\n-Licensed under the Eiffel Forum License 2.\n-\n-This module relies on omdbapi.com\n-\"\"\"\n-from __future__ import unicode_literals, absolute_import, print_function, division\n-\n-import requests\n-import sopel.module\n-from sopel.logger import get_logger\n-\n-LOGGER = get_logger(__name__)\n-\n-\[email protected]('movie', 'imdb')\[email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')\[email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')\n-def movie(bot, trigger):\n- \"\"\"\n- Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.\n- \"\"\"\n- if not trigger.group(2):\n- return\n- word = trigger.group(2).rstrip()\n- uri = \"http://www.omdbapi.com/\"\n- data = requests.get(uri, params={'t': word}, timeout=30,\n- verify=bot.config.core.verify_ssl).json()\n- if data['Response'] == 'False':\n- if 'Error' in data:\n- message = '[MOVIE] %s' % data['Error']\n- else:\n- LOGGER.warning(\n- 'Got an error from the OMDb api, search phrase was %s; data was %s',\n- word, str(data))\n- message = '[MOVIE] Got an error from OMDbapi'\n- else:\n- message = '[MOVIE] Title: ' + data['Title'] + \\\n- ' | Year: ' + data['Year'] + \\\n- ' | Rating: ' + data['imdbRating'] + \\\n- ' | Genre: ' + data['Genre'] + \\\n- ' | IMDB Link: http://imdb.com/title/' + data['imdbID']\n- bot.say(message)\n-\n-\n-if __name__ == \"__main__\":\n- from sopel.test_tools import run_example_tests\n- run_example_tests(__file__)\n", "issue": "imdb module not working anymore\nI just noticed that anytime you make a call to imdb now the bot responds: \r\n\r\n> [MOVIE] No API key provided.\r\n\r\nI know it used to work, not sure how recently. 
Maybe it can be switched to a different database that doesn't require an API key?\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nimdb.py - Sopel Movie Information Module\nCopyright \u00a9 2012-2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module relies on omdbapi.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport requests\nimport sopel.module\nfrom sopel.logger import get_logger\n\nLOGGER = get_logger(__name__)\n\n\[email protected]('movie', 'imdb')\[email protected]('.movie ThisTitleDoesNotExist', '[MOVIE] Movie not found!')\[email protected]('.movie Citizen Kane', '[MOVIE] Title: Citizen Kane | Year: 1941 | Rating: 8.4 | Genre: Drama, Mystery | IMDB Link: http://imdb.com/title/tt0033467')\ndef movie(bot, trigger):\n \"\"\"\n Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.\n \"\"\"\n if not trigger.group(2):\n return\n word = trigger.group(2).rstrip()\n uri = \"http://www.omdbapi.com/\"\n data = requests.get(uri, params={'t': word}, timeout=30,\n verify=bot.config.core.verify_ssl).json()\n if data['Response'] == 'False':\n if 'Error' in data:\n message = '[MOVIE] %s' % data['Error']\n else:\n LOGGER.warning(\n 'Got an error from the OMDb api, search phrase was %s; data was %s',\n word, str(data))\n message = '[MOVIE] Got an error from OMDbapi'\n else:\n message = '[MOVIE] Title: ' + data['Title'] + \\\n ' | Year: ' + data['Year'] + \\\n ' | Rating: ' + data['imdbRating'] + \\\n ' | Genre: ' + data['Genre'] + \\\n ' | IMDB Link: http://imdb.com/title/' + data['imdbID']\n bot.say(message)\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/movie.py"}, {"content": "# This file lists files which should be ignored by pytest\ncollect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\", \"sopel/modules/movie.py\"]\n", "path": "conftest.py"}], "after_files": [{"content": null, "path": "sopel/modules/movie.py"}, {"content": "# This file lists files which should be ignored by pytest\ncollect_ignore = [\"setup.py\", \"sopel.py\", \"sopel/modules/ipython.py\"]\n", "path": "conftest.py"}]} | 975 | 695 |
gh_patches_debug_19487 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1011 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compatibility with future PT 1.12
## 🐛 Bug
```
> distance = x @ y.T
E RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
```
### To Reproduce
https://github.com/PyTorchLightning/metrics/runs/6275393755?check_suite_focus=true
#### Code sample
```py
distance = x @ y.T
```
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- OS (e.g., Linux): linux
- Python & PyTorch Version (e.g., 1.0): py3.8 & pt1.12
### Additional context
context see https://github.com/pytorch/pytorch/pull/75647
slack: https://pytorch.slack.com/archives/C3PDTEV8E/p1651742487294399
tl;dr: We don't have correct and fast linalg algorithms for half in CPU, as none of the backend libraries that we use support them, so it's better not to support these via half-baked implementations, and simply make the users cast their inputs if they want to use these methods.
> but for GPU it shall be supported, correct? >> Yes
note as well that this just applies to Half on CPU and for linalg functions (think matmul and conv). Pointwise functions on Half on CPU will still be available, and Half on CUDA will still have full support
> it was implemented up till 1.11 but there was no real speed-up, correct?
Not only was it slower, but it was not numerically stable, so it was pretty much a bug (hence the removal without deprecation)
--- END ISSUE ---
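To make the "cast their inputs" advice concrete, the sketch below shows a float16-aware wrapper around the failing `x @ y.T` product; it mirrors the approach the fix further down in this entry takes, and the helper name itself is only illustrative.

```python
# Sketch of the cast-and-restore workaround for half-precision matmul on CPU.
import torch

def safe_matmul(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Newer PyTorch ships no CPU kernel for half-precision addmm/matmul,
    # so compute in float32 and cast the result back to float16.
    if x.dtype == torch.float16 or y.dtype == torch.float16:
        return (x.float() @ y.T.float()).half()
    return x @ y.T
```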
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchmetrics/functional/pairwise/cosine.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
21
22
23 def _pairwise_cosine_similarity_update(
24 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
25 ) -> Tensor:
26 """Calculates the pairwise cosine similarity matrix.
27
28 Args:
29 x: tensor of shape ``[N,d]``
30 y: tensor of shape ``[M,d]``
31 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
32 """
33 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
34
35 norm = torch.norm(x, p=2, dim=1)
36 x /= norm.unsqueeze(1)
37 norm = torch.norm(y, p=2, dim=1)
38 y /= norm.unsqueeze(1)
39
40 distance = x @ y.T
41 if zero_diagonal:
42 distance.fill_diagonal_(0)
43 return distance
44
45
46 def pairwise_cosine_similarity(
47 x: Tensor,
48 y: Optional[Tensor] = None,
49 reduction: Literal["mean", "sum", "none", None] = None,
50 zero_diagonal: Optional[bool] = None,
51 ) -> Tensor:
52 r"""Calculates pairwise cosine similarity:
53
54 .. math::
55 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
56 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}}
57
58 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
59 between the rows of :math:`x` and :math:`y`.
60 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
61
62 Args:
63 x: Tensor with shape ``[N, d]``
64 y: Tensor with shape ``[M, d]``, optional
65 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
66 (applied along column dimension) or `'none'`, `None` for no reduction
67 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
68 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
69
70 Returns:
71 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
72
73 Example:
74 >>> import torch
75 >>> from torchmetrics.functional import pairwise_cosine_similarity
76 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
77 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
78 >>> pairwise_cosine_similarity(x, y)
79 tensor([[0.5547, 0.8682],
80 [0.5145, 0.8437],
81 [0.5300, 0.8533]])
82 >>> pairwise_cosine_similarity(x)
83 tensor([[0.0000, 0.9989, 0.9996],
84 [0.9989, 0.0000, 0.9998],
85 [0.9996, 0.9998, 0.0000]])
86
87 """
88 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
89 return _reduce_distance_matrix(distance, reduction)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchmetrics/functional/pairwise/cosine.py b/torchmetrics/functional/pairwise/cosine.py
--- a/torchmetrics/functional/pairwise/cosine.py
+++ b/torchmetrics/functional/pairwise/cosine.py
@@ -20,6 +20,16 @@
from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
+def _safe_matmul(x: Tensor, y: Tensor) -> Tensor:
+ """Safe calculation of matrix multiplication.
+
+ If input is float16, will cast to float32 for computation and back again.
+ """
+ if x.dtype == torch.float16 or y.dtype == torch.float16:
+ return (x.float() @ y.T.float()).half()
+ return x @ y.T
+
+
def _pairwise_cosine_similarity_update(
x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
) -> Tensor:
@@ -37,7 +47,7 @@
norm = torch.norm(y, p=2, dim=1)
y /= norm.unsqueeze(1)
- distance = x @ y.T
+ distance = _safe_matmul(x, y)
if zero_diagonal:
distance.fill_diagonal_(0)
return distance
| {"golden_diff": "diff --git a/torchmetrics/functional/pairwise/cosine.py b/torchmetrics/functional/pairwise/cosine.py\n--- a/torchmetrics/functional/pairwise/cosine.py\n+++ b/torchmetrics/functional/pairwise/cosine.py\n@@ -20,6 +20,16 @@\n from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n \n \n+def _safe_matmul(x: Tensor, y: Tensor) -> Tensor:\n+ \"\"\"Safe calculation of matrix multiplication.\n+\n+ If input is float16, will cast to float32 for computation and back again.\n+ \"\"\"\n+ if x.dtype == torch.float16 or y.dtype == torch.float16:\n+ return (x.float() @ y.T.float()).half()\n+ return x @ y.T\n+\n+\n def _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n ) -> Tensor:\n@@ -37,7 +47,7 @@\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n \n- distance = x @ y.T\n+ distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n", "issue": "Compatibility with future PT 1.12\n## \ud83d\udc1b Bug\r\n\r\n```\r\n> distance = x @ y.T\r\nE RuntimeError: \"addmm_impl_cpu_\" not implemented for 'Half'\r\n```\r\n\r\n### To Reproduce\r\n\r\nhttps://github.com/PyTorchLightning/metrics/runs/6275393755?check_suite_focus=true\r\n\r\n#### Code sample\r\n\r\n```py\r\ndistance = x @ y.T\r\n```\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- OS (e.g., Linux): linux\r\n- Python & PyTorch Version (e.g., 1.0): py3.8 & pt1.12\r\n\r\n### Additional context\r\n\r\ncontext see https://github.com/pytorch/pytorch/pull/75647\r\nslack: https://pytorch.slack.com/archives/C3PDTEV8E/p1651742487294399\r\n\r\ntl;dr: We don't have correct and fast linalg algorithms for half in CPU as none of the backend libraries that we use support them so it's better not to support these via half-baked implementations, and simply make the users cast their inputs if they want to use these methods\r\n\r\n> but for GPU it shall be supported, correct? >> Yes\r\n\r\nnote as well that this just applies to Half on CPU and for linalg functios (think matmul and conv). 
Pointwise functions on Half on CPU will still be available, and Half on CUDA will still have full support\r\n\r\n> it was implemented up till 1.11 but there was no real speed-up, correct?\r\n\r\nNot only it was slower, but it was not numerically stable, so it was pretty much a bug (hence the removal without deprecation)\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = x @ y.T\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "torchmetrics/functional/pairwise/cosine.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\n\n\ndef _safe_matmul(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Safe calculation of matrix multiplication.\n\n If input is float16, will cast to float32 for computation and back again.\n \"\"\"\n if x.dtype == torch.float16 or y.dtype == torch.float16:\n return (x.float() @ y.T.float()).half()\n return x @ y.T\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. 
Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "torchmetrics/functional/pairwise/cosine.py"}]} | 1,811 | 298 |
gh_patches_debug_37251 | rasdani/github-patches | git_diff | databricks__koalas-189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document all the methods in Metadata
There are a bunch of methods like index_info, index_fields. It's pretty difficult to figure out what they do. We should just add some basic docstring comments.
@ueshin you are probably the best person to take this since you created the file.
--- END ISSUE ---
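For a concrete picture of the "basic docstring comments" being asked for, here is a trimmed, self-contained sketch; the wording is only a suggestion and simply echoes the style used in the patch shown below.

```python
# Minimal sketch: one-line docstrings on the index-related properties.
class Metadata(object):
    def __init__(self, column_fields, index_info=None):
        self._column_fields = column_fields
        self._index_info = index_info or []

    @property
    def index_fields(self):
        """ Returns the managed index field names. """
        return [index_field for index_field, _ in self._index_info]

    @property
    def index_names(self):
        """ Return the managed index names. """
        return [name for _, name in self._index_info]
```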
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `databricks/koalas/metadata.py`
Content:
```
1 #
2 # Copyright (C) 2019 Databricks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 """
18 A metadata to manage indexes.
19 """
20 import pandas as pd
21
22 from databricks.koalas.dask.compatibility import string_types
23
24
25 class Metadata(object):
26 """
27 Manages column names and index information
28 """
29
30 def __init__(self, column_fields, index_info=None):
31 """ Create a new metadata to manage column fields and index fields and names.
32
33 :param column_fields: list of string
34 Field names to appear as columns.
35 :param index_info: list of string pair
36 Each pair holds the index field name which exists in Spark fields,
37 and the index name.
38 """
39 assert all(isinstance(col, string_types) for col in column_fields)
40 assert index_info is None \
41 or all(isinstance(index_field, string_types)
42 and (index_name is None or isinstance(index_name, string_types))
43 for index_field, index_name in index_info)
44 self._column_fields = column_fields
45 self._index_info = index_info or []
46
47 @property
48 def column_fields(self):
49 return self._column_fields
50
51 @property
52 def index_info(self):
53 return self._index_info
54
55 @property
56 def index_fields(self):
57 return [index_field for index_field, _ in self._index_info]
58
59 @property
60 def index_names(self):
61 return [name for _, name in self._index_info]
62
63 @property
64 def all_fields(self):
65 index_fields = self.index_fields
66 return index_fields + [field for field in self._column_fields
67 if field not in index_fields]
68
69 def copy(self, column_fields=None, index_info=None):
70 if column_fields is None:
71 column_fields = self._column_fields
72 if index_info is None:
73 index_info = self._index_info
74 return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy())
75
76 @staticmethod
77 def from_pandas(pdf):
78 column_fields = [str(col) for col in pdf.columns]
79 index = pdf.index
80 if isinstance(index, pd.MultiIndex):
81 if index.names is None:
82 index_info = [('__index_level_{}__'.format(i), None)
83 for i in range(len(index.levels))]
84 else:
85 index_info = [('__index_level_{}__'.format(i) if name is None else name, name)
86 for i, name in enumerate(index.names)]
87 else:
88 index_info = [(index.name
89 if index.name is not None else '__index_level_0__', index.name)]
90
91 return Metadata(column_fields=column_fields, index_info=index_info)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/databricks/koalas/metadata.py b/databricks/koalas/metadata.py
--- a/databricks/koalas/metadata.py
+++ b/databricks/koalas/metadata.py
@@ -24,7 +24,11 @@
class Metadata(object):
"""
- Manages column names and index information
+ Manages column names and index information.
+
+ :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame.
+ :ivar _index_info: list of pair holding the Spark field names for indexes,
+ and the index name to be seen in Koalas DataFrame.
"""
def __init__(self, column_fields, index_info=None):
@@ -46,27 +50,38 @@
@property
def column_fields(self):
+ """ Returns the managed column field names. """
return self._column_fields
@property
def index_info(self):
+ """ Return the managed index information. """
return self._index_info
@property
def index_fields(self):
+ """ Returns the managed index field names. """
return [index_field for index_field, _ in self._index_info]
@property
def index_names(self):
+ """ Return the managed index names. """
return [name for _, name in self._index_info]
@property
def all_fields(self):
+ """ Return all the field names including index field names. """
index_fields = self.index_fields
return index_fields + [field for field in self._column_fields
if field not in index_fields]
def copy(self, column_fields=None, index_info=None):
+ """ Copy the metadata.
+
+ :param column_fields: the new column field names. If None, then the original ones are used.
+ :param index_info: the new index information. If None, then the original one is used.
+ :return: the copied metadata.
+ """
if column_fields is None:
column_fields = self._column_fields
if index_info is None:
@@ -75,6 +90,11 @@
@staticmethod
def from_pandas(pdf):
+ """ Create a metadata from pandas DataFrame.
+
+ :param pdf: :class:`pd.DataFrame`
+ :return: the created metadata
+ """
column_fields = [str(col) for col in pdf.columns]
index = pdf.index
if isinstance(index, pd.MultiIndex):
| {"golden_diff": "diff --git a/databricks/koalas/metadata.py b/databricks/koalas/metadata.py\n--- a/databricks/koalas/metadata.py\n+++ b/databricks/koalas/metadata.py\n@@ -24,7 +24,11 @@\n \n class Metadata(object):\n \"\"\"\n- Manages column names and index information\n+ Manages column names and index information.\n+\n+ :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame.\n+ :ivar _index_info: list of pair holding the Spark field names for indexes,\n+ and the index name to be seen in Koalas DataFrame.\n \"\"\"\n \n def __init__(self, column_fields, index_info=None):\n@@ -46,27 +50,38 @@\n \n @property\n def column_fields(self):\n+ \"\"\" Returns the managed column field names. \"\"\"\n return self._column_fields\n \n @property\n def index_info(self):\n+ \"\"\" Return the managed index information. \"\"\"\n return self._index_info\n \n @property\n def index_fields(self):\n+ \"\"\" Returns the managed index field names. \"\"\"\n return [index_field for index_field, _ in self._index_info]\n \n @property\n def index_names(self):\n+ \"\"\" Return the managed index names. \"\"\"\n return [name for _, name in self._index_info]\n \n @property\n def all_fields(self):\n+ \"\"\" Return all the field names including index field names. \"\"\"\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n \n def copy(self, column_fields=None, index_info=None):\n+ \"\"\" Copy the metadata.\n+\n+ :param column_fields: the new column field names. If None, then the original ones are used.\n+ :param index_info: the new index information. If None, then the original one is used.\n+ :return: the copied metadata.\n+ \"\"\"\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n@@ -75,6 +90,11 @@\n \n @staticmethod\n def from_pandas(pdf):\n+ \"\"\" Create a metadata from pandas DataFrame.\n+\n+ :param pdf: :class:`pd.DataFrame`\n+ :return: the created metadata\n+ \"\"\"\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n", "issue": "Document all the methods in Metadata\nThere are a bunch of methods like index_info, index_fields. It's pretty difficult to figure out what they do. 
We should just add some basic docstring comments.\r\n\r\n@ueshin you are probably the best person to take this since you created the file.\r\n\n", "before_files": [{"content": "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA metadata to manage indexes.\n\"\"\"\nimport pandas as pd\n\nfrom databricks.koalas.dask.compatibility import string_types\n\n\nclass Metadata(object):\n \"\"\"\n Manages column names and index information\n \"\"\"\n\n def __init__(self, column_fields, index_info=None):\n \"\"\" Create a new metadata to manage column fields and index fields and names.\n\n :param column_fields: list of string\n Field names to appear as columns.\n :param index_info: list of string pair\n Each pair holds the index field name which exists in Spark fields,\n and the index name.\n \"\"\"\n assert all(isinstance(col, string_types) for col in column_fields)\n assert index_info is None \\\n or all(isinstance(index_field, string_types)\n and (index_name is None or isinstance(index_name, string_types))\n for index_field, index_name in index_info)\n self._column_fields = column_fields\n self._index_info = index_info or []\n\n @property\n def column_fields(self):\n return self._column_fields\n\n @property\n def index_info(self):\n return self._index_info\n\n @property\n def index_fields(self):\n return [index_field for index_field, _ in self._index_info]\n\n @property\n def index_names(self):\n return [name for _, name in self._index_info]\n\n @property\n def all_fields(self):\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n\n def copy(self, column_fields=None, index_info=None):\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n index_info = self._index_info\n return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy())\n\n @staticmethod\n def from_pandas(pdf):\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n if index.names is None:\n index_info = [('__index_level_{}__'.format(i), None)\n for i in range(len(index.levels))]\n else:\n index_info = [('__index_level_{}__'.format(i) if name is None else name, name)\n for i, name in enumerate(index.names)]\n else:\n index_info = [(index.name\n if index.name is not None else '__index_level_0__', index.name)]\n\n return Metadata(column_fields=column_fields, index_info=index_info)\n", "path": "databricks/koalas/metadata.py"}], "after_files": [{"content": "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nA metadata to manage indexes.\n\"\"\"\nimport pandas as pd\n\nfrom databricks.koalas.dask.compatibility import string_types\n\n\nclass Metadata(object):\n \"\"\"\n Manages column names and index information.\n\n :ivar _column_fields: list of the Spark field names to be seen as columns in Koalas DataFrame.\n :ivar _index_info: list of pair holding the Spark field names for indexes,\n and the index name to be seen in Koalas DataFrame.\n \"\"\"\n\n def __init__(self, column_fields, index_info=None):\n \"\"\" Create a new metadata to manage column fields and index fields and names.\n\n :param column_fields: list of string\n Field names to appear as columns.\n :param index_info: list of string pair\n Each pair holds the index field name which exists in Spark fields,\n and the index name.\n \"\"\"\n assert all(isinstance(col, string_types) for col in column_fields)\n assert index_info is None \\\n or all(isinstance(index_field, string_types)\n and (index_name is None or isinstance(index_name, string_types))\n for index_field, index_name in index_info)\n self._column_fields = column_fields\n self._index_info = index_info or []\n\n @property\n def column_fields(self):\n \"\"\" Returns the managed column field names. \"\"\"\n return self._column_fields\n\n @property\n def index_info(self):\n \"\"\" Return the managed index information. \"\"\"\n return self._index_info\n\n @property\n def index_fields(self):\n \"\"\" Returns the managed index field names. \"\"\"\n return [index_field for index_field, _ in self._index_info]\n\n @property\n def index_names(self):\n \"\"\" Return the managed index names. \"\"\"\n return [name for _, name in self._index_info]\n\n @property\n def all_fields(self):\n \"\"\" Return all the field names including index field names. \"\"\"\n index_fields = self.index_fields\n return index_fields + [field for field in self._column_fields\n if field not in index_fields]\n\n def copy(self, column_fields=None, index_info=None):\n \"\"\" Copy the metadata.\n\n :param column_fields: the new column field names. If None, then the original ones are used.\n :param index_info: the new index information. If None, then the original one is used.\n :return: the copied metadata.\n \"\"\"\n if column_fields is None:\n column_fields = self._column_fields\n if index_info is None:\n index_info = self._index_info\n return Metadata(column_fields=column_fields.copy(), index_info=index_info.copy())\n\n @staticmethod\n def from_pandas(pdf):\n \"\"\" Create a metadata from pandas DataFrame.\n\n :param pdf: :class:`pd.DataFrame`\n :return: the created metadata\n \"\"\"\n column_fields = [str(col) for col in pdf.columns]\n index = pdf.index\n if isinstance(index, pd.MultiIndex):\n if index.names is None:\n index_info = [('__index_level_{}__'.format(i), None)\n for i in range(len(index.levels))]\n else:\n index_info = [('__index_level_{}__'.format(i) if name is None else name, name)\n for i, name in enumerate(index.names)]\n else:\n index_info = [(index.name\n if index.name is not None else '__index_level_0__', index.name)]\n\n return Metadata(column_fields=column_fields, index_info=index_info)\n", "path": "databricks/koalas/metadata.py"}]} | 1,192 | 557 |
gh_patches_debug_5870 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wait for new cryptography release
For the next mitmproxy release, we want to recommend Python 3.5 by default. This requires a cryptography release which includes https://github.com/pyca/cryptography/pull/3063.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from codecs import open
3 import os
4
5 from netlib import version
6
7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
13 long_description = f.read()
14
15 setup(
16 name="mitmproxy",
17 version=version.VERSION,
18 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
19 long_description=long_description,
20 url="http://mitmproxy.org",
21 author="Aldo Cortesi",
22 author_email="[email protected]",
23 license="MIT",
24 classifiers=[
25 "License :: OSI Approved :: MIT License",
26 "Development Status :: 5 - Production/Stable",
27 "Environment :: Console",
28 "Environment :: Console :: Curses",
29 "Operating System :: MacOS :: MacOS X",
30 "Operating System :: POSIX",
31 "Operating System :: Microsoft :: Windows",
32 "Programming Language :: Python",
33 "Programming Language :: Python :: 2",
34 "Programming Language :: Python :: 2.7",
35 "Programming Language :: Python :: 3",
36 "Programming Language :: Python :: 3.5",
37 "Programming Language :: Python :: Implementation :: CPython",
38 "Programming Language :: Python :: Implementation :: PyPy",
39 "Topic :: Security",
40 "Topic :: Internet",
41 "Topic :: Internet :: WWW/HTTP",
42 "Topic :: Internet :: Proxy Servers",
43 "Topic :: Software Development :: Testing"
44 ],
45 packages=find_packages(include=[
46 "mitmproxy", "mitmproxy.*",
47 "pathod", "pathod.*",
48 "netlib", "netlib.*"
49 ]),
50 include_package_data=True,
51 entry_points={
52 'console_scripts': [
53 "mitmproxy = mitmproxy.main:mitmproxy",
54 "mitmdump = mitmproxy.main:mitmdump",
55 "mitmweb = mitmproxy.main:mitmweb",
56 "pathod = pathod.pathod_cmdline:go_pathod",
57 "pathoc = pathod.pathoc_cmdline:go_pathoc"
58 ]
59 },
60 # https://packaging.python.org/en/latest/requirements/#install-requires
61 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
62 install_requires=[
63 "backports.ssl_match_hostname>=3.5.0.1, <3.6",
64 "blinker>=1.4, <1.5",
65 "click>=6.2, <7.0",
66 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
67 "configargparse>=0.10, <0.11",
68 "construct>=2.5.2, <2.6",
69 "cryptography>=1.3, <1.5",
70 "cssutils>=1.0.1, <1.1",
71 "Flask>=0.10.1, <0.12",
72 "h2>=2.4.1, <3",
73 "html2text>=2016.1.8, <=2016.5.29",
74 "hyperframe>=4.0.1, <5",
75 "jsbeautifier>=1.6.3, <1.7",
76 "lxml>=3.5.0, <=3.6.0", # no wheels for 3.6.1 yet.
77 "Pillow>=3.2, <3.4",
78 "passlib>=1.6.5, <1.7",
79 "pyasn1>=0.1.9, <0.2",
80 "pyOpenSSL>=16.0, <17.0",
81 "pyparsing>=2.1.3, <2.2",
82 "pyperclip>=1.5.22, <1.6",
83 "requests>=2.9.1, <2.12",
84 "six>=1.10, <1.11",
85 "tornado>=4.3, <4.5",
86 "urwid>=1.3.1, <1.4",
87 "watchdog>=0.8.3, <0.9",
88 "brotlipy>=0.3.0, <0.5",
89 ],
90 extras_require={
91 ':sys_platform == "win32"': [
92 "pydivert>=0.0.7, <0.1",
93 ],
94 ':sys_platform != "win32"': [
95 ],
96 # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380
97 # Ubuntu Trusty and other still ship with setuptools < 17.1
98 ':python_version == "2.7"': [
99 "enum34>=1.0.4, <2",
100 "ipaddress>=1.0.15, <1.1",
101 "typing==3.5.2.2",
102 ],
103 'dev': [
104 "tox>=2.3, <3",
105 "mock>=2.0, <2.1",
106 "pytest>=2.8.7, <3",
107 "pytest-cov>=2.2.1, <3",
108 "pytest-timeout>=1.0.0, <2",
109 "pytest-xdist>=1.14, <2",
110 "sphinx>=1.3.5, <1.5",
111 "sphinx-autobuild>=0.5.2, <0.7",
112 "sphinxcontrib-documentedlist>=0.4.0, <0.5",
113 "sphinx_rtd_theme>=0.1.9, <0.2",
114 ],
115 'contentviews': [
116 # TODO: Find Python 3 replacements
117 # "protobuf>=2.6.1, <2.7",
118 # "pyamf>=0.8.0, <0.9",
119 ],
120 'examples': [
121 "beautifulsoup4>=4.4.1, <4.6",
122 "pytz>=2015.07.0, <=2016.6.1",
123 ]
124 }
125 )
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
"certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
"configargparse>=0.10, <0.11",
"construct>=2.5.2, <2.6",
- "cryptography>=1.3, <1.5",
+ "cryptography>=1.3, <1.6",
"cssutils>=1.0.1, <1.1",
"Flask>=0.10.1, <0.12",
"h2>=2.4.1, <3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n- \"cryptography>=1.3, <1.5\",\n+ \"cryptography>=1.3, <1.6\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n", "issue": "Wait for new cryptography release\nFor the next mitmproxy release, we want to recommend Python 3.5 by default. This requires a cryptography release which includes https://github.com/pyca/cryptography/pull/3063.\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom codecs import open\nimport os\n\nfrom netlib import version\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"mitmproxy\",\n version=version.VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n \"netlib\", \"netlib.*\"\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.main:mitmproxy\",\n \"mitmdump = mitmproxy.main:mitmdump\",\n \"mitmweb = mitmproxy.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"backports.ssl_match_hostname>=3.5.0.1, <3.6\",\n \"blinker>=1.4, <1.5\",\n \"click>=6.2, <7.0\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n \"cryptography>=1.3, <1.5\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n \"html2text>=2016.1.8, <=2016.5.29\",\n \"hyperframe>=4.0.1, <5\",\n \"jsbeautifier>=1.6.3, <1.7\",\n \"lxml>=3.5.0, <=3.6.0\", # no wheels for 3.6.1 yet.\n \"Pillow>=3.2, <3.4\",\n \"passlib>=1.6.5, <1.7\",\n \"pyasn1>=0.1.9, <0.2\",\n \"pyOpenSSL>=16.0, <17.0\",\n \"pyparsing>=2.1.3, <2.2\",\n \"pyperclip>=1.5.22, 
<1.6\",\n \"requests>=2.9.1, <2.12\",\n \"six>=1.10, <1.11\",\n \"tornado>=4.3, <4.5\",\n \"urwid>=1.3.1, <1.4\",\n \"watchdog>=0.8.3, <0.9\",\n \"brotlipy>=0.3.0, <0.5\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=0.0.7, <0.1\",\n ],\n ':sys_platform != \"win32\"': [\n ],\n # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380\n # Ubuntu Trusty and other still ship with setuptools < 17.1\n ':python_version == \"2.7\"': [\n \"enum34>=1.0.4, <2\",\n \"ipaddress>=1.0.15, <1.1\",\n \"typing==3.5.2.2\",\n ],\n 'dev': [\n \"tox>=2.3, <3\",\n \"mock>=2.0, <2.1\",\n \"pytest>=2.8.7, <3\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"sphinx>=1.3.5, <1.5\",\n \"sphinx-autobuild>=0.5.2, <0.7\",\n \"sphinxcontrib-documentedlist>=0.4.0, <0.5\",\n \"sphinx_rtd_theme>=0.1.9, <0.2\",\n ],\n 'contentviews': [\n # TODO: Find Python 3 replacements\n # \"protobuf>=2.6.1, <2.7\",\n # \"pyamf>=0.8.0, <0.9\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.6\",\n \"pytz>=2015.07.0, <=2016.6.1\",\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom codecs import open\nimport os\n\nfrom netlib import version\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"mitmproxy\",\n version=version.VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n \"netlib\", \"netlib.*\"\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.main:mitmproxy\",\n \"mitmdump = mitmproxy.main:mitmdump\",\n \"mitmweb = mitmproxy.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"backports.ssl_match_hostname>=3.5.0.1, <3.6\",\n \"blinker>=1.4, <1.5\",\n \"click>=6.2, <7.0\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last 
release!\n \"configargparse>=0.10, <0.11\",\n \"construct>=2.5.2, <2.6\",\n \"cryptography>=1.3, <1.6\",\n \"cssutils>=1.0.1, <1.1\",\n \"Flask>=0.10.1, <0.12\",\n \"h2>=2.4.1, <3\",\n \"html2text>=2016.1.8, <=2016.5.29\",\n \"hyperframe>=4.0.1, <5\",\n \"jsbeautifier>=1.6.3, <1.7\",\n \"lxml>=3.5.0, <=3.6.0\", # no wheels for 3.6.1 yet.\n \"Pillow>=3.2, <3.4\",\n \"passlib>=1.6.5, <1.7\",\n \"pyasn1>=0.1.9, <0.2\",\n \"pyOpenSSL>=16.0, <17.0\",\n \"pyparsing>=2.1.3, <2.2\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <2.12\",\n \"six>=1.10, <1.11\",\n \"tornado>=4.3, <4.5\",\n \"urwid>=1.3.1, <1.4\",\n \"watchdog>=0.8.3, <0.9\",\n \"brotlipy>=0.3.0, <0.5\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=0.0.7, <0.1\",\n ],\n ':sys_platform != \"win32\"': [\n ],\n # Do not use a range operator here: https://bitbucket.org/pypa/setuptools/issues/380\n # Ubuntu Trusty and other still ship with setuptools < 17.1\n ':python_version == \"2.7\"': [\n \"enum34>=1.0.4, <2\",\n \"ipaddress>=1.0.15, <1.1\",\n \"typing==3.5.2.2\",\n ],\n 'dev': [\n \"tox>=2.3, <3\",\n \"mock>=2.0, <2.1\",\n \"pytest>=2.8.7, <3\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"sphinx>=1.3.5, <1.5\",\n \"sphinx-autobuild>=0.5.2, <0.7\",\n \"sphinxcontrib-documentedlist>=0.4.0, <0.5\",\n \"sphinx_rtd_theme>=0.1.9, <0.2\",\n ],\n 'contentviews': [\n # TODO: Find Python 3 replacements\n # \"protobuf>=2.6.1, <2.7\",\n # \"pyamf>=0.8.0, <0.9\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.6\",\n \"pytz>=2015.07.0, <=2016.6.1\",\n ]\n }\n)\n", "path": "setup.py"}]} | 1,990 | 176 |
gh_patches_debug_63301 | rasdani/github-patches | git_diff | scikit-hep__pyhf-372 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update tensorflow-probability to the next release that includes continuous approximations
# Description
This is a follow up to #302. As the bug is fixed in upstream tensorflow-probability, we just need to wait for a new release to be shipped.
This bug was because of a change in the API to get rid of the continuous approximation to the Poisson pmf which broke our tests.
### Describe the solution you'd like
Unfix tensorflow-probability to `0.3.0` and bump to the next available release post-0.4.0.
Update Tensorflow to TensorFlow 1.12.0 release
# Description
[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably
> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead.
Once there is a new release of TensorFlow Probability (`v0.5.0`; cf. Issues #360 and #330) that upgrades to `v1.12.0`, we can follow them in upgrading.
--- END ISSUE ---
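As one illustration of the breaking change mentioned above (the removal of `tf.contrib.linalg`), the snippet below uses an operator that moved to `tf.linalg`; the particular operator is chosen only for demonstration, since the issue does not say which symbols are actually used.

```python
# Assumes TensorFlow >= 1.12; pre-1.12 code would have reached the same operator
# via tf.contrib.linalg.LinearOperatorDiag, which no longer exists.
import tensorflow as tf

operator = tf.linalg.LinearOperatorDiag([1.0, 2.0, 3.0])
dense = operator.to_dense()  # tensor holding the 3x3 diagonal matrix
```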
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow<1.12.0,>=1.10.0',
16 'tensorflow-probability==0.3.0',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch>=0.4.0'],
21 'mxnet': [
22 'mxnet>=1.0.0',
23 'requests<2.19.0,>=2.18.4',
24 'numpy<1.15.0,>=1.8.2',
25 'requests<2.19.0,>=2.18.4',
26 ],
27 # 'dask': [
28 # 'dask[array]'
29 # ],
30 'xmlimport': ['uproot'],
31 'minuit': ['iminuit'],
32 'develop': [
33 'pyflakes',
34 'pytest<4.0.0,>=3.5.1',
35 'pytest-cov>=2.5.1',
36 'pytest-mock',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'python-coveralls',
40 'coverage>=4.0', # coveralls
41 'matplotlib',
42 'jupyter',
43 'nbdime',
44 'uproot>=3.0.0',
45 'papermill>=0.16.0',
46 'graphviz',
47 'bumpversion',
48 'sphinx',
49 'sphinxcontrib-bibtex',
50 'sphinxcontrib-napoleon',
51 'sphinx_rtd_theme',
52 'nbsphinx',
53 'sphinx-issues',
54 'm2r',
55 'jsonpatch',
56 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
57 'pre-commit',
58 'black;python_version>="3.6"', # Black is Python3 only
59 'twine',
60 ],
61 }
62 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
63
64 setup(
65 name='pyhf',
66 version='0.0.15',
67 description='(partial) pure python histfactory implementation',
68 long_description=long_description,
69 long_description_content_type='text/markdown',
70 url='https://github.com/diana-hep/pyhf',
71 author='Lukas Heinrich',
72 author_email='[email protected]',
73 license='Apache',
74 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
75 classifiers=[
76 "Programming Language :: Python :: 2",
77 "Programming Language :: Python :: 2.7",
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.6",
80 ],
81 packages=find_packages(),
82 include_package_data=True,
83 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
84 install_requires=[
85 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
86 'click>=6.0', # for console scripts,
87 'tqdm', # for readxml
88 'six', # for modifiers
89 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
90 'jsonpatch',
91 ],
92 extras_require=extras_require,
93 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
94 dependency_links=[],
95 )
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,8 +12,8 @@
extras_require = {
'tensorflow': [
- 'tensorflow<1.12.0,>=1.10.0',
- 'tensorflow-probability==0.3.0',
+ 'tensorflow>=1.12.0',
+ 'tensorflow-probability>=0.5.0',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,8 +12,8 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow<1.12.0,>=1.10.0',\n- 'tensorflow-probability==0.3.0',\n+ 'tensorflow>=1.12.0',\n+ 'tensorflow-probability>=0.5.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n", "issue": "Update tensorflow-probability to the next release that includes continuous approximations\n# Description\r\n\r\nThis is a follow up to #302. As the bug is fixed in upstream tensorflow-probability, we just need to wait for a new release to be shipped.\r\n\r\nThis bug was because of a change in the API to get rid of the continuous approximation to the Poisson pmf which broke our tests.\r\n\r\n### Describe the solution you'd like\r\n\r\nUnfix tensorflow-probability to `0.3.0` and bump to the next available release post-0.4.0.\nUpdate Tensorflow to TensorFlow 1.12.0 release\n# Description\r\n\r\n[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably\r\n\r\n> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead. \r\n\r\nOnce there is a new release of TensorFlow probability (`v0.5.0` — c.f. Issue #360 and #330) that upgrades to `v1.12.0` then we can follow them in upgrading.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow<1.12.0,>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest<4.0.0,>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill>=0.16.0',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n 
],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.12.0',\n 'tensorflow-probability>=0.5.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest<4.0.0,>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill>=0.16.0',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 1,587 | 161 |
gh_patches_debug_14775 | rasdani/github-patches | git_diff | hylang__hy-1122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hy sets are really broken.
``` Hy
(env-hy) C:\Users\ME\Code>hy
hy 0.11.0 using CPython(v3.4.3:9b73f1c3e601) 3.4.3 on Windows
=> #{:a 'a}
Traceback (most recent call last):
File "C:\Python34\Scripts\env-hy\Scripts\hy-script.py", line 9, in <module>
load_entry_point('hy==0.11.0', 'console_scripts', 'hy')()
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 341, in hy_main
sys.exit(cmdline_handler("hy", sys.argv))
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 336, in cmdline_handler
return run_repl(spy=options.spy)
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 234, in run_repl
os=platform.system()
File "C:\Python34\Lib\code.py", line 234, in interact
more = self.push(line)
File "C:\Python34\Lib\code.py", line 256, in push
more = self.runsource(source, self.filename)
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\cmdline.py", line 93, in runsource
tokens = tokenize(source)
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\__init__.py", line 33, in tokenize
return parser.parse(lexer.lex(buf))
File "C:\Python34\Scripts\env-hy\lib\site-packages\rply\parser.py", line 23, in parse
t, symstack, statestack, state
File "C:\Python34\Scripts\env-hy\lib\site-packages\rply\parser.py", line 80, in _reduce_production
value = p.func(targ)
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\parser.py", line 69, in wrapped
ret = fun(p)
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\lex\parser.py", line 214, in t_set
return HySet(p[1])
File "C:\Python34\Scripts\env-hy\lib\site-packages\hy\models\set.py", line 31, in __init__
items = sorted(items)
TypeError: unorderable types: HyExpression() < HyKeyword()
(env-hy) C:\Users\ME\Code>
```
That is NOT supposed to happen.
--- END ISSUE ---
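For context, the traceback above is Python 3 refusing to order values of unrelated types, which the old `HySet` constructor forced by calling `sorted()`. A minimal sketch in plain Python (not Hy internals) of the failure and of an order-preserving de-duplication that avoids sorting entirely:
```python
items = [{"a": 1}, "b", ("c",)]

try:
    sorted(items)  # Python 3: TypeError, unrelated types are not orderable
except TypeError as exc:
    print(exc)

def dedup(values):
    """Drop duplicates while keeping the first occurrence of each value."""
    seen = []
    for value in values:
        if value not in seen:
            seen.append(value)
    return seen

print(dedup(["a", "a", 1, "a", 1]))  # ['a', 1]
```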
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/models/set.py`
Content:
```
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from hy.models.list import HyList
22 from functools import reduce
23
24
25 class HySet(HyList):
26 """
27 Hy set (actually a list that pretends to be a set)
28 """
29
30 def __init__(self, items):
31 items = sorted(items)
32 items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))
33 super(HySet, self).__init__(items)
34
35 def __repr__(self):
36 return "#{%s}" % (" ".join([repr(x) for x in self]))
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hy/models/set.py b/hy/models/set.py
--- a/hy/models/set.py
+++ b/hy/models/set.py
@@ -18,19 +18,16 @@
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
+from hy.models import _wrappers, wrap_value
from hy.models.list import HyList
-from functools import reduce
class HySet(HyList):
"""
- Hy set (actually a list that pretends to be a set)
+ Hy set (just a representation of a set)
"""
- def __init__(self, items):
- items = sorted(items)
- items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))
- super(HySet, self).__init__(items)
-
def __repr__(self):
return "#{%s}" % (" ".join([repr(x) for x in self]))
+
+_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)
| {"golden_diff": "diff --git a/hy/models/set.py b/hy/models/set.py\n--- a/hy/models/set.py\n+++ b/hy/models/set.py\n@@ -18,19 +18,16 @@\n # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n # DEALINGS IN THE SOFTWARE.\n \n+from hy.models import _wrappers, wrap_value\n from hy.models.list import HyList\n-from functools import reduce\n \n \n class HySet(HyList):\n \"\"\"\n- Hy set (actually a list that pretends to be a set)\n+ Hy set (just a representation of a set)\n \"\"\"\n \n- def __init__(self, items):\n- items = sorted(items)\n- items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))\n- super(HySet, self).__init__(items)\n-\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n+\n+_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)\n", "issue": "Hy sets are really broken.\n``` Hy\n(env-hy) C:\\Users\\ME\\Code>hy\nhy 0.11.0 using CPython(v3.4.3:9b73f1c3e601) 3.4.3 on Windows\n=> #{:a 'a}\nTraceback (most recent call last):\n File \"C:\\Python34\\Scripts\\env-hy\\Scripts\\hy-script.py\", line 9, in <module>\n load_entry_point('hy==0.11.0', 'console_scripts', 'hy')()\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 341, in hy_main\n sys.exit(cmdline_handler(\"hy\", sys.argv))\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 336, in cmdline_handler\n return run_repl(spy=options.spy)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 234, in run_repl\n os=platform.system()\n File \"C:\\Python34\\Lib\\code.py\", line 234, in interact\n more = self.push(line)\n File \"C:\\Python34\\Lib\\code.py\", line 256, in push\n more = self.runsource(source, self.filename)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\cmdline.py\", line 93, in runsource\n tokens = tokenize(source)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\__init__.py\", line 33, in tokenize\n return parser.parse(lexer.lex(buf))\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\rply\\parser.py\", line 23, in parse\n t, symstack, statestack, state\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\rply\\parser.py\", line 80, in _reduce_production\n value = p.func(targ)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\parser.py\", line 69, in wrapped\n ret = fun(p)\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\lex\\parser.py\", line 214, in t_set\n return HySet(p[1])\n File \"C:\\Python34\\Scripts\\env-hy\\lib\\site-packages\\hy\\models\\set.py\", line 31, in __init__\n items = sorted(items)\nTypeError: unorderable types: HyExpression() < HyKeyword()\n\n(env-hy) C:\\Users\\ME\\Code>\n```\n\nThat is NOT supposed to happen.\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.list import HyList\nfrom functools import reduce\n\n\nclass HySet(HyList):\n \"\"\"\n Hy set (actually a list that pretends to be a set)\n \"\"\"\n\n def __init__(self, items):\n items = sorted(items)\n items = list(reduce(lambda r, v: v in r and r or r+[v], items, []))\n super(HySet, self).__init__(items)\n\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n", "path": "hy/models/set.py"}], "after_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models import _wrappers, wrap_value\nfrom hy.models.list import HyList\n\n\nclass HySet(HyList):\n \"\"\"\n Hy set (just a representation of a set)\n \"\"\"\n\n def __repr__(self):\n return \"#{%s}\" % (\" \".join([repr(x) for x in self]))\n\n_wrappers[set] = lambda s: HySet(wrap_value(x) for x in s)\n", "path": "hy/models/set.py"}]} | 1,352 | 242 |
gh_patches_debug_1429 | rasdani/github-patches | git_diff | google__turbinia-785 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import TurbiniaException into partitions.py
```
Traceback (most recent call last):
File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/__init__.py", line 916, in run_wrapper
self.result = self.run(evidence, self.result)
File "PATH/v2/lib/python3.8/site-packages/turbinia/workers/partitions.py", line 144, in run
path_specs = partitions.Enumerate(evidence)
File "/PATH/v2/lib/python3.8/site-packages/turbinia/processors/partitions.py", line 49, in Enumerate
raise TurbiniaException(
NameError: name 'TurbiniaException' is not defined
2021-03-05 18:45:56 [ERROR] PartitionEnumerationTask Task failed with exception: [name 'TurbiniaException' is not defined]
```
--- END ISSUE ---
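The failure above is a plain missing-import bug: the `except` handler raises an exception class that was never imported into the module, so Python reports a `NameError` instead of the intended `TurbiniaException`. A small self-contained sketch of the pattern (all names below are stand-ins, not Turbinia code):
```python
class ProjectError(Exception):
    """Stand-in for TurbiniaException."""

def enumerate_partitions(path):
    try:
        raise RuntimeError("scanner failed")  # stand-in for dfvfs_errors.ScannerError
    except RuntimeError as exc:
        # This only works because ProjectError is defined in this module; in
        # partitions.py the equivalent is importing TurbiniaException first.
        raise ProjectError(
            "Could not enumerate partitions [{0!s}]: {1!s}".format("/dev/sda", exc))

try:
    enumerate_partitions("/dev/sda")
except ProjectError as err:
    print(err)
```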
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/processors/partitions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # https://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Evidence processor to enumerate partitions."""
16
17 import logging
18
19 from dfvfs.helpers import volume_scanner
20 from dfvfs.lib import definitions as dfvfs_definitions
21 from dfvfs.lib import errors as dfvfs_errors
22
23 from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator
24
25 log = logging.getLogger('turbinia')
26
27
28 def Enumerate(evidence):
29 """Uses dfVFS to enumerate partitions in a disk / image.
30
31 Args:
32 evidence: Evidence object to be scanned.
33
34 Raises:
35 TurbiniaException if source evidence can't be scanned.
36
37 Returns:
38 list[dfVFS.path_spec]: path specs for identified partitions
39 """
40 dfvfs_definitions.PREFERRED_GPT_BACK_END = (
41 dfvfs_definitions.TYPE_INDICATOR_GPT)
42 mediator = UnattendedVolumeScannerMediator()
43 mediator.credentials = evidence.credentials
44 path_specs = []
45 try:
46 scanner = volume_scanner.VolumeScanner(mediator=mediator)
47 path_specs = scanner.GetBasePathSpecs(evidence.local_path)
48 except dfvfs_errors.ScannerError as e:
49 raise TurbiniaException(
50 'Could not enumerate partitions [{0!s}]: {1!s}'.format(
51 evidence.local_path, e))
52
53 return path_specs
54
55
56 def GetPartitionEncryptionType(path_spec):
57 """Checks a partition for encryption.
58
59 Args:
60 path_spec (dfVFS.path_spec): Partition path_spec.
61
62 Returns:
63 String representing the type of encryption, or None.
64 """
65 encryption_type = None
66 if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
67 encryption_type = 'BDE'
68 return encryption_type
69
70
71 def GetPathSpecByLocation(path_specs, location):
72 """Finds a path_spec from a list of path_specs for a given location.
73
74 Args:
75 path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.
76 location (str): dfVFS location to search for.
77
78 Returns:
79 dfVFS.path_spec for the given location or None if not found.
80 """
81 for path_spec in path_specs:
82 child_path_spec = path_spec
83 fs_location = getattr(path_spec, 'location', None)
84 while path_spec.HasParent():
85 type_indicator = path_spec.type_indicator
86 if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
87 dfvfs_definitions.TYPE_INDICATOR_GPT):
88 if fs_location in ('\\', '/'):
89 fs_location = getattr(path_spec, 'location', None)
90 break
91 path_spec = path_spec.parent
92 if fs_location == location:
93 return child_path_spec
94 return None
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py
--- a/turbinia/processors/partitions.py
+++ b/turbinia/processors/partitions.py
@@ -21,6 +21,7 @@
from dfvfs.lib import errors as dfvfs_errors
from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator
+from turbinia import TurbiniaException
log = logging.getLogger('turbinia')
| {"golden_diff": "diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py\n--- a/turbinia/processors/partitions.py\n+++ b/turbinia/processors/partitions.py\n@@ -21,6 +21,7 @@\n from dfvfs.lib import errors as dfvfs_errors\n \n from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\n+from turbinia import TurbiniaException\n \n log = logging.getLogger('turbinia')\n", "issue": "import TurbiniaException to partitions.py\n```\r\nTraceback (most recent call last):\r\n File \"PATH/v2/lib/python3.8/site-packages/turbinia/workers/__init__.py\", line 916, in run_wrapper\r\n self.result = self.run(evidence, self.result)\r\n File \"PATH/v2/lib/python3.8/site-packages/turbinia/workers/partitions.py\", line 144, in run\r\n path_specs = partitions.Enumerate(evidence)\r\n File \"/PATH/v2/lib/python3.8/site-packages/turbinia/processors/partitions.py\", line 49, in Enumerate\r\n raise TurbiniaException(\r\nNameError: name 'TurbiniaException' is not defined\r\n\r\n2021-03-05 18:45:56 [ERROR] PartitionEnumerationTask Task failed with exception: [name 'TurbiniaException' is not defined]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to 
search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = 
getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}]} | 1,357 | 108 |
gh_patches_debug_25484 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-1259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DJStripeSubscriptionPermission issue returning bool
This permission does not properly return the bool.
**Current behaviour**
```python
class DJStripeSubscriptionPermission(BasePermission):
"""
A permission to be used when wanting to permit users with active subscriptions.
"""
def has_permission(self, request, view):
"""
Check if the subscriber has an active subscription.
Returns false if:
* a subscriber isn't passed through the request
See ``utils.subscriber_has_active_subscription`` for more rules.
"""
try:
subscriber_has_active_subscription(subscriber_request_callback(request))
except AttributeError:
return False
```
Here it does not return True or False unless it falls into the exception branch.
**Expected Behaviour**
```python
class DJStripeSubscriptionPermission(BasePermission):
"""
A permission to be used when wanting to permit users with active subscriptions.
"""
def has_permission(self, request, view):
"""
Check if the subscriber has an active subscription.
Returns false if:
* a subscriber isn't passed through the request
See ``utils.subscriber_has_active_subscription`` for more rules.
"""
try:
return bool(subscriber_has_active_subscription(subscriber_request_callback(request)))
except AttributeError:
return False
```
Just adding the missing return solves the problem. We don't strictly need the bool() there; I added it just to follow the same patterns as DRF (also being added to the other project :-))
--- END ISSUE ---
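A detail worth spelling out: in Python a function with no `return` statement returns `None`, and a permission check that returns `None` is treated as false by DRF's boolean check, so the buggy version denies access even for active subscribers. A minimal sketch of that difference:
```python
def has_permission_buggy(active=True):
    bool(active)  # value is computed and then discarded; implicit return None

def has_permission_fixed(active=True):
    return bool(active)

print(has_permission_buggy())  # None, treated as "deny" by a boolean check
print(has_permission_fixed())  # True
```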
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/contrib/rest_framework/serializers.py`
Content:
```
1 """
2 .. module:: dj-stripe.contrib.rest_framework.serializers.
3
4 :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API.
5
6 .. moduleauthor:: Philippe Luickx (@philippeluickx)
7
8 """
9
10 from rest_framework import serializers
11 from rest_framework.serializers import ModelSerializer
12
13 from djstripe.models import Subscription
14
15
16 class SubscriptionSerializer(ModelSerializer):
17 """A serializer used for the Subscription model."""
18
19 class Meta:
20 """Model class options."""
21
22 model = Subscription
23 exclude = ["default_tax_rates"]
24
25
26 class CreateSubscriptionSerializer(serializers.Serializer):
27 """A serializer used to create a Subscription."""
28
29 stripe_token = serializers.CharField(max_length=200)
30 plan = serializers.CharField(max_length=50)
31 charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)
32 tax_percent = serializers.DecimalField(
33 required=False, max_digits=5, decimal_places=2
34 )
35
```
Path: `djstripe/contrib/rest_framework/permissions.py`
Content:
```
1 """
2 .. module:: dj-stripe.contrib.rest_framework.permissions.
3
4 :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.
5
6 .. moduleauthor:: @kavdev, @pydanny
7
8 """
9 from rest_framework.permissions import BasePermission
10
11 from ...settings import subscriber_request_callback
12 from ...utils import subscriber_has_active_subscription
13
14
15 class DJStripeSubscriptionPermission(BasePermission):
16 """
17 A permission to be used when wanting to permit users with active subscriptions.
18 """
19
20 def has_permission(self, request, view):
21 """
22 Check if the subscriber has an active subscription.
23
24 Returns false if:
25 * a subscriber isn't passed through the request
26
27 See ``utils.subscriber_has_active_subscription`` for more rules.
28
29 """
30 try:
31 subscriber_has_active_subscription(subscriber_request_callback(request))
32 except AttributeError:
33 return False
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/djstripe/contrib/rest_framework/permissions.py b/djstripe/contrib/rest_framework/permissions.py
--- a/djstripe/contrib/rest_framework/permissions.py
+++ b/djstripe/contrib/rest_framework/permissions.py
@@ -17,7 +17,7 @@
A permission to be used when wanting to permit users with active subscriptions.
"""
- def has_permission(self, request, view):
+ def has_permission(self, request, view) -> bool:
"""
Check if the subscriber has an active subscription.
@@ -28,6 +28,8 @@
"""
try:
- subscriber_has_active_subscription(subscriber_request_callback(request))
+ return subscriber_has_active_subscription(
+ subscriber_request_callback(request)
+ )
except AttributeError:
return False
diff --git a/djstripe/contrib/rest_framework/serializers.py b/djstripe/contrib/rest_framework/serializers.py
--- a/djstripe/contrib/rest_framework/serializers.py
+++ b/djstripe/contrib/rest_framework/serializers.py
@@ -28,7 +28,9 @@
stripe_token = serializers.CharField(max_length=200)
plan = serializers.CharField(max_length=50)
- charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)
+ charge_immediately = serializers.BooleanField(
+ required=False, allow_null=True, default=None
+ )
tax_percent = serializers.DecimalField(
required=False, max_digits=5, decimal_places=2
)
| {"golden_diff": "diff --git a/djstripe/contrib/rest_framework/permissions.py b/djstripe/contrib/rest_framework/permissions.py\n--- a/djstripe/contrib/rest_framework/permissions.py\n+++ b/djstripe/contrib/rest_framework/permissions.py\n@@ -17,7 +17,7 @@\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n \n- def has_permission(self, request, view):\n+ def has_permission(self, request, view) -> bool:\n \"\"\"\n Check if the subscriber has an active subscription.\n \n@@ -28,6 +28,8 @@\n \n \"\"\"\n try:\n- subscriber_has_active_subscription(subscriber_request_callback(request))\n+ return subscriber_has_active_subscription(\n+ subscriber_request_callback(request)\n+ )\n except AttributeError:\n return False\ndiff --git a/djstripe/contrib/rest_framework/serializers.py b/djstripe/contrib/rest_framework/serializers.py\n--- a/djstripe/contrib/rest_framework/serializers.py\n+++ b/djstripe/contrib/rest_framework/serializers.py\n@@ -28,7 +28,9 @@\n \n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n- charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)\n+ charge_immediately = serializers.BooleanField(\n+ required=False, allow_null=True, default=None\n+ )\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "issue": "DJStripeSubscriptionPermission issue returning bool\nThis permission is not returning properly the bool.\r\n\r\n**Current behaviour**\r\n\r\n```python\r\nclass DJStripeSubscriptionPermission(BasePermission):\r\n \"\"\"\r\n A permission to be used when wanting to permit users with active subscriptions.\r\n \"\"\"\r\n\r\n def has_permission(self, request, view):\r\n \"\"\"\r\n Check if the subscriber has an active subscription.\r\n\r\n Returns false if:\r\n * a subscriber isn't passed through the request\r\n\r\n See ``utils.subscriber_has_active_subscription`` for more rules.\r\n\r\n \"\"\"\r\n try:\r\n subscriber_has_active_subscription(subscriber_request_callback(request))\r\n except AttributeError:\r\n return False\r\n```\r\n\r\nHere is not returning True or False except if it falls in the exception.\r\n\r\n\r\n**Expected Behaviour**\r\n\r\n\r\n```python\r\nclass DJStripeSubscriptionPermission(BasePermission):\r\n \"\"\"\r\n A permission to be used when wanting to permit users with active subscriptions.\r\n \"\"\"\r\n\r\n def has_permission(self, request, view):\r\n \"\"\"\r\n Check if the subscriber has an active subscription.\r\n\r\n Returns false if:\r\n * a subscriber isn't passed through the request\r\n\r\n See ``utils.subscriber_has_active_subscription`` for more rules.\r\n\r\n \"\"\"\r\n try:\r\n return bool(subscriber_has_active_subscription(subscriber_request_callback(request)))\r\n except AttributeError:\r\n return False\r\n```\r\n\r\nJust missing a return and it solves the problem. We don't need a bool directly there, I just added just to follow the same patterns as the DRF (also being added to the other project :-))\n", "before_files": [{"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.serializers.\n\n :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API.\n\n.. 
moduleauthor:: Philippe Luickx (@philippeluickx)\n\n\"\"\"\n\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom djstripe.models import Subscription\n\n\nclass SubscriptionSerializer(ModelSerializer):\n \"\"\"A serializer used for the Subscription model.\"\"\"\n\n class Meta:\n \"\"\"Model class options.\"\"\"\n\n model = Subscription\n exclude = [\"default_tax_rates\"]\n\n\nclass CreateSubscriptionSerializer(serializers.Serializer):\n \"\"\"A serializer used to create a Subscription.\"\"\"\n\n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n charge_immediately = serializers.BooleanField(required=False, allow_null=True, default=None)\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "path": "djstripe/contrib/rest_framework/serializers.py"}, {"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.permissions.\n\n :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.\n\n.. moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom rest_framework.permissions import BasePermission\n\nfrom ...settings import subscriber_request_callback\nfrom ...utils import subscriber_has_active_subscription\n\n\nclass DJStripeSubscriptionPermission(BasePermission):\n \"\"\"\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n\n def has_permission(self, request, view):\n \"\"\"\n Check if the subscriber has an active subscription.\n\n Returns false if:\n * a subscriber isn't passed through the request\n\n See ``utils.subscriber_has_active_subscription`` for more rules.\n\n \"\"\"\n try:\n subscriber_has_active_subscription(subscriber_request_callback(request))\n except AttributeError:\n return False\n", "path": "djstripe/contrib/rest_framework/permissions.py"}], "after_files": [{"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.serializers.\n\n :synopsis: dj-stripe - Serializers to be used with the dj-stripe REST API.\n\n.. moduleauthor:: Philippe Luickx (@philippeluickx)\n\n\"\"\"\n\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom djstripe.models import Subscription\n\n\nclass SubscriptionSerializer(ModelSerializer):\n \"\"\"A serializer used for the Subscription model.\"\"\"\n\n class Meta:\n \"\"\"Model class options.\"\"\"\n\n model = Subscription\n exclude = [\"default_tax_rates\"]\n\n\nclass CreateSubscriptionSerializer(serializers.Serializer):\n \"\"\"A serializer used to create a Subscription.\"\"\"\n\n stripe_token = serializers.CharField(max_length=200)\n plan = serializers.CharField(max_length=50)\n charge_immediately = serializers.BooleanField(\n required=False, allow_null=True, default=None\n )\n tax_percent = serializers.DecimalField(\n required=False, max_digits=5, decimal_places=2\n )\n", "path": "djstripe/contrib/rest_framework/serializers.py"}, {"content": "\"\"\"\n.. module:: dj-stripe.contrib.rest_framework.permissions.\n\n :synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.\n\n.. 
moduleauthor:: @kavdev, @pydanny\n\n\"\"\"\nfrom rest_framework.permissions import BasePermission\n\nfrom ...settings import subscriber_request_callback\nfrom ...utils import subscriber_has_active_subscription\n\n\nclass DJStripeSubscriptionPermission(BasePermission):\n \"\"\"\n A permission to be used when wanting to permit users with active subscriptions.\n \"\"\"\n\n def has_permission(self, request, view) -> bool:\n \"\"\"\n Check if the subscriber has an active subscription.\n\n Returns false if:\n * a subscriber isn't passed through the request\n\n See ``utils.subscriber_has_active_subscription`` for more rules.\n\n \"\"\"\n try:\n return subscriber_has_active_subscription(\n subscriber_request_callback(request)\n )\n except AttributeError:\n return False\n", "path": "djstripe/contrib/rest_framework/permissions.py"}]} | 1,099 | 334 |
gh_patches_debug_35132 | rasdani/github-patches | git_diff | CTFd__CTFd-1352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Submission search
Search submissions akin to how users are searched
--- END ISSUE ---
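A search like the one requested is commonly a column LIKE filter plus pagination over the listing query. A self-contained sketch of that pattern with SQLAlchemy (assuming SQLAlchemy 1.4+ for the `declarative_base` import; the model and column names are placeholders, not CTFd's schema):
```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Submission(Base):
    __tablename__ = "submissions"
    id = Column(Integer, primary_key=True)
    provided = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Submission(provided="flag{abc}"), Submission(provided="wrong guess")])
session.commit()

field, q = "provided", "flag"
filters = []
if q and hasattr(Submission, field):
    filters.append(getattr(Submission, field).like("%{}%".format(q)))

matches = session.query(Submission).filter(*filters).limit(50).all()
print([m.provided for m in matches])  # ['flag{abc}']
```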
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/submissions.py`
Content:
```
1 from flask import render_template, request
2
3 from CTFd.admin import admin
4 from CTFd.models import Challenges, Submissions
5 from CTFd.utils.decorators import admins_only
6 from CTFd.utils.modes import get_model
7
8
9 @admin.route("/admin/submissions", defaults={"submission_type": None})
10 @admin.route("/admin/submissions/<submission_type>")
11 @admins_only
12 def submissions_listing(submission_type):
13 filters = {}
14 if submission_type:
15 filters["type"] = submission_type
16
17 curr_page = abs(int(request.args.get("page", 1, type=int)))
18 results_per_page = 50
19 page_start = results_per_page * (curr_page - 1)
20 page_end = results_per_page * (curr_page - 1) + results_per_page
21 sub_count = Submissions.query.filter_by(**filters).count()
22 page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)
23
24 Model = get_model()
25
26 submissions = (
27 Submissions.query.add_columns(
28 Submissions.id,
29 Submissions.type,
30 Submissions.challenge_id,
31 Submissions.provided,
32 Submissions.account_id,
33 Submissions.date,
34 Challenges.name.label("challenge_name"),
35 Model.name.label("team_name"),
36 )
37 .filter_by(**filters)
38 .join(Challenges)
39 .join(Model)
40 .order_by(Submissions.date.desc())
41 .slice(page_start, page_end)
42 .all()
43 )
44
45 return render_template(
46 "admin/submissions.html",
47 submissions=submissions,
48 page_count=page_count,
49 curr_page=curr_page,
50 type=submission_type,
51 )
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py
--- a/CTFd/admin/submissions.py
+++ b/CTFd/admin/submissions.py
@@ -1,4 +1,4 @@
-from flask import render_template, request
+from flask import render_template, request, url_for
from CTFd.admin import admin
from CTFd.models import Challenges, Submissions
@@ -10,16 +10,21 @@
@admin.route("/admin/submissions/<submission_type>")
@admins_only
def submissions_listing(submission_type):
- filters = {}
+ filters_by = {}
if submission_type:
- filters["type"] = submission_type
+ filters_by["type"] = submission_type
+ filters = []
- curr_page = abs(int(request.args.get("page", 1, type=int)))
- results_per_page = 50
- page_start = results_per_page * (curr_page - 1)
- page_end = results_per_page * (curr_page - 1) + results_per_page
- sub_count = Submissions.query.filter_by(**filters).count()
- page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)
+ q = request.args.get("q")
+ field = request.args.get("field")
+ page = abs(request.args.get("page", 1, type=int))
+
+ if q:
+ submissions = []
+ if Submissions.__mapper__.has_property(
+ field
+ ): # The field exists as an exposed column
+ filters.append(getattr(Submissions, field).like("%{}%".format(q)))
Model = get_model()
@@ -34,18 +39,27 @@
Challenges.name.label("challenge_name"),
Model.name.label("team_name"),
)
- .filter_by(**filters)
+ .filter_by(**filters_by)
+ .filter(*filters)
.join(Challenges)
.join(Model)
.order_by(Submissions.date.desc())
- .slice(page_start, page_end)
- .all()
+ .paginate(page=page, per_page=50)
)
+ args = dict(request.args)
+ args.pop("page", 1)
+
return render_template(
"admin/submissions.html",
submissions=submissions,
- page_count=page_count,
- curr_page=curr_page,
+ prev_page=url_for(
+ request.endpoint, type=submission_type, page=submissions.prev_num, **args
+ ),
+ next_page=url_for(
+ request.endpoint, type=submission_type, page=submissions.next_num, **args
+ ),
type=submission_type,
+ q=q,
+ field=field,
)
| {"golden_diff": "diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py\n--- a/CTFd/admin/submissions.py\n+++ b/CTFd/admin/submissions.py\n@@ -1,4 +1,4 @@\n-from flask import render_template, request\n+from flask import render_template, request, url_for\n \n from CTFd.admin import admin\n from CTFd.models import Challenges, Submissions\n@@ -10,16 +10,21 @@\n @admin.route(\"/admin/submissions/<submission_type>\")\n @admins_only\n def submissions_listing(submission_type):\n- filters = {}\n+ filters_by = {}\n if submission_type:\n- filters[\"type\"] = submission_type\n+ filters_by[\"type\"] = submission_type\n+ filters = []\n \n- curr_page = abs(int(request.args.get(\"page\", 1, type=int)))\n- results_per_page = 50\n- page_start = results_per_page * (curr_page - 1)\n- page_end = results_per_page * (curr_page - 1) + results_per_page\n- sub_count = Submissions.query.filter_by(**filters).count()\n- page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)\n+ q = request.args.get(\"q\")\n+ field = request.args.get(\"field\")\n+ page = abs(request.args.get(\"page\", 1, type=int))\n+\n+ if q:\n+ submissions = []\n+ if Submissions.__mapper__.has_property(\n+ field\n+ ): # The field exists as an exposed column\n+ filters.append(getattr(Submissions, field).like(\"%{}%\".format(q)))\n \n Model = get_model()\n \n@@ -34,18 +39,27 @@\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n- .filter_by(**filters)\n+ .filter_by(**filters_by)\n+ .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n- .slice(page_start, page_end)\n- .all()\n+ .paginate(page=page, per_page=50)\n )\n \n+ args = dict(request.args)\n+ args.pop(\"page\", 1)\n+\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n- page_count=page_count,\n- curr_page=curr_page,\n+ prev_page=url_for(\n+ request.endpoint, type=submission_type, page=submissions.prev_num, **args\n+ ),\n+ next_page=url_for(\n+ request.endpoint, type=submission_type, page=submissions.next_num, **args\n+ ),\n type=submission_type,\n+ q=q,\n+ field=field,\n )\n", "issue": "Submission search\nSearch submissions akin to how users are searched\n", "before_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters = {}\n if submission_type:\n filters[\"type\"] = submission_type\n\n curr_page = abs(int(request.args.get(\"page\", 1, type=int)))\n results_per_page = 50\n page_start = results_per_page * (curr_page - 1)\n page_end = results_per_page * (curr_page - 1) + results_per_page\n sub_count = Submissions.query.filter_by(**filters).count()\n page_count = int(sub_count / results_per_page) + (sub_count % results_per_page > 0)\n\n Model = get_model()\n\n submissions = (\n Submissions.query.add_columns(\n Submissions.id,\n Submissions.type,\n Submissions.challenge_id,\n Submissions.provided,\n Submissions.account_id,\n Submissions.date,\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n .filter_by(**filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .slice(page_start, page_end)\n .all()\n )\n\n return 
render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n page_count=page_count,\n curr_page=curr_page,\n type=submission_type,\n )\n", "path": "CTFd/admin/submissions.py"}], "after_files": [{"content": "from flask import render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters_by = {}\n if submission_type:\n filters_by[\"type\"] = submission_type\n filters = []\n\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n\n if q:\n submissions = []\n if Submissions.__mapper__.has_property(\n field\n ): # The field exists as an exposed column\n filters.append(getattr(Submissions, field).like(\"%{}%\".format(q)))\n\n Model = get_model()\n\n submissions = (\n Submissions.query.add_columns(\n Submissions.id,\n Submissions.type,\n Submissions.challenge_id,\n Submissions.provided,\n Submissions.account_id,\n Submissions.date,\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"team_name\"),\n )\n .filter_by(**filters_by)\n .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n prev_page=url_for(\n request.endpoint, type=submission_type, page=submissions.prev_num, **args\n ),\n next_page=url_for(\n request.endpoint, type=submission_type, page=submissions.next_num, **args\n ),\n type=submission_type,\n q=q,\n field=field,\n )\n", "path": "CTFd/admin/submissions.py"}]} | 728 | 615 |
gh_patches_debug_51710 | rasdani/github-patches | git_diff | getsentry__sentry-python-2069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot import appengine
### How do you use Sentry?
Sentry Saas (sentry.io)
### Version
1.18.0
### Steps to Reproduce
Install the SDK within any project that is not pinning urllib3 < 2.0.0
### Expected Result
ability to import appengine
### Actual Result
Cannot import appengine as gaecontrib.
As per urllib 2.0.0 release: https://github.com/urllib3/urllib3/tree/2.0.0
Removed urllib3.contrib.appengine.AppEngineManager and support for Google App Engine Standard Environment (https://github.com/urllib3/urllib3/issues/2044).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Sentry-Python - Sentry SDK for Python
5 =====================================
6
7 **Sentry-Python is an SDK for Sentry.** Check out `GitHub
8 <https://github.com/getsentry/sentry-python>`_ to find out more.
9 """
10
11 import os
12 from setuptools import setup, find_packages
13
14 here = os.path.abspath(os.path.dirname(__file__))
15
16
17 def get_file_text(file_name):
18 with open(os.path.join(here, file_name)) as in_file:
19 return in_file.read()
20
21
22 setup(
23 name="sentry-sdk",
24 version="1.21.1",
25 author="Sentry Team and Contributors",
26 author_email="[email protected]",
27 url="https://github.com/getsentry/sentry-python",
28 project_urls={
29 "Documentation": "https://docs.sentry.io/platforms/python/",
30 "Changelog": "https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md",
31 },
32 description="Python client for Sentry (https://sentry.io)",
33 long_description=get_file_text("README.md"),
34 long_description_content_type="text/markdown",
35 packages=find_packages(exclude=("tests", "tests.*")),
36 # PEP 561
37 package_data={"sentry_sdk": ["py.typed"]},
38 zip_safe=False,
39 license="MIT",
40 install_requires=[
41 'urllib3>=1.25.7; python_version<="3.4"',
42 'urllib3>=1.26.9; python_version=="3.5"',
43 'urllib3>=1.26.11; python_version >="3.6"',
44 "certifi",
45 ],
46 extras_require={
47 "flask": ["flask>=0.11", "blinker>=1.1"],
48 "quart": ["quart>=0.16.1", "blinker>=1.1"],
49 "bottle": ["bottle>=0.12.13"],
50 "falcon": ["falcon>=1.4"],
51 "django": ["django>=1.8"],
52 "sanic": ["sanic>=0.8"],
53 "celery": ["celery>=3"],
54 "huey": ["huey>=2"],
55 "beam": ["apache-beam>=2.12"],
56 "arq": ["arq>=0.23"],
57 "rq": ["rq>=0.6"],
58 "aiohttp": ["aiohttp>=3.5"],
59 "tornado": ["tornado>=5"],
60 "sqlalchemy": ["sqlalchemy>=1.2"],
61 "pyspark": ["pyspark>=2.4.4"],
62 "pure_eval": ["pure_eval", "executing", "asttokens"],
63 "chalice": ["chalice>=1.16.0"],
64 "httpx": ["httpx>=0.16.0"],
65 "starlette": ["starlette>=0.19.1"],
66 "starlite": ["starlite>=1.48"],
67 "fastapi": ["fastapi>=0.79.0"],
68 "pymongo": ["pymongo>=3.1"],
69 "opentelemetry": ["opentelemetry-distro>=0.35b0"],
70 "grpcio": ["grpcio>=1.21.1"]
71 },
72 classifiers=[
73 "Development Status :: 5 - Production/Stable",
74 "Environment :: Web Environment",
75 "Intended Audience :: Developers",
76 "License :: OSI Approved :: BSD License",
77 "Operating System :: OS Independent",
78 "Programming Language :: Python",
79 "Programming Language :: Python :: 2",
80 "Programming Language :: Python :: 2.7",
81 "Programming Language :: Python :: 3",
82 "Programming Language :: Python :: 3.4",
83 "Programming Language :: Python :: 3.5",
84 "Programming Language :: Python :: 3.6",
85 "Programming Language :: Python :: 3.7",
86 "Programming Language :: Python :: 3.8",
87 "Programming Language :: Python :: 3.9",
88 "Programming Language :: Python :: 3.10",
89 "Topic :: Software Development :: Libraries :: Python Modules",
90 ],
91 options={"bdist_wheel": {"universal": "1"}},
92 )
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,6 +41,7 @@
'urllib3>=1.25.7; python_version<="3.4"',
'urllib3>=1.26.9; python_version=="3.5"',
'urllib3>=1.26.11; python_version >="3.6"',
+ 'urllib3<2.0.0',
"certifi",
],
extras_require={
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,6 +41,7 @@\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n+ 'urllib3<2.0.0',\n \"certifi\",\n ],\n extras_require={\n", "issue": "Cannot import appengine\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.18.0\n\n### Steps to Reproduce\n\nInstall the SDK within any project that is not pinning urllib3 < 2.0.0\n\n### Expected Result\n\nability to import appengine\n\n### Actual Result\n\nCannot import appengine as gaecontrib.\r\nAs per urllib 2.0.0 release: https://github.com/urllib3/urllib3/tree/2.0.0\r\n\r\nRemoved urllib3.contrib.appengine.AppEngineManager and support for Google App Engine Standard Environment (https://github.com/urllib3/urllib3/issues/2044).\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.21.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"MIT\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"huey\": [\"huey>=2\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"arq\": [\"arq>=0.23\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"grpcio\": [\"grpcio>=1.21.1\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n 
\"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nSentry-Python - Sentry SDK for Python\n=====================================\n\n**Sentry-Python is an SDK for Sentry.** Check out `GitHub\n<https://github.com/getsentry/sentry-python>`_ to find out more.\n\"\"\"\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_file_text(file_name):\n with open(os.path.join(here, file_name)) as in_file:\n return in_file.read()\n\n\nsetup(\n name=\"sentry-sdk\",\n version=\"1.21.1\",\n author=\"Sentry Team and Contributors\",\n author_email=\"[email protected]\",\n url=\"https://github.com/getsentry/sentry-python\",\n project_urls={\n \"Documentation\": \"https://docs.sentry.io/platforms/python/\",\n \"Changelog\": \"https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md\",\n },\n description=\"Python client for Sentry (https://sentry.io)\",\n long_description=get_file_text(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n # PEP 561\n package_data={\"sentry_sdk\": [\"py.typed\"]},\n zip_safe=False,\n license=\"MIT\",\n install_requires=[\n 'urllib3>=1.25.7; python_version<=\"3.4\"',\n 'urllib3>=1.26.9; python_version==\"3.5\"',\n 'urllib3>=1.26.11; python_version >=\"3.6\"',\n 'urllib3<2.0.0',\n \"certifi\",\n ],\n extras_require={\n \"flask\": [\"flask>=0.11\", \"blinker>=1.1\"],\n \"quart\": [\"quart>=0.16.1\", \"blinker>=1.1\"],\n \"bottle\": [\"bottle>=0.12.13\"],\n \"falcon\": [\"falcon>=1.4\"],\n \"django\": [\"django>=1.8\"],\n \"sanic\": [\"sanic>=0.8\"],\n \"celery\": [\"celery>=3\"],\n \"huey\": [\"huey>=2\"],\n \"beam\": [\"apache-beam>=2.12\"],\n \"arq\": [\"arq>=0.23\"],\n \"rq\": [\"rq>=0.6\"],\n \"aiohttp\": [\"aiohttp>=3.5\"],\n \"tornado\": [\"tornado>=5\"],\n \"sqlalchemy\": [\"sqlalchemy>=1.2\"],\n \"pyspark\": [\"pyspark>=2.4.4\"],\n \"pure_eval\": [\"pure_eval\", \"executing\", \"asttokens\"],\n \"chalice\": [\"chalice>=1.16.0\"],\n \"httpx\": [\"httpx>=0.16.0\"],\n \"starlette\": [\"starlette>=0.19.1\"],\n \"starlite\": [\"starlite>=1.48\"],\n \"fastapi\": [\"fastapi>=0.79.0\"],\n \"pymongo\": [\"pymongo>=3.1\"],\n \"opentelemetry\": [\"opentelemetry-distro>=0.35b0\"],\n \"grpcio\": [\"grpcio>=1.21.1\"]\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 
3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n options={\"bdist_wheel\": {\"universal\": \"1\"}},\n)\n", "path": "setup.py"}]} | 1,514 | 119 |
gh_patches_debug_51406 | rasdani/github-patches | git_diff | pytorch__ignite-1016 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyTorch dependency is lacking version constraint
## 🐛 Bug description
<!-- A clear and concise description of what the bug is. -->
PyTorch is a dependency of Ignite and, thus, is specified in `setup.py`
https://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/setup.py#L24-L26
and `conda.recipe/meta.yaml`:
https://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/conda.recipe/meta.yaml#L15-L23
The PyTorch dependency is lacking a version constraint which may work fine right now, but there is no guarantee that Ignite will be compatible with any future major PyTorch release (e.g. PyTorch v2.x).
I suggest to constrain the PyTorch version that Ignite is compatible with, e.g. `>=1.0,<2` or `<2` if any `0.x` and `1.x` version works. If PyTorch has a new major release, even previous Ignite versions can become compatible with the new major PyTorch release (especially if no changes to the code are necessary) by making new bug fix releases with relaxed version constraints to include the new PyTorch version.
In my opinion, it is highly preferable to be conservative about dependency version constraints through a [compatible release constraint](https://www.python.org/dev/peps/pep-0440/#compatible-release) in case the dependency conforms with semantic versioning. It is impossible to guarantee compatibility with a future major release of a dependency as its API can change arbitrarily.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import io
3 import re
4 from setuptools import setup, find_packages
5
6
7 def read(*names, **kwargs):
8 with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
9 return fp.read()
10
11
12 def find_version(*file_paths):
13 version_file = read(*file_paths)
14 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
15 if version_match:
16 return version_match.group(1)
17 raise RuntimeError("Unable to find version string.")
18
19
20 readme = read("README.md")
21
22 VERSION = find_version("ignite", "__init__.py")
23
24 requirements = [
25 "torch",
26 ]
27
28 setup(
29 # Metadata
30 name="pytorch-ignite",
31 version=VERSION,
32 author="PyTorch Core Team",
33 author_email="[email protected]",
34 url="https://github.com/pytorch/ignite",
35 description="A lightweight library to help with training neural networks in PyTorch.",
36 long_description_content_type="text/markdown",
37 long_description=readme,
38 license="BSD",
39 # Package info
40 packages=find_packages(exclude=("tests", "tests.*",)),
41 zip_safe=True,
42 install_requires=requirements,
43 )
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
VERSION = find_version("ignite", "__init__.py")
requirements = [
- "torch",
+ "torch>=1.0,<2",
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n VERSION = find_version(\"ignite\", \"__init__.py\")\n \n requirements = [\n- \"torch\",\n+ \"torch>=1.0,<2\",\n ]\n \n setup(\n", "issue": "PyTorch dependency is lacking version constraint\n## \ud83d\udc1b Bug description\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nPyTorch is a dependency of Ignite and, thus, is specified in `setup.py`\r\n\r\nhttps://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/setup.py#L24-L26\r\n\r\nand `conda.recipe/meta.yaml`:\r\n\r\nhttps://github.com/pytorch/ignite/blob/4b311cc82fe45b3082661125cd7ee54007283fb0/conda.recipe/meta.yaml#L15-L23\r\n\r\nThe PyTorch dependency is lacking a version constraint which may work fine right now, but there is no guarantee that Ignite will be compatible with any future major PyTorch release (e.g. PyTorch v2.x).\r\n\r\nI suggest to constrain the PyTorch version that Ignite is compatible with, e.g. `>=1.0,<2` or `<2` if any `0.x` and `1.x` version works. If PyTorch has a new major release, even previous Ignite versions can become compatible with the new major PyTorch release (especially if no changes to the code are necessary) by making new bug fix releases with relaxed version constraints to include the new PyTorch version.\r\n\r\nIn my opinion, it is highly preferable to be conservative about dependency version constraints through a [compatible release constraint](https://www.python.org/dev/peps/pep-0440/#compatible-release) in case the dependency conforms with semantic versioning. It is impossible to guarantee compatibility with a future major release of a dependency as its API can change arbitrarily.\n", "before_files": [{"content": "import os\nimport io\nimport re\nfrom setuptools import setup, find_packages\n\n\ndef read(*names, **kwargs):\n with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nreadme = read(\"README.md\")\n\nVERSION = find_version(\"ignite\", \"__init__.py\")\n\nrequirements = [\n \"torch\",\n]\n\nsetup(\n # Metadata\n name=\"pytorch-ignite\",\n version=VERSION,\n author=\"PyTorch Core Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pytorch/ignite\",\n description=\"A lightweight library to help with training neural networks in PyTorch.\",\n long_description_content_type=\"text/markdown\",\n long_description=readme,\n license=\"BSD\",\n # Package info\n packages=find_packages(exclude=(\"tests\", \"tests.*\",)),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport io\nimport re\nfrom setuptools import setup, find_packages\n\n\ndef read(*names, **kwargs):\n with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nreadme = read(\"README.md\")\n\nVERSION = 
find_version(\"ignite\", \"__init__.py\")\n\nrequirements = [\n \"torch>=1.0,<2\",\n]\n\nsetup(\n # Metadata\n name=\"pytorch-ignite\",\n version=VERSION,\n author=\"PyTorch Core Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/pytorch/ignite\",\n description=\"A lightweight library to help with training neural networks in PyTorch.\",\n long_description_content_type=\"text/markdown\",\n long_description=readme,\n license=\"BSD\",\n # Package info\n packages=find_packages(exclude=(\"tests\", \"tests.*\",)),\n zip_safe=True,\n install_requires=requirements,\n)\n", "path": "setup.py"}]} | 1,012 | 68 |
gh_patches_debug_64103 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2411 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
App breaking when using Enum as field for Generic
<!-- Provide a general summary of the bug in the title above. -->
When using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`.
<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->
<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->
## Describe the Bug
The below code is an example of the error.
```python
from enum import Enum
from typing import Generic, Optional, TypeVar
import strawberry
T = TypeVar("T")
@strawberry.enum
class EstimatedValueEnum(Enum):
test = "test"
testtest = "testtest"
@strawberry.type
class EstimatedValue(Generic[T]):
value: T
type: EstimatedValueEnum
@strawberry.type
class Query:
id_translations: Optional[EstimatedValue[int]]
schema = strawberry.Schema(query=Query)
```
Are we doing something wrong and this is intended or is this a bug?
<!-- A clear and concise description of what the bug is. -->
## System Information
- Operating system: Docker
- Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09)
<!-- Add any other relevant information about the problem here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/enum.py`
Content:
```
1 import dataclasses
2 from enum import EnumMeta
3 from typing import (
4 Any,
5 Callable,
6 Iterable,
7 List,
8 Mapping,
9 Optional,
10 TypeVar,
11 Union,
12 overload,
13 )
14
15 from strawberry.type import StrawberryType
16
17 from .exceptions import ObjectIsNotAnEnumError
18
19
20 @dataclasses.dataclass
21 class EnumValue:
22 name: str
23 value: Any
24 deprecation_reason: Optional[str] = None
25 directives: Iterable[object] = ()
26 description: Optional[str] = None
27
28
29 @dataclasses.dataclass
30 class EnumDefinition(StrawberryType):
31 wrapped_cls: EnumMeta
32 name: str
33 values: List[EnumValue]
34 description: Optional[str]
35 directives: Iterable[object] = ()
36
37 def __hash__(self) -> int:
38 # TODO: Is this enough for unique-ness?
39 return hash(self.name)
40
41 def copy_with(
42 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
43 ) -> Union[StrawberryType, type]:
44 return super().copy_with(type_var_map) # type: ignore[safe-super]
45
46 @property
47 def is_generic(self) -> bool:
48 return False
49
50
51 # TODO: remove duplication of EnumValueDefinition and EnumValue
52 @dataclasses.dataclass
53 class EnumValueDefinition:
54 value: Any
55 deprecation_reason: Optional[str] = None
56 directives: Iterable[object] = ()
57 description: Optional[str] = None
58
59
60 def enum_value(
61 value: Any,
62 deprecation_reason: Optional[str] = None,
63 directives: Iterable[object] = (),
64 description: Optional[str] = None,
65 ) -> EnumValueDefinition:
66 return EnumValueDefinition(
67 value=value,
68 deprecation_reason=deprecation_reason,
69 directives=directives,
70 description=description,
71 )
72
73
74 EnumType = TypeVar("EnumType", bound=EnumMeta)
75
76
77 def _process_enum(
78 cls: EnumType,
79 name: Optional[str] = None,
80 description: Optional[str] = None,
81 directives: Iterable[object] = (),
82 ) -> EnumType:
83 if not isinstance(cls, EnumMeta):
84 raise ObjectIsNotAnEnumError(cls)
85
86 if not name:
87 name = cls.__name__
88
89 description = description
90
91 values = []
92 for item in cls: # type: ignore
93 item_value = item.value
94 item_name = item.name
95 deprecation_reason = None
96 item_directives: Iterable[object] = ()
97 enum_value_description = None
98
99 if isinstance(item_value, EnumValueDefinition):
100 item_directives = item_value.directives
101 enum_value_description = item_value.description
102 deprecation_reason = item_value.deprecation_reason
103 item_value = item_value.value
104
105 # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and
106 # `MyEnum['MY_VALUE']` both work
107 cls._value2member_map_[item_value] = item
108 cls._member_map_[item_name]._value_ = item_value
109
110 value = EnumValue(
111 item_name,
112 item_value,
113 deprecation_reason=deprecation_reason,
114 directives=item_directives,
115 description=enum_value_description,
116 )
117 values.append(value)
118
119 cls._enum_definition = EnumDefinition( # type: ignore
120 wrapped_cls=cls,
121 name=name,
122 values=values,
123 description=description,
124 directives=directives,
125 )
126
127 return cls
128
129
130 @overload
131 def enum(
132 _cls: EnumType,
133 *,
134 name: Optional[str] = None,
135 description: Optional[str] = None,
136 directives: Iterable[object] = ()
137 ) -> EnumType:
138 ...
139
140
141 @overload
142 def enum(
143 _cls: None = None,
144 *,
145 name: Optional[str] = None,
146 description: Optional[str] = None,
147 directives: Iterable[object] = ()
148 ) -> Callable[[EnumType], EnumType]:
149 ...
150
151
152 def enum(
153 _cls: Optional[EnumType] = None,
154 *,
155 name: Optional[str] = None,
156 description: Optional[str] = None,
157 directives: Iterable[object] = ()
158 ) -> Union[EnumType, Callable[[EnumType], EnumType]]:
159 """Registers the enum in the GraphQL type system.
160
161 If name is passed, the name of the GraphQL type will be
162 the value passed of name instead of the Enum class name.
163 """
164
165 def wrap(cls: EnumType) -> EnumType:
166 return _process_enum(cls, name, description, directives=directives)
167
168 if not _cls:
169 return wrap
170
171 return wrap(_cls)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/enum.py b/strawberry/enum.py
--- a/strawberry/enum.py
+++ b/strawberry/enum.py
@@ -41,7 +41,8 @@
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> Union[StrawberryType, type]:
- return super().copy_with(type_var_map) # type: ignore[safe-super]
+ # enum don't support type parameters, so we can safely return self
+ return self
@property
def is_generic(self) -> bool:
| {"golden_diff": "diff --git a/strawberry/enum.py b/strawberry/enum.py\n--- a/strawberry/enum.py\n+++ b/strawberry/enum.py\n@@ -41,7 +41,8 @@\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n- return super().copy_with(type_var_map) # type: ignore[safe-super]\n+ # enum don't support type parameters, so we can safely return self\n+ return self\n \n @property\n def is_generic(self) -> bool:\n", "issue": "App breaking when using Enum as field for Generic\n<!-- Provide a general summary of the bug in the title above. -->\r\nWhen using an Enum as a field on a Generic, the app breaks, throwing a `NotImplementedError`.\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\nThe below code is an example of the error.\r\n```python\r\nfrom enum import Enum\r\nfrom typing import Generic, Optional, TypeVar\r\n\r\nimport strawberry\r\n\r\nT = TypeVar(\"T\")\r\n\r\n\r\[email protected]\r\nclass EstimatedValueEnum(Enum):\r\n test = \"test\"\r\n testtest = \"testtest\"\r\n\r\n\r\[email protected]\r\nclass EstimatedValue(Generic[T]):\r\n value: T\r\n type: EstimatedValueEnum\r\n\r\n\r\[email protected]\r\nclass Query:\r\n id_translations: Optional[EstimatedValue[int]]\r\n\r\n\r\nschema = strawberry.Schema(query=Query)\r\n```\r\nAre we doing something wrong and this is intended or is this a bug?\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## System Information\r\n\r\n - Operating system: Docker\r\n - Strawberry version (if applicable): Since [0.149.2](https://github.com/strawberry-graphql/strawberry/blob/main/CHANGELOG.md#01492---2022-12-09)\r\n\r\n<!-- Add any other relevant information about the problem here. 
-->\r\n\n", "before_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return super().copy_with(type_var_map) # type: ignore[safe-super]\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the 
GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}], "after_files": [{"content": "import dataclasses\nfrom enum import EnumMeta\nfrom typing import (\n Any,\n Callable,\n Iterable,\n List,\n Mapping,\n Optional,\n TypeVar,\n Union,\n overload,\n)\n\nfrom strawberry.type import StrawberryType\n\nfrom .exceptions import ObjectIsNotAnEnumError\n\n\[email protected]\nclass EnumValue:\n name: str\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\[email protected]\nclass EnumDefinition(StrawberryType):\n wrapped_cls: EnumMeta\n name: str\n values: List[EnumValue]\n description: Optional[str]\n directives: Iterable[object] = ()\n\n def __hash__(self) -> int:\n # TODO: Is this enough for unique-ness?\n return hash(self.name)\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n # enum don't support type parameters, so we can safely return self\n return self\n\n @property\n def is_generic(self) -> bool:\n return False\n\n\n# TODO: remove duplication of EnumValueDefinition and EnumValue\[email protected]\nclass EnumValueDefinition:\n value: Any\n deprecation_reason: Optional[str] = None\n directives: Iterable[object] = ()\n description: Optional[str] = None\n\n\ndef enum_value(\n value: Any,\n deprecation_reason: Optional[str] = None,\n directives: Iterable[object] = (),\n description: Optional[str] = None,\n) -> EnumValueDefinition:\n return EnumValueDefinition(\n value=value,\n deprecation_reason=deprecation_reason,\n directives=directives,\n description=description,\n )\n\n\nEnumType = TypeVar(\"EnumType\", bound=EnumMeta)\n\n\ndef _process_enum(\n cls: EnumType,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = (),\n) -> EnumType:\n if not isinstance(cls, EnumMeta):\n raise ObjectIsNotAnEnumError(cls)\n\n if not name:\n name = cls.__name__\n\n description = description\n\n values = []\n for item in cls: # type: ignore\n item_value = item.value\n item_name = item.name\n deprecation_reason = None\n item_directives: Iterable[object] = ()\n enum_value_description = None\n\n if isinstance(item_value, EnumValueDefinition):\n item_directives = item_value.directives\n enum_value_description = item_value.description\n deprecation_reason = item_value.deprecation_reason\n item_value = item_value.value\n\n # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and\n # `MyEnum['MY_VALUE']` both work\n cls._value2member_map_[item_value] = item\n cls._member_map_[item_name]._value_ = item_value\n\n value = EnumValue(\n item_name,\n item_value,\n deprecation_reason=deprecation_reason,\n directives=item_directives,\n description=enum_value_description,\n )\n values.append(value)\n\n cls._enum_definition = EnumDefinition( # type: ignore\n wrapped_cls=cls,\n name=name,\n values=values,\n description=description,\n directives=directives,\n )\n\n return cls\n\n\n@overload\ndef enum(\n _cls: EnumType,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> EnumType:\n ...\n\n\n@overload\ndef enum(\n _cls: None = None,\n *,\n name: Optional[str] = None,\n 
description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Callable[[EnumType], EnumType]:\n ...\n\n\ndef enum(\n _cls: Optional[EnumType] = None,\n *,\n name: Optional[str] = None,\n description: Optional[str] = None,\n directives: Iterable[object] = ()\n) -> Union[EnumType, Callable[[EnumType], EnumType]]:\n \"\"\"Registers the enum in the GraphQL type system.\n\n If name is passed, the name of the GraphQL type will be\n the value passed of name instead of the Enum class name.\n \"\"\"\n\n def wrap(cls: EnumType) -> EnumType:\n return _process_enum(cls, name, description, directives=directives)\n\n if not _cls:\n return wrap\n\n return wrap(_cls)\n", "path": "strawberry/enum.py"}]} | 2,021 | 146 |
gh_patches_debug_27460 | rasdani/github-patches | git_diff | googleapis__python-bigquery-442 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Too noise logging about telemetry
Hello,
In the Apache Airflow project, we use the BigQuery library, but recently we've started to see an annoying log message whenever the library is loaded. Simply importing the library is enough to emit the message every time.
In my opinion, this message should be logged at a lower level (DEBUG) so that it is displayed much less often, or only when the client is initialized.
```
import logging
logging.basicConfig(level=logging.INFO)
from google.cloud import bigquery
```
Output:
```
INFO:google.cloud.bigquery.opentelemetry_tracing:This service is instrumented using OpenTelemetry. OpenTelemetry could not be imported; please add opentelemetry-api and opentelemetry-instrumentation packages in order to get BigQuery Tracing data.
```
Related issue: https://github.com/apache/airflow/issues/13131
CC: @tswast
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/opentelemetry_tracing.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from contextlib import contextmanager
17 from google.api_core.exceptions import GoogleAPICallError
18
19 logger = logging.getLogger(__name__)
20 try:
21 from opentelemetry import trace
22 from opentelemetry.instrumentation.utils import http_status_to_canonical_code
23 from opentelemetry.trace.status import Status
24
25 HAS_OPENTELEMETRY = True
26
27 except ImportError:
28 logger.info(
29 "This service is instrumented using OpenTelemetry. "
30 "OpenTelemetry could not be imported; please "
31 "add opentelemetry-api and opentelemetry-instrumentation "
32 "packages in order to get BigQuery Tracing data."
33 )
34
35 HAS_OPENTELEMETRY = False
36
37 _default_attributes = {
38 "db.system": "BigQuery"
39 } # static, default values assigned to all spans
40
41
42 @contextmanager
43 def create_span(name, attributes=None, client=None, job_ref=None):
44 """Creates a ContextManager for a Span to be exported to the configured exporter.
45 If no configuration exists yields None.
46
47 Args:
48 name (str): Name that will be set for the span being created
49 attributes (Optional[dict]):
50 Additional attributes that pertain to
51 the specific API call (i.e. not a default attribute)
52 client (Optional[google.cloud.bigquery.client.Client]):
53 Pass in a Client object to extract any attributes that may be
54 relevant to it and add them to the created spans.
55 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
56 Pass in a _AsyncJob object to extract any attributes that may be
57 relevant to it and add them to the created spans.
58
59 Yields:
60 opentelemetry.trace.Span: Yields the newly created Span.
61
62 Raises:
63 google.api_core.exceptions.GoogleAPICallError:
64 Raised if a span could not be yielded or issue with call to
65 OpenTelemetry.
66 """
67 final_attributes = _get_final_span_attributes(attributes, client, job_ref)
68 if not HAS_OPENTELEMETRY:
69 yield None
70 return
71 tracer = trace.get_tracer(__name__)
72
73 # yield new span value
74 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
75 try:
76 yield span
77 except GoogleAPICallError as error:
78 if error.code is not None:
79 span.set_status(Status(http_status_to_canonical_code(error.code)))
80 raise
81
82
83 def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
84 final_attributes = {}
85 final_attributes.update(_default_attributes.copy())
86 if client:
87 client_attributes = _set_client_attributes(client)
88 final_attributes.update(client_attributes)
89 if job_ref:
90 job_attributes = _set_job_attributes(job_ref)
91 final_attributes.update(job_attributes)
92 if attributes:
93 final_attributes.update(attributes)
94 return final_attributes
95
96
97 def _set_client_attributes(client):
98 return {"db.name": client.project, "location": client.location}
99
100
101 def _set_job_attributes(job_ref):
102 job_attributes = {
103 "db.name": job_ref.project,
104 "location": job_ref.location,
105 "num_child_jobs": job_ref.num_child_jobs,
106 "job_id": job_ref.job_id,
107 "parent_job_id": job_ref.parent_job_id,
108 "state": job_ref.state,
109 }
110
111 job_attributes["hasErrors"] = job_ref.error_result is not None
112
113 if job_ref.created is not None:
114 job_attributes["timeCreated"] = job_ref.created.isoformat()
115
116 if job_ref.started is not None:
117 job_attributes["timeStarted"] = job_ref.started.isoformat()
118
119 if job_ref.ended is not None:
120 job_attributes["timeEnded"] = job_ref.ended.isoformat()
121
122 return job_attributes
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -23,16 +23,11 @@
from opentelemetry.trace.status import Status
HAS_OPENTELEMETRY = True
+ _warned_telemetry = True
except ImportError:
- logger.info(
- "This service is instrumented using OpenTelemetry. "
- "OpenTelemetry could not be imported; please "
- "add opentelemetry-api and opentelemetry-instrumentation "
- "packages in order to get BigQuery Tracing data."
- )
-
HAS_OPENTELEMETRY = False
+ _warned_telemetry = False
_default_attributes = {
"db.system": "BigQuery"
@@ -64,8 +59,18 @@
Raised if a span could not be yielded or issue with call to
OpenTelemetry.
"""
+ global _warned_telemetry
final_attributes = _get_final_span_attributes(attributes, client, job_ref)
if not HAS_OPENTELEMETRY:
+ if not _warned_telemetry:
+ logger.debug(
+ "This service is instrumented using OpenTelemetry. "
+ "OpenTelemetry could not be imported; please "
+ "add opentelemetry-api and opentelemetry-instrumentation "
+ "packages in order to get BigQuery Tracing data."
+ )
+ _warned_telemetry = True
+
yield None
return
tracer = trace.get_tracer(__name__)
| {"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -23,16 +23,11 @@\n from opentelemetry.trace.status import Status\n \n HAS_OPENTELEMETRY = True\n+ _warned_telemetry = True\n \n except ImportError:\n- logger.info(\n- \"This service is instrumented using OpenTelemetry. \"\n- \"OpenTelemetry could not be imported; please \"\n- \"add opentelemetry-api and opentelemetry-instrumentation \"\n- \"packages in order to get BigQuery Tracing data.\"\n- )\n-\n HAS_OPENTELEMETRY = False\n+ _warned_telemetry = False\n \n _default_attributes = {\n \"db.system\": \"BigQuery\"\n@@ -64,8 +59,18 @@\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n+ global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n+ if not _warned_telemetry:\n+ logger.debug(\n+ \"This service is instrumented using OpenTelemetry. \"\n+ \"OpenTelemetry could not be imported; please \"\n+ \"add opentelemetry-api and opentelemetry-instrumentation \"\n+ \"packages in order to get BigQuery Tracing data.\"\n+ )\n+ _warned_telemetry = True\n+\n yield None\n return\n tracer = trace.get_tracer(__name__)\n", "issue": "Too noise logging about telemetry\nHello,\r\n\r\nIn the Apache Airflow project, we use the BigQuery library, but recently we've started to see annoying log message when the library is loaded. It is enough that the library is loaded and there is an message every time. \r\n\r\nIn my opinion, this message should be of a lower level (DEBUG) so that it is not displayed much less often or is displayed only when the client is initialized. \r\n```\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nfrom google.cloud import bigquery\r\n```\r\nOutput: \r\n```\r\nINFO:google.cloud.bigquery.opentelemetry_tracing:This service is instrumented using OpenTelemetry. OpenTelemetry could not be imported; please add opentelemetry-api and opentelemetry-instrumentation packages in order to get BigQuery Tracing data.\r\n```\r\n\r\nRelated issue: https://github.com/apache/airflow/issues/13131\r\n\r\nCC: @tswast \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace\n from opentelemetry.instrumentation.utils import http_status_to_canonical_code\n from opentelemetry.trace.status import Status\n\n HAS_OPENTELEMETRY = True\n\nexcept ImportError:\n logger.info(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry could not be imported; please \"\n \"add opentelemetry-api and opentelemetry-instrumentation \"\n \"packages in order to get BigQuery Tracing data.\"\n )\n\n HAS_OPENTELEMETRY = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_canonical_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"location\": job_ref.location,\n \"num_child_jobs\": job_ref.num_child_jobs,\n \"job_id\": job_ref.job_id,\n \"parent_job_id\": job_ref.parent_job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See 
the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace\n from opentelemetry.instrumentation.utils import http_status_to_canonical_code\n from opentelemetry.trace.status import Status\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry could not be imported; please \"\n \"add opentelemetry-api and opentelemetry-instrumentation \"\n \"packages in order to get BigQuery Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_canonical_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n final_attributes = {}\n final_attributes.update(_default_attributes.copy())\n if client:\n client_attributes = _set_client_attributes(client)\n final_attributes.update(client_attributes)\n if job_ref:\n job_attributes = _set_job_attributes(job_ref)\n final_attributes.update(job_attributes)\n if attributes:\n final_attributes.update(attributes)\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"location\": job_ref.location,\n \"num_child_jobs\": job_ref.num_child_jobs,\n \"job_id\": job_ref.job_id,\n \"parent_job_id\": job_ref.parent_job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]} | 1,662 | 377 |
gh_patches_debug_9151 | rasdani/github-patches | git_diff | python-discord__bot-1199 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Race condition in wait_for_deletion between a manual delete and reacting
Sentry Issue: [BOT-86](https://sentry.io/organizations/python-discord/issues/1861537034/?referrer=github_integration)
The message may be deleted before `wait_for_deletion` has a chance to add a reaction.
```
NotFound: 404 Not Found (error code: 10008): Unknown Message
File "bot/utils/messages.py", line 38, in wait_for_deletion
await message.add_reaction(emoji)
File "discord/message.py", line 953, in add_reaction
await self._state.http.add_reaction(self.channel.id, self.id, emoji)
File "discord/http.py", line 243, in request
raise NotFound(r, data)
Task exception was never retrieved
future: <Task finished name='Task-333258' coro=<wait_for_deletion() done, defined at /bot/bot/utils/messages.py:19> exception=NotFound('404 Not Found (error code: 10008): Unknown Message')>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/utils/messages.py`
Content:
```
1 import asyncio
2 import contextlib
3 import logging
4 import random
5 import re
6 from io import BytesIO
7 from typing import List, Optional, Sequence, Union
8
9 import discord
10 from discord.errors import HTTPException
11 from discord.ext.commands import Context
12
13 from bot.constants import Emojis, NEGATIVE_REPLIES
14
15 log = logging.getLogger(__name__)
16
17
18 async def wait_for_deletion(
19 message: discord.Message,
20 user_ids: Sequence[discord.abc.Snowflake],
21 client: discord.Client,
22 deletion_emojis: Sequence[str] = (Emojis.trashcan,),
23 timeout: float = 60 * 5,
24 attach_emojis: bool = True,
25 ) -> None:
26 """
27 Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.
28
29 An `attach_emojis` bool may be specified to determine whether to attach the given
30 `deletion_emojis` to the message in the given `context`.
31 """
32 if message.guild is None:
33 raise ValueError("Message must be sent on a guild")
34
35 if attach_emojis:
36 for emoji in deletion_emojis:
37 await message.add_reaction(emoji)
38
39 def check(reaction: discord.Reaction, user: discord.Member) -> bool:
40 """Check that the deletion emoji is reacted by the appropriate user."""
41 return (
42 reaction.message.id == message.id
43 and str(reaction.emoji) in deletion_emojis
44 and user.id in user_ids
45 )
46
47 with contextlib.suppress(asyncio.TimeoutError):
48 await client.wait_for('reaction_add', check=check, timeout=timeout)
49 await message.delete()
50
51
52 async def send_attachments(
53 message: discord.Message,
54 destination: Union[discord.TextChannel, discord.Webhook],
55 link_large: bool = True
56 ) -> List[str]:
57 """
58 Re-upload the message's attachments to the destination and return a list of their new URLs.
59
60 Each attachment is sent as a separate message to more easily comply with the request/file size
61 limit. If link_large is True, attachments which are too large are instead grouped into a single
62 embed which links to them.
63 """
64 large = []
65 urls = []
66 for attachment in message.attachments:
67 failure_msg = (
68 f"Failed to re-upload attachment {attachment.filename} from message {message.id}"
69 )
70
71 try:
72 # Allow 512 bytes of leeway for the rest of the request.
73 # This should avoid most files that are too large,
74 # but some may get through hence the try-catch.
75 if attachment.size <= destination.guild.filesize_limit - 512:
76 with BytesIO() as file:
77 await attachment.save(file, use_cached=True)
78 attachment_file = discord.File(file, filename=attachment.filename)
79
80 if isinstance(destination, discord.TextChannel):
81 msg = await destination.send(file=attachment_file)
82 urls.append(msg.attachments[0].url)
83 else:
84 await destination.send(
85 file=attachment_file,
86 username=sub_clyde(message.author.display_name),
87 avatar_url=message.author.avatar_url
88 )
89 elif link_large:
90 large.append(attachment)
91 else:
92 log.info(f"{failure_msg} because it's too large.")
93 except HTTPException as e:
94 if link_large and e.status == 413:
95 large.append(attachment)
96 else:
97 log.warning(f"{failure_msg} with status {e.status}.", exc_info=e)
98
99 if link_large and large:
100 desc = "\n".join(f"[{attachment.filename}]({attachment.url})" for attachment in large)
101 embed = discord.Embed(description=desc)
102 embed.set_footer(text="Attachments exceed upload size limit.")
103
104 if isinstance(destination, discord.TextChannel):
105 await destination.send(embed=embed)
106 else:
107 await destination.send(
108 embed=embed,
109 username=sub_clyde(message.author.display_name),
110 avatar_url=message.author.avatar_url
111 )
112
113 return urls
114
115
116 def sub_clyde(username: Optional[str]) -> Optional[str]:
117 """
118 Replace "e"/"E" in any "clyde" in `username` with a Cyrillic "е"/"E" and return the new string.
119
120 Discord disallows "clyde" anywhere in the username for webhooks. It will return a 400.
121 Return None only if `username` is None.
122 """
123 def replace_e(match: re.Match) -> str:
124 char = "е" if match[2] == "e" else "Е"
125 return match[1] + char
126
127 if username:
128 return re.sub(r"(clyd)(e)", replace_e, username, flags=re.I)
129 else:
130 return username # Empty string or None
131
132
133 async def send_denial(ctx: Context, reason: str) -> None:
134 """Send an embed denying the user with the given reason."""
135 embed = discord.Embed()
136 embed.colour = discord.Colour.red()
137 embed.title = random.choice(NEGATIVE_REPLIES)
138 embed.description = reason
139
140 await ctx.send(embed=embed)
141
142
143 def format_user(user: discord.abc.User) -> str:
144 """Return a string for `user` which has their mention and ID."""
145 return f"{user.mention} (`{user.id}`)"
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/utils/messages.py b/bot/utils/messages.py
--- a/bot/utils/messages.py
+++ b/bot/utils/messages.py
@@ -34,7 +34,11 @@
if attach_emojis:
for emoji in deletion_emojis:
- await message.add_reaction(emoji)
+ try:
+ await message.add_reaction(emoji)
+ except discord.NotFound:
+ log.trace(f"Aborting wait_for_deletion: message {message.id} deleted prematurely.")
+ return
def check(reaction: discord.Reaction, user: discord.Member) -> bool:
"""Check that the deletion emoji is reacted by the appropriate user."""
| {"golden_diff": "diff --git a/bot/utils/messages.py b/bot/utils/messages.py\n--- a/bot/utils/messages.py\n+++ b/bot/utils/messages.py\n@@ -34,7 +34,11 @@\n \n if attach_emojis:\n for emoji in deletion_emojis:\n- await message.add_reaction(emoji)\n+ try:\n+ await message.add_reaction(emoji)\n+ except discord.NotFound:\n+ log.trace(f\"Aborting wait_for_deletion: message {message.id} deleted prematurely.\")\n+ return\n \n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n", "issue": "Race condition in wait_for_deletion between a manual delete and reacting\nSentry Issue: [BOT-86](https://sentry.io/organizations/python-discord/issues/1861537034/?referrer=github_integration)\r\n\r\nThe message may be deleted before `wait_for_deletion` has a chance to add a reaction.\r\n\r\n```\r\nNotFound: 404 Not Found (error code: 10008): Unknown Message\r\n File \"bot/utils/messages.py\", line 38, in wait_for_deletion\r\n await message.add_reaction(emoji)\r\n File \"discord/message.py\", line 953, in add_reaction\r\n await self._state.http.add_reaction(self.channel.id, self.id, emoji)\r\n File \"discord/http.py\", line 243, in request\r\n raise NotFound(r, data)\r\n\r\nTask exception was never retrieved\r\nfuture: <Task finished name='Task-333258' coro=<wait_for_deletion() done, defined at /bot/bot/utils/messages.py:19> exception=NotFound('404 Not Found (error code: 10008): Unknown Message')>\r\n```\n", "before_files": [{"content": "import asyncio\nimport contextlib\nimport logging\nimport random\nimport re\nfrom io import BytesIO\nfrom typing import List, Optional, Sequence, Union\n\nimport discord\nfrom discord.errors import HTTPException\nfrom discord.ext.commands import Context\n\nfrom bot.constants import Emojis, NEGATIVE_REPLIES\n\nlog = logging.getLogger(__name__)\n\n\nasync def wait_for_deletion(\n message: discord.Message,\n user_ids: Sequence[discord.abc.Snowflake],\n client: discord.Client,\n deletion_emojis: Sequence[str] = (Emojis.trashcan,),\n timeout: float = 60 * 5,\n attach_emojis: bool = True,\n) -> None:\n \"\"\"\n Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.\n\n An `attach_emojis` bool may be specified to determine whether to attach the given\n `deletion_emojis` to the message in the given `context`.\n \"\"\"\n if message.guild is None:\n raise ValueError(\"Message must be sent on a guild\")\n\n if attach_emojis:\n for emoji in deletion_emojis:\n await message.add_reaction(emoji)\n\n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n return (\n reaction.message.id == message.id\n and str(reaction.emoji) in deletion_emojis\n and user.id in user_ids\n )\n\n with contextlib.suppress(asyncio.TimeoutError):\n await client.wait_for('reaction_add', check=check, timeout=timeout)\n await message.delete()\n\n\nasync def send_attachments(\n message: discord.Message,\n destination: Union[discord.TextChannel, discord.Webhook],\n link_large: bool = True\n) -> List[str]:\n \"\"\"\n Re-upload the message's attachments to the destination and return a list of their new URLs.\n\n Each attachment is sent as a separate message to more easily comply with the request/file size\n limit. 
If link_large is True, attachments which are too large are instead grouped into a single\n embed which links to them.\n \"\"\"\n large = []\n urls = []\n for attachment in message.attachments:\n failure_msg = (\n f\"Failed to re-upload attachment {attachment.filename} from message {message.id}\"\n )\n\n try:\n # Allow 512 bytes of leeway for the rest of the request.\n # This should avoid most files that are too large,\n # but some may get through hence the try-catch.\n if attachment.size <= destination.guild.filesize_limit - 512:\n with BytesIO() as file:\n await attachment.save(file, use_cached=True)\n attachment_file = discord.File(file, filename=attachment.filename)\n\n if isinstance(destination, discord.TextChannel):\n msg = await destination.send(file=attachment_file)\n urls.append(msg.attachments[0].url)\n else:\n await destination.send(\n file=attachment_file,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n elif link_large:\n large.append(attachment)\n else:\n log.info(f\"{failure_msg} because it's too large.\")\n except HTTPException as e:\n if link_large and e.status == 413:\n large.append(attachment)\n else:\n log.warning(f\"{failure_msg} with status {e.status}.\", exc_info=e)\n\n if link_large and large:\n desc = \"\\n\".join(f\"[{attachment.filename}]({attachment.url})\" for attachment in large)\n embed = discord.Embed(description=desc)\n embed.set_footer(text=\"Attachments exceed upload size limit.\")\n\n if isinstance(destination, discord.TextChannel):\n await destination.send(embed=embed)\n else:\n await destination.send(\n embed=embed,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n\n return urls\n\n\ndef sub_clyde(username: Optional[str]) -> Optional[str]:\n \"\"\"\n Replace \"e\"/\"E\" in any \"clyde\" in `username` with a Cyrillic \"\u0435\"/\"E\" and return the new string.\n\n Discord disallows \"clyde\" anywhere in the username for webhooks. 
It will return a 400.\n Return None only if `username` is None.\n \"\"\"\n def replace_e(match: re.Match) -> str:\n char = \"\u0435\" if match[2] == \"e\" else \"\u0415\"\n return match[1] + char\n\n if username:\n return re.sub(r\"(clyd)(e)\", replace_e, username, flags=re.I)\n else:\n return username # Empty string or None\n\n\nasync def send_denial(ctx: Context, reason: str) -> None:\n \"\"\"Send an embed denying the user with the given reason.\"\"\"\n embed = discord.Embed()\n embed.colour = discord.Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = reason\n\n await ctx.send(embed=embed)\n\n\ndef format_user(user: discord.abc.User) -> str:\n \"\"\"Return a string for `user` which has their mention and ID.\"\"\"\n return f\"{user.mention} (`{user.id}`)\"\n", "path": "bot/utils/messages.py"}], "after_files": [{"content": "import asyncio\nimport contextlib\nimport logging\nimport random\nimport re\nfrom io import BytesIO\nfrom typing import List, Optional, Sequence, Union\n\nimport discord\nfrom discord.errors import HTTPException\nfrom discord.ext.commands import Context\n\nfrom bot.constants import Emojis, NEGATIVE_REPLIES\n\nlog = logging.getLogger(__name__)\n\n\nasync def wait_for_deletion(\n message: discord.Message,\n user_ids: Sequence[discord.abc.Snowflake],\n client: discord.Client,\n deletion_emojis: Sequence[str] = (Emojis.trashcan,),\n timeout: float = 60 * 5,\n attach_emojis: bool = True,\n) -> None:\n \"\"\"\n Wait for up to `timeout` seconds for a reaction by any of the specified `user_ids` to delete the message.\n\n An `attach_emojis` bool may be specified to determine whether to attach the given\n `deletion_emojis` to the message in the given `context`.\n \"\"\"\n if message.guild is None:\n raise ValueError(\"Message must be sent on a guild\")\n\n if attach_emojis:\n for emoji in deletion_emojis:\n try:\n await message.add_reaction(emoji)\n except discord.NotFound:\n log.trace(f\"Aborting wait_for_deletion: message {message.id} deleted prematurely.\")\n return\n\n def check(reaction: discord.Reaction, user: discord.Member) -> bool:\n \"\"\"Check that the deletion emoji is reacted by the appropriate user.\"\"\"\n return (\n reaction.message.id == message.id\n and str(reaction.emoji) in deletion_emojis\n and user.id in user_ids\n )\n\n with contextlib.suppress(asyncio.TimeoutError):\n await client.wait_for('reaction_add', check=check, timeout=timeout)\n await message.delete()\n\n\nasync def send_attachments(\n message: discord.Message,\n destination: Union[discord.TextChannel, discord.Webhook],\n link_large: bool = True\n) -> List[str]:\n \"\"\"\n Re-upload the message's attachments to the destination and return a list of their new URLs.\n\n Each attachment is sent as a separate message to more easily comply with the request/file size\n limit. 
If link_large is True, attachments which are too large are instead grouped into a single\n embed which links to them.\n \"\"\"\n large = []\n urls = []\n for attachment in message.attachments:\n failure_msg = (\n f\"Failed to re-upload attachment {attachment.filename} from message {message.id}\"\n )\n\n try:\n # Allow 512 bytes of leeway for the rest of the request.\n # This should avoid most files that are too large,\n # but some may get through hence the try-catch.\n if attachment.size <= destination.guild.filesize_limit - 512:\n with BytesIO() as file:\n await attachment.save(file, use_cached=True)\n attachment_file = discord.File(file, filename=attachment.filename)\n\n if isinstance(destination, discord.TextChannel):\n msg = await destination.send(file=attachment_file)\n urls.append(msg.attachments[0].url)\n else:\n await destination.send(\n file=attachment_file,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n elif link_large:\n large.append(attachment)\n else:\n log.info(f\"{failure_msg} because it's too large.\")\n except HTTPException as e:\n if link_large and e.status == 413:\n large.append(attachment)\n else:\n log.warning(f\"{failure_msg} with status {e.status}.\", exc_info=e)\n\n if link_large and large:\n desc = \"\\n\".join(f\"[{attachment.filename}]({attachment.url})\" for attachment in large)\n embed = discord.Embed(description=desc)\n embed.set_footer(text=\"Attachments exceed upload size limit.\")\n\n if isinstance(destination, discord.TextChannel):\n await destination.send(embed=embed)\n else:\n await destination.send(\n embed=embed,\n username=sub_clyde(message.author.display_name),\n avatar_url=message.author.avatar_url\n )\n\n return urls\n\n\ndef sub_clyde(username: Optional[str]) -> Optional[str]:\n \"\"\"\n Replace \"e\"/\"E\" in any \"clyde\" in `username` with a Cyrillic \"\u0435\"/\"E\" and return the new string.\n\n Discord disallows \"clyde\" anywhere in the username for webhooks. It will return a 400.\n Return None only if `username` is None.\n \"\"\"\n def replace_e(match: re.Match) -> str:\n char = \"\u0435\" if match[2] == \"e\" else \"\u0415\"\n return match[1] + char\n\n if username:\n return re.sub(r\"(clyd)(e)\", replace_e, username, flags=re.I)\n else:\n return username # Empty string or None\n\n\nasync def send_denial(ctx: Context, reason: str) -> None:\n \"\"\"Send an embed denying the user with the given reason.\"\"\"\n embed = discord.Embed()\n embed.colour = discord.Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = reason\n\n await ctx.send(embed=embed)\n\n\ndef format_user(user: discord.abc.User) -> str:\n \"\"\"Return a string for `user` which has their mention and ID.\"\"\"\n return f\"{user.mention} (`{user.id}`)\"\n", "path": "bot/utils/messages.py"}]} | 1,989 | 147 |
gh_patches_debug_35 | rasdani/github-patches | git_diff | StackStorm__st2-5104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add version string to st2tests to make it installable
Prior to this change, this will fail:
cd st2tests/st2tests
pip install .
After this change that command successfully installs the `st2tests` package. This will also work for installing via GitHub as in:
pip install -e git+https://github.com/StackStorm/[email protected]#egg=st2tests&subdirectory=st2tests
The original request in #2574 is to get st2tests onto PyPI, and I'm not sure if this will accomplish that request, but this is a good first step.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2tests/st2tests/__init__.py`
Content:
```
1 # Copyright 2020 The StackStorm Authors.
2 # Copyright 2019 Extreme Networks, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import absolute_import
17
18 from st2tests.base import EventletTestCase
19 from st2tests.base import DbTestCase
20 from st2tests.base import ExecutionDbTestCase
21 from st2tests.base import DbModelTestCase
22 from st2tests.base import WorkflowTestCase
23
24
25 __all__ = [
26 'EventletTestCase',
27 'DbTestCase',
28 'ExecutionDbTestCase',
29 'DbModelTestCase',
30 'WorkflowTestCase'
31 ]
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2tests/st2tests/__init__.py b/st2tests/st2tests/__init__.py
--- a/st2tests/st2tests/__init__.py
+++ b/st2tests/st2tests/__init__.py
@@ -29,3 +29,5 @@
'DbModelTestCase',
'WorkflowTestCase'
]
+
+__version__ = '3.3dev'
| {"golden_diff": "diff --git a/st2tests/st2tests/__init__.py b/st2tests/st2tests/__init__.py\n--- a/st2tests/st2tests/__init__.py\n+++ b/st2tests/st2tests/__init__.py\n@@ -29,3 +29,5 @@\n 'DbModelTestCase',\n 'WorkflowTestCase'\n ]\n+\n+__version__ = '3.3dev'\n", "issue": "Add version string to st2tests to make it installable\nPrior to this change, this will fail:\r\n\r\n cd st2tests/st2tests\r\n pip install .\r\n\r\nAfter this change that command successfully installs the `st2tests` package. This will also work for installing via GitHub as in:\r\n\r\n pip install -e git+https://github.com/StackStorm/[email protected]#egg=st2tests&subdirectory=st2tests\r\n\r\nThe original request in #2574 is to get st2tests onto PyPI, and I'm not sure if this will accomplish that request, but this is a good first step.\n", "before_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n", "path": "st2tests/st2tests/__init__.py"}], "after_files": [{"content": "# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2tests.base import EventletTestCase\nfrom st2tests.base import DbTestCase\nfrom st2tests.base import ExecutionDbTestCase\nfrom st2tests.base import DbModelTestCase\nfrom st2tests.base import WorkflowTestCase\n\n\n__all__ = [\n 'EventletTestCase',\n 'DbTestCase',\n 'ExecutionDbTestCase',\n 'DbModelTestCase',\n 'WorkflowTestCase'\n]\n\n__version__ = '3.3dev'\n", "path": "st2tests/st2tests/__init__.py"}]} | 696 | 88 |
gh_patches_debug_29289 | rasdani/github-patches | git_diff | google__openhtf-186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Attaching binary file using test.attach raises UnicodeDecodeError
If I attach a png or avi I see the following in OutputTestRecord
Python2.7/site-packages/openhtf/__init__.py", line 185, in OutputTestRecord
output_cb(test_record)
File "virtualenv/local/lib/python2.7/site-packages/openhtf/**init**.py", line 83, in **call**
f.write(self.encode(as_dict))
File "/usr/lib/python2.7/json/encoder.py", line 209, in encode
chunks = list(chunks)
File "/usr/lib/python2.7/json/encoder.py", line 434, in _iterencode
for chunk in _iterencode_dict(o, _current_indent_level):
File "/usr/lib/python2.7/json/encoder.py", line 408, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/json/encoder.py", line 332, in _iterencode_list
for chunk in chunks:
File "/usr/lib/python2.7/json/encoder.py", line 408, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/json/encoder.py", line 408, in _iterencode_dict
for chunk in chunks:
File "/usr/lib/python2.7/json/encoder.py", line 390, in _iterencode_dict
yield _encoder(value)
UnicodeDecodeError: 'utf8' codec can't decode byte 0x89 in position 0: invalid start byte
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openhtf/io/output/json_factory.py`
Content:
```
1 """Module for outputting test record to JSON-formatted files."""
2
3 from json import JSONEncoder
4
5 from openhtf import util
6 from openhtf.exe import test_state
7
8
9 class OutputToJSON(JSONEncoder):
10 """Return an output callback that writes JSON Test Records.
11
12 An example filename_pattern might be:
13 '/data/test_records/%(dut_id)s.%(start_time_millis)s'
14
15 To use this output mechanism:
16 test = openhtf.Test(PhaseOne, PhaseTwo)
17 test.AddOutputCallback(openhtf.OutputToJson(
18 '/data/test_records/%(dut_id)s.%(start_time_millis)s'))
19
20 Args:
21 filename_pattern: A format string specifying the filename to write to,
22 will be formatted with the Test Record as a dictionary.
23 inline_attachments: Whether attachments should be included inline in the
24 output. Set to False if you expect to have large binary attachments.
25 """
26
27 def __init__(self, filename_pattern=None, inline_attachments=True, **kwargs):
28 super(OutputToJSON, self).__init__(**kwargs)
29 self.filename_pattern = filename_pattern
30 self.inline_attachments = inline_attachments
31
32 def default(self, obj):
33 if isinstance(obj, BaseException):
34 # Just repr exceptions.
35 return repr(obj)
36 return super(OutputToJSON, self).default(obj)
37
38 # pylint: disable=invalid-name
39 def __call__(self, test_record):
40 assert self.filename_pattern, 'filename_pattern required'
41 if self.inline_attachments:
42 as_dict = util.ConvertToBaseTypes(test_record)
43 else:
44 as_dict = util.ConvertToBaseTypes(test_record, ignore_keys='attachments')
45 with open(self.filename_pattern % as_dict, 'w') as f:
46 f.write(self.encode(as_dict))
47 # pylint: enable=invalid-name
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openhtf/io/output/json_factory.py b/openhtf/io/output/json_factory.py
--- a/openhtf/io/output/json_factory.py
+++ b/openhtf/io/output/json_factory.py
@@ -1,5 +1,6 @@
"""Module for outputting test record to JSON-formatted files."""
+import base64
from json import JSONEncoder
from openhtf import util
@@ -21,7 +22,9 @@
filename_pattern: A format string specifying the filename to write to,
will be formatted with the Test Record as a dictionary.
inline_attachments: Whether attachments should be included inline in the
- output. Set to False if you expect to have large binary attachments.
+ output. Set to False if you expect to have large binary attachments. If
+ True (the default), then attachments are base64 encoded to allow for
+ binary data that's not supported by JSON directly.
"""
def __init__(self, filename_pattern=None, inline_attachments=True, **kwargs):
@@ -40,6 +43,9 @@
assert self.filename_pattern, 'filename_pattern required'
if self.inline_attachments:
as_dict = util.ConvertToBaseTypes(test_record)
+ for phase in as_dict['phases']:
+ for value in phase['attachments'].itervalues():
+ value['data'] = base64.standard_b64encode(value['data'])
else:
as_dict = util.ConvertToBaseTypes(test_record, ignore_keys='attachments')
with open(self.filename_pattern % as_dict, 'w') as f:
| {"golden_diff": "diff --git a/openhtf/io/output/json_factory.py b/openhtf/io/output/json_factory.py\n--- a/openhtf/io/output/json_factory.py\n+++ b/openhtf/io/output/json_factory.py\n@@ -1,5 +1,6 @@\n \"\"\"Module for outputting test record to JSON-formatted files.\"\"\"\n \n+import base64\n from json import JSONEncoder\n \n from openhtf import util\n@@ -21,7 +22,9 @@\n filename_pattern: A format string specifying the filename to write to,\n will be formatted with the Test Record as a dictionary.\n inline_attachments: Whether attachments should be included inline in the\n- output. Set to False if you expect to have large binary attachments.\n+ output. Set to False if you expect to have large binary attachments. If\n+ True (the default), then attachments are base64 encoded to allow for\n+ binary data that's not supported by JSON directly.\n \"\"\"\n \n def __init__(self, filename_pattern=None, inline_attachments=True, **kwargs):\n@@ -40,6 +43,9 @@\n assert self.filename_pattern, 'filename_pattern required'\n if self.inline_attachments:\n as_dict = util.ConvertToBaseTypes(test_record)\n+ for phase in as_dict['phases']:\n+ for value in phase['attachments'].itervalues():\n+ value['data'] = base64.standard_b64encode(value['data'])\n else:\n as_dict = util.ConvertToBaseTypes(test_record, ignore_keys='attachments')\n with open(self.filename_pattern % as_dict, 'w') as f:\n", "issue": "Attaching binary file using test.attach raises UnicodeDecodeError\nIf I attach a png or avi I see the following in OutputTestRecord\n\nPython2.7/site-packages/openhtf/**init**.py\", line 185, in OutputTestRecord\n output_cb(test_record)\n File \"virtualenv/local/lib/python2.7/site-packages/openhtf/**init**.py\", line 83, in **call**\n f.write(self.encode(as_dict))\n File \"/usr/lib/python2.7/json/encoder.py\", line 209, in encode\n chunks = list(chunks)\n File \"/usr/lib/python2.7/json/encoder.py\", line 434, in _iterencode\n for chunk in _iterencode_dict(o, _current_indent_level):\n File \"/usr/lib/python2.7/json/encoder.py\", line 408, in _iterencode_dict\n for chunk in chunks:\n File \"/usr/lib/python2.7/json/encoder.py\", line 332, in _iterencode_list\n for chunk in chunks:\n File \"/usr/lib/python2.7/json/encoder.py\", line 408, in _iterencode_dict\n for chunk in chunks:\n File \"/usr/lib/python2.7/json/encoder.py\", line 408, in _iterencode_dict\n for chunk in chunks:\n File \"/usr/lib/python2.7/json/encoder.py\", line 390, in _iterencode_dict\n yield _encoder(value)\nUnicodeDecodeError: 'utf8' codec can't decode byte 0x89 in position 0: invalid start byte\n\n", "before_files": [{"content": "\"\"\"Module for outputting test record to JSON-formatted files.\"\"\"\n\nfrom json import JSONEncoder\n\nfrom openhtf import util\nfrom openhtf.exe import test_state\n\n\nclass OutputToJSON(JSONEncoder):\n \"\"\"Return an output callback that writes JSON Test Records.\n\n An example filename_pattern might be:\n '/data/test_records/%(dut_id)s.%(start_time_millis)s'\n\n To use this output mechanism:\n test = openhtf.Test(PhaseOne, PhaseTwo)\n test.AddOutputCallback(openhtf.OutputToJson(\n '/data/test_records/%(dut_id)s.%(start_time_millis)s'))\n\n Args:\n filename_pattern: A format string specifying the filename to write to,\n will be formatted with the Test Record as a dictionary.\n inline_attachments: Whether attachments should be included inline in the\n output. 
Set to False if you expect to have large binary attachments.\n \"\"\"\n\n def __init__(self, filename_pattern=None, inline_attachments=True, **kwargs):\n super(OutputToJSON, self).__init__(**kwargs)\n self.filename_pattern = filename_pattern\n self.inline_attachments = inline_attachments\n\n def default(self, obj):\n if isinstance(obj, BaseException):\n # Just repr exceptions.\n return repr(obj)\n return super(OutputToJSON, self).default(obj)\n\n # pylint: disable=invalid-name\n def __call__(self, test_record):\n assert self.filename_pattern, 'filename_pattern required'\n if self.inline_attachments:\n as_dict = util.ConvertToBaseTypes(test_record)\n else:\n as_dict = util.ConvertToBaseTypes(test_record, ignore_keys='attachments')\n with open(self.filename_pattern % as_dict, 'w') as f:\n f.write(self.encode(as_dict))\n # pylint: enable=invalid-name\n", "path": "openhtf/io/output/json_factory.py"}], "after_files": [{"content": "\"\"\"Module for outputting test record to JSON-formatted files.\"\"\"\n\nimport base64\nfrom json import JSONEncoder\n\nfrom openhtf import util\nfrom openhtf.exe import test_state\n\n\nclass OutputToJSON(JSONEncoder):\n \"\"\"Return an output callback that writes JSON Test Records.\n\n An example filename_pattern might be:\n '/data/test_records/%(dut_id)s.%(start_time_millis)s'\n\n To use this output mechanism:\n test = openhtf.Test(PhaseOne, PhaseTwo)\n test.AddOutputCallback(openhtf.OutputToJson(\n '/data/test_records/%(dut_id)s.%(start_time_millis)s'))\n\n Args:\n filename_pattern: A format string specifying the filename to write to,\n will be formatted with the Test Record as a dictionary.\n inline_attachments: Whether attachments should be included inline in the\n output. Set to False if you expect to have large binary attachments. If\n True (the default), then attachments are base64 encoded to allow for\n binary data that's not supported by JSON directly.\n \"\"\"\n\n def __init__(self, filename_pattern=None, inline_attachments=True, **kwargs):\n super(OutputToJSON, self).__init__(**kwargs)\n self.filename_pattern = filename_pattern\n self.inline_attachments = inline_attachments\n\n def default(self, obj):\n if isinstance(obj, BaseException):\n # Just repr exceptions.\n return repr(obj)\n return super(OutputToJSON, self).default(obj)\n\n # pylint: disable=invalid-name\n def __call__(self, test_record):\n assert self.filename_pattern, 'filename_pattern required'\n if self.inline_attachments:\n as_dict = util.ConvertToBaseTypes(test_record)\n for phase in as_dict['phases']:\n for value in phase['attachments'].itervalues():\n value['data'] = base64.standard_b64encode(value['data'])\n else:\n as_dict = util.ConvertToBaseTypes(test_record, ignore_keys='attachments')\n with open(self.filename_pattern % as_dict, 'w') as f:\n f.write(self.encode(as_dict))\n # pylint: enable=invalid-name\n", "path": "openhtf/io/output/json_factory.py"}]} | 1,093 | 344 |
gh_patches_debug_24992 | rasdani/github-patches | git_diff | fedora-infra__bodhi-2733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Staging is currently returning HTML to bodhi CLI requests
I am not sure why this is happening, but it seems that staging Bodhi is currently returning HTML to CLI requests. This also happens to requests with ```http``` or ```curl```.
I recall a problem with the unit tests where they would sometimes receive HTML when they didn't explicitly use a request header to ask for a JSON response once we started testing under Python 3. We ended up adjusting the tests to pass that header since this did not seem to happen when serving Bodhi with ```pserve-3```.
It turns out that there really is some problem that seems related to Python 3 since staging Bodhi started doing this same thing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/webapp.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright © 2018 Red Hat, Inc.
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 """
20 Define Bodhi's WSGI application.
21
22 As of the writing of this docblock, this module is a bit misnamed since the webapp is actually
23 defined in bodhi.server.__init__. However, that is an anti-pattern with lots of nasty in-line
24 imports due to circular dependencies, and this module is intended to solve that problem.
25 Unfortunately, it is a backwards-incompatible change to move main() here, so it will remain in
26 __init__ until we make a major Bodhi release. See https://github.com/fedora-infra/bodhi/issues/2294
27 """
28
29 from pyramid.events import NewRequest, subscriber
30
31 from bodhi import server
32
33
34 def _complete_database_session(request):
35 """
36 Commit the database changes if no exceptions occurred.
37
38 This is a post-request hook. It handles rolling back or committing the session based on whether
39 an exception occurred or not. To get a database session that's not tied to the request/response
40 cycle, just use the :data:`Session` scoped session.
41
42 Args:
43 request (pyramid.request.Request): The current web request.
44 """
45 _rollback_or_commit(request)
46 server.Session().close()
47 server.Session.remove()
48
49
50 @subscriber(NewRequest)
51 def _prepare_request(event):
52 """
53 Add callbacks onto every new request.
54
55 This function adds a callback to clean up the database session when the request is finished.
56
57 Args:
58 event (pyramid.events.NewRequest): The new request event.
59 """
60 event.request.add_finished_callback(_complete_database_session)
61
62
63 def _rollback_or_commit(request):
64 """
65 Commit the transaction if there are no exceptions, otherwise rollback.
66
67 Args:
68 request (pyramid.request.Request): The current web request.
69 """
70 if request.exception is not None:
71 server.Session().rollback()
72 else:
73 server.Session().commit()
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/webapp.py b/bodhi/server/webapp.py
--- a/bodhi/server/webapp.py
+++ b/bodhi/server/webapp.py
@@ -50,13 +50,25 @@
@subscriber(NewRequest)
def _prepare_request(event):
"""
- Add callbacks onto every new request.
+ Prepare each incoming request to Bodhi.
- This function adds a callback to clean up the database session when the request is finished.
+ This function does two things:
+ * If requests do not have an Accept header, or if their Accept header is "*/*", it sets the
+ header to application/json. Pyramid has undefined behavior when an ambiguous or missing
+ Accept header is received, and multiple views are defined that handle specific Accept
+ headers. For example, we have a view that returns html or JSON for /composes/, depending
+ on the Accept header, but if a request has no Accept header or has */*, Pyramid will
+ consider both views to be a match for the request and so it is undefined which view will
+ handle the request. Let's force ambibuous requests to receive a JSON response so we have a
+ defined behavior. See https://github.com/fedora-infra/bodhi/issues/2731.
+ * It adds a callback to clean up the database session when the request is finished.
Args:
event (pyramid.events.NewRequest): The new request event.
"""
+ if 'Accept' not in event.request.headers or event.request.headers['Accept'] == '*/*':
+ event.request.headers['Accept'] = 'application/json'
+
event.request.add_finished_callback(_complete_database_session)
| {"golden_diff": "diff --git a/bodhi/server/webapp.py b/bodhi/server/webapp.py\n--- a/bodhi/server/webapp.py\n+++ b/bodhi/server/webapp.py\n@@ -50,13 +50,25 @@\n @subscriber(NewRequest)\n def _prepare_request(event):\n \"\"\"\n- Add callbacks onto every new request.\n+ Prepare each incoming request to Bodhi.\n \n- This function adds a callback to clean up the database session when the request is finished.\n+ This function does two things:\n+ * If requests do not have an Accept header, or if their Accept header is \"*/*\", it sets the\n+ header to application/json. Pyramid has undefined behavior when an ambiguous or missing\n+ Accept header is received, and multiple views are defined that handle specific Accept\n+ headers. For example, we have a view that returns html or JSON for /composes/, depending\n+ on the Accept header, but if a request has no Accept header or has */*, Pyramid will\n+ consider both views to be a match for the request and so it is undefined which view will\n+ handle the request. Let's force ambibuous requests to receive a JSON response so we have a\n+ defined behavior. See https://github.com/fedora-infra/bodhi/issues/2731.\n+ * It adds a callback to clean up the database session when the request is finished.\n \n Args:\n event (pyramid.events.NewRequest): The new request event.\n \"\"\"\n+ if 'Accept' not in event.request.headers or event.request.headers['Accept'] == '*/*':\n+ event.request.headers['Accept'] = 'application/json'\n+\n event.request.add_finished_callback(_complete_database_session)\n", "issue": "Staging is currently returning HTML to bodhi CLI requests\nI am not sure why this is happening, but it seems that staging Bodhi is currently returning HTML to CLI requests. This also happens to requests with ```http``` or ```curl```.\r\n\r\nI recall a problem with the unit tests where they would sometimes receive HTML when they didn't explicitly use a request header to ask for a JSON response once we started testing under Python 3. We ended up adjusting the tests to pass that header since this did not seem to happen when serving Bodhi with ```pserve-3```.\r\n\r\nIt turns out that there really is some problem that seems related to Python 3 since staging Bodhi started doing this same thing.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nDefine Bodhi's WSGI application.\n\nAs of the writing of this docblock, this module is a bit misnamed since the webapp is actually\ndefined in bodhi.server.__init__. 
However, that is an anti-pattern with lots of nasty in-line\nimports due to circular dependencies, and this module is intended to solve that problem.\nUnfortunately, it is a backwards-incompatible change to move main() here, so it will remain in\n__init__ until we make a major Bodhi release. See https://github.com/fedora-infra/bodhi/issues/2294\n\"\"\"\n\nfrom pyramid.events import NewRequest, subscriber\n\nfrom bodhi import server\n\n\ndef _complete_database_session(request):\n \"\"\"\n Commit the database changes if no exceptions occurred.\n\n This is a post-request hook. It handles rolling back or committing the session based on whether\n an exception occurred or not. To get a database session that's not tied to the request/response\n cycle, just use the :data:`Session` scoped session.\n\n Args:\n request (pyramid.request.Request): The current web request.\n \"\"\"\n _rollback_or_commit(request)\n server.Session().close()\n server.Session.remove()\n\n\n@subscriber(NewRequest)\ndef _prepare_request(event):\n \"\"\"\n Add callbacks onto every new request.\n\n This function adds a callback to clean up the database session when the request is finished.\n\n Args:\n event (pyramid.events.NewRequest): The new request event.\n \"\"\"\n event.request.add_finished_callback(_complete_database_session)\n\n\ndef _rollback_or_commit(request):\n \"\"\"\n Commit the transaction if there are no exceptions, otherwise rollback.\n\n Args:\n request (pyramid.request.Request): The current web request.\n \"\"\"\n if request.exception is not None:\n server.Session().rollback()\n else:\n server.Session().commit()\n", "path": "bodhi/server/webapp.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nDefine Bodhi's WSGI application.\n\nAs of the writing of this docblock, this module is a bit misnamed since the webapp is actually\ndefined in bodhi.server.__init__. However, that is an anti-pattern with lots of nasty in-line\nimports due to circular dependencies, and this module is intended to solve that problem.\nUnfortunately, it is a backwards-incompatible change to move main() here, so it will remain in\n__init__ until we make a major Bodhi release. See https://github.com/fedora-infra/bodhi/issues/2294\n\"\"\"\n\nfrom pyramid.events import NewRequest, subscriber\n\nfrom bodhi import server\n\n\ndef _complete_database_session(request):\n \"\"\"\n Commit the database changes if no exceptions occurred.\n\n This is a post-request hook. It handles rolling back or committing the session based on whether\n an exception occurred or not. 
To get a database session that's not tied to the request/response\n cycle, just use the :data:`Session` scoped session.\n\n Args:\n request (pyramid.request.Request): The current web request.\n \"\"\"\n _rollback_or_commit(request)\n server.Session().close()\n server.Session.remove()\n\n\n@subscriber(NewRequest)\ndef _prepare_request(event):\n \"\"\"\n Prepare each incoming request to Bodhi.\n\n This function does two things:\n * If requests do not have an Accept header, or if their Accept header is \"*/*\", it sets the\n header to application/json. Pyramid has undefined behavior when an ambiguous or missing\n Accept header is received, and multiple views are defined that handle specific Accept\n headers. For example, we have a view that returns html or JSON for /composes/, depending\n on the Accept header, but if a request has no Accept header or has */*, Pyramid will\n consider both views to be a match for the request and so it is undefined which view will\n handle the request. Let's force ambibuous requests to receive a JSON response so we have a\n defined behavior. See https://github.com/fedora-infra/bodhi/issues/2731.\n * It adds a callback to clean up the database session when the request is finished.\n\n Args:\n event (pyramid.events.NewRequest): The new request event.\n \"\"\"\n if 'Accept' not in event.request.headers or event.request.headers['Accept'] == '*/*':\n event.request.headers['Accept'] = 'application/json'\n\n event.request.add_finished_callback(_complete_database_session)\n\n\ndef _rollback_or_commit(request):\n \"\"\"\n Commit the transaction if there are no exceptions, otherwise rollback.\n\n Args:\n request (pyramid.request.Request): The current web request.\n \"\"\"\n if request.exception is not None:\n server.Session().rollback()\n else:\n server.Session().commit()\n", "path": "bodhi/server/webapp.py"}]} | 1,138 | 372 |
gh_patches_debug_53973 | rasdani/github-patches | git_diff | Mailu__Mailu-2563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
postfix throttling after a hard shutdown
After a hard shutdown, I noticed that postfix would not restart because master.pid is found in /queue/pid/master.pid
We should check and remove this file during container startup (start.py)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/postfix/start.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9 import re
10
11 from podop import run_server
12 from pwd import getpwnam
13 from socrate import system, conf
14
15 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
16
17 def start_podop():
18 os.setuid(getpwnam('postfix').pw_uid)
19 os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)
20 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
21 # TODO: Remove verbosity setting from Podop?
22 run_server(0, "postfix", "/tmp/podop.socket", [
23 ("transport", "url", url + "transport/§"),
24 ("alias", "url", url + "alias/§"),
25 ("dane", "url", url + "dane/§"),
26 ("domain", "url", url + "domain/§"),
27 ("mailbox", "url", url + "mailbox/§"),
28 ("recipientmap", "url", url + "recipient/map/§"),
29 ("sendermap", "url", url + "sender/map/§"),
30 ("senderlogin", "url", url + "sender/login/§"),
31 ("senderrate", "url", url + "sender/rate/§")
32 ])
33
34 def start_mta_sts_daemon():
35 os.chmod("/root/", 0o755) # read access to /root/.netrc required
36 os.setuid(getpwnam('postfix').pw_uid)
37 from postfix_mta_sts_resolver import daemon
38 daemon.main()
39
40 def is_valid_postconf_line(line):
41 return not line.startswith("#") \
42 and not line == ''
43
44 # Actual startup script
45 os.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'
46 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
47 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
48 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
49 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
50 os.environ["POSTFIX_LOG_SYSLOG"] = os.environ.get("POSTFIX_LOG_SYSLOG","local")
51 os.environ["POSTFIX_LOG_FILE"] = os.environ.get("POSTFIX_LOG_FILE", "")
52
53 # Postfix requires IPv6 addresses to be wrapped in square brackets
54 if 'RELAYNETS' in os.environ:
55 os.environ["RELAYNETS"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\1]/', os.environ["RELAYNETS"])
56
57 for postfix_file in glob.glob("/conf/*.cf"):
58 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
59
60 if os.path.exists("/overrides/postfix.cf"):
61 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
62 if is_valid_postconf_line(line):
63 os.system('postconf -e "{}"'.format(line))
64
65 if os.path.exists("/overrides/postfix.master"):
66 for line in open("/overrides/postfix.master").read().strip().split("\n"):
67 if is_valid_postconf_line(line):
68 os.system('postconf -Me "{}"'.format(line))
69
70 for map_file in glob.glob("/overrides/*.map"):
71 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
72 shutil.copyfile(map_file, destination)
73 os.system("postmap {}".format(destination))
74 os.remove(destination)
75
76 if os.path.exists("/overrides/mta-sts-daemon.yml"):
77 shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml")
78 else:
79 conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml")
80
81 for policy in ['tls_policy', 'transport']:
82 if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):
83 open(f'/etc/postfix/{policy}.map', 'a').close()
84 os.system(f'postmap /etc/postfix/{policy}.map')
85
86 if "RELAYUSER" in os.environ:
87 path = "/etc/postfix/sasl_passwd"
88 conf.jinja("/conf/sasl_passwd", os.environ, path)
89 os.system("postmap {}".format(path))
90
91 # Configure and start local rsyslog server
92 conf.jinja("/conf/rsyslog.conf", os.environ, "/etc/rsyslog.conf")
93 os.system("/usr/sbin/rsyslogd -niNONE &")
94 # Configure logrotate and start crond
95 if os.environ["POSTFIX_LOG_FILE"] != "":
96 conf.jinja("/conf/logrotate.conf", os.environ, "/etc/logrotate.d/postfix.conf")
97 os.system("/usr/sbin/crond")
98 if os.path.exists("/overrides/logrotate.conf"):
99 shutil.copyfile("/overrides/logrotate.conf", "/etc/logrotate.d/postfix.conf")
100
101 # Run Podop and Postfix
102 multiprocessing.Process(target=start_podop).start()
103 multiprocessing.Process(target=start_mta_sts_daemon).start()
104 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
105 # Before starting postfix, we need to check permissions on /queue
106 # in the event that postfix,postdrop id have changed
107 os.system("postfix set-permissions")
108 os.system("postfix start-fg")
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -14,6 +14,8 @@
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
+os.system("flock -n /queue/pid/master.pid rm /queue/pid/master.pid")
+
def start_podop():
os.setuid(getpwnam('postfix').pw_uid)
os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -14,6 +14,8 @@\n \n log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n \n+os.system(\"flock -n /queue/pid/master.pid rm /queue/pid/master.pid\")\n+\n def start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)\n", "issue": "postfix throttling afer a hard shutdown\nAfter a hard shutdown, I noticed that postfix would not restart because of master.pid is found in /queue/pid/master.pid\r\nWe should check and remove this file during container start up (start.py)\npostfix throttling afer a hard shutdown\nAfter a hard shutdown, I noticed that postfix would not restart because of master.pid is found in /queue/pid/master.pid\r\nWe should check and remove this file during container start up (start.py)\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\nimport re\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"POSTFIX_LOG_SYSLOG\"] = os.environ.get(\"POSTFIX_LOG_SYSLOG\",\"local\")\nos.environ[\"POSTFIX_LOG_FILE\"] = os.environ.get(\"POSTFIX_LOG_FILE\", \"\")\n\n# Postfix requires IPv6 addresses to be wrapped in square brackets\nif 'RELAYNETS' in os.environ:\n os.environ[\"RELAYNETS\"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\\\1]/', os.environ[\"RELAYNETS\"])\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in 
open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nfor policy in ['tls_policy', 'transport']:\n if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n open(f'/etc/postfix/{policy}.map', 'a').close()\n os.system(f'postmap /etc/postfix/{policy}.map')\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure and start local rsyslog server\nconf.jinja(\"/conf/rsyslog.conf\", os.environ, \"/etc/rsyslog.conf\")\nos.system(\"/usr/sbin/rsyslogd -niNONE &\")\n# Configure logrotate and start crond\nif os.environ[\"POSTFIX_LOG_FILE\"] != \"\":\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\nimport re\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\nos.system(\"flock -n /queue/pid/master.pid rm /queue/pid/master.pid\")\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.makedirs('/dev/shm/postfix',mode=0o700, exist_ok=True)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from 
postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"POSTFIX_LOG_SYSLOG\"] = os.environ.get(\"POSTFIX_LOG_SYSLOG\",\"local\")\nos.environ[\"POSTFIX_LOG_FILE\"] = os.environ.get(\"POSTFIX_LOG_FILE\", \"\")\n\n# Postfix requires IPv6 addresses to be wrapped in square brackets\nif 'RELAYNETS' in os.environ:\n os.environ[\"RELAYNETS\"] = re.sub(r'([0-9a-fA-F]+:[0-9a-fA-F:]+)/', '[\\\\1]/', os.environ[\"RELAYNETS\"])\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nfor policy in ['tls_policy', 'transport']:\n if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n open(f'/etc/postfix/{policy}.map', 'a').close()\n os.system(f'postmap /etc/postfix/{policy}.map')\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure and start local rsyslog server\nconf.jinja(\"/conf/rsyslog.conf\", os.environ, \"/etc/rsyslog.conf\")\nos.system(\"/usr/sbin/rsyslogd -niNONE &\")\n# Configure logrotate and start crond\nif os.environ[\"POSTFIX_LOG_FILE\"] != \"\":\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 1,830 | 128 |
gh_patches_debug_10941 | rasdani/github-patches | git_diff | mesonbuild__meson-8978 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_variable with a file object as default value: Argument of type File is not held by an ObjectHolder
**Describe the bug**
After updating Meson, I see this error in a previously working build:
```
build/analysis/vale/meson.build:24:0: ERROR: Argument build/analysis/vale/vale-styleguide/config/documentation.vale.ini of type File is not held by an ObjectHolder.
This is a Meson bug and should be reported!
```
The file is being specified in this manner:
```
# Supply a style file, which will use this file instead of the default .vale.ini
vale_config_file = get_variable('vale_config_file',
files('vale-styleguide/config/documentation.vale.ini'))
```
The default variable option is being used - I'm not overriding it.
The same is happening in a Doxygen module I use:
```
doxyfile_input = get_variable('doxyfile_input', files('Doxyfile.in'))
```
I tried moving the file object into another variable:
```
vale_default_config_file = files('vale-styleguide/config/documentation.vale.ini')
vale_config_file = get_variable('vale_config_file', vale_default_config_file)
```
With the same result - the error is reported on the `get_variable` line.
**system parameters**
* Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)? **native**
* what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.) **MacOS 10.15.7**
* what Python version are you using e.g. 3.8.0 **Python 3.9.6**
* what `meson --version` **0.59.0.rc1**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/interpreterbase/_unholder.py`
Content:
```
1 # Copyright 2013-2021 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from .baseobjects import InterpreterObject, MesonInterpreterObject, ObjectHolder, TYPE_var
16 from .exceptions import InvalidArguments
17 from ..mesonlib import HoldableObject, MesonBugException
18
19 import typing as T
20
21 def _unholder(obj: T.Union[TYPE_var, InterpreterObject], *, permissive: bool = False) -> TYPE_var:
22 if isinstance(obj, (int, bool, str)):
23 return obj
24 elif isinstance(obj, list):
25 return [_unholder(x) for x in obj]
26 elif isinstance(obj, dict):
27 return {k: _unholder(v) for k, v in obj.items()}
28 elif isinstance(obj, ObjectHolder):
29 assert isinstance(obj.held_object, HoldableObject)
30 return obj.held_object
31 elif isinstance(obj, MesonInterpreterObject):
32 return obj
33 elif isinstance(obj, HoldableObject) and permissive:
34 return obj
35 elif isinstance(obj, HoldableObject):
36 raise MesonBugException(f'Argument {obj} of type {type(obj).__name__} is not held by an ObjectHolder.')
37 elif isinstance(obj, InterpreterObject):
38 raise InvalidArguments(f'Argument {obj} of type {type(obj).__name__} cannot be passed to a method or function')
39 raise MesonBugException(f'Unknown object {obj} of type {type(obj).__name__} in the parameters.')
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/interpreterbase/_unholder.py b/mesonbuild/interpreterbase/_unholder.py
--- a/mesonbuild/interpreterbase/_unholder.py
+++ b/mesonbuild/interpreterbase/_unholder.py
@@ -22,9 +22,9 @@
if isinstance(obj, (int, bool, str)):
return obj
elif isinstance(obj, list):
- return [_unholder(x) for x in obj]
+ return [_unholder(x, permissive=permissive) for x in obj]
elif isinstance(obj, dict):
- return {k: _unholder(v) for k, v in obj.items()}
+ return {k: _unholder(v, permissive=permissive) for k, v in obj.items()}
elif isinstance(obj, ObjectHolder):
assert isinstance(obj.held_object, HoldableObject)
return obj.held_object
| {"golden_diff": "diff --git a/mesonbuild/interpreterbase/_unholder.py b/mesonbuild/interpreterbase/_unholder.py\n--- a/mesonbuild/interpreterbase/_unholder.py\n+++ b/mesonbuild/interpreterbase/_unholder.py\n@@ -22,9 +22,9 @@\n if isinstance(obj, (int, bool, str)):\n return obj\n elif isinstance(obj, list):\n- return [_unholder(x) for x in obj]\n+ return [_unholder(x, permissive=permissive) for x in obj]\n elif isinstance(obj, dict):\n- return {k: _unholder(v) for k, v in obj.items()}\n+ return {k: _unholder(v, permissive=permissive) for k, v in obj.items()}\n elif isinstance(obj, ObjectHolder):\n assert isinstance(obj.held_object, HoldableObject)\n return obj.held_object\n", "issue": "get_variable with a file object as default value: Argument of type File is not held by an ObjectHolder\n**Describe the bug**\r\nAfter updating Meson, I see this error in a previously working build:\r\n\r\n```\r\nbuild/analysis/vale/meson.build:24:0: ERROR: Argument build/analysis/vale/vale-styleguide/config/documentation.vale.ini of type File is not held by an ObjectHolder.\r\n\r\n This is a Meson bug and should be reported!\r\n```\r\n\r\nThe file is being specified in this manner:\r\n\r\n```\r\n# Supply a style file, which will use this file instead of the default .vale.ini\r\nvale_config_file = get_variable('vale_config_file',\r\n\tfiles('vale-styleguide/config/documentation.vale.ini'))\r\n```\r\n\r\nThe default variable option is being used - I'm not overriding it.\r\n\r\nThe same is happening in a Doxygen module I use:\r\n\r\n```\r\ndoxyfile_input = get_variable('doxyfile_input', files('Doxyfile.in'))\r\n```\r\n\r\nI tried moving the file object into another variable:\r\n\r\n```\r\nvale_default_config_file = files('vale-styleguide/config/documentation.vale.ini')\r\nvale_config_file = get_variable('vale_config_file', vale_default_config_file)\r\n```\r\n\r\nWith teh same result - the error is reported on the `get_variable` line.\r\n\r\n**system parameters**\r\n* Is this a [cross build](https://mesonbuild.com/Cross-compilation.html) or just a plain native build (for the same computer)? **native**\r\n* what operating system (e.g. MacOS Catalina, Windows 10, CentOS 8.0, Ubuntu 18.04, etc.) **MacOS 10.15.7**\r\n* what Python version are you using e.g. 
3.8.0 **Python 3.9.6**\r\n* what `meson --version` **0.59.0.rc1**\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .baseobjects import InterpreterObject, MesonInterpreterObject, ObjectHolder, TYPE_var\nfrom .exceptions import InvalidArguments\nfrom ..mesonlib import HoldableObject, MesonBugException\n\nimport typing as T\n\ndef _unholder(obj: T.Union[TYPE_var, InterpreterObject], *, permissive: bool = False) -> TYPE_var:\n if isinstance(obj, (int, bool, str)):\n return obj\n elif isinstance(obj, list):\n return [_unholder(x) for x in obj]\n elif isinstance(obj, dict):\n return {k: _unholder(v) for k, v in obj.items()}\n elif isinstance(obj, ObjectHolder):\n assert isinstance(obj.held_object, HoldableObject)\n return obj.held_object\n elif isinstance(obj, MesonInterpreterObject):\n return obj\n elif isinstance(obj, HoldableObject) and permissive:\n return obj\n elif isinstance(obj, HoldableObject):\n raise MesonBugException(f'Argument {obj} of type {type(obj).__name__} is not held by an ObjectHolder.')\n elif isinstance(obj, InterpreterObject):\n raise InvalidArguments(f'Argument {obj} of type {type(obj).__name__} cannot be passed to a method or function')\n raise MesonBugException(f'Unknown object {obj} of type {type(obj).__name__} in the parameters.')\n", "path": "mesonbuild/interpreterbase/_unholder.py"}], "after_files": [{"content": "# Copyright 2013-2021 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .baseobjects import InterpreterObject, MesonInterpreterObject, ObjectHolder, TYPE_var\nfrom .exceptions import InvalidArguments\nfrom ..mesonlib import HoldableObject, MesonBugException\n\nimport typing as T\n\ndef _unholder(obj: T.Union[TYPE_var, InterpreterObject], *, permissive: bool = False) -> TYPE_var:\n if isinstance(obj, (int, bool, str)):\n return obj\n elif isinstance(obj, list):\n return [_unholder(x, permissive=permissive) for x in obj]\n elif isinstance(obj, dict):\n return {k: _unholder(v, permissive=permissive) for k, v in obj.items()}\n elif isinstance(obj, ObjectHolder):\n assert isinstance(obj.held_object, HoldableObject)\n return obj.held_object\n elif isinstance(obj, MesonInterpreterObject):\n return obj\n elif isinstance(obj, HoldableObject) and permissive:\n return obj\n elif isinstance(obj, HoldableObject):\n raise MesonBugException(f'Argument {obj} of type {type(obj).__name__} is not held by an ObjectHolder.')\n elif isinstance(obj, 
InterpreterObject):\n raise InvalidArguments(f'Argument {obj} of type {type(obj).__name__} cannot be passed to a method or function')\n raise MesonBugException(f'Unknown object {obj} of type {type(obj).__name__} in the parameters.')\n", "path": "mesonbuild/interpreterbase/_unholder.py"}]} | 1,160 | 196 |
gh_patches_debug_1316 | rasdani/github-patches | git_diff | mozilla__bugbug-3334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use information on how a bug is filed as a feature
This could be especially useful for the Spam model.
https://bugzilla.mozilla.org/show_bug.cgi?id=1565403
--- END ISSUE ---
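(Editor's note, not part of the original issue or repository: the request above is terse, so here is a minimal sketch of what a "how was this bug filed" feature extractor could look like, following the style of the extractors shown below. The class name and the `filed_via` field are assumptions about the Bugzilla bug data.)
```python
class filed_via(object):
    """Hypothetical extractor: report how the bug was filed (e.g. standard form, API)."""

    def __call__(self, bug, **kwargs):
        # Assumes the Bugzilla bug dict exposes a `filed_via` field.
        return bug.get("filed_via", "unknown")
```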
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/spambug.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.over_sampling import BorderlineSMOTE
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features, bugzilla, feature_cleanup, utils
13 from bugbug.model import BugModel
14
15
16 class SpamBugModel(BugModel):
17 def __init__(self, lemmatization=False):
18 BugModel.__init__(self, lemmatization)
19
20 self.sampler = BorderlineSMOTE(random_state=0)
21 self.calculate_importance = False
22
23 feature_extractors = [
24 bug_features.has_str(),
25 bug_features.has_regression_range(),
26 bug_features.severity(),
27 bug_features.has_crash_signature(),
28 bug_features.has_url(),
29 bug_features.whiteboard(),
30 bug_features.product(),
31 # TODO: We would like to use the component at the time of filing too,
32 # but we can't because the rollback script doesn't support changes to
33 # components yet.
34 # bug_features.component(),
35 bug_features.num_words_title(),
36 bug_features.num_words_comments(),
37 bug_features.keywords(),
38 bug_features.priority(),
39 bug_features.version(),
40 bug_features.target_milestone(),
41 bug_features.has_attachment(),
42 bug_features.platform(),
43 bug_features.op_sys(),
44 ]
45
46 cleanup_functions = [
47 feature_cleanup.fileref(),
48 feature_cleanup.url(),
49 feature_cleanup.synonyms(),
50 ]
51
52 self.extraction_pipeline = Pipeline(
53 [
54 (
55 "bug_extractor",
56 bug_features.BugExtractor(
57 feature_extractors, cleanup_functions, rollback=True
58 ),
59 ),
60 (
61 "union",
62 ColumnTransformer(
63 [
64 ("data", DictVectorizer(), "data"),
65 ("title", self.text_vectorizer(min_df=0.0001), "title"),
66 (
67 "comments",
68 self.text_vectorizer(min_df=0.0001),
69 "comments",
70 ),
71 ]
72 ),
73 ),
74 ]
75 )
76
77 self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())
78 self.clf.set_params(predictor="cpu_predictor")
79
80 def get_labels(self):
81 classes = {}
82
83 for bug_data in bugzilla.get_bugs(include_invalid=True):
84 bug_id = bug_data["id"]
85
86 # Skip bugs filed by Mozillians, since we are sure they are not spam.
87 if "@mozilla" in bug_data["creator"]:
88 continue
89
90 # A bug that was moved out of 'Invalid Bugs' is definitely a legitimate bug.
91 for history in bug_data["history"]:
92 for change in history["changes"]:
93 if (
94 change["field_name"] == "product"
95 and change["removed"] == "Invalid Bugs"
96 ):
97 classes[bug_id] = 0
98
99 # A fixed bug is definitely a legitimate bug.
100 if bug_data["resolution"] == "FIXED":
101 classes[bug_id] = 0
102
103 # A bug in the 'Invalid Bugs' product is definitely a spam bug.
104 elif bug_data["product"] == "Invalid Bugs":
105 classes[bug_id] = 1
106
107 print(
108 "{} bugs are classified as non-spam".format(
109 sum(1 for label in classes.values() if label == 0)
110 )
111 )
112 print(
113 "{} bugs are classified as spam".format(
114 sum(1 for label in classes.values() if label == 1)
115 )
116 )
117
118 return classes, [0, 1]
119
120 def items_gen(self, classes):
121 # Overwriting this method to add include_invalid=True to get_bugs to
122 # include spam bugs.
123 return (
124 (bug, classes[bug["id"]])
125 for bug in bugzilla.get_bugs(include_invalid=True)
126 if bug["id"] in classes
127 )
128
129 def get_feature_names(self):
130 return self.extraction_pipeline.named_steps["union"].get_feature_names_out()
131
132 def overwrite_classes(self, bugs, classes, probabilities):
133 for i, bug in enumerate(bugs):
134 if "@mozilla" in bug["creator"]:
135 if probabilities:
136 classes[i] = [1.0, 0.0]
137 else:
138 classes[i] = 0
139
140 return classes
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py
--- a/bugbug/models/spambug.py
+++ b/bugbug/models/spambug.py
@@ -41,6 +41,7 @@
bug_features.has_attachment(),
bug_features.platform(),
bug_features.op_sys(),
+ bug_features.filed_via(),
]
cleanup_functions = [
| {"golden_diff": "diff --git a/bugbug/models/spambug.py b/bugbug/models/spambug.py\n--- a/bugbug/models/spambug.py\n+++ b/bugbug/models/spambug.py\n@@ -41,6 +41,7 @@\n bug_features.has_attachment(),\n bug_features.platform(),\n bug_features.op_sys(),\n+ bug_features.filed_via(),\n ]\n \n cleanup_functions = [\n", "issue": "Use information on how a bug is filed as a feature\nThis could be especially useful for the Spam model.\r\n\r\nhttps://bugzilla.mozilla.org/show_bug.cgi?id=1565403\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.whiteboard(),\n bug_features.product(),\n # TODO: We would like to use the component at the time of filing too,\n # but we can't because the rollback script doesn't support changes to\n # components yet.\n # bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n bug_features.priority(),\n bug_features.version(),\n bug_features.target_milestone(),\n bug_features.has_attachment(),\n bug_features.platform(),\n bug_features.op_sys(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Skip bugs filed by Mozillians, since we are sure they are not spam.\n if \"@mozilla\" in bug_data[\"creator\"]:\n continue\n\n # A bug that was moved out of 'Invalid Bugs' is definitely a legitimate bug.\n for history in bug_data[\"history\"]:\n for change in history[\"changes\"]:\n if (\n change[\"field_name\"] == \"product\"\n and change[\"removed\"] == \"Invalid Bugs\"\n ):\n classes[bug_id] = 0\n\n # A fixed bug is definitely a legitimate bug.\n if bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # A bug in the 'Invalid Bugs' product is definitely a spam bug.\n elif bug_data[\"product\"] == \"Invalid Bugs\":\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n 
print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.over_sampling import BorderlineSMOTE\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n\nclass SpamBugModel(BugModel):\n def __init__(self, lemmatization=False):\n BugModel.__init__(self, lemmatization)\n\n self.sampler = BorderlineSMOTE(random_state=0)\n self.calculate_importance = False\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.whiteboard(),\n bug_features.product(),\n # TODO: We would like to use the component at the time of filing too,\n # but we can't because the rollback script doesn't support changes to\n # components yet.\n # bug_features.component(),\n bug_features.num_words_title(),\n bug_features.num_words_comments(),\n bug_features.keywords(),\n bug_features.priority(),\n bug_features.version(),\n bug_features.target_milestone(),\n bug_features.has_attachment(),\n bug_features.platform(),\n bug_features.op_sys(),\n bug_features.filed_via(),\n ]\n\n cleanup_functions = [\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(\n feature_extractors, cleanup_functions, rollback=True\n ),\n ),\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.0001), \"title\"),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.0001),\n \"comments\",\n ),\n ]\n ),\n ),\n ]\n )\n\n self.clf = xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n self.clf.set_params(predictor=\"cpu_predictor\")\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs(include_invalid=True):\n bug_id = bug_data[\"id\"]\n\n # Skip bugs filed by Mozillians, since we are sure they are not spam.\n if \"@mozilla\" in bug_data[\"creator\"]:\n continue\n\n # A bug that was moved out of 'Invalid Bugs' is definitely a legitimate bug.\n for history in bug_data[\"history\"]:\n for change in history[\"changes\"]:\n if (\n change[\"field_name\"] == \"product\"\n and change[\"removed\"] == \"Invalid Bugs\"\n ):\n classes[bug_id] = 0\n\n # A fixed bug is definitely a legitimate bug.\n if 
bug_data[\"resolution\"] == \"FIXED\":\n classes[bug_id] = 0\n\n # A bug in the 'Invalid Bugs' product is definitely a spam bug.\n elif bug_data[\"product\"] == \"Invalid Bugs\":\n classes[bug_id] = 1\n\n print(\n \"{} bugs are classified as non-spam\".format(\n sum(1 for label in classes.values() if label == 0)\n )\n )\n print(\n \"{} bugs are classified as spam\".format(\n sum(1 for label in classes.values() if label == 1)\n )\n )\n\n return classes, [0, 1]\n\n def items_gen(self, classes):\n # Overwriting this method to add include_invalid=True to get_bugs to\n # include spam bugs.\n return (\n (bug, classes[bug[\"id\"]])\n for bug in bugzilla.get_bugs(include_invalid=True)\n if bug[\"id\"] in classes\n )\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n for i, bug in enumerate(bugs):\n if \"@mozilla\" in bug[\"creator\"]:\n if probabilities:\n classes[i] = [1.0, 0.0]\n else:\n classes[i] = 0\n\n return classes\n", "path": "bugbug/models/spambug.py"}]} | 1,587 | 89 |
gh_patches_debug_22536 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1860 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User configurable connection for mongo-based databases
So the pymongo client has a multitude of ways of connecting to different mongo services.
For MongoDB Atlas users, for example, the connection string for Python connections is given as:
`mongodb+srv://<username>:<password>@<cluster-name>.mongodb.net/myFirstDatabase`
By making the mongo connection user configurable we can support different types of mongo services, rather than just asking for the basic connection arguments like port, username, and password. It also gives users an easier way to connect, instead of making assumptions about the type of mongodb and the kinds of credentials they might have.
As long as the pymongo client accepts the connection and connects the user to the database and the collection they want, I think this would be great!
Thanks again guys!
--- END ISSUE ---
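(Editor's note, not part of the original issue: as a sketch of the idea, the helper below shows one way a user-supplied config could be turned into a connection URL that also covers `mongodb+srv://` deployments. The config keys and the function itself are assumptions, not the project's actual API.)
```python
def build_mongo_url(config: dict) -> str:
    """Assemble a MongoDB connection URL from user-supplied config values (illustrative only)."""
    protocol = config.get("protocol", "mongodb")  # e.g. "mongodb" or "mongodb+srv"
    host = config.get("host", "localhost")
    port = config.get("port")                     # SRV connections normally omit the port
    user = config.get("user")
    password = config.get("password")

    credentials = f"{user}:{password}@" if user and password else ""
    endpoint = f"{host}:{port}" if port else host
    return f"{protocol}://{credentials}{endpoint}"
```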
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/database/mongo/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """A module for opsdroid to allow persist in mongo database."""
3 import logging
4 from contextlib import asynccontextmanager
5 from motor.motor_asyncio import AsyncIOMotorClient
6 from voluptuous import Any
7
8 from opsdroid.database import Database
9
10 _LOGGER = logging.getLogger(__name__)
11 CONFIG_SCHEMA = {
12 "host": str,
13 "port": Any(int, str),
14 "database": str,
15 "user": str,
16 "password": str,
17 "collection": str,
18 }
19
20
21 class DatabaseMongo(Database):
22 """A module for opsdroid to allow memory to persist in a mongo database."""
23
24 def __init__(self, config, opsdroid=None):
25 """Create the connection.
26
27 Set some basic properties from the database config such as the name
28 of this database.
29
30 Args:
31 config (dict): The config for this database specified in the
32 `configuration.yaml` file.
33 opsdroid (OpsDroid): An instance of opsdroid.core.
34
35 """
36 super().__init__(config, opsdroid=opsdroid)
37 _LOGGER.debug("Loaded mongo database connector.")
38 self.name = "mongo"
39 self.config = config
40 self.client = None
41 self.database = None
42 self.collection = config.get("collection", "opsdroid")
43
44 async def connect(self):
45 """Connect to the database."""
46 host = self.config.get("host", "localhost")
47 port = self.config.get("port", "27017")
48 database = self.config.get("database", "opsdroid")
49 user = self.config.get("user")
50 pwd = self.config.get("password")
51 if user and pwd:
52 path = "mongodb://{user}:{pwd}@{host}:{port}".format(
53 user=user, pwd=pwd, host=host, port=port
54 )
55 else:
56 path = "mongodb://{host}:{port}".format(host=host, port=port)
57 self.client = AsyncIOMotorClient(path)
58 self.database = self.client[database]
59 _LOGGER.info("Connected to MongoDB.")
60
61 async def put(self, key, data):
62 """Insert or replace an object into the database for a given key.
63
64 Args:
65 key (str): the key is the document lookup key.
66 data (object): the data to be inserted or replaced
67
68 """
69 _LOGGER.debug("Putting %s into MongoDB collection %s", key, self.collection)
70
71 if isinstance(data, str):
72 data = {"value": data}
73 if "key" not in data:
74 data["key"] = key
75
76 return await self.database[self.collection].update_one(
77 {"key": data["key"]}, {"$set": data}, upsert=True
78 )
79
80 async def get(self, key):
81 """Get a document from the database (key).
82
83 Args:
84 key (str): the key is the document lookup key.
85
86 """
87 _LOGGER.debug("Getting %s from MongoDB collection %s", key, self.collection)
88
89 response = await self.database[self.collection].find_one(
90 {"$query": {"key": key}, "$orderby": {"$natural": -1}}
91 )
92 if response.keys() == {"_id", "key", "value"}:
93 response = response["value"]
94 return response
95
96 async def delete(self, key):
97 """Delete a document from the database (key).
98
99 Args:
100 key (str): the key is the document lookup key.
101
102 """
103 _LOGGER.debug("Deleting %s from MongoDB collection %s.", key, self.collection)
104
105 return await self.database[self.collection].delete_one({"key": key})
106
107 @asynccontextmanager
108 async def memory_in_collection(self, collection):
109 """Use the specified collection rather than the default."""
110 db_copy = DatabaseMongo(self.config, self.opsdroid)
111 try:
112 await db_copy.connect()
113 db_copy.collection = collection
114 yield db_copy
115 finally:
116 if db_copy.client:
117 db_copy.client.close()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/database/mongo/__init__.py b/opsdroid/database/mongo/__init__.py
--- a/opsdroid/database/mongo/__init__.py
+++ b/opsdroid/database/mongo/__init__.py
@@ -44,17 +44,18 @@
async def connect(self):
"""Connect to the database."""
host = self.config.get("host", "localhost")
+ protocol = self.config.get("protocol", "mongodb").replace("://", "")
port = self.config.get("port", "27017")
+ if port != "27017":
+ host = f"{host}:{port}"
database = self.config.get("database", "opsdroid")
user = self.config.get("user")
pwd = self.config.get("password")
if user and pwd:
- path = "mongodb://{user}:{pwd}@{host}:{port}".format(
- user=user, pwd=pwd, host=host, port=port
- )
+ self.db_url = f"{protocol}://{user}:{pwd}@{host}"
else:
- path = "mongodb://{host}:{port}".format(host=host, port=port)
- self.client = AsyncIOMotorClient(path)
+ self.db_url = f"{protocol}://{host}"
+ self.client = AsyncIOMotorClient(self.db_url)
self.database = self.client[database]
_LOGGER.info("Connected to MongoDB.")
| {"golden_diff": "diff --git a/opsdroid/database/mongo/__init__.py b/opsdroid/database/mongo/__init__.py\n--- a/opsdroid/database/mongo/__init__.py\n+++ b/opsdroid/database/mongo/__init__.py\n@@ -44,17 +44,18 @@\n async def connect(self):\n \"\"\"Connect to the database.\"\"\"\n host = self.config.get(\"host\", \"localhost\")\n+ protocol = self.config.get(\"protocol\", \"mongodb\").replace(\"://\", \"\")\n port = self.config.get(\"port\", \"27017\")\n+ if port != \"27017\":\n+ host = f\"{host}:{port}\"\n database = self.config.get(\"database\", \"opsdroid\")\n user = self.config.get(\"user\")\n pwd = self.config.get(\"password\")\n if user and pwd:\n- path = \"mongodb://{user}:{pwd}@{host}:{port}\".format(\n- user=user, pwd=pwd, host=host, port=port\n- )\n+ self.db_url = f\"{protocol}://{user}:{pwd}@{host}\"\n else:\n- path = \"mongodb://{host}:{port}\".format(host=host, port=port)\n- self.client = AsyncIOMotorClient(path)\n+ self.db_url = f\"{protocol}://{host}\"\n+ self.client = AsyncIOMotorClient(self.db_url)\n self.database = self.client[database]\n _LOGGER.info(\"Connected to MongoDB.\")\n", "issue": "User configurable connection for mongo-based databases\nSo the pymongo client has a multitude of ways for connecting to different mongo services\r\n\r\nSo for MongoDB Atlas users the connection string is given as such \r\nfor python connections to the mongo db atlas \r\n\r\n`mongodb+srv://<username>:<password>@<cluster-name>.mongodb.net/myFirstDatabase`\r\n\r\nIn making the mongo connection to be user configurable we can specify different types of mongo services versus\r\njust asking for the basic connection arguments like port, user name, pass, and also we can give users an easier way to connect versus making assumptions about the type of mongodb the kinds of credentials they might have. 
\r\n\r\nAs long as the pymongo client accepts the connection and connects the user to the database and the collection they want I think this would be great!\r\n\r\nThanks again guys!\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"A module for opsdroid to allow persist in mongo database.\"\"\"\nimport logging\nfrom contextlib import asynccontextmanager\nfrom motor.motor_asyncio import AsyncIOMotorClient\nfrom voluptuous import Any\n\nfrom opsdroid.database import Database\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n \"host\": str,\n \"port\": Any(int, str),\n \"database\": str,\n \"user\": str,\n \"password\": str,\n \"collection\": str,\n}\n\n\nclass DatabaseMongo(Database):\n \"\"\"A module for opsdroid to allow memory to persist in a mongo database.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connection.\n\n Set some basic properties from the database config such as the name\n of this database.\n\n Args:\n config (dict): The config for this database specified in the\n `configuration.yaml` file.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(\"Loaded mongo database connector.\")\n self.name = \"mongo\"\n self.config = config\n self.client = None\n self.database = None\n self.collection = config.get(\"collection\", \"opsdroid\")\n\n async def connect(self):\n \"\"\"Connect to the database.\"\"\"\n host = self.config.get(\"host\", \"localhost\")\n port = self.config.get(\"port\", \"27017\")\n database = self.config.get(\"database\", \"opsdroid\")\n user = self.config.get(\"user\")\n pwd = self.config.get(\"password\")\n if user and pwd:\n path = \"mongodb://{user}:{pwd}@{host}:{port}\".format(\n user=user, pwd=pwd, host=host, port=port\n )\n else:\n path = \"mongodb://{host}:{port}\".format(host=host, port=port)\n self.client = AsyncIOMotorClient(path)\n self.database = self.client[database]\n _LOGGER.info(\"Connected to MongoDB.\")\n\n async def put(self, key, data):\n \"\"\"Insert or replace an object into the database for a given key.\n\n Args:\n key (str): the key is the document lookup key.\n data (object): the data to be inserted or replaced\n\n \"\"\"\n _LOGGER.debug(\"Putting %s into MongoDB collection %s\", key, self.collection)\n\n if isinstance(data, str):\n data = {\"value\": data}\n if \"key\" not in data:\n data[\"key\"] = key\n\n return await self.database[self.collection].update_one(\n {\"key\": data[\"key\"]}, {\"$set\": data}, upsert=True\n )\n\n async def get(self, key):\n \"\"\"Get a document from the database (key).\n\n Args:\n key (str): the key is the document lookup key.\n\n \"\"\"\n _LOGGER.debug(\"Getting %s from MongoDB collection %s\", key, self.collection)\n\n response = await self.database[self.collection].find_one(\n {\"$query\": {\"key\": key}, \"$orderby\": {\"$natural\": -1}}\n )\n if response.keys() == {\"_id\", \"key\", \"value\"}:\n response = response[\"value\"]\n return response\n\n async def delete(self, key):\n \"\"\"Delete a document from the database (key).\n\n Args:\n key (str): the key is the document lookup key.\n\n \"\"\"\n _LOGGER.debug(\"Deleting %s from MongoDB collection %s.\", key, self.collection)\n\n return await self.database[self.collection].delete_one({\"key\": key})\n\n @asynccontextmanager\n async def memory_in_collection(self, collection):\n \"\"\"Use the specified collection rather than the default.\"\"\"\n db_copy = DatabaseMongo(self.config, self.opsdroid)\n try:\n await 
db_copy.connect()\n db_copy.collection = collection\n yield db_copy\n finally:\n if db_copy.client:\n db_copy.client.close()\n", "path": "opsdroid/database/mongo/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"A module for opsdroid to allow persist in mongo database.\"\"\"\nimport logging\nfrom contextlib import asynccontextmanager\nfrom motor.motor_asyncio import AsyncIOMotorClient\nfrom voluptuous import Any\n\nfrom opsdroid.database import Database\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n \"host\": str,\n \"port\": Any(int, str),\n \"database\": str,\n \"user\": str,\n \"password\": str,\n \"collection\": str,\n}\n\n\nclass DatabaseMongo(Database):\n \"\"\"A module for opsdroid to allow memory to persist in a mongo database.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connection.\n\n Set some basic properties from the database config such as the name\n of this database.\n\n Args:\n config (dict): The config for this database specified in the\n `configuration.yaml` file.\n opsdroid (OpsDroid): An instance of opsdroid.core.\n\n \"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(\"Loaded mongo database connector.\")\n self.name = \"mongo\"\n self.config = config\n self.client = None\n self.database = None\n self.collection = config.get(\"collection\", \"opsdroid\")\n\n async def connect(self):\n \"\"\"Connect to the database.\"\"\"\n host = self.config.get(\"host\", \"localhost\")\n protocol = self.config.get(\"protocol\", \"mongodb\").replace(\"://\", \"\")\n port = self.config.get(\"port\", \"27017\")\n if port != \"27017\":\n host = f\"{host}:{port}\"\n database = self.config.get(\"database\", \"opsdroid\")\n user = self.config.get(\"user\")\n pwd = self.config.get(\"password\")\n if user and pwd:\n self.db_url = f\"{protocol}://{user}:{pwd}@{host}\"\n else:\n self.db_url = f\"{protocol}://{host}\"\n self.client = AsyncIOMotorClient(self.db_url)\n self.database = self.client[database]\n _LOGGER.info(\"Connected to MongoDB.\")\n\n async def put(self, key, data):\n \"\"\"Insert or replace an object into the database for a given key.\n\n Args:\n key (str): the key is the document lookup key.\n data (object): the data to be inserted or replaced\n\n \"\"\"\n _LOGGER.debug(\"Putting %s into MongoDB collection %s\", key, self.collection)\n\n if isinstance(data, str):\n data = {\"value\": data}\n if \"key\" not in data:\n data[\"key\"] = key\n\n return await self.database[self.collection].update_one(\n {\"key\": data[\"key\"]}, {\"$set\": data}, upsert=True\n )\n\n async def get(self, key):\n \"\"\"Get a document from the database (key).\n\n Args:\n key (str): the key is the document lookup key.\n\n \"\"\"\n _LOGGER.debug(\"Getting %s from MongoDB collection %s\", key, self.collection)\n\n response = await self.database[self.collection].find_one(\n {\"$query\": {\"key\": key}, \"$orderby\": {\"$natural\": -1}}\n )\n if response.keys() == {\"_id\", \"key\", \"value\"}:\n response = response[\"value\"]\n return response\n\n async def delete(self, key):\n \"\"\"Delete a document from the database (key).\n\n Args:\n key (str): the key is the document lookup key.\n\n \"\"\"\n _LOGGER.debug(\"Deleting %s from MongoDB collection %s.\", key, self.collection)\n\n return await self.database[self.collection].delete_one({\"key\": key})\n\n @asynccontextmanager\n async def memory_in_collection(self, collection):\n \"\"\"Use the specified collection rather than the default.\"\"\"\n db_copy = 
DatabaseMongo(self.config, self.opsdroid)\n try:\n await db_copy.connect()\n db_copy.collection = collection\n yield db_copy\n finally:\n if db_copy.client:\n db_copy.client.close()\n", "path": "opsdroid/database/mongo/__init__.py"}]} | 1,550 | 325 |
gh_patches_debug_38160 | rasdani/github-patches | git_diff | archlinux__archinstall-238 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Look into enabling SMART for drives that support it
Something like `smartctl --smart=on --offlineauto=on --saveauto=on /dev/sda` where `archinstall.hardware.detectSmart()` finds drives that support it (to extend drive lifetime if possible).
--- END ISSUE ---
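(Editor's note, not part of the original issue: `archinstall.hardware.detectSmart()` is named above as a wish, not an existing function. Below is a rough sketch of how such a check could be done by parsing `smartctl` JSON output; the exact JSON fields are an assumption based on smartmontools' `--json` mode.)
```python
import json
import subprocess

def detect_smart(device: str) -> bool:
    """Return True if smartctl reports SMART support for the given block device (sketch only)."""
    try:
        result = subprocess.run(
            ["smartctl", "--json", "--info", device],
            capture_output=True, text=True, check=False,
        )
        info = json.loads(result.stdout)
        return bool(info.get("smart_support", {}).get("available", False))
    except (OSError, ValueError):
        return False
```
Enabling SMART on a supported drive could then be a follow-up call such as the `smartctl --smart=on --offlineauto=on --saveauto=on <device>` command suggested in the issue.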
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/desktop.py`
Content:
```
1 # A desktop environment selector.
2
3 import archinstall, os
4
5 is_top_level_profile = True
6
7 def _prep_function(*args, **kwargs):
8 """
9 Magic function called by the importing installer
10 before continuing any further. It also avoids executing any
11 other code in this stage. So it's a safe way to ask the user
12 for more input before any other installer steps start.
13 """
14
15 supported_desktops = ['gnome', 'kde', 'awesome']
16 desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
17
18 # Temporarily store the selected desktop profile
19 # in a session-safe location, since this module will get reloaded
20 # the next time it gets executed.
21 archinstall.storage['_desktop_profile'] = desktop
22
23 profile = archinstall.Profile(None, desktop)
24 # Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.
25 with profile.load_instructions(namespace=f"{desktop}.py") as imported:
26 if hasattr(imported, '_prep_function'):
27 return imported._prep_function()
28 else:
29 print(f"Deprecated (??): {desktop} profile has no _prep_function() anymore")
30
31 if __name__ == 'desktop':
32 """
33 This "profile" is a meta-profile.
34 There are no desktop-specific steps, it simply routes
35 the installer to whichever desktop environment/window manager was chosen.
36
37 Maybe in the future, a network manager or similar things *could* be added here.
38 We should honor that Arch Linux does not officially endorse a desktop-setup, nor is
39 it trying to be a turn-key desktop distribution.
40
41 There are plenty of desktop-turn-key-solutions based on Arch Linux,
42 this is therefore just a helper to get started
43 """
44
45 # TODO: Remove magic variable 'installation' and place it
46 # in archinstall.storage or archinstall.session/archinstall.installation
47 installation.install_profile(archinstall.storage['_desktop_profile'])
48
```
Path: `profiles/awesome.py`
Content:
```
1 # A desktop environment using "Awesome" window manager.
2
3 import archinstall
4
5 is_top_level_profile = False
6
7 # New way of defining packages for a profile, which is iterable and can be used out side
8 # of the profile to get a list of "what packages will be installed".
9 __packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']
10
11 def _prep_function(*args, **kwargs):
12 """
13 Magic function called by the importing installer
14 before continuing any further. It also avoids executing any
15 other code in this stage. So it's a safe way to ask the user
16 for more input before any other installer steps start.
17 """
18
19 # Awesome WM requires that xorg is installed
20 profile = archinstall.Profile(None, 'xorg')
21 with profile.load_instructions(namespace='xorg.py') as imported:
22 if hasattr(imported, '_prep_function'):
23 return imported._prep_function()
24 else:
25 print('Deprecated (??): xorg profile has no _prep_function() anymore')
26
27
28 # Ensures that this code only gets executed if executed
29 # through importlib.util.spec_from_file_location("awesome", "/somewhere/awesome.py")
30 # or through conventional import awesome
31 if __name__ == 'awesome':
32 # Install the application awesome from the template under /applications/
33 awesome = archinstall.Application(installation, 'awesome')
34 awesome.install()
35
36 # Then setup and configure the desktop environment: awesome
37 editor = "nano"
38 filebrowser = "nemo gpicview-gtk3"
39 utils = "openssh sshfs htop scrot wget"
40
41
42 installation.add_additional_packages(f"{utils} {filebrowser} {editor}")
43
44 alacritty = archinstall.Application(installation, 'alacritty')
45 alacritty.install()
46
47 # TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.
48 with open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'r') as fh:
49 awesome_lua = fh.read()
50
51 ## Replace xterm with alacritty for a smoother experience.
52 awesome_lua = awesome_lua.replace('"xterm"', '"alacritty"')
53
54 with open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'w') as fh:
55 fh.write(awesome_lua)
56
57 ## TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)
58
59 ## Remove some interfering nemo settings
60 installation.arch_chroot("gsettings set org.nemo.desktop show-desktop-icons false")
61 installation.arch_chroot("xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search")
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/profiles/awesome.py b/profiles/awesome.py
--- a/profiles/awesome.py
+++ b/profiles/awesome.py
@@ -6,7 +6,7 @@
# New way of defining packages for a profile, which is iterable and can be used out side
# of the profile to get a list of "what packages will be installed".
-__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']
+__packages__ = ['nemo', 'gpicview-gtk3', 'scrot']
def _prep_function(*args, **kwargs):
"""
@@ -33,13 +33,7 @@
awesome = archinstall.Application(installation, 'awesome')
awesome.install()
- # Then setup and configure the desktop environment: awesome
- editor = "nano"
- filebrowser = "nemo gpicview-gtk3"
- utils = "openssh sshfs htop scrot wget"
-
-
- installation.add_additional_packages(f"{utils} {filebrowser} {editor}")
+ installation.add_additional_packages(__packages__)
alacritty = archinstall.Application(installation, 'alacritty')
alacritty.install()
diff --git a/profiles/desktop.py b/profiles/desktop.py
--- a/profiles/desktop.py
+++ b/profiles/desktop.py
@@ -4,6 +4,10 @@
is_top_level_profile = True
+# New way of defining packages for a profile, which is iterable and can be used out side
+# of the profile to get a list of "what packages will be installed".
+__packages__ = ['nano', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
+
def _prep_function(*args, **kwargs):
"""
Magic function called by the importing installer
@@ -14,7 +18,7 @@
supported_desktops = ['gnome', 'kde', 'awesome']
desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
-
+
# Temporarily store the selected desktop profile
# in a session-safe location, since this module will get reloaded
# the next time it gets executed.
@@ -41,7 +45,11 @@
There are plenty of desktop-turn-key-solutions based on Arch Linux,
this is therefore just a helper to get started
"""
+
+ # Install common packages for all desktop environments
+ installation.add_additional_packages(__packages__)
# TODO: Remove magic variable 'installation' and place it
# in archinstall.storage or archinstall.session/archinstall.installation
installation.install_profile(archinstall.storage['_desktop_profile'])
+
| {"golden_diff": "diff --git a/profiles/awesome.py b/profiles/awesome.py\n--- a/profiles/awesome.py\n+++ b/profiles/awesome.py\n@@ -6,7 +6,7 @@\n \n # New way of defining packages for a profile, which is iterable and can be used out side\n # of the profile to get a list of \"what packages will be installed\".\n-__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']\n+__packages__ = ['nemo', 'gpicview-gtk3', 'scrot']\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n@@ -33,13 +33,7 @@\n \tawesome = archinstall.Application(installation, 'awesome')\n \tawesome.install()\n \n-\t# Then setup and configure the desktop environment: awesome\n-\teditor = \"nano\"\n-\tfilebrowser = \"nemo gpicview-gtk3\"\n-\tutils = \"openssh sshfs htop scrot wget\"\n-\n-\n-\tinstallation.add_additional_packages(f\"{utils} {filebrowser} {editor}\")\n+\tinstallation.add_additional_packages(__packages__)\n \n \talacritty = archinstall.Application(installation, 'alacritty')\n \talacritty.install()\ndiff --git a/profiles/desktop.py b/profiles/desktop.py\n--- a/profiles/desktop.py\n+++ b/profiles/desktop.py\n@@ -4,6 +4,10 @@\n \n is_top_level_profile = True\n \n+# New way of defining packages for a profile, which is iterable and can be used out side\n+# of the profile to get a list of \"what packages will be installed\".\n+__packages__ = ['nano', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n+\n def _prep_function(*args, **kwargs):\n \t\"\"\"\n \tMagic function called by the importing installer\n@@ -14,7 +18,7 @@\n \n \tsupported_desktops = ['gnome', 'kde', 'awesome']\n \tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n-\n+\t\n \t# Temporarily store the selected desktop profile\n \t# in a session-safe location, since this module will get reloaded\n \t# the next time it gets executed.\n@@ -41,7 +45,11 @@\n \tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n \tthis is therefore just a helper to get started\n \t\"\"\"\n+\t\n+\t# Install common packages for all desktop environments\n+\tinstallation.add_additional_packages(__packages__)\n \n \t# TODO: Remove magic variable 'installation' and place it\n \t# in archinstall.storage or archinstall.session/archinstall.installation\n \tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n+\n", "issue": "Look in to enabling SMART for drives that support it\nSomething like `smartctl --smart=on --offlineauto=on --saveauto=on /dev/sda` where `archinstall.hardware.detectSmart()` finds drives that support it (to extend drive lifetime if possible).\n", "before_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n", "path": "profiles/desktop.py"}, {"content": "# A desktop environment using \"Awesome\" window manager.\n\nimport archinstall\n\nis_top_level_profile = False\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'nemo', 'gpicview-gtk3', 'openssh', 'sshfs', 'htop', 'scrot', 'wget']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# Awesome WM requires that xorg is installed\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"awesome\", \"/somewhere/awesome.py\")\n# or through conventional import awesome\nif __name__ == 'awesome':\n\t# Install the application awesome from the template under /applications/\n\tawesome = archinstall.Application(installation, 'awesome')\n\tawesome.install()\n\n\t# Then setup and configure the desktop environment: awesome\n\teditor = \"nano\"\n\tfilebrowser = \"nemo gpicview-gtk3\"\n\tutils = \"openssh sshfs htop scrot wget\"\n\n\n\tinstallation.add_additional_packages(f\"{utils} {filebrowser} {editor}\")\n\n\talacritty = archinstall.Application(installation, 'alacritty')\n\talacritty.install()\n\n\t# TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'r') as fh:\n\t\tawesome_lua = fh.read()\n\n\t## Replace xterm with alacritty for a smoother experience.\n\tawesome_lua = awesome_lua.replace('\"xterm\"', '\"alacritty\"')\n\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'w') as fh:\n\t\tfh.write(awesome_lua)\n\n\t## TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)\n\t\n\t## Remove some interfering nemo settings\n\tinstallation.arch_chroot(\"gsettings set org.nemo.desktop show-desktop-icons false\")\n\tinstallation.arch_chroot(\"xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search\")\n", "path": "profiles/awesome.py"}], "after_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}, {"content": "# A desktop environment using \"Awesome\" window manager.\n\nimport archinstall\n\nis_top_level_profile = False\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nemo', 'gpicview-gtk3', 'scrot']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\t# Awesome WM requires that xorg is installed\n\tprofile = archinstall.Profile(None, 'xorg')\n\twith profile.load_instructions(namespace='xorg.py') as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint('Deprecated (??): xorg profile has no _prep_function() anymore')\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"awesome\", \"/somewhere/awesome.py\")\n# or through conventional import awesome\nif __name__ == 'awesome':\n\t# Install the application awesome from the template under /applications/\n\tawesome = archinstall.Application(installation, 'awesome')\n\tawesome.install()\n\n\tinstallation.add_additional_packages(__packages__)\n\n\talacritty = archinstall.Application(installation, 'alacritty')\n\talacritty.install()\n\n\t# TODO: Copy a full configuration to ~/.config/awesome/rc.lua instead.\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'r') as fh:\n\t\tawesome_lua = fh.read()\n\n\t## Replace xterm with alacritty for a smoother experience.\n\tawesome_lua = awesome_lua.replace('\"xterm\"', '\"alacritty\"')\n\n\twith open(f'{installation.mountpoint}/etc/xdg/awesome/rc.lua', 'w') as fh:\n\t\tfh.write(awesome_lua)\n\n\t## TODO: Configure the right-click-menu to contain the above packages that were installed. (as a user config)\n\t\n\t## Remove some interfering nemo settings\n\tinstallation.arch_chroot(\"gsettings set org.nemo.desktop show-desktop-icons false\")\n\tinstallation.arch_chroot(\"xdg-mime default nemo.desktop inode/directory application/x-gnome-saved-search\")\n", "path": "profiles/awesome.py"}]} | 1,599 | 640 |
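The diff above converges on a module-level `__packages__` list that each profile declares once, so the package set can be read from outside the profile as well as installed from inside it. A minimal, self-contained sketch of that pattern; the `Installation` class is a stand-in, not the real archinstall API:

```python
# Hypothetical stand-in for archinstall's installation session object.
class Installation:
    def __init__(self):
        self.installed = []

    def add_additional_packages(self, packages):
        # Accept either a space-separated string or an iterable of names.
        if isinstance(packages, str):
            packages = packages.split()
        self.installed.extend(packages)


desktop_packages = ['nano', 'openssh', 'htop', 'wget']   # common desktop set
awesome_packages = ['nemo', 'gpicview-gtk3', 'scrot']    # window-manager extras

installation = Installation()
installation.add_additional_packages(desktop_packages)
installation.add_additional_packages(awesome_packages)
print(installation.installed)
```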
gh_patches_debug_3276 | rasdani/github-patches | git_diff | scikit-hep__pyhf-362 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lock Tensorflow to 1.11.0 release until TensorFlow probability has caught up
# Description
[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably
> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead.
This doesn't affect us, but it does affect [TensorFlow Probability `v0.3.0`, which breaks](https://travis-ci.org/diana-hep/pyhf/jobs/451151767#L668-L685):
```
ImportError while loading conftest '/home/travis/build/diana-hep/pyhf/tests/conftest.py'.
tests/conftest.py:46: in <module>
(pyhf.tensor.tensorflow_backend(session=tf.Session()), None)
pyhf/tensor/__init__.py:28: in __getattr__
from .tensorflow_backend import tensorflow_backend
pyhf/tensor/tensorflow_backend.py:3: in <module>
import tensorflow_probability as tfp
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/__init__.py:21: in <module>
from tensorflow_probability.python import * # pylint: disable=wildcard-import
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/__init__.py:22: in <module>
from tensorflow_probability.python import distributions
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/__init__.py:44: in <module>
from tensorflow_probability.python.distributions.linear_gaussian_ssm import LinearGaussianStateSpaceModel
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/linear_gaussian_ssm.py:34: in <module>
tfl = tf.contrib.linalg
../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/util/lazy_loader.py:54: in __getattr__
return getattr(module, item)
E AttributeError: module 'tensorflow.contrib' has no attribute 'linalg'
```
Until `tfp` updates to using `v1.12` we'll have to lock to them.
## Related Issues
- Issue #330
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
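A minimal sketch of the kind of lock the issue asks for, written as the `tensorflow` extra of a `setup.py`; the exact spelling of the bound is an assumption based on the issue text:

```python
# Hedged sketch: hold TensorFlow below 1.12 until tensorflow-probability
# no longer relies on the removed tf.contrib.linalg module.
extras_require = {
    'tensorflow': [
        'tensorflow>=1.10.0,<1.12.0',  # upper bound is the lock the issue asks for
        'tensorflow-probability==0.3.0',
    ],
}

for requirement in extras_require['tensorflow']:
    print(requirement)
```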
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow>=1.10.0',
16 'tensorflow-probability==0.3.0',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch>=0.4.0'],
21 'mxnet': [
22 'mxnet>=1.0.0',
23 'requests<2.19.0,>=2.18.4',
24 'numpy<1.15.0,>=1.8.2',
25 'requests<2.19.0,>=2.18.4',
26 ],
27 # 'dask': [
28 # 'dask[array]'
29 # ],
30 'xmlimport': ['uproot'],
31 'minuit': ['iminuit'],
32 'develop': [
33 'pyflakes',
34 'pytest>=3.5.1',
35 'pytest-cov>=2.5.1',
36 'pytest-benchmark[histogram]',
37 'pytest-console-scripts',
38 'python-coveralls',
39 'coverage>=4.0', # coveralls
40 'matplotlib',
41 'jupyter',
42 'nbdime',
43 'uproot>=3.0.0',
44 'papermill',
45 'graphviz',
46 'bumpversion',
47 'sphinx',
48 'sphinxcontrib-bibtex',
49 'sphinxcontrib-napoleon',
50 'sphinx_rtd_theme',
51 'nbsphinx',
52 'sphinx-issues',
53 'm2r',
54 'jsonpatch',
55 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
56 'pre-commit',
57 'black;python_version>="3.6"', # Black is Python3 only
58 'twine',
59 ],
60 }
61 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
62
63 setup(
64 name='pyhf',
65 version='0.0.15',
66 description='(partial) pure python histfactory implementation',
67 long_description=long_description,
68 long_description_content_type='text/markdown',
69 url='https://github.com/diana-hep/pyhf',
70 author='Lukas Heinrich',
71 author_email='[email protected]',
72 license='Apache',
73 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
74 classifiers=[
75 "Programming Language :: Python :: 2",
76 "Programming Language :: Python :: 2.7",
77 "Programming Language :: Python :: 3",
78 "Programming Language :: Python :: 3.6",
79 ],
80 packages=find_packages(),
81 include_package_data=True,
82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
83 install_requires=[
84 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
85 'click>=6.0', # for console scripts,
86 'tqdm', # for readxml
87 'six', # for modifiers
88 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
89 'jsonpatch',
90 ],
91 extras_require=extras_require,
92 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
93 dependency_links=[],
94 )
95
```
--- END FILES ---
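Near the end of the file above, the `complete` extra is built by flattening every other extra into one de-duplicated, sorted list; a small stand-alone illustration of that idiom:

```python
extras_require = {
    'tensorflow': ['tensorflow', 'tensorflow-probability==0.3.0'],
    'torch': ['torch>=0.4.0'],
    'minuit': ['iminuit'],
}

# sum(lists, []) concatenates the per-extra lists, set() drops duplicates,
# and sorted() keeps the resulting metadata reproducible.
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
print(extras_require['complete'])
```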
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
extras_require = {
'tensorflow': [
- 'tensorflow>=1.10.0',
+ 'tensorflow<1.12.0,>=1.10.0',
'tensorflow-probability==0.3.0',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow>=1.10.0',\n+ 'tensorflow<1.12.0,>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n", "issue": "Lock Tensorflow to 1.11.0 release until TensorFlow probability has caught up\n# Description\r\n\r\n[TensorFlow 1.12.0 has been released](https://github.com/tensorflow/tensorflow/releases/tag/v1.12.0) and it has breaking changes. Most notably\r\n\r\n> Remove `tf.contrib.linalg`. `tf.linalg` should be used instead. \r\n\r\nThis doesn't affect us, but it does affect [TensorFlow Probability `v0.3.0`, which breaks](https://travis-ci.org/diana-hep/pyhf/jobs/451151767#L668-L685):\r\n\r\n```\r\nImportError while loading conftest '/home/travis/build/diana-hep/pyhf/tests/conftest.py'.\r\ntests/conftest.py:46: in <module>\r\n (pyhf.tensor.tensorflow_backend(session=tf.Session()), None)\r\npyhf/tensor/__init__.py:28: in __getattr__\r\n from .tensorflow_backend import tensorflow_backend\r\npyhf/tensor/tensorflow_backend.py:3: in <module>\r\n import tensorflow_probability as tfp\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/__init__.py:21: in <module>\r\n from tensorflow_probability.python import * # pylint: disable=wildcard-import\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/__init__.py:22: in <module>\r\n from tensorflow_probability.python import distributions\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/__init__.py:44: in <module>\r\n from tensorflow_probability.python.distributions.linear_gaussian_ssm import LinearGaussianStateSpaceModel\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow_probability/python/distributions/linear_gaussian_ssm.py:34: in <module>\r\n tfl = tf.contrib.linalg\r\n../../../virtualenv/python3.6.3/lib/python3.6/site-packages/tensorflow/python/util/lazy_loader.py:54: in __getattr__\r\n return getattr(module, item)\r\nE AttributeError: module 'tensorflow.contrib' has no attribute 'linalg'\r\n```\r\n\r\nUntil `tfp` updates to using `v1.12` we'll have to lock to them.\r\n\r\n## Related Issues\r\n\r\n- Issue #330 \r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 
'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow<1.12.0,>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n 
long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 1,913 | 144 |
gh_patches_debug_6465 | rasdani/github-patches | git_diff | feast-dev__feast-3766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feast ui cannot parse url path
## Expected Behavior
One example case:
When a user navigates to localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count, they should see the related feature-view page.
## Current Behavior
One example case:
When a user navigates to localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count, they see "Internal Server Error" instead.
## Steps to reproduce
1. Install feast 0.34.1.
2. Run `feast ui`.
3. Navigate to the homepage at localhost:8888.
4. Navigate to any page (entities, feature views, or data sources; it doesn't matter which).
5. The page you clicked appears in the browser address bar, e.g. http://localhost:8888/p/order_count_project/data-source.
6. Refresh the page, or copy the URL and open it in a new tab.
7. You will see an internal server error.
### Specifications
- Version: 0.34.1
- Platform: macos
- Subsystem:
## Possible Solution
The ui_server.py file was updated recently: a commit changed the resource-finder library, and `ui_dir` is now a PosixPath instead of a string.
We should convert it to `str` and add the missing "/" (or join the path properly) inside the handler decorated with `@app.api_route("/p/{path_name:path}", methods=["GET"])`.
--- END ISSUE ---
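The failure the issue describes can be reproduced with plain `pathlib`: concatenating a `Path` and a `str` with `+` raises a `TypeError`, which is what surfaces as the internal server error. A minimal sketch, with the build directory name assumed:

```python
from pathlib import Path

ui_dir = Path("ui/build")              # what the resource finder now returns

try:
    filename = ui_dir + "index.html"   # mirrors the concatenation in catch_all()
except TypeError as exc:
    print(f"TypeError: {exc}")

# Path-safe alternatives that work whether ui_dir is a str or a Path:
print(ui_dir / "index.html")
print(Path(ui_dir).joinpath("index.html"))
```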
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/ui_server.py`
Content:
```
1 import json
2 import threading
3 from typing import Callable, Optional
4
5 import importlib_resources
6 import uvicorn
7 from fastapi import FastAPI, Response
8 from fastapi.middleware.cors import CORSMiddleware
9 from fastapi.staticfiles import StaticFiles
10
11 import feast
12
13
14 def get_app(
15 store: "feast.FeatureStore",
16 project_id: str,
17 registry_ttl_secs: int,
18 root_path: str = "",
19 ):
20 app = FastAPI()
21
22 app.add_middleware(
23 CORSMiddleware,
24 allow_origins=["*"],
25 allow_credentials=True,
26 allow_methods=["*"],
27 allow_headers=["*"],
28 )
29
30 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down
31 registry_proto = None
32 shutting_down = False
33 active_timer: Optional[threading.Timer] = None
34
35 def async_refresh():
36 store.refresh_registry()
37 nonlocal registry_proto
38 registry_proto = store.registry.proto()
39 if shutting_down:
40 return
41 nonlocal active_timer
42 active_timer = threading.Timer(registry_ttl_secs, async_refresh)
43 active_timer.start()
44
45 @app.on_event("shutdown")
46 def shutdown_event():
47 nonlocal shutting_down
48 shutting_down = True
49 if active_timer:
50 active_timer.cancel()
51
52 async_refresh()
53
54 ui_dir_ref = importlib_resources.files(__name__) / "ui/build/"
55 with importlib_resources.as_file(ui_dir_ref) as ui_dir:
56 # Initialize with the projects-list.json file
57 with ui_dir.joinpath("projects-list.json").open(mode="w") as f:
58 projects_dict = {
59 "projects": [
60 {
61 "name": "Project",
62 "description": "Test project",
63 "id": project_id,
64 "registryPath": f"{root_path}/registry",
65 }
66 ]
67 }
68 f.write(json.dumps(projects_dict))
69
70 @app.get("/registry")
71 def read_registry():
72 return Response(
73 content=registry_proto.SerializeToString(),
74 media_type="application/octet-stream",
75 )
76
77 # For all other paths (such as paths that would otherwise be handled by react router), pass to React
78 @app.api_route("/p/{path_name:path}", methods=["GET"])
79 def catch_all():
80 filename = ui_dir + "index.html"
81
82 with open(filename) as f:
83 content = f.read()
84
85 return Response(content, media_type="text/html")
86
87 app.mount(
88 "/",
89 StaticFiles(directory=ui_dir, html=True),
90 name="site",
91 )
92
93 return app
94
95
96 def start_server(
97 store: "feast.FeatureStore",
98 host: str,
99 port: int,
100 get_registry_dump: Callable,
101 project_id: str,
102 registry_ttl_sec: int,
103 root_path: str = "",
104 ):
105 app = get_app(
106 store,
107 project_id,
108 registry_ttl_sec,
109 root_path,
110 )
111 uvicorn.run(app, host=host, port=port)
112
```
--- END FILES ---
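One way to make the catch-all handler indifferent to whether `ui_dir` is a string or a `Path` is to join and read through `pathlib`; a sketch under that assumption:

```python
from pathlib import Path

def read_index(ui_dir) -> str:
    # Path(...) accepts both str and Path, so the join works either way.
    index_file = Path(ui_dir) / "index.html"
    return index_file.read_text()
```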
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py
--- a/sdk/python/feast/ui_server.py
+++ b/sdk/python/feast/ui_server.py
@@ -77,7 +77,7 @@
# For all other paths (such as paths that would otherwise be handled by react router), pass to React
@app.api_route("/p/{path_name:path}", methods=["GET"])
def catch_all():
- filename = ui_dir + "index.html"
+ filename = ui_dir.joinpath("index.html")
with open(filename) as f:
content = f.read()
| {"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -77,7 +77,7 @@\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n- filename = ui_dir + \"index.html\"\n+ filename = ui_dir.joinpath(\"index.html\")\n \n with open(filename) as f:\n content = f.read()\n", "issue": "Feast ui cannot parse url path\n## Expected Behavior \r\n\r\nOne of example cases:\r\nWhen user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count should see related feature-view page\r\n\r\n## Current Behavior\r\n\r\nOne of example cases:\r\nWhen user navigate localhost:8888/p/order_count_project/feature-view/user_3_and_7_days_order_count see \"Internal Server Error\"\r\n\r\n## Steps to reproduce\r\n\r\ninstall feast 0.34.1\r\nrun feast ui\r\nnavigate homepage localhost:8888\r\nnavigate any page (entities or feature-view or data sources doesn't matter)\r\nyou will see the page you clicked at browser search bar like http://localhost:8888/p/order_count_project/data-source \r\nthen refresh or copy url open in new tab\r\nyou will see internal server error\r\n\r\n### Specifications\r\n\r\n- Version: 0.34.1\r\n- Platform: macos\r\n- Subsystem: \r\n\r\n## Possible Solution\r\n\r\nui_server.py file updated recently. commit changes resource finder library and then it returns PosixPath. \r\nWe should convert to str and add little \"/\" to \"@app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\" function\r\n\r\n\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport importlib_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n project_id: str,\n registry_ttl_secs: int,\n root_path: str = \"\",\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir_ref = importlib_resources.files(__name__) / \"ui/build/\"\n with importlib_resources.as_file(ui_dir_ref) as ui_dir:\n # Initialize with the projects-list.json file\n with ui_dir.joinpath(\"projects-list.json\").open(mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # 
For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n project_id,\n registry_ttl_sec,\n root_path,\n )\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}], "after_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport importlib_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n project_id: str,\n registry_ttl_secs: int,\n root_path: str = \"\",\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir_ref = importlib_resources.files(__name__) / \"ui/build/\"\n with importlib_resources.as_file(ui_dir_ref) as ui_dir:\n # Initialize with the projects-list.json file\n with ui_dir.joinpath(\"projects-list.json\").open(mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir.joinpath(\"index.html\")\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n project_id,\n registry_ttl_sec,\n root_path,\n )\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}]} | 1,390 | 138 |
gh_patches_debug_37912 | rasdani/github-patches | git_diff | tournesol-app__tournesol-155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Count ratings appropriately
If a contributor rates A versus B on 9 quality criteria, this should count as 9 ratings.
The home page statistics should reflect this, and not the number of times a contributor rated A versus B :)
--- END ISSUE ---
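The counting rule the issue asks for can be stated independently of the ORM: one pairwise comparison contributes one rating per quality criterion that was actually filled in. A small sketch of that rule over plain dictionaries; the criterion names are illustrative:

```python
VIDEO_FIELDS = ["reliability", "importance", "engaging"]  # illustrative subset

comparisons = [
    {"reliability": 55, "importance": 80, "engaging": None},
    {"reliability": 40, "importance": None, "engaging": 70},
]

# One rating per criterion that was actually scored in a comparison.
n_ratings = sum(
    1
    for comparison in comparisons
    for field in VIDEO_FIELDS
    if comparison.get(field) is not None
)
print(n_ratings)  # 4, not 2
```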
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/backend/api_v2/statistics.py`
Content:
```
1 from backend.models import ExpertRating, Video, UserInformation
2 from drf_spectacular.utils import extend_schema
3 from rest_framework import serializers
4 from rest_framework import viewsets
5 from rest_framework.decorators import action
6 from rest_framework.permissions import IsAuthenticatedOrReadOnly
7 from rest_framework.response import Response
8 from backend.rating_fields import VIDEO_FIELDS
9 from django.db.models import Min, Max, F, Q
10 from backend.api_v2.helpers import WithPKOverflowProtection
11 import datetime
12 from django.utils.timezone import make_aware
13
14
15 class StatisticsSerializerV2(serializers.Serializer):
16 """Serialize statistics for the website."""
17 certified_experts = serializers.IntegerField(
18 help_text="Number of experts with certified e-mails")
19 total_experts = serializers.IntegerField(
20 help_text="Number of all experts")
21 pairwise_comparisons = serializers.IntegerField(
22 help_text="Total number of pairwise comparisons")
23 videos = serializers.IntegerField(
24 help_text="Total number of videos in the database")
25 min_score = serializers.FloatField(
26 help_text="Minimal aggregated score over all videos and features")
27 max_score = serializers.FloatField(
28 help_text="Maximal aggregated score over all videos and features")
29 weekly_active_ratings = serializers.IntegerField(
30 help_text="Number of ratings added within a week")
31 n_rated_videos = serializers.IntegerField(
32 help_text="Total number of videos with ratings")
33
34
35 class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):
36 """Show website statistics."""
37 serializer_class = StatisticsSerializerV2
38 permission_classes = [IsAuthenticatedOrReadOnly]
39
40 # need a list, otherwise router will not register this viewset
41 @extend_schema(exclude=True, responses={
42 200: StatisticsSerializerV2(
43 many=True),
44 400: None})
45 def list(self, request):
46 return Response({})
47
48 @extend_schema(
49 responses={
50 200: StatisticsSerializerV2(
51 many=False)},
52 operation_id="view")
53 @action(methods=['GET'], detail=False)
54 def view(self, request):
55 """Get statistics for the website."""
56 minmax_scores = \
57 Video.objects.aggregate(**{'max_' + f: Max(F(f)) for f in VIDEO_FIELDS},
58 **{'min_' + f: Min(F(f)) for f in VIDEO_FIELDS})
59
60 try:
61 min_score = min([v for k, v in minmax_scores.items() if k.startswith('min')])
62 max_score = max([v for k, v in minmax_scores.items() if k.startswith('max')])
63 except Exception:
64 min_score = 0.0
65 max_score = 0.0
66
67 date_week_ago = make_aware(datetime.datetime.now()) - datetime.timedelta(days=7)
68
69 data = {'certified_experts': UserInformation.
70 _annotate_is_certified(UserInformation.objects.all())
71 .filter(_is_certified=1, user__is_active=True).count(),
72 'pairwise_comparisons': ExpertRating.objects.all().count(),
73 'videos': Video.objects.all().count(),
74 'min_score': min_score,
75 'max_score': max_score,
76 'total_experts': UserInformation.objects.filter(is_demo=False).count(),
77 'weekly_active_ratings': ExpertRating.objects.filter(
78 datetime_lastedit__gte=date_week_ago).count(),
79 'n_rated_videos': Video.objects.exclude(Q(expertrating_video_1__id=None) &
80 Q(expertrating_video_2__id=None)
81 ).distinct().count()
82 }
83
84 return Response(StatisticsSerializerV2(data, many=False).data)
85
```
--- END FILES ---
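The diff further down assembles the statistics serializer dynamically so that every criterion gets its own `n_<criterion>_comparisons` field; the underlying trick is plain `type(name, bases, namespace)`. A framework-free sketch of that idea:

```python
FIELDS = ["reliability", "importance", "engaging"]  # illustrative criteria

# type(name, bases, namespace) builds a class with one attribute per criterion,
# which is how a per-criterion serializer can be assembled without spelling
# every field out by hand.
Statistics = type(
    "Statistics",
    (object,),
    {f"n_{field}_comparisons": 0 for field in FIELDS},
)

print(sorted(attr for attr in vars(Statistics) if attr.startswith("n_")))
```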
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/backend/api_v2/statistics.py b/backend/backend/api_v2/statistics.py
--- a/backend/backend/api_v2/statistics.py
+++ b/backend/backend/api_v2/statistics.py
@@ -12,24 +12,35 @@
from django.utils.timezone import make_aware
-class StatisticsSerializerV2(serializers.Serializer):
- """Serialize statistics for the website."""
- certified_experts = serializers.IntegerField(
- help_text="Number of experts with certified e-mails")
- total_experts = serializers.IntegerField(
- help_text="Number of all experts")
- pairwise_comparisons = serializers.IntegerField(
- help_text="Total number of pairwise comparisons")
- videos = serializers.IntegerField(
- help_text="Total number of videos in the database")
- min_score = serializers.FloatField(
- help_text="Minimal aggregated score over all videos and features")
- max_score = serializers.FloatField(
- help_text="Maximal aggregated score over all videos and features")
- weekly_active_ratings = serializers.IntegerField(
- help_text="Number of ratings added within a week")
- n_rated_videos = serializers.IntegerField(
- help_text="Total number of videos with ratings")
+StatisticsSerializerV2 = type(
+ 'StatisticsSerializerV2', (serializers.Serializer,),
+ {**dict(
+ __doc__="""Serialize statistics for the website.""",
+ certified_experts=serializers.IntegerField(
+ help_text="Number of experts with certified e-mails"),
+ total_experts=serializers.IntegerField(
+ help_text="Number of all experts"),
+ pairwise_comparisons=serializers.IntegerField(
+ help_text="Total number of pairwise comparisons"),
+ videos=serializers.IntegerField(
+ help_text="Total number of videos in the database"),
+ min_score=serializers.FloatField(
+ help_text="Minimal aggregated score over all videos and features"),
+ max_score=serializers.FloatField(
+ help_text="Maximal aggregated score over all videos and features"),
+ weekly_active_ratings=serializers.IntegerField(
+ help_text="Number of ratings added within a week"),
+ n_rated_videos=serializers.IntegerField(
+ help_text="Total number of videos with ratings"),
+
+ n_sum_comparisons=serializers.IntegerField(
+ help_text="Sum of all numbers of comparisons for all features"),
+ ),
+ **{f"n_{f}_comparisons": serializers.IntegerField(
+ help_text=f"Number of comparisons for {f}")
+ for f in VIDEO_FIELDS}
+ }
+)
class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):
@@ -81,4 +92,13 @@
).distinct().count()
}
+ n_sum_comparisons = 0
+ for f in VIDEO_FIELDS:
+ val = ExpertRating.objects.filter(**{
+ f + '__isnull': False, f + '_weight__gt': 0}).distinct().count()
+ data[f"n_{f}_comparisons"] = val
+ n_sum_comparisons += val
+
+ data["n_sum_comparisons"] = n_sum_comparisons
+
return Response(StatisticsSerializerV2(data, many=False).data)
| {"golden_diff": "diff --git a/backend/backend/api_v2/statistics.py b/backend/backend/api_v2/statistics.py\n--- a/backend/backend/api_v2/statistics.py\n+++ b/backend/backend/api_v2/statistics.py\n@@ -12,24 +12,35 @@\n from django.utils.timezone import make_aware\r\n \r\n \r\n-class StatisticsSerializerV2(serializers.Serializer):\r\n- \"\"\"Serialize statistics for the website.\"\"\"\r\n- certified_experts = serializers.IntegerField(\r\n- help_text=\"Number of experts with certified e-mails\")\r\n- total_experts = serializers.IntegerField(\r\n- help_text=\"Number of all experts\")\r\n- pairwise_comparisons = serializers.IntegerField(\r\n- help_text=\"Total number of pairwise comparisons\")\r\n- videos = serializers.IntegerField(\r\n- help_text=\"Total number of videos in the database\")\r\n- min_score = serializers.FloatField(\r\n- help_text=\"Minimal aggregated score over all videos and features\")\r\n- max_score = serializers.FloatField(\r\n- help_text=\"Maximal aggregated score over all videos and features\")\r\n- weekly_active_ratings = serializers.IntegerField(\r\n- help_text=\"Number of ratings added within a week\")\r\n- n_rated_videos = serializers.IntegerField(\r\n- help_text=\"Total number of videos with ratings\")\r\n+StatisticsSerializerV2 = type(\r\n+ 'StatisticsSerializerV2', (serializers.Serializer,),\r\n+ {**dict(\r\n+ __doc__=\"\"\"Serialize statistics for the website.\"\"\",\r\n+ certified_experts=serializers.IntegerField(\r\n+ help_text=\"Number of experts with certified e-mails\"),\r\n+ total_experts=serializers.IntegerField(\r\n+ help_text=\"Number of all experts\"),\r\n+ pairwise_comparisons=serializers.IntegerField(\r\n+ help_text=\"Total number of pairwise comparisons\"),\r\n+ videos=serializers.IntegerField(\r\n+ help_text=\"Total number of videos in the database\"),\r\n+ min_score=serializers.FloatField(\r\n+ help_text=\"Minimal aggregated score over all videos and features\"),\r\n+ max_score=serializers.FloatField(\r\n+ help_text=\"Maximal aggregated score over all videos and features\"),\r\n+ weekly_active_ratings=serializers.IntegerField(\r\n+ help_text=\"Number of ratings added within a week\"),\r\n+ n_rated_videos=serializers.IntegerField(\r\n+ help_text=\"Total number of videos with ratings\"),\r\n+\r\n+ n_sum_comparisons=serializers.IntegerField(\r\n+ help_text=\"Sum of all numbers of comparisons for all features\"),\r\n+ ),\r\n+ **{f\"n_{f}_comparisons\": serializers.IntegerField(\r\n+ help_text=f\"Number of comparisons for {f}\")\r\n+ for f in VIDEO_FIELDS}\r\n+ }\r\n+)\r\n \r\n \r\n class StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):\r\n@@ -81,4 +92,13 @@\n ).distinct().count()\r\n }\r\n \r\n+ n_sum_comparisons = 0\r\n+ for f in VIDEO_FIELDS:\r\n+ val = ExpertRating.objects.filter(**{\r\n+ f + '__isnull': False, f + '_weight__gt': 0}).distinct().count()\r\n+ data[f\"n_{f}_comparisons\"] = val\r\n+ n_sum_comparisons += val\r\n+\r\n+ data[\"n_sum_comparisons\"] = n_sum_comparisons\r\n+\r\n return Response(StatisticsSerializerV2(data, many=False).data)\n", "issue": "Count ratings appropriately\nIf a contributor rates A versus B on 9 quality criteria, this should count as 9 ratings.\r\nThe home page statistics should reflect this, on not the number of times a contributor rated A versus B :)\n", "before_files": [{"content": "from backend.models import ExpertRating, Video, UserInformation\r\nfrom drf_spectacular.utils import extend_schema\r\nfrom rest_framework import serializers\r\nfrom rest_framework import viewsets\r\nfrom 
rest_framework.decorators import action\r\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\r\nfrom rest_framework.response import Response\r\nfrom backend.rating_fields import VIDEO_FIELDS\r\nfrom django.db.models import Min, Max, F, Q\r\nfrom backend.api_v2.helpers import WithPKOverflowProtection\r\nimport datetime\r\nfrom django.utils.timezone import make_aware\r\n\r\n\r\nclass StatisticsSerializerV2(serializers.Serializer):\r\n \"\"\"Serialize statistics for the website.\"\"\"\r\n certified_experts = serializers.IntegerField(\r\n help_text=\"Number of experts with certified e-mails\")\r\n total_experts = serializers.IntegerField(\r\n help_text=\"Number of all experts\")\r\n pairwise_comparisons = serializers.IntegerField(\r\n help_text=\"Total number of pairwise comparisons\")\r\n videos = serializers.IntegerField(\r\n help_text=\"Total number of videos in the database\")\r\n min_score = serializers.FloatField(\r\n help_text=\"Minimal aggregated score over all videos and features\")\r\n max_score = serializers.FloatField(\r\n help_text=\"Maximal aggregated score over all videos and features\")\r\n weekly_active_ratings = serializers.IntegerField(\r\n help_text=\"Number of ratings added within a week\")\r\n n_rated_videos = serializers.IntegerField(\r\n help_text=\"Total number of videos with ratings\")\r\n\r\n\r\nclass StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):\r\n \"\"\"Show website statistics.\"\"\"\r\n serializer_class = StatisticsSerializerV2\r\n permission_classes = [IsAuthenticatedOrReadOnly]\r\n\r\n # need a list, otherwise router will not register this viewset\r\n @extend_schema(exclude=True, responses={\r\n 200: StatisticsSerializerV2(\r\n many=True),\r\n 400: None})\r\n def list(self, request):\r\n return Response({})\r\n\r\n @extend_schema(\r\n responses={\r\n 200: StatisticsSerializerV2(\r\n many=False)},\r\n operation_id=\"view\")\r\n @action(methods=['GET'], detail=False)\r\n def view(self, request):\r\n \"\"\"Get statistics for the website.\"\"\"\r\n minmax_scores = \\\r\n Video.objects.aggregate(**{'max_' + f: Max(F(f)) for f in VIDEO_FIELDS},\r\n **{'min_' + f: Min(F(f)) for f in VIDEO_FIELDS})\r\n\r\n try:\r\n min_score = min([v for k, v in minmax_scores.items() if k.startswith('min')])\r\n max_score = max([v for k, v in minmax_scores.items() if k.startswith('max')])\r\n except Exception:\r\n min_score = 0.0\r\n max_score = 0.0\r\n\r\n date_week_ago = make_aware(datetime.datetime.now()) - datetime.timedelta(days=7)\r\n\r\n data = {'certified_experts': UserInformation.\r\n _annotate_is_certified(UserInformation.objects.all())\r\n .filter(_is_certified=1, user__is_active=True).count(),\r\n 'pairwise_comparisons': ExpertRating.objects.all().count(),\r\n 'videos': Video.objects.all().count(),\r\n 'min_score': min_score,\r\n 'max_score': max_score,\r\n 'total_experts': UserInformation.objects.filter(is_demo=False).count(),\r\n 'weekly_active_ratings': ExpertRating.objects.filter(\r\n datetime_lastedit__gte=date_week_ago).count(),\r\n 'n_rated_videos': Video.objects.exclude(Q(expertrating_video_1__id=None) &\r\n Q(expertrating_video_2__id=None)\r\n ).distinct().count()\r\n }\r\n\r\n return Response(StatisticsSerializerV2(data, many=False).data)\r\n", "path": "backend/backend/api_v2/statistics.py"}], "after_files": [{"content": "from backend.models import ExpertRating, Video, UserInformation\r\nfrom drf_spectacular.utils import extend_schema\r\nfrom rest_framework import serializers\r\nfrom rest_framework import viewsets\r\nfrom 
rest_framework.decorators import action\r\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\r\nfrom rest_framework.response import Response\r\nfrom backend.rating_fields import VIDEO_FIELDS\r\nfrom django.db.models import Min, Max, F, Q\r\nfrom backend.api_v2.helpers import WithPKOverflowProtection\r\nimport datetime\r\nfrom django.utils.timezone import make_aware\r\n\r\n\r\nStatisticsSerializerV2 = type(\r\n 'StatisticsSerializerV2', (serializers.Serializer,),\r\n {**dict(\r\n __doc__=\"\"\"Serialize statistics for the website.\"\"\",\r\n certified_experts=serializers.IntegerField(\r\n help_text=\"Number of experts with certified e-mails\"),\r\n total_experts=serializers.IntegerField(\r\n help_text=\"Number of all experts\"),\r\n pairwise_comparisons=serializers.IntegerField(\r\n help_text=\"Total number of pairwise comparisons\"),\r\n videos=serializers.IntegerField(\r\n help_text=\"Total number of videos in the database\"),\r\n min_score=serializers.FloatField(\r\n help_text=\"Minimal aggregated score over all videos and features\"),\r\n max_score=serializers.FloatField(\r\n help_text=\"Maximal aggregated score over all videos and features\"),\r\n weekly_active_ratings=serializers.IntegerField(\r\n help_text=\"Number of ratings added within a week\"),\r\n n_rated_videos=serializers.IntegerField(\r\n help_text=\"Total number of videos with ratings\"),\r\n\r\n n_sum_comparisons=serializers.IntegerField(\r\n help_text=\"Sum of all numbers of comparisons for all features\"),\r\n ),\r\n **{f\"n_{f}_comparisons\": serializers.IntegerField(\r\n help_text=f\"Number of comparisons for {f}\")\r\n for f in VIDEO_FIELDS}\r\n }\r\n)\r\n\r\n\r\nclass StatisticsViewSetV2(viewsets.ViewSet, WithPKOverflowProtection):\r\n \"\"\"Show website statistics.\"\"\"\r\n serializer_class = StatisticsSerializerV2\r\n permission_classes = [IsAuthenticatedOrReadOnly]\r\n\r\n # need a list, otherwise router will not register this viewset\r\n @extend_schema(exclude=True, responses={\r\n 200: StatisticsSerializerV2(\r\n many=True),\r\n 400: None})\r\n def list(self, request):\r\n return Response({})\r\n\r\n @extend_schema(\r\n responses={\r\n 200: StatisticsSerializerV2(\r\n many=False)},\r\n operation_id=\"view\")\r\n @action(methods=['GET'], detail=False)\r\n def view(self, request):\r\n \"\"\"Get statistics for the website.\"\"\"\r\n minmax_scores = \\\r\n Video.objects.aggregate(**{'max_' + f: Max(F(f)) for f in VIDEO_FIELDS},\r\n **{'min_' + f: Min(F(f)) for f in VIDEO_FIELDS})\r\n\r\n try:\r\n min_score = min([v for k, v in minmax_scores.items() if k.startswith('min')])\r\n max_score = max([v for k, v in minmax_scores.items() if k.startswith('max')])\r\n except Exception:\r\n min_score = 0.0\r\n max_score = 0.0\r\n\r\n date_week_ago = make_aware(datetime.datetime.now()) - datetime.timedelta(days=7)\r\n\r\n data = {'certified_experts': UserInformation.\r\n _annotate_is_certified(UserInformation.objects.all())\r\n .filter(_is_certified=1, user__is_active=True).count(),\r\n 'pairwise_comparisons': ExpertRating.objects.all().count(),\r\n 'videos': Video.objects.all().count(),\r\n 'min_score': min_score,\r\n 'max_score': max_score,\r\n 'total_experts': UserInformation.objects.filter(is_demo=False).count(),\r\n 'weekly_active_ratings': ExpertRating.objects.filter(\r\n datetime_lastedit__gte=date_week_ago).count(),\r\n 'n_rated_videos': Video.objects.exclude(Q(expertrating_video_1__id=None) &\r\n Q(expertrating_video_2__id=None)\r\n ).distinct().count()\r\n }\r\n\r\n n_sum_comparisons = 0\r\n for f in 
VIDEO_FIELDS:\r\n val = ExpertRating.objects.filter(**{\r\n f + '__isnull': False, f + '_weight__gt': 0}).distinct().count()\r\n data[f\"n_{f}_comparisons\"] = val\r\n n_sum_comparisons += val\r\n\r\n data[\"n_sum_comparisons\"] = n_sum_comparisons\r\n\r\n return Response(StatisticsSerializerV2(data, many=False).data)\r\n", "path": "backend/backend/api_v2/statistics.py"}]} | 1,236 | 705 |
gh_patches_debug_22007 | rasdani/github-patches | git_diff | CTFd__CTFd-2074 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cascading Hints
Hints should support a sense of unlocking, where one hint cannot be used until one or more previous hints have been used.
--- END ISSUE ---
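One possible shape for the requested behavior is to record, for each hint, the ids of the hints that must be unlocked first, and to refuse an unlock until that set is satisfied. A minimal sketch of the check in plain Python; the `prerequisites` mapping is an assumption, not existing CTFd schema:

```python
def can_unlock(hint_id, prerequisites_by_hint, unlocked_hint_ids):
    """Return True if every prerequisite of ``hint_id`` is already unlocked."""
    required = set(prerequisites_by_hint.get(hint_id, []))
    return required.issubset(unlocked_hint_ids)


prerequisites_by_hint = {1: [], 2: [1], 3: [1, 2]}   # hint 3 needs 1 and 2
unlocked = {1}

print(can_unlock(2, prerequisites_by_hint, unlocked))  # True
print(can_unlock(3, prerequisites_by_hint, unlocked))  # False
```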
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/api/v1/hints.py`
Content:
```
1 from typing import List
2
3 from flask import request
4 from flask_restx import Namespace, Resource
5
6 from CTFd.api.v1.helpers.request import validate_args
7 from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
8 from CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse
9 from CTFd.constants import RawEnum
10 from CTFd.models import Hints, HintUnlocks, db
11 from CTFd.schemas.hints import HintSchema
12 from CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only
13 from CTFd.utils.helpers.models import build_model_filters
14 from CTFd.utils.user import get_current_user, is_admin
15
16 hints_namespace = Namespace("hints", description="Endpoint to retrieve Hints")
17
18 HintModel = sqlalchemy_to_pydantic(Hints)
19
20
21 class HintDetailedSuccessResponse(APIDetailedSuccessResponse):
22 data: HintModel
23
24
25 class HintListSuccessResponse(APIListSuccessResponse):
26 data: List[HintModel]
27
28
29 hints_namespace.schema_model(
30 "HintDetailedSuccessResponse", HintDetailedSuccessResponse.apidoc()
31 )
32
33 hints_namespace.schema_model(
34 "HintListSuccessResponse", HintListSuccessResponse.apidoc()
35 )
36
37
38 @hints_namespace.route("")
39 class HintList(Resource):
40 @admins_only
41 @hints_namespace.doc(
42 description="Endpoint to list Hint objects in bulk",
43 responses={
44 200: ("Success", "HintListSuccessResponse"),
45 400: (
46 "An error occured processing the provided or stored data",
47 "APISimpleErrorResponse",
48 ),
49 },
50 )
51 @validate_args(
52 {
53 "type": (str, None),
54 "challenge_id": (int, None),
55 "content": (str, None),
56 "cost": (int, None),
57 "q": (str, None),
58 "field": (
59 RawEnum("HintFields", {"type": "type", "content": "content"}),
60 None,
61 ),
62 },
63 location="query",
64 )
65 def get(self, query_args):
66 q = query_args.pop("q", None)
67 field = str(query_args.pop("field", None))
68 filters = build_model_filters(model=Hints, query=q, field=field)
69
70 hints = Hints.query.filter_by(**query_args).filter(*filters).all()
71 response = HintSchema(many=True, view="locked").dump(hints)
72
73 if response.errors:
74 return {"success": False, "errors": response.errors}, 400
75
76 return {"success": True, "data": response.data}
77
78 @admins_only
79 @hints_namespace.doc(
80 description="Endpoint to create a Hint object",
81 responses={
82 200: ("Success", "HintDetailedSuccessResponse"),
83 400: (
84 "An error occured processing the provided or stored data",
85 "APISimpleErrorResponse",
86 ),
87 },
88 )
89 def post(self):
90 req = request.get_json()
91 schema = HintSchema(view="admin")
92 response = schema.load(req, session=db.session)
93
94 if response.errors:
95 return {"success": False, "errors": response.errors}, 400
96
97 db.session.add(response.data)
98 db.session.commit()
99
100 response = schema.dump(response.data)
101
102 return {"success": True, "data": response.data}
103
104
105 @hints_namespace.route("/<hint_id>")
106 class Hint(Resource):
107 @during_ctf_time_only
108 @authed_only
109 @hints_namespace.doc(
110 description="Endpoint to get a specific Hint object",
111 responses={
112 200: ("Success", "HintDetailedSuccessResponse"),
113 400: (
114 "An error occured processing the provided or stored data",
115 "APISimpleErrorResponse",
116 ),
117 },
118 )
119 def get(self, hint_id):
120 user = get_current_user()
121 hint = Hints.query.filter_by(id=hint_id).first_or_404()
122
123 view = "unlocked"
124 if hint.cost:
125 view = "locked"
126 unlocked = HintUnlocks.query.filter_by(
127 account_id=user.account_id, target=hint.id
128 ).first()
129 if unlocked:
130 view = "unlocked"
131
132 if is_admin():
133 if request.args.get("preview", False):
134 view = "admin"
135
136 response = HintSchema(view=view).dump(hint)
137
138 if response.errors:
139 return {"success": False, "errors": response.errors}, 400
140
141 return {"success": True, "data": response.data}
142
143 @admins_only
144 @hints_namespace.doc(
145 description="Endpoint to edit a specific Hint object",
146 responses={
147 200: ("Success", "HintDetailedSuccessResponse"),
148 400: (
149 "An error occured processing the provided or stored data",
150 "APISimpleErrorResponse",
151 ),
152 },
153 )
154 def patch(self, hint_id):
155 hint = Hints.query.filter_by(id=hint_id).first_or_404()
156 req = request.get_json()
157
158 schema = HintSchema(view="admin")
159 response = schema.load(req, instance=hint, partial=True, session=db.session)
160
161 if response.errors:
162 return {"success": False, "errors": response.errors}, 400
163
164 db.session.add(response.data)
165 db.session.commit()
166
167 response = schema.dump(response.data)
168
169 return {"success": True, "data": response.data}
170
171 @admins_only
172 @hints_namespace.doc(
173 description="Endpoint to delete a specific Tag object",
174 responses={200: ("Success", "APISimpleSuccessResponse")},
175 )
176 def delete(self, hint_id):
177 hint = Hints.query.filter_by(id=hint_id).first_or_404()
178 db.session.delete(hint)
179 db.session.commit()
180 db.session.close()
181
182 return {"success": True}
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/api/v1/hints.py b/CTFd/api/v1/hints.py
--- a/CTFd/api/v1/hints.py
+++ b/CTFd/api/v1/hints.py
@@ -120,6 +120,33 @@
user = get_current_user()
hint = Hints.query.filter_by(id=hint_id).first_or_404()
+ if hint.requirements:
+ requirements = hint.requirements.get("prerequisites", [])
+
+ # Get the IDs of all hints that the user has unlocked
+ all_unlocks = HintUnlocks.query.filter_by(account_id=user.account_id).all()
+ unlock_ids = {unlock.id for unlock in all_unlocks}
+
+ # Filter out hint IDs that don't exist
+ all_hint_ids = {h.id for h in Hints.query.with_entities(Hints.id).all()}
+ prereqs = set(requirements).intersection(all_hint_ids)
+
+ # If the user has the necessary unlocks or is admin we should allow them to view
+ if unlock_ids >= prereqs or is_admin():
+ pass
+ else:
+ return (
+ {
+ "success": False,
+ "errors": {
+ "requirements": [
+ "You must unlock other hints before accessing this hint"
+ ]
+ },
+ },
+ 403,
+ )
+
view = "unlocked"
if hint.cost:
view = "locked"
| {"golden_diff": "diff --git a/CTFd/api/v1/hints.py b/CTFd/api/v1/hints.py\n--- a/CTFd/api/v1/hints.py\n+++ b/CTFd/api/v1/hints.py\n@@ -120,6 +120,33 @@\n user = get_current_user()\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n \n+ if hint.requirements:\n+ requirements = hint.requirements.get(\"prerequisites\", [])\n+\n+ # Get the IDs of all hints that the user has unlocked\n+ all_unlocks = HintUnlocks.query.filter_by(account_id=user.account_id).all()\n+ unlock_ids = {unlock.id for unlock in all_unlocks}\n+\n+ # Filter out hint IDs that don't exist\n+ all_hint_ids = {h.id for h in Hints.query.with_entities(Hints.id).all()}\n+ prereqs = set(requirements).intersection(all_hint_ids)\n+\n+ # If the user has the necessary unlocks or is admin we should allow them to view\n+ if unlock_ids >= prereqs or is_admin():\n+ pass\n+ else:\n+ return (\n+ {\n+ \"success\": False,\n+ \"errors\": {\n+ \"requirements\": [\n+ \"You must unlock other hints before accessing this hint\"\n+ ]\n+ },\n+ },\n+ 403,\n+ )\n+\n view = \"unlocked\"\n if hint.cost:\n view = \"locked\"\n", "issue": "Cascading Hints\nHints should have a sense of unlocking where one hint cannot be used until a previous one or others are used.\n", "before_files": [{"content": "from typing import List\n\nfrom flask import request\nfrom flask_restx import Namespace, Resource\n\nfrom CTFd.api.v1.helpers.request import validate_args\nfrom CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic\nfrom CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse\nfrom CTFd.constants import RawEnum\nfrom CTFd.models import Hints, HintUnlocks, db\nfrom CTFd.schemas.hints import HintSchema\nfrom CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only\nfrom CTFd.utils.helpers.models import build_model_filters\nfrom CTFd.utils.user import get_current_user, is_admin\n\nhints_namespace = Namespace(\"hints\", description=\"Endpoint to retrieve Hints\")\n\nHintModel = sqlalchemy_to_pydantic(Hints)\n\n\nclass HintDetailedSuccessResponse(APIDetailedSuccessResponse):\n data: HintModel\n\n\nclass HintListSuccessResponse(APIListSuccessResponse):\n data: List[HintModel]\n\n\nhints_namespace.schema_model(\n \"HintDetailedSuccessResponse\", HintDetailedSuccessResponse.apidoc()\n)\n\nhints_namespace.schema_model(\n \"HintListSuccessResponse\", HintListSuccessResponse.apidoc()\n)\n\n\n@hints_namespace.route(\"\")\nclass HintList(Resource):\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to list Hint objects in bulk\",\n responses={\n 200: (\"Success\", \"HintListSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n @validate_args(\n {\n \"type\": (str, None),\n \"challenge_id\": (int, None),\n \"content\": (str, None),\n \"cost\": (int, None),\n \"q\": (str, None),\n \"field\": (\n RawEnum(\"HintFields\", {\"type\": \"type\", \"content\": \"content\"}),\n None,\n ),\n },\n location=\"query\",\n )\n def get(self, query_args):\n q = query_args.pop(\"q\", None)\n field = str(query_args.pop(\"field\", None))\n filters = build_model_filters(model=Hints, query=q, field=field)\n\n hints = Hints.query.filter_by(**query_args).filter(*filters).all()\n response = HintSchema(many=True, view=\"locked\").dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to create 
a Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def post(self):\n req = request.get_json()\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n\n@hints_namespace.route(\"/<hint_id>\")\nclass Hint(Resource):\n @during_ctf_time_only\n @authed_only\n @hints_namespace.doc(\n description=\"Endpoint to get a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def get(self, hint_id):\n user = get_current_user()\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n\n view = \"unlocked\"\n if hint.cost:\n view = \"locked\"\n unlocked = HintUnlocks.query.filter_by(\n account_id=user.account_id, target=hint.id\n ).first()\n if unlocked:\n view = \"unlocked\"\n\n if is_admin():\n if request.args.get(\"preview\", False):\n view = \"admin\"\n\n response = HintSchema(view=view).dump(hint)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to edit a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def patch(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n req = request.get_json()\n\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, instance=hint, partial=True, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to delete a specific Tag object\",\n responses={200: (\"Success\", \"APISimpleSuccessResponse\")},\n )\n def delete(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n db.session.delete(hint)\n db.session.commit()\n db.session.close()\n\n return {\"success\": True}\n", "path": "CTFd/api/v1/hints.py"}], "after_files": [{"content": "from typing import List\n\nfrom flask import request\nfrom flask_restx import Namespace, Resource\n\nfrom CTFd.api.v1.helpers.request import validate_args\nfrom CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic\nfrom CTFd.api.v1.schemas import APIDetailedSuccessResponse, APIListSuccessResponse\nfrom CTFd.constants import RawEnum\nfrom CTFd.models import Hints, HintUnlocks, db\nfrom CTFd.schemas.hints import HintSchema\nfrom CTFd.utils.decorators import admins_only, authed_only, during_ctf_time_only\nfrom CTFd.utils.helpers.models import build_model_filters\nfrom CTFd.utils.user import get_current_user, is_admin\n\nhints_namespace = Namespace(\"hints\", description=\"Endpoint to retrieve Hints\")\n\nHintModel = sqlalchemy_to_pydantic(Hints)\n\n\nclass HintDetailedSuccessResponse(APIDetailedSuccessResponse):\n data: 
HintModel\n\n\nclass HintListSuccessResponse(APIListSuccessResponse):\n data: List[HintModel]\n\n\nhints_namespace.schema_model(\n \"HintDetailedSuccessResponse\", HintDetailedSuccessResponse.apidoc()\n)\n\nhints_namespace.schema_model(\n \"HintListSuccessResponse\", HintListSuccessResponse.apidoc()\n)\n\n\n@hints_namespace.route(\"\")\nclass HintList(Resource):\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to list Hint objects in bulk\",\n responses={\n 200: (\"Success\", \"HintListSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n @validate_args(\n {\n \"type\": (str, None),\n \"challenge_id\": (int, None),\n \"content\": (str, None),\n \"cost\": (int, None),\n \"q\": (str, None),\n \"field\": (\n RawEnum(\"HintFields\", {\"type\": \"type\", \"content\": \"content\"}),\n None,\n ),\n },\n location=\"query\",\n )\n def get(self, query_args):\n q = query_args.pop(\"q\", None)\n field = str(query_args.pop(\"field\", None))\n filters = build_model_filters(model=Hints, query=q, field=field)\n\n hints = Hints.query.filter_by(**query_args).filter(*filters).all()\n response = HintSchema(many=True, view=\"locked\").dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to create a Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def post(self):\n req = request.get_json()\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n\n@hints_namespace.route(\"/<hint_id>\")\nclass Hint(Resource):\n @during_ctf_time_only\n @authed_only\n @hints_namespace.doc(\n description=\"Endpoint to get a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def get(self, hint_id):\n user = get_current_user()\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n\n if hint.requirements:\n requirements = hint.requirements.get(\"prerequisites\", [])\n\n # Get the IDs of all hints that the user has unlocked\n all_unlocks = HintUnlocks.query.filter_by(account_id=user.account_id).all()\n unlock_ids = {unlock.id for unlock in all_unlocks}\n\n # Filter out hint IDs that don't exist\n all_hint_ids = {h.id for h in Hints.query.with_entities(Hints.id).all()}\n prereqs = set(requirements).intersection(all_hint_ids)\n\n # If the user has the necessary unlocks or is admin we should allow them to view\n if unlock_ids >= prereqs or is_admin():\n pass\n else:\n return (\n {\n \"success\": False,\n \"errors\": {\n \"requirements\": [\n \"You must unlock other hints before accessing this hint\"\n ]\n },\n },\n 403,\n )\n\n view = \"unlocked\"\n if hint.cost:\n view = \"locked\"\n unlocked = HintUnlocks.query.filter_by(\n account_id=user.account_id, target=hint.id\n ).first()\n if unlocked:\n view = \"unlocked\"\n\n if is_admin():\n if request.args.get(\"preview\", False):\n view = 
\"admin\"\n\n response = HintSchema(view=view).dump(hint)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to edit a specific Hint object\",\n responses={\n 200: (\"Success\", \"HintDetailedSuccessResponse\"),\n 400: (\n \"An error occured processing the provided or stored data\",\n \"APISimpleErrorResponse\",\n ),\n },\n )\n def patch(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n req = request.get_json()\n\n schema = HintSchema(view=\"admin\")\n response = schema.load(req, instance=hint, partial=True, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n db.session.commit()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n\n @admins_only\n @hints_namespace.doc(\n description=\"Endpoint to delete a specific Tag object\",\n responses={200: (\"Success\", \"APISimpleSuccessResponse\")},\n )\n def delete(self, hint_id):\n hint = Hints.query.filter_by(id=hint_id).first_or_404()\n db.session.delete(hint)\n db.session.commit()\n db.session.close()\n\n return {\"success\": True}\n", "path": "CTFd/api/v1/hints.py"}]} | 2,009 | 330 |
gh_patches_debug_41799 | rasdani/github-patches | git_diff | mindee__doctr-369 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[demo] Improve UI for OCR result display
For very dense documents, since the predicted text value is plotted statically, there can be some readability issues. We should try to improve this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demo/app.py`
Content:
```
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import os
7 import streamlit as st
8 import matplotlib.pyplot as plt
9
10 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
11
12 import tensorflow as tf
13 import cv2
14
15 gpu_devices = tf.config.experimental.list_physical_devices('GPU')
16 if any(gpu_devices):
17 tf.config.experimental.set_memory_growth(gpu_devices[0], True)
18
19 from doctr.documents import DocumentFile
20 from doctr.models import ocr_predictor
21 from doctr.utils.visualization import synthetize_page, visualize_page
22
23 DET_ARCHS = ["db_resnet50"]
24 RECO_ARCHS = ["crnn_vgg16_bn", "crnn_resnet31", "sar_vgg16_bn", "sar_resnet31"]
25
26
27 def main():
28
29 # Wide mode
30 st.set_page_config(layout="wide")
31
32 # Designing the interface
33 st.title("DocTR: Document Text Recognition")
34 # For newline
35 st.write('\n')
36 # Set the columns
37 cols = st.beta_columns((1, 1))
38 cols[0].subheader("Input document (first page)")
39 cols[1].subheader("Raw heatmap (segmentation task)")
40
41 # Sidebar
42 # File selection
43 st.sidebar.title("Document selection")
44 # Disabling warning
45 st.set_option('deprecation.showfileUploaderEncoding', False)
46 # Choose your own image
47 uploaded_file = st.sidebar.file_uploader("Upload files", type=['pdf', 'png', 'jpeg', 'jpg'])
48 if uploaded_file is not None:
49 if uploaded_file.name.endswith('.pdf'):
50 doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))
51 else:
52 doc = DocumentFile.from_images(uploaded_file.read())
53 cols[0].image(doc[0], width=640)
54
55 # Model selection
56 st.sidebar.title("Model selection")
57 det_arch = st.sidebar.selectbox("Text detection model", DET_ARCHS)
58 reco_arch = st.sidebar.selectbox("Text recognition model", RECO_ARCHS)
59
60 # For newline
61 st.sidebar.write('\n')
62
63 if st.sidebar.button("Analyze document"):
64
65 if uploaded_file is None:
66 st.sidebar.write("Please upload a document")
67
68 else:
69 with st.spinner('Loading model...'):
70 predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)
71
72 with st.spinner('Analyzing...'):
73
74 # Forward the image to the model
75 processed_batches = predictor.det_predictor.pre_processor(doc)
76 out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)
77 seg_map = out["out_map"]
78 seg_map = tf.squeeze(seg_map[0, ...], axis=[2])
79 seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
80 interpolation=cv2.INTER_LINEAR)
81 # Plot the raw heatmap
82 fig, ax = plt.subplots()
83 ax.imshow(seg_map)
84 ax.axis('off')
85 cols[1].pyplot(fig)
86
87 # Plot OCR output
88 out = predictor(doc, training=False)
89 cols[1].subheader("OCR output")
90 fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)
91 cols[1].pyplot(fig)
92
93 # Page reconsitution under input page
94 cols[0].subheader("Page reconstitution from OCR output")
95 img = synthetize_page(out.pages[0].export())
96 cols[0].image(img, clamp=True, width=640)
97
98
99 if __name__ == '__main__':
100 main()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demo/app.py b/demo/app.py
--- a/demo/app.py
+++ b/demo/app.py
@@ -33,10 +33,14 @@
st.title("DocTR: Document Text Recognition")
# For newline
st.write('\n')
+ # Instructions
+ st.markdown("*Hint: click on the top-right corner of an image to enlarge it!*")
# Set the columns
- cols = st.beta_columns((1, 1))
- cols[0].subheader("Input document (first page)")
- cols[1].subheader("Raw heatmap (segmentation task)")
+ cols = st.beta_columns((1, 1, 1, 1))
+ cols[0].subheader("Input page")
+ cols[1].subheader("Segmentation heatmap")
+ cols[2].subheader("OCR output")
+ cols[3].subheader("Page reconstitution")
# Sidebar
# File selection
@@ -50,7 +54,8 @@
doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))
else:
doc = DocumentFile.from_images(uploaded_file.read())
- cols[0].image(doc[0], width=640)
+ page_idx = st.sidebar.selectbox("Page selection", [idx + 1 for idx in range(len(doc))]) - 1
+ cols[0].image(doc[page_idx])
# Model selection
st.sidebar.title("Model selection")
@@ -60,7 +65,7 @@
# For newline
st.sidebar.write('\n')
- if st.sidebar.button("Analyze document"):
+ if st.sidebar.button("Analyze page"):
if uploaded_file is None:
st.sidebar.write("Please upload a document")
@@ -72,11 +77,11 @@
with st.spinner('Analyzing...'):
# Forward the image to the model
- processed_batches = predictor.det_predictor.pre_processor(doc)
+ processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])
out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)
seg_map = out["out_map"]
seg_map = tf.squeeze(seg_map[0, ...], axis=[2])
- seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
+ seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),
interpolation=cv2.INTER_LINEAR)
# Plot the raw heatmap
fig, ax = plt.subplots()
@@ -85,15 +90,18 @@
cols[1].pyplot(fig)
# Plot OCR output
- out = predictor(doc, training=False)
- cols[1].subheader("OCR output")
- fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)
- cols[1].pyplot(fig)
+ out = predictor([doc[page_idx]], training=False)
+ fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)
+ cols[2].pyplot(fig)
# Page reconsitution under input page
- cols[0].subheader("Page reconstitution from OCR output")
- img = synthetize_page(out.pages[0].export())
- cols[0].image(img, clamp=True, width=640)
+ page_export = out.pages[0].export()
+ img = synthetize_page(page_export)
+ cols[3].image(img, clamp=True)
+
+ # Display JSON
+ st.markdown("\nHere are your analysis results in JSON format:")
+ st.json(page_export)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/demo/app.py b/demo/app.py\n--- a/demo/app.py\n+++ b/demo/app.py\n@@ -33,10 +33,14 @@\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n+ # Instructions\n+ st.markdown(\"*Hint: click on the top-right corner of an image to enlarge it!*\")\n # Set the columns\n- cols = st.beta_columns((1, 1))\n- cols[0].subheader(\"Input document (first page)\")\n- cols[1].subheader(\"Raw heatmap (segmentation task)\")\n+ cols = st.beta_columns((1, 1, 1, 1))\n+ cols[0].subheader(\"Input page\")\n+ cols[1].subheader(\"Segmentation heatmap\")\n+ cols[2].subheader(\"OCR output\")\n+ cols[3].subheader(\"Page reconstitution\")\n \n # Sidebar\n # File selection\n@@ -50,7 +54,8 @@\n doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n- cols[0].image(doc[0], width=640)\n+ page_idx = st.sidebar.selectbox(\"Page selection\", [idx + 1 for idx in range(len(doc))]) - 1\n+ cols[0].image(doc[page_idx])\n \n # Model selection\n st.sidebar.title(\"Model selection\")\n@@ -60,7 +65,7 @@\n # For newline\n st.sidebar.write('\\n')\n \n- if st.sidebar.button(\"Analyze document\"):\n+ if st.sidebar.button(\"Analyze page\"):\n \n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n@@ -72,11 +77,11 @@\n with st.spinner('Analyzing...'):\n \n # Forward the image to the model\n- processed_batches = predictor.det_predictor.pre_processor(doc)\n+ processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n- seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n+ seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n@@ -85,15 +90,18 @@\n cols[1].pyplot(fig)\n \n # Plot OCR output\n- out = predictor(doc, training=False)\n- cols[1].subheader(\"OCR output\")\n- fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)\n- cols[1].pyplot(fig)\n+ out = predictor([doc[page_idx]], training=False)\n+ fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)\n+ cols[2].pyplot(fig)\n \n # Page reconsitution under input page\n- cols[0].subheader(\"Page reconstitution from OCR output\")\n- img = synthetize_page(out.pages[0].export())\n- cols[0].image(img, clamp=True, width=640)\n+ page_export = out.pages[0].export()\n+ img = synthetize_page(page_export)\n+ cols[3].image(img, clamp=True)\n+\n+ # Display JSON\n+ st.markdown(\"\\nHere are your analysis results in JSON format:\")\n+ st.json(page_export)\n \n \n if __name__ == '__main__':\n", "issue": "[demo] Improve UI for OCR result display\nFor very dense documents, since the predicted text value is plotted statically, there can be some readability issues. 
We should try to improve this\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nimport streamlit as st\nimport matplotlib.pyplot as plt\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport tensorflow as tf\nimport cv2\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.documents import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import synthetize_page, visualize_page\n\nDET_ARCHS = [\"db_resnet50\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_resnet31\", \"sar_vgg16_bn\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Set the columns\n cols = st.beta_columns((1, 1))\n cols[0].subheader(\"Input document (first page)\")\n cols[1].subheader(\"Raw heatmap (segmentation task)\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n cols[0].image(doc[0], width=640)\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze document\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n with st.spinner('Loading model...'):\n predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor(doc)\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # Plot OCR output\n out = predictor(doc, training=False)\n cols[1].subheader(\"OCR output\")\n fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)\n cols[1].pyplot(fig)\n\n # Page reconsitution under input page\n cols[0].subheader(\"Page reconstitution from OCR output\")\n img = synthetize_page(out.pages[0].export())\n cols[0].image(img, clamp=True, width=640)\n\n\nif __name__ == '__main__':\n main()\n", "path": "demo/app.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nimport streamlit as st\nimport matplotlib.pyplot as 
plt\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport tensorflow as tf\nimport cv2\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.documents import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import synthetize_page, visualize_page\n\nDET_ARCHS = [\"db_resnet50\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_resnet31\", \"sar_vgg16_bn\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Instructions\n st.markdown(\"*Hint: click on the top-right corner of an image to enlarge it!*\")\n # Set the columns\n cols = st.beta_columns((1, 1, 1, 1))\n cols[0].subheader(\"Input page\")\n cols[1].subheader(\"Segmentation heatmap\")\n cols[2].subheader(\"OCR output\")\n cols[3].subheader(\"Page reconstitution\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read()).as_images(output_size=(1024, 1024))\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n page_idx = st.sidebar.selectbox(\"Page selection\", [idx + 1 for idx in range(len(doc))]) - 1\n cols[0].image(doc[page_idx])\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze page\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n with st.spinner('Loading model...'):\n predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor([doc[page_idx]])\n out = predictor.det_predictor.model(processed_batches[0], return_model_output=True, training=False)\n seg_map = out[\"out_map\"]\n seg_map = tf.squeeze(seg_map[0, ...], axis=[2])\n seg_map = cv2.resize(seg_map.numpy(), (doc[page_idx].shape[1], doc[page_idx].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # Plot OCR output\n out = predictor([doc[page_idx]], training=False)\n fig = visualize_page(out.pages[0].export(), doc[page_idx], interactive=False)\n cols[2].pyplot(fig)\n\n # Page reconsitution under input page\n page_export = out.pages[0].export()\n img = synthetize_page(page_export)\n cols[3].image(img, clamp=True)\n\n # Display JSON\n st.markdown(\"\\nHere are your analysis results in JSON format:\")\n st.json(page_export)\n\n\nif __name__ == '__main__':\n main()\n", "path": "demo/app.py"}]} | 1,336 | 863 |
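The heart of the UI change in this entry is letting the user pick one page before anything is plotted, instead of always rendering the first page of a dense document. Stripped of the OCR parts, the Streamlit pattern looks roughly like the snippet below; the dummy `doc` list is a stand-in for the images returned by `DocumentFile`, so only the selectbox/indexing idiom should be read as meaningful.

```python
import numpy as np
import streamlit as st

# Hypothetical stand-in for DocumentFile.from_pdf(...).as_images(...)
doc = [np.zeros((1024, 1024, 3), dtype=np.uint8) for _ in range(3)]

# The selectbox shows 1-based page numbers; subtract 1 to index the list.
page_idx = st.sidebar.selectbox("Page selection", [i + 1 for i in range(len(doc))]) - 1
st.image(doc[page_idx])
```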
gh_patches_debug_27227 | rasdani/github-patches | git_diff | searx__searx-2066 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mymemory_translated engine: unexpected crash 'str' object has no attribute 'decode'
mymemory engine does not work.
You can see it in the search engine statistics: https://searx.space/#.
Either: "unexpected crash 'str' object has no attribute 'decode'"
Or: "no result"
My instance is https://searx.hlfh.space (I use antibot-proxy) and I have the first issue.
I am using mymemory with the API key I got from the service.
--- END ISSUE ---
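The error in the title is the classic Python 3 symptom of treating `str` like `bytes`: only `bytes` has `.decode()`. The snippet below reproduces the message and shows the bytes-pattern-plus-explicit-decode shape that the patch later in this entry adopts; it is a standalone illustration, not the engine's exact call path.

```python
import re

# str has no .decode() on Python 3 -- only bytes does.
try:
    "en-fr bonjour".decode("utf-8")
except AttributeError as exc:
    print(exc)  # 'str' object has no attribute 'decode'

# Matching a bytes query therefore needs a bytes pattern, then an explicit decode.
query = b"en-fr bonjour"
m = re.match(rb".*?([a-z]+)-([a-z]+) (.{2,})$", query, re.I)
if m:
    from_lang, to_lang, text = m.groups()
    print(from_lang, to_lang, text.decode("utf-8"))
```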
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/translated.py`
Content:
```
1 """
2 MyMemory Translated
3
4 @website https://mymemory.translated.net/
5 @provide-api yes (https://mymemory.translated.net/doc/spec.php)
6 @using-api yes
7 @results JSON
8 @stable yes
9 @parse url, title, content
10 """
11 import re
12 from sys import version_info
13 from searx.utils import is_valid_lang
14
15 if version_info[0] == 3:
16 unicode = str
17
18 categories = ['general']
19 url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
20 web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
21 weight = 100
22
23 parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
24 api_key = ''
25
26
27 def request(query, params):
28 m = parser_re.match(unicode(query, 'utf8'))
29 if not m:
30 return params
31
32 from_lang, to_lang, query = m.groups()
33
34 from_lang = is_valid_lang(from_lang)
35 to_lang = is_valid_lang(to_lang)
36
37 if not from_lang or not to_lang:
38 return params
39
40 if api_key:
41 key_form = '&key=' + api_key
42 else:
43 key_form = ''
44 params['url'] = url.format(from_lang=from_lang[1],
45 to_lang=to_lang[1],
46 query=query,
47 key=key_form)
48 params['query'] = query
49 params['from_lang'] = from_lang
50 params['to_lang'] = to_lang
51
52 return params
53
54
55 def response(resp):
56 results = []
57 results.append({
58 'url': web_url.format(
59 from_lang=resp.search_params['from_lang'][2],
60 to_lang=resp.search_params['to_lang'][2],
61 query=resp.search_params['query']),
62 'title': '[{0}-{1}] {2}'.format(
63 resp.search_params['from_lang'][1],
64 resp.search_params['to_lang'][1],
65 resp.search_params['query']),
66 'content': resp.json()['responseData']['translatedText']
67 })
68 return results
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/translated.py b/searx/engines/translated.py
--- a/searx/engines/translated.py
+++ b/searx/engines/translated.py
@@ -9,23 +9,19 @@
@parse url, title, content
"""
import re
-from sys import version_info
from searx.utils import is_valid_lang
-if version_info[0] == 3:
- unicode = str
-
categories = ['general']
-url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
-web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
+url = u'https://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
+web_url = u'https://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
-parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
+parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
api_key = ''
def request(query, params):
- m = parser_re.match(unicode(query, 'utf8'))
+ m = parser_re.match(query)
if not m:
return params
@@ -43,9 +39,9 @@
key_form = ''
params['url'] = url.format(from_lang=from_lang[1],
to_lang=to_lang[1],
- query=query,
+ query=query.decode('utf-8'),
key=key_form)
- params['query'] = query
+ params['query'] = query.decode('utf-8')
params['from_lang'] = from_lang
params['to_lang'] = to_lang
| {"golden_diff": "diff --git a/searx/engines/translated.py b/searx/engines/translated.py\n--- a/searx/engines/translated.py\n+++ b/searx/engines/translated.py\n@@ -9,23 +9,19 @@\n @parse url, title, content\n \"\"\"\n import re\n-from sys import version_info\n from searx.utils import is_valid_lang\n \n-if version_info[0] == 3:\n- unicode = str\n-\n categories = ['general']\n-url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\n-web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\n+url = u'https://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\n+web_url = u'https://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\n weight = 100\n \n-parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\n+parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\n api_key = ''\n \n \n def request(query, params):\n- m = parser_re.match(unicode(query, 'utf8'))\n+ m = parser_re.match(query)\n if not m:\n return params\n \n@@ -43,9 +39,9 @@\n key_form = ''\n params['url'] = url.format(from_lang=from_lang[1],\n to_lang=to_lang[1],\n- query=query,\n+ query=query.decode('utf-8'),\n key=key_form)\n- params['query'] = query\n+ params['query'] = query.decode('utf-8')\n params['from_lang'] = from_lang\n params['to_lang'] = to_lang\n", "issue": "mymemory_translated engine: unexpected crash 'str' object has no attribute 'decode' \nmymemory engine does not work.\r\nYou can see it in the search engine statistics: https://searx.space/#.\r\n\r\nEither: \"unexpected crash 'str' object has no attribute 'decode'\"\r\nOr: \"no result\"\r\n\r\nMy instance is https://searx.hlfh.space (I use antibot-proxy) and I have the first issue.\r\nI am using mymemory with the API key I got from the service.\n", "before_files": [{"content": "\"\"\"\n MyMemory Translated\n\n @website https://mymemory.translated.net/\n @provide-api yes (https://mymemory.translated.net/doc/spec.php)\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content\n\"\"\"\nimport re\nfrom sys import version_info\nfrom searx.utils import is_valid_lang\n\nif version_info[0] == 3:\n unicode = str\n\ncategories = ['general']\nurl = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\nweb_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\nweight = 100\n\nparser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\napi_key = ''\n\n\ndef request(query, params):\n m = parser_re.match(unicode(query, 'utf8'))\n if not m:\n return params\n\n from_lang, to_lang, query = m.groups()\n\n from_lang = is_valid_lang(from_lang)\n to_lang = is_valid_lang(to_lang)\n\n if not from_lang or not to_lang:\n return params\n\n if api_key:\n key_form = '&key=' + api_key\n else:\n key_form = ''\n params['url'] = url.format(from_lang=from_lang[1],\n to_lang=to_lang[1],\n query=query,\n key=key_form)\n params['query'] = query\n params['from_lang'] = from_lang\n params['to_lang'] = to_lang\n\n return params\n\n\ndef response(resp):\n results = []\n results.append({\n 'url': web_url.format(\n from_lang=resp.search_params['from_lang'][2],\n to_lang=resp.search_params['to_lang'][2],\n query=resp.search_params['query']),\n 'title': '[{0}-{1}] {2}'.format(\n resp.search_params['from_lang'][1],\n resp.search_params['to_lang'][1],\n resp.search_params['query']),\n 'content': resp.json()['responseData']['translatedText']\n })\n return results\n", "path": "searx/engines/translated.py"}], "after_files": 
[{"content": "\"\"\"\n MyMemory Translated\n\n @website https://mymemory.translated.net/\n @provide-api yes (https://mymemory.translated.net/doc/spec.php)\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title, content\n\"\"\"\nimport re\nfrom searx.utils import is_valid_lang\n\ncategories = ['general']\nurl = u'https://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'\nweb_url = u'https://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'\nweight = 100\n\nparser_re = re.compile(b'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)\napi_key = ''\n\n\ndef request(query, params):\n m = parser_re.match(query)\n if not m:\n return params\n\n from_lang, to_lang, query = m.groups()\n\n from_lang = is_valid_lang(from_lang)\n to_lang = is_valid_lang(to_lang)\n\n if not from_lang or not to_lang:\n return params\n\n if api_key:\n key_form = '&key=' + api_key\n else:\n key_form = ''\n params['url'] = url.format(from_lang=from_lang[1],\n to_lang=to_lang[1],\n query=query.decode('utf-8'),\n key=key_form)\n params['query'] = query.decode('utf-8')\n params['from_lang'] = from_lang\n params['to_lang'] = to_lang\n\n return params\n\n\ndef response(resp):\n results = []\n results.append({\n 'url': web_url.format(\n from_lang=resp.search_params['from_lang'][2],\n to_lang=resp.search_params['to_lang'][2],\n query=resp.search_params['query']),\n 'title': '[{0}-{1}] {2}'.format(\n resp.search_params['from_lang'][1],\n resp.search_params['to_lang'][1],\n resp.search_params['query']),\n 'content': resp.json()['responseData']['translatedText']\n })\n return results\n", "path": "searx/engines/translated.py"}]} | 991 | 429 |
gh_patches_debug_9958 | rasdani/github-patches | git_diff | ethereum__web3.py-3187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
web3 import errors in Python 3.12
* Version: 6.13.0
* Python: 3.12, inside a venv
* OS: linux (but is probably applicable to other platforms as well)
* `pip freeze` output:
```
aiohttp==3.9.1
aiosignal==1.3.1
attrs==23.2.0
bitarray==2.9.2
certifi==2023.11.17
charset-normalizer==3.3.2
cytoolz==0.12.2
eth-abi==4.2.1
eth-account==0.10.0
eth-hash==0.5.2
eth-keyfile==0.7.0
eth-keys==0.4.0
eth-rlp==1.0.0
eth-typing==3.5.2
eth-utils==2.3.1
frozenlist==1.4.1
hexbytes==0.3.1
idna==3.6
jsonschema==4.20.0
jsonschema-specifications==2023.12.1
lru-dict==1.2.0
multidict==6.0.4
parsimonious==0.9.0
protobuf==4.25.1
pycryptodome==3.19.1
pyunormalize==15.1.0
referencing==0.32.1
regex==2023.12.25
requests==2.31.0
rlp==4.0.0
rpds-py==0.16.2
toolz==0.12.0
typing_extensions==4.9.0
urllib3==2.1.0
web3==6.13.0
websockets==12.0
yarl==1.9.4
```
### What was wrong?
In certain situations, web3 will raise ImportErrors on python 3.12 if the `setuptools` package is not installed. _In particular, this happens inside a fresh Python 3.12 venv._ The `setuptools` package automatically installs the `pkg_resources` package, which is used in web3 [here](https://github.com/ethereum/web3.py/blob/8f853f5841fd62187bce0c9f17be75627104ca43/web3/__init__.py#L25). This used to work fine in older Python versions. However, according to the [new changes in 3.12](https://docs.python.org/3/whatsnew/3.12.html):
> gh-95299: Do not pre-install setuptools in virtual environments created with venv. This means that distutils, setuptools, pkg_resources, and easy_install will no longer be available by default; to access these run pip install setuptools in the activated virtual environment.
This means that the pkg_resources package is no longer accessible, which causes this error.
Among other things, this scenario can occur inside tox tests for projects that have the `web3` package installed and are configured to test against 3.12. This causes such tests to immediately fail because of the ImportError. The workaround, installing setuptools after the venv is created, causes unnecessarily long test times, adding about 3 minutes to the run time.
### How can it be fixed?
Given that web3's use of setuptools/pkg_resources is limited to just getting the version number, this should be trivial to fix. Why not open the file with built-in functions such as `open()` and parse it for the version number? I don't think that `web3` should continue to depend on setuptools.
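On Python 3.8+ the standard library already exposes installed-package versions without setuptools via `importlib.metadata`, so no hand-parsing of files is needed; the patch later in this entry takes exactly this route, keeping `pkg_resources` only as a fallback for older interpreters. Minimal sketch:

```python
# Python 3.8+: no setuptools / pkg_resources required
from importlib.metadata import version

__version__ = version("web3")
```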
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/__init__.py`
Content:
```
1 from eth_account import Account # noqa: E402,
2 import pkg_resources
3
4 from web3.main import (
5 AsyncWeb3,
6 Web3,
7 )
8 from web3.providers.async_rpc import ( # noqa: E402
9 AsyncHTTPProvider,
10 )
11 from web3.providers.eth_tester import ( # noqa: E402
12 EthereumTesterProvider,
13 )
14 from web3.providers.ipc import ( # noqa: E402
15 IPCProvider,
16 )
17 from web3.providers.rpc import ( # noqa: E402
18 HTTPProvider,
19 )
20 from web3.providers.websocket import ( # noqa: E402
21 WebsocketProvider,
22 WebsocketProviderV2,
23 )
24
25 __version__ = pkg_resources.get_distribution("web3").version
26
27 __all__ = [
28 "__version__",
29 "AsyncWeb3",
30 "Web3",
31 "HTTPProvider",
32 "IPCProvider",
33 "WebsocketProvider",
34 "WebsocketProviderV2",
35 "EthereumTesterProvider",
36 "Account",
37 "AsyncHTTPProvider",
38 ]
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/__init__.py b/web3/__init__.py
--- a/web3/__init__.py
+++ b/web3/__init__.py
@@ -1,5 +1,15 @@
-from eth_account import Account # noqa: E402,
-import pkg_resources
+from eth_account import Account # noqa: E402
+import sys
+
+if sys.version_info.major == 3 and sys.version_info.minor < 8:
+ import pkg_resources
+
+ __version__ = pkg_resources.get_distribution("web3").version
+else:
+ from importlib.metadata import version
+
+ __version__ = version("web3")
+
from web3.main import (
AsyncWeb3,
@@ -22,7 +32,6 @@
WebsocketProviderV2,
)
-__version__ = pkg_resources.get_distribution("web3").version
__all__ = [
"__version__",
| {"golden_diff": "diff --git a/web3/__init__.py b/web3/__init__.py\n--- a/web3/__init__.py\n+++ b/web3/__init__.py\n@@ -1,5 +1,15 @@\n-from eth_account import Account # noqa: E402,\n-import pkg_resources\n+from eth_account import Account # noqa: E402\n+import sys\n+\n+if sys.version_info.major == 3 and sys.version_info.minor < 8:\n+ import pkg_resources\n+\n+ __version__ = pkg_resources.get_distribution(\"web3\").version\n+else:\n+ from importlib.metadata import version\n+\n+ __version__ = version(\"web3\")\n+\n \n from web3.main import (\n AsyncWeb3,\n@@ -22,7 +32,6 @@\n WebsocketProviderV2,\n )\n \n-__version__ = pkg_resources.get_distribution(\"web3\").version\n \n __all__ = [\n \"__version__\",\n", "issue": "web3 import errors in Python 3.12\n* Version: 6.13.0\r\n* Python: 3.12, inside a venv\r\n* OS: linux (but is probably applicable to other platforms as well)\r\n* `pip freeze` output:\r\n\r\n```\r\naiohttp==3.9.1\r\naiosignal==1.3.1\r\nattrs==23.2.0\r\nbitarray==2.9.2\r\ncertifi==2023.11.17\r\ncharset-normalizer==3.3.2\r\ncytoolz==0.12.2\r\neth-abi==4.2.1\r\neth-account==0.10.0\r\neth-hash==0.5.2\r\neth-keyfile==0.7.0\r\neth-keys==0.4.0\r\neth-rlp==1.0.0\r\neth-typing==3.5.2\r\neth-utils==2.3.1\r\nfrozenlist==1.4.1\r\nhexbytes==0.3.1\r\nidna==3.6\r\njsonschema==4.20.0\r\njsonschema-specifications==2023.12.1\r\nlru-dict==1.2.0\r\nmultidict==6.0.4\r\nparsimonious==0.9.0\r\nprotobuf==4.25.1\r\npycryptodome==3.19.1\r\npyunormalize==15.1.0\r\nreferencing==0.32.1\r\nregex==2023.12.25\r\nrequests==2.31.0\r\nrlp==4.0.0\r\nrpds-py==0.16.2\r\ntoolz==0.12.0\r\ntyping_extensions==4.9.0\r\nurllib3==2.1.0\r\nweb3==6.13.0\r\nwebsockets==12.0\r\nyarl==1.9.4\r\n```\r\n\r\n### What was wrong?\r\n\r\nIn certain situations, web3 will raise ImportErrors on python 3.12 if the `setuptools` package is not installed. _In particular, this happens inside a fresh Python 3.12 venv._ The `setuptools` package automatically installs the `pkg_resources` package, which is used in web3 [here](https://github.com/ethereum/web3.py/blob/8f853f5841fd62187bce0c9f17be75627104ca43/web3/__init__.py#L25). This used to work fine in older Python versions. However, according to the [new changes in 3.12](https://docs.python.org/3/whatsnew/3.12.html):\r\n\r\n> gh-95299: Do not pre-install setuptools in virtual environments created with venv. This means that distutils, setuptools, pkg_resources, and easy_install will no longer available by default; to access these run pip install setuptools in the activated virtual environment.\r\n\r\nThis means that the pkg_resources package is no longer accessible which causes this error.\r\n\r\nAmong other things, this scenario can occur inside tox tests for projects that have the `web3` package installed and are configured to test against 3.12. This causes such tests to immediately fail because of the ImportError. The workaround, installing setuptools after the venv created, causes unnecessarily long test times, adding about 3 minutes to the run time.\r\n\r\n### How can it be fixed?\r\n\r\nGiven that web3's use of setuptools/pkg_resources is limited to just getting the version number, this should be trivial to fix. Why not open the file with built-in functions such as `open()` and parse it for the version number? 
I don't think that `web3` should continue to depend on setuptools.\n", "before_files": [{"content": "from eth_account import Account # noqa: E402,\nimport pkg_resources\n\nfrom web3.main import (\n AsyncWeb3,\n Web3,\n)\nfrom web3.providers.async_rpc import ( # noqa: E402\n AsyncHTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n WebsocketProviderV2,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"AsyncWeb3\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"WebsocketProviderV2\",\n \"EthereumTesterProvider\",\n \"Account\",\n \"AsyncHTTPProvider\",\n]\n", "path": "web3/__init__.py"}], "after_files": [{"content": "from eth_account import Account # noqa: E402\nimport sys\n\nif sys.version_info.major == 3 and sys.version_info.minor < 8:\n import pkg_resources\n\n __version__ = pkg_resources.get_distribution(\"web3\").version\nelse:\n from importlib.metadata import version\n\n __version__ = version(\"web3\")\n\n\nfrom web3.main import (\n AsyncWeb3,\n Web3,\n)\nfrom web3.providers.async_rpc import ( # noqa: E402\n AsyncHTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.websocket import ( # noqa: E402\n WebsocketProvider,\n WebsocketProviderV2,\n)\n\n\n__all__ = [\n \"__version__\",\n \"AsyncWeb3\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"WebsocketProvider\",\n \"WebsocketProviderV2\",\n \"EthereumTesterProvider\",\n \"Account\",\n \"AsyncHTTPProvider\",\n]\n", "path": "web3/__init__.py"}]} | 1,397 | 210 |
gh_patches_debug_27086 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-8283 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable intersphinx support for hoverxref in our documentation
While writing #8283, I realized that we still do not enable intersphinx support in our sphinx-hoverxref documentation. More info here:
https://blog.readthedocs.com/hoverxref-intersphinx/
I think it would be nice to do so.
--- END ISSUE ---
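For context, the change amounts to a small `conf.py` addition: sphinx-hoverxref reuses the existing `intersphinx_mapping` and only needs `hoverxref_intersphinx` to say which projects should get tooltips. The sketch below shows the general shape with placeholder project keys; the actual keys chosen for the Read the Docs documentation are in the patch at the end of this entry.

```python
# conf.py -- illustrative sketch
extensions = [
    "sphinx.ext.intersphinx",
    "hoverxref.extension",
]

intersphinx_mapping = {
    "sphinx": ("https://www.sphinx-doc.org/en/master/", None),
    "pip": ("https://pip.pypa.io/en/stable/", None),
}

# Projects listed here get hover tooltips for intersphinx references.
hoverxref_intersphinx = [
    "sphinx",
    "pip",
]
```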
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 import os
2 import sys
3 from configparser import RawConfigParser
4
5 import sphinx_rtd_theme
6
7 sys.path.insert(0, os.path.abspath('..'))
8 sys.path.append(os.path.dirname(__file__))
9 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
10
11 from django.utils import timezone
12
13 import django
14 django.setup()
15
16
17 def get_version():
18 """Return package version from setup.cfg."""
19 config = RawConfigParser()
20 config.read(os.path.join('..', 'setup.cfg'))
21 return config.get('metadata', 'version')
22
23
24 sys.path.append(os.path.abspath('_ext'))
25 extensions = [
26 'sphinx.ext.autosectionlabel',
27 'sphinx.ext.autodoc',
28 'sphinx.ext.intersphinx',
29 'sphinxcontrib.httpdomain',
30 'djangodocs',
31 'doc_extensions',
32 'sphinx_tabs.tabs',
33 'sphinx-prompt',
34 'notfound.extension',
35 'hoverxref.extension',
36 'sphinx_search.extension',
37 'sphinxemoji.sphinxemoji',
38 ]
39
40 templates_path = ['_templates']
41
42 master_doc = 'index'
43 project = 'Read the Docs'
44 copyright = '2010-{}, Read the Docs, Inc & contributors'.format(
45 timezone.now().year
46 )
47 version = get_version()
48 release = version
49 exclude_patterns = ['_build']
50 default_role = 'obj'
51 intersphinx_mapping = {
52 'python': ('https://docs.python.org/3.6/', None),
53 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),
54 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
55 'pip': ('https://pip.pypa.io/en/stable/', None),
56 }
57 htmlhelp_basename = 'ReadTheDocsdoc'
58 latex_documents = [
59 ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',
60 'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
61 ]
62 man_pages = [
63 ('index', 'read-the-docs', 'Read the Docs Documentation',
64 ['Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
65 ]
66
67 exclude_patterns = [
68 # 'api' # needed for ``make gettext`` to not die.
69 ]
70
71 language = 'en'
72
73 locale_dirs = [
74 'locale/',
75 ]
76 gettext_compact = False
77
78 html_theme = 'sphinx_rtd_theme'
79 html_static_path = ['_static']
80 html_js_files = ['js/expand_tabs.js']
81 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
82 html_logo = 'img/logo.svg'
83 html_theme_options = {
84 'logo_only': True,
85 'display_version': False,
86 }
87
88 hoverxref_auto_ref = True
89 hoverxref_domains = ['py']
90 hoverxref_roles = [
91 'option',
92 'doc',
93 ]
94 hoverxref_role_types = {
95 'mod': 'modal', # for Python Sphinx Domain
96 'doc': 'modal', # for whole docs
97 'class': 'tooltip', # for Python Sphinx Domain
98 'ref': 'tooltip', # for hoverxref_auto_ref config
99 'confval': 'tooltip', # for custom object
100 }
101
102 rst_epilog = """
103 .. |org_brand| replace:: Read the Docs Community
104 .. |com_brand| replace:: Read the Docs for Business
105 """
106
107 # Activate autosectionlabel plugin
108 autosectionlabel_prefix_document = True
109
110 numfig = True
111
112 # sphinx-notfound-page
113 # https://github.com/readthedocs/sphinx-notfound-page
114 notfound_context = {
115 'title': 'Page Not Found',
116 'body': '''
117 <h1>Page Not Found</h1>
118
119 <p>Sorry, we couldn't find that page.</p>
120
121 <p>Try using the search box or go to the homepage.</p>
122 ''',
123 }
124 linkcheck_ignore = [
125 r'http://127\.0\.0\.1',
126 r'http://localhost',
127 r'http://community\.dev\.readthedocs\.io',
128 r'https://yourproject\.readthedocs\.io',
129 r'https?://docs\.example\.com',
130 r'https://foo\.readthedocs\.io/projects',
131 r'https://github\.com.+?#L\d+',
132 r'https://github\.com/readthedocs/readthedocs\.org/issues',
133 r'https://github\.com/readthedocs/readthedocs\.org/pull',
134 r'https://docs\.readthedocs\.io/\?rtd_search',
135 r'https://readthedocs\.org/search',
136 # This page is under login
137 r'https://readthedocs\.org/accounts/gold',
138 ]
139
140
141 def setup(app):
142 app.add_css_file('css/sphinx_prompt_css.css')
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -53,7 +53,23 @@
'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),
'sphinx': ('https://www.sphinx-doc.org/en/master/', None),
'pip': ('https://pip.pypa.io/en/stable/', None),
+ 'nbsphinx': ('https://nbsphinx.readthedocs.io/en/0.8.6/', None),
+ 'myst-nb': ('https://myst-nb.readthedocs.io/en/v0.12.3/', None),
+ 'ipywidgets': ('https://ipywidgets.readthedocs.io/en/7.6.3/', None),
+ 'jupytext': ('https://jupytext.readthedocs.io/en/stable/', None),
+ 'ipyleaflet': ('https://ipyleaflet.readthedocs.io/en/stable/', None),
+ 'poliastro': ('https://docs.poliastro.space/en/v0.15.2/', None),
+ 'qiskit': ('https://qiskit.org/documentation/', None),
+ 'myst-parser': ('https://myst-parser.readthedocs.io/en/v0.15.1/', None),
}
+hoverxref_intersphinx = [
+ "sphinx",
+ "pip",
+ "nbsphinx",
+ "myst-nb",
+ "ipywidgets",
+ "jupytext",
+]
htmlhelp_basename = 'ReadTheDocsdoc'
latex_documents = [
('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',
@@ -107,8 +123,6 @@
# Activate autosectionlabel plugin
autosectionlabel_prefix_document = True
-numfig = True
-
# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
notfound_context = {
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -53,7 +53,23 @@\n 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n 'pip': ('https://pip.pypa.io/en/stable/', None),\n+ 'nbsphinx': ('https://nbsphinx.readthedocs.io/en/0.8.6/', None),\n+ 'myst-nb': ('https://myst-nb.readthedocs.io/en/v0.12.3/', None),\n+ 'ipywidgets': ('https://ipywidgets.readthedocs.io/en/7.6.3/', None),\n+ 'jupytext': ('https://jupytext.readthedocs.io/en/stable/', None),\n+ 'ipyleaflet': ('https://ipyleaflet.readthedocs.io/en/stable/', None),\n+ 'poliastro': ('https://docs.poliastro.space/en/v0.15.2/', None),\n+ 'qiskit': ('https://qiskit.org/documentation/', None),\n+ 'myst-parser': ('https://myst-parser.readthedocs.io/en/v0.15.1/', None),\n }\n+hoverxref_intersphinx = [\n+ \"sphinx\",\n+ \"pip\",\n+ \"nbsphinx\",\n+ \"myst-nb\",\n+ \"ipywidgets\",\n+ \"jupytext\",\n+]\n htmlhelp_basename = 'ReadTheDocsdoc'\n latex_documents = [\n ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',\n@@ -107,8 +123,6 @@\n # Activate autosectionlabel plugin\n autosectionlabel_prefix_document = True\n \n-numfig = True\n-\n # sphinx-notfound-page\n # https://github.com/readthedocs/sphinx-notfound-page\n notfound_context = {\n", "issue": "Enable intersphinx support for hoverxref in our documentation\nWhile writing #8283, I realized that we still do not enable intersphinx support in our sphinx-hoverxref documentation. More info here:\r\n\r\nhttps://blog.readthedocs.com/hoverxref-intersphinx/\r\n\r\nI think it would be nice to do so.\n", "before_files": [{"content": "import os\nimport sys\nfrom configparser import RawConfigParser\n\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\ndef get_version():\n \"\"\"Return package version from setup.cfg.\"\"\"\n config = RawConfigParser()\n config.read(os.path.join('..', 'setup.cfg'))\n return config.get('metadata', 'version')\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n 'sphinx-prompt',\n 'notfound.extension',\n 'hoverxref.extension',\n 'sphinx_search.extension',\n 'sphinxemoji.sphinxemoji',\n]\n\ntemplates_path = ['_templates']\n\nmaster_doc = 'index'\nproject = 'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = get_version()\nrelease = version\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n 'pip': ('https://pip.pypa.io/en/stable/', None),\n}\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',\n 'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', 'Read the Docs Documentation',\n ['Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make 
gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_js_files = ['js/expand_tabs.js']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\nhoverxref_auto_ref = True\nhoverxref_domains = ['py']\nhoverxref_roles = [\n 'option',\n 'doc',\n]\nhoverxref_role_types = {\n 'mod': 'modal', # for Python Sphinx Domain\n 'doc': 'modal', # for whole docs\n 'class': 'tooltip', # for Python Sphinx Domain\n 'ref': 'tooltip', # for hoverxref_auto_ref config\n 'confval': 'tooltip', # for custom object\n}\n\nrst_epilog = \"\"\"\n.. |org_brand| replace:: Read the Docs Community\n.. |com_brand| replace:: Read the Docs for Business\n\"\"\"\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n\nnumfig = True\n\n# sphinx-notfound-page\n# https://github.com/readthedocs/sphinx-notfound-page\nnotfound_context = {\n 'title': 'Page Not Found',\n 'body': '''\n<h1>Page Not Found</h1>\n\n<p>Sorry, we couldn't find that page.</p>\n\n<p>Try using the search box or go to the homepage.</p>\n''',\n}\nlinkcheck_ignore = [\n r'http://127\\.0\\.0\\.1',\n r'http://localhost',\n r'http://community\\.dev\\.readthedocs\\.io',\n r'https://yourproject\\.readthedocs\\.io',\n r'https?://docs\\.example\\.com',\n r'https://foo\\.readthedocs\\.io/projects',\n r'https://github\\.com.+?#L\\d+',\n r'https://github\\.com/readthedocs/readthedocs\\.org/issues',\n r'https://github\\.com/readthedocs/readthedocs\\.org/pull',\n r'https://docs\\.readthedocs\\.io/\\?rtd_search',\n r'https://readthedocs\\.org/search',\n # This page is under login\n r'https://readthedocs\\.org/accounts/gold',\n]\n\n\ndef setup(app):\n app.add_css_file('css/sphinx_prompt_css.css')\n", "path": "docs/conf.py"}], "after_files": [{"content": "import os\nimport sys\nfrom configparser import RawConfigParser\n\nimport sphinx_rtd_theme\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.utils import timezone\n\nimport django\ndjango.setup()\n\n\ndef get_version():\n \"\"\"Return package version from setup.cfg.\"\"\"\n config = RawConfigParser()\n config.read(os.path.join('..', 'setup.cfg'))\n return config.get('metadata', 'version')\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autosectionlabel',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n 'sphinx_tabs.tabs',\n 'sphinx-prompt',\n 'notfound.extension',\n 'hoverxref.extension',\n 'sphinx_search.extension',\n 'sphinxemoji.sphinxemoji',\n]\n\ntemplates_path = ['_templates']\n\nmaster_doc = 'index'\nproject = 'Read the Docs'\ncopyright = '2010-{}, Read the Docs, Inc & contributors'.format(\n timezone.now().year\n)\nversion = get_version()\nrelease = version\nexclude_patterns = ['_build']\ndefault_role = 'obj'\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n 'django': ('https://docs.djangoproject.com/en/1.11/', 'https://docs.djangoproject.com/en/1.11/_objects/'),\n 'sphinx': ('https://www.sphinx-doc.org/en/master/', None),\n 'pip': ('https://pip.pypa.io/en/stable/', None),\n 'nbsphinx': ('https://nbsphinx.readthedocs.io/en/0.8.6/', None),\n 'myst-nb': ('https://myst-nb.readthedocs.io/en/v0.12.3/', None),\n 
'ipywidgets': ('https://ipywidgets.readthedocs.io/en/7.6.3/', None),\n 'jupytext': ('https://jupytext.readthedocs.io/en/stable/', None),\n 'ipyleaflet': ('https://ipyleaflet.readthedocs.io/en/stable/', None),\n 'poliastro': ('https://docs.poliastro.space/en/v0.15.2/', None),\n 'qiskit': ('https://qiskit.org/documentation/', None),\n 'myst-parser': ('https://myst-parser.readthedocs.io/en/v0.15.1/', None),\n}\nhoverxref_intersphinx = [\n \"sphinx\",\n \"pip\",\n \"nbsphinx\",\n \"myst-nb\",\n \"ipywidgets\",\n \"jupytext\",\n]\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', 'Read the Docs Documentation',\n 'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', 'Read the Docs Documentation',\n ['Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\nhtml_js_files = ['js/expand_tabs.js']\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nhtml_logo = 'img/logo.svg'\nhtml_theme_options = {\n 'logo_only': True,\n 'display_version': False,\n}\n\nhoverxref_auto_ref = True\nhoverxref_domains = ['py']\nhoverxref_roles = [\n 'option',\n 'doc',\n]\nhoverxref_role_types = {\n 'mod': 'modal', # for Python Sphinx Domain\n 'doc': 'modal', # for whole docs\n 'class': 'tooltip', # for Python Sphinx Domain\n 'ref': 'tooltip', # for hoverxref_auto_ref config\n 'confval': 'tooltip', # for custom object\n}\n\nrst_epilog = \"\"\"\n.. |org_brand| replace:: Read the Docs Community\n.. |com_brand| replace:: Read the Docs for Business\n\"\"\"\n\n# Activate autosectionlabel plugin\nautosectionlabel_prefix_document = True\n\n# sphinx-notfound-page\n# https://github.com/readthedocs/sphinx-notfound-page\nnotfound_context = {\n 'title': 'Page Not Found',\n 'body': '''\n<h1>Page Not Found</h1>\n\n<p>Sorry, we couldn't find that page.</p>\n\n<p>Try using the search box or go to the homepage.</p>\n''',\n}\nlinkcheck_ignore = [\n r'http://127\\.0\\.0\\.1',\n r'http://localhost',\n r'http://community\\.dev\\.readthedocs\\.io',\n r'https://yourproject\\.readthedocs\\.io',\n r'https?://docs\\.example\\.com',\n r'https://foo\\.readthedocs\\.io/projects',\n r'https://github\\.com.+?#L\\d+',\n r'https://github\\.com/readthedocs/readthedocs\\.org/issues',\n r'https://github\\.com/readthedocs/readthedocs\\.org/pull',\n r'https://docs\\.readthedocs\\.io/\\?rtd_search',\n r'https://readthedocs\\.org/search',\n # This page is under login\n r'https://readthedocs\\.org/accounts/gold',\n]\n\n\ndef setup(app):\n app.add_css_file('css/sphinx_prompt_css.css')\n", "path": "docs/conf.py"}]} | 1,685 | 452 |
gh_patches_debug_428 | rasdani/github-patches | git_diff | python__python-docs-es-1762 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Translate 'library/os.po'
This needs to reach 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/library/os.html once translated.
Meanwhile, the English version is shown.
Current stats for `library/os.po`:
* Fuzzy: 27
* Percent translated: 94.8%
* Entries: 804 / 848
* Untranslated: 44
Please, comment here if you want this file to be assigned to you and a member will assign it to you as soon as possible, so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/translate.py`
Content:
```
1 import os
2 import re
3 import sys
4 from typing import Dict, Tuple
5
6 import polib
7
8 VERBOSE = False
9 DEBUG = False
10 SKIP_TRANSLATED_ENTRIES = True
11
12 try:
13 from deep_translator import GoogleTranslator
14 except ImportError:
15 print("Error: This util script needs `deep_translator` to be installed")
16 sys.exit(1)
17
18 _patterns = [
19 ":c:func:`[^`]+`",
20 ":c:type:`[^`]+`",
21 ":c:macro:`[^`]+`",
22 ":c:member:`[^`]+`",
23 ":c:data:`[^`]+`",
24 ":py:data:`[^`]+`",
25 ":py:mod:`[^`]+`",
26 ":func:`[^`]+`",
27 ":mod:`[^`]+`",
28 ":ref:`[^`]+`",
29 ":class:`[^`]+`",
30 ":pep:`[^`]+`",
31 ":data:`[^`]+`",
32 ":exc:`[^`]+`",
33 ":term:`[^`]+`",
34 ":meth:`[^`]+`",
35 ":envvar:`[^`]+`",
36 ":file:`[^`]+`",
37 ":attr:`[^`]+`",
38 ":const:`[^`]+`",
39 ":issue:`[^`]+`",
40 ":opcode:`[^`]+`",
41 ":option:`[^`]+`",
42 ":program:`[^`]+`",
43 ":keyword:`[^`]+`",
44 ":RFC:`[^`]+`",
45 ":rfc:`[^`]+`",
46 ":doc:`[^`]+`",
47 "``[^`]+``",
48 "`[^`]+`__",
49 "`[^`]+`_",
50 "\*\*[^\*]+\*\*", # bold text between **
51 "\*[^\*]+\*", # italic text between *
52 ]
53
54 _exps = [re.compile(e) for e in _patterns]
55
56 def protect_sphinx_directives(s: str) -> Tuple[dict, str]:
57 """
58 Parameters:
59 string containing the text to translate
60
61 Returns:
62 dictionary containing all the placeholder text as keys
63 and the correct value.
64 """
65
66 i = 0
67 d: Dict[str, str] = {}
68 for exp in _exps:
69 matches = exp.findall(s)
70 if DEBUG:
71 print(exp, matches)
72 for match in matches:
73 ph = f"XASDF{str(i).zfill(2)}"
74 s = s.replace(match, ph)
75 if ph in d and VERBOSE:
76 print(f"Error: {ph} is already in the dictionary")
77 print("new", match)
78 print("old", d[ph])
79 d[ph] = match
80 i += 1
81 return d, s
82
83
84 def undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:
85 for ph, value in placeholders.items():
86 translated_text = translated_text.replace(ph, value)
87 if DEBUG:
88 print(ph, value)
89 print(translated_text)
90 return translated_text
91
92
93 if __name__ == "__main__":
94 filename = sys.argv[1]
95 if not os.path.isfile(filename):
96 print(f"File not found: '{filename}'")
97 sys.exit(-1)
98
99 po = polib.pofile(filename)
100 translator = GoogleTranslator(source="en", target="es")
101
102 for entry in po:
103 # If the entry has already a translation, skip.
104 if SKIP_TRANSLATED_ENTRIES and entry.msgstr:
105 continue
106
107 print("\nEN|", entry.msgid)
108 placeholders, temp_text = protect_sphinx_directives(entry.msgid)
109 if VERBOSE:
110 print(temp_text)
111 print(placeholders)
112
113 # Translate the temporary text without sphinx statements
114 translated_text = translator.translate(temp_text)
115
116 # Recover sphinx statements
117 real_text = undo_sphinx_directives_protection(placeholders, translated_text)
118 print("ES|", real_text)
119
120 # Replace the po file translated entry
121 entry.msgstr = real_text
122
123 # Save the file after all the entries are translated
124 po.save()
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/translate.py b/scripts/translate.py
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -44,6 +44,8 @@
":RFC:`[^`]+`",
":rfc:`[^`]+`",
":doc:`[^`]+`",
+ ":manpage:`[^`]+`",
+ ":sup:`[^`]+`",
"``[^`]+``",
"`[^`]+`__",
"`[^`]+`_",
| {"golden_diff": "diff --git a/scripts/translate.py b/scripts/translate.py\n--- a/scripts/translate.py\n+++ b/scripts/translate.py\n@@ -44,6 +44,8 @@\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n+ \":manpage:`[^`]+`\",\n+ \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n", "issue": "Translate 'library/os.po'\nThis needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.10/library/os.html once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `library/os.po`:\n\n* Fuzzy: 27\n* Percent translated: 94.8%\n* Entries: 804 / 848\n* Untranslated: 44\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).\n", "before_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the 
temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}]} | 1,587 | 113 |
gh_patches_debug_7956 | rasdani/github-patches | git_diff | open-mmlab__mmpose-783 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
resource limit bug
**Describe the feature**
**Motivation**
It is inconvenient when we run mmpose on a slurm cluster which may have a larger file-open soft limit than 4096. The resource limit adjustment here [https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/builder.py#L13-L19](url) will reduce the base file-open soft limit to 4096. Sometimes it will result in 'OSError: [Error 24] Too many open files' during the training process.
**Additional context**
the code could perhaps be modified as below:
```python
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096,base_soft_limit), hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/datasets/builder.py`
Content:
```
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from mmcv.utils import Registry, build_from_cfg
9 from mmcv.utils.parrots_wrapper import _get_dataloader
10
11 from .samplers import DistributedSampler
12
13 if platform.system() != 'Windows':
14 # https://github.com/pytorch/pytorch/issues/973
15 import resource
16 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
17 hard_limit = rlimit[1]
18 soft_limit = min(4096, hard_limit)
19 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
20
21 DATASETS = Registry('dataset')
22 PIPELINES = Registry('pipeline')
23
24
25 def build_dataset(cfg, default_args=None):
26 """Build a dataset from config dict.
27
28 Args:
29 cfg (dict): Config dict. It should at least contain the key "type".
30 default_args (dict, optional): Default initialization arguments.
31 Default: None.
32
33 Returns:
34 Dataset: The constructed dataset.
35 """
36 from .dataset_wrappers import RepeatDataset
37
38 if cfg['type'] == 'RepeatDataset':
39 dataset = RepeatDataset(
40 build_dataset(cfg['dataset'], default_args), cfg['times'])
41 else:
42 dataset = build_from_cfg(cfg, DATASETS, default_args)
43 return dataset
44
45
46 def build_dataloader(dataset,
47 samples_per_gpu,
48 workers_per_gpu,
49 num_gpus=1,
50 dist=True,
51 shuffle=True,
52 seed=None,
53 drop_last=True,
54 pin_memory=True,
55 **kwargs):
56 """Build PyTorch DataLoader.
57
58 In distributed training, each GPU/process has a dataloader.
59 In non-distributed training, there is only one dataloader for all GPUs.
60
61 Args:
62 dataset (Dataset): A PyTorch dataset.
63 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
64 batch size of each GPU.
65 workers_per_gpu (int): How many subprocesses to use for data loading
66 for each GPU.
67 num_gpus (int): Number of GPUs. Only used in non-distributed training.
68 dist (bool): Distributed training/test or not. Default: True.
69 shuffle (bool): Whether to shuffle the data at every epoch.
70 Default: True.
71 drop_last (bool): Whether to drop the last incomplete batch in epoch.
72 Default: True
73 pin_memory (bool): Whether to use pin_memory in DataLoader.
74 Default: True
75 kwargs: any keyword argument to be used to initialize DataLoader
76
77 Returns:
78 DataLoader: A PyTorch dataloader.
79 """
80 rank, world_size = get_dist_info()
81 if dist:
82 sampler = DistributedSampler(
83 dataset, world_size, rank, shuffle=shuffle, seed=seed)
84 shuffle = False
85 batch_size = samples_per_gpu
86 num_workers = workers_per_gpu
87 else:
88 sampler = None
89 batch_size = num_gpus * samples_per_gpu
90 num_workers = num_gpus * workers_per_gpu
91
92 init_fn = partial(
93 worker_init_fn, num_workers=num_workers, rank=rank,
94 seed=seed) if seed is not None else None
95
96 _, DataLoader = _get_dataloader()
97 data_loader = DataLoader(
98 dataset,
99 batch_size=batch_size,
100 sampler=sampler,
101 num_workers=num_workers,
102 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
103 pin_memory=pin_memory,
104 shuffle=shuffle,
105 worker_init_fn=init_fn,
106 drop_last=drop_last,
107 **kwargs)
108
109 return data_loader
110
111
112 def worker_init_fn(worker_id, num_workers, rank, seed):
113 """Init the random seed for various workers."""
114 # The seed of each worker equals to
115 # num_worker * rank + worker_id + user_seed
116 worker_seed = num_workers * rank + worker_id + seed
117 np.random.seed(worker_seed)
118 random.seed(worker_seed)
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -14,8 +14,9 @@
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
- soft_limit = min(4096, hard_limit)
+ soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -14,8 +14,9 @@\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n+ base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n- soft_limit = min(4096, hard_limit)\n+ soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n \n DATASETS = Registry('dataset')\n", "issue": "resource limit bug\n**Describe the feature**\r\n\r\n**Motivation**\r\n\r\nIt is inconvenient when we run mmpose on slurm clustre which may has larger file-open's soft limit than 4096. The resource limit adjust here [https://github.com/open-mmlab/mmpose/blob/master/mmpose/datasets/builder.py#L13-L19](url) will reduce the base file-open's soft limit to 4096. Sometimes it will result in 'OSError: [Error 24] Too many open files' during training process.\r\n\r\n\r\n**Additional context**\r\nthe code maybe can be modified like below:\r\n```python\r\n\r\nif platform.system() != 'Windows':\r\n # https://github.com/pytorch/pytorch/issues/973\r\n import resource\r\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\r\n base_soft_limit = rlimit[0]\r\n hard_limit = rlimit[1]\r\n soft_limit = min(max(4096,base_soft_limit), hard_limit)\r\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}], "after_files": [{"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 1,623 | 172 |
gh_patches_debug_30975 | rasdani/github-patches | git_diff | liqd__a4-product-608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mandatory mB topic selection on bet.in (US #1775)
All projects need a topic on bet.in now, even existing ones. Can we remove that requirement? We haven't yet thought about how to implement topics on bet.in and they are not shown anywhere, so it would probably be confusing for initiators.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liqd_product/apps/projects/dashboard.py`
Content:
```
1 from django.urls import reverse
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import ProjectFormComponent
6 from adhocracy4.dashboard import components
7
8 from . import forms
9 from . import views
10
11
12 class ParticipantsComponent(DashboardComponent):
13 identifier = 'participants'
14 weight = 30
15 label = _('Participants')
16
17 def is_effective(self, project):
18 return not project.is_draft and project.is_private
19
20 def get_base_url(self, project):
21 return reverse('a4dashboard:dashboard-participants-edit', kwargs={
22 'project_slug': project.slug
23 })
24
25 def get_urls(self):
26 return [(
27 r'^projects/(?P<project_slug>[-\w_]+)/participants/$',
28 views.DashboardProjectParticipantsView.as_view(component=self),
29 'dashboard-participants-edit'
30 )]
31
32
33 class ModeratorsComponent(DashboardComponent):
34 identifier = 'moderators'
35 weight = 32
36 label = _('Moderators')
37
38 def is_effective(self, project):
39 return True
40
41 def get_base_url(self, project):
42 return reverse('a4dashboard:dashboard-moderators-edit', kwargs={
43 'project_slug': project.slug
44 })
45
46 def get_urls(self):
47 return [(
48 r'^projects/(?P<project_slug>[-\w_]+)/moderators/$',
49 views.DashboardProjectModeratorsView.as_view(component=self),
50 'dashboard-moderators-edit'
51 )]
52
53
54 class TopicComponent(ProjectFormComponent):
55 identifier = 'topics'
56 weight = 33
57 label = _('Topics')
58
59 form_title = _('Edit topics')
60 form_class = forms.TopicForm
61 form_template_name = 'liqd_product_projects/project_topics.html'
62
63
64 components.register_project(ModeratorsComponent())
65 components.register_project(ParticipantsComponent())
66 components.register_project(TopicComponent())
67
```
Path: `liqd_product/apps/projects/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.core.exceptions import ValidationError
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.dashboard.forms import ProjectDashboardForm
7 from adhocracy4.projects.models import Project
8 from liqd_product.apps.users import fields as user_fields
9
10 from .models import ModeratorInvite
11 from .models import ParticipantInvite
12
13 User = get_user_model()
14
15
16 class InviteForm(forms.ModelForm):
17 accept = forms.CharField(required=False)
18 reject = forms.CharField(required=False)
19
20 def clean(self):
21 data = self.data
22 if 'accept' not in data and 'reject' not in data:
23 raise ValidationError('Reject or accept')
24 return data
25
26 def is_accepted(self):
27 data = self.data
28 return 'accept' in data and 'reject' not in data
29
30
31 class ParticipantInviteForm(InviteForm):
32
33 class Meta:
34 model = ParticipantInvite
35 fields = ['accept', 'reject']
36
37
38 class ModeratorInviteForm(InviteForm):
39
40 class Meta:
41 model = ModeratorInvite
42 fields = ['accept', 'reject']
43
44
45 class InviteUsersFromEmailForm(forms.Form):
46 add_users = user_fields.CommaSeparatedEmailField(
47 required=False,
48 label=_('Invite users via email')
49 )
50
51 add_users_upload = user_fields.EmailFileField(
52 required=False,
53 label=_('Invite users via file upload'),
54 help_text=_('Upload a csv file containing email addresses.')
55 )
56
57 def __init__(self, *args, **kwargs):
58 labels = kwargs.pop('labels', None)
59 super().__init__(*args, **kwargs)
60
61 if labels:
62 self.fields['add_users'].label = labels[0]
63 self.fields['add_users_upload'].label = labels[1]
64
65 def clean(self):
66 cleaned_data = super().clean()
67 add_users = self.data.get('add_users')
68 add_users_upload = self.files.get('add_users_upload')
69 if not self.errors and not add_users and not add_users_upload:
70 raise ValidationError(
71 _('Please enter email addresses or upload a file'))
72 return cleaned_data
73
74
75 class TopicForm(ProjectDashboardForm):
76
77 class Meta:
78 model = Project
79 fields = ['topics']
80 required_for_project_publish = ['topics']
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liqd_product/apps/projects/dashboard.py b/liqd_product/apps/projects/dashboard.py
--- a/liqd_product/apps/projects/dashboard.py
+++ b/liqd_product/apps/projects/dashboard.py
@@ -2,10 +2,8 @@
from django.utils.translation import ugettext_lazy as _
from adhocracy4.dashboard import DashboardComponent
-from adhocracy4.dashboard import ProjectFormComponent
from adhocracy4.dashboard import components
-from . import forms
from . import views
@@ -51,16 +49,5 @@
)]
-class TopicComponent(ProjectFormComponent):
- identifier = 'topics'
- weight = 33
- label = _('Topics')
-
- form_title = _('Edit topics')
- form_class = forms.TopicForm
- form_template_name = 'liqd_product_projects/project_topics.html'
-
-
components.register_project(ModeratorsComponent())
components.register_project(ParticipantsComponent())
-components.register_project(TopicComponent())
diff --git a/liqd_product/apps/projects/forms.py b/liqd_product/apps/projects/forms.py
--- a/liqd_product/apps/projects/forms.py
+++ b/liqd_product/apps/projects/forms.py
@@ -3,8 +3,6 @@
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
-from adhocracy4.dashboard.forms import ProjectDashboardForm
-from adhocracy4.projects.models import Project
from liqd_product.apps.users import fields as user_fields
from .models import ModeratorInvite
@@ -70,11 +68,3 @@
raise ValidationError(
_('Please enter email addresses or upload a file'))
return cleaned_data
-
-
-class TopicForm(ProjectDashboardForm):
-
- class Meta:
- model = Project
- fields = ['topics']
- required_for_project_publish = ['topics']
| {"golden_diff": "diff --git a/liqd_product/apps/projects/dashboard.py b/liqd_product/apps/projects/dashboard.py\n--- a/liqd_product/apps/projects/dashboard.py\n+++ b/liqd_product/apps/projects/dashboard.py\n@@ -2,10 +2,8 @@\n from django.utils.translation import ugettext_lazy as _\n \n from adhocracy4.dashboard import DashboardComponent\n-from adhocracy4.dashboard import ProjectFormComponent\n from adhocracy4.dashboard import components\n \n-from . import forms\n from . import views\n \n \n@@ -51,16 +49,5 @@\n )]\n \n \n-class TopicComponent(ProjectFormComponent):\n- identifier = 'topics'\n- weight = 33\n- label = _('Topics')\n-\n- form_title = _('Edit topics')\n- form_class = forms.TopicForm\n- form_template_name = 'liqd_product_projects/project_topics.html'\n-\n-\n components.register_project(ModeratorsComponent())\n components.register_project(ParticipantsComponent())\n-components.register_project(TopicComponent())\ndiff --git a/liqd_product/apps/projects/forms.py b/liqd_product/apps/projects/forms.py\n--- a/liqd_product/apps/projects/forms.py\n+++ b/liqd_product/apps/projects/forms.py\n@@ -3,8 +3,6 @@\n from django.core.exceptions import ValidationError\n from django.utils.translation import ugettext_lazy as _\n \n-from adhocracy4.dashboard.forms import ProjectDashboardForm\n-from adhocracy4.projects.models import Project\n from liqd_product.apps.users import fields as user_fields\n \n from .models import ModeratorInvite\n@@ -70,11 +68,3 @@\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n-\n-\n-class TopicForm(ProjectDashboardForm):\n-\n- class Meta:\n- model = Project\n- fields = ['topics']\n- required_for_project_publish = ['topics']\n", "issue": "Mandatory mB topic selection on bet.in ( US #1775)\nAll projects need a topic on bet.in now, even existing ones. Can we remove that requirement? We haven't yet thought about how to implement topics on bet.in and there are not shown anywhere, so it would probably be confusing for initiators.\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import ProjectFormComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import forms\nfrom . 
import views\n\n\nclass ParticipantsComponent(DashboardComponent):\n identifier = 'participants'\n weight = 30\n label = _('Participants')\n\n def is_effective(self, project):\n return not project.is_draft and project.is_private\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-participants-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/participants/$',\n views.DashboardProjectParticipantsView.as_view(component=self),\n 'dashboard-participants-edit'\n )]\n\n\nclass ModeratorsComponent(DashboardComponent):\n identifier = 'moderators'\n weight = 32\n label = _('Moderators')\n\n def is_effective(self, project):\n return True\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-moderators-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/moderators/$',\n views.DashboardProjectModeratorsView.as_view(component=self),\n 'dashboard-moderators-edit'\n )]\n\n\nclass TopicComponent(ProjectFormComponent):\n identifier = 'topics'\n weight = 33\n label = _('Topics')\n\n form_title = _('Edit topics')\n form_class = forms.TopicForm\n form_template_name = 'liqd_product_projects/project_topics.html'\n\n\ncomponents.register_project(ModeratorsComponent())\ncomponents.register_project(ParticipantsComponent())\ncomponents.register_project(TopicComponent())\n", "path": "liqd_product/apps/projects/dashboard.py"}, {"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom adhocracy4.projects.models import Project\nfrom liqd_product.apps.users import fields as user_fields\n\nfrom .models import ModeratorInvite\nfrom .models import ParticipantInvite\n\nUser = get_user_model()\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n\n\nclass ParticipantInviteForm(InviteForm):\n\n class Meta:\n model = ParticipantInvite\n fields = ['accept', 'reject']\n\n\nclass ModeratorInviteForm(InviteForm):\n\n class Meta:\n model = ModeratorInvite\n fields = ['accept', 'reject']\n\n\nclass InviteUsersFromEmailForm(forms.Form):\n add_users = user_fields.CommaSeparatedEmailField(\n required=False,\n label=_('Invite users via email')\n )\n\n add_users_upload = user_fields.EmailFileField(\n required=False,\n label=_('Invite users via file upload'),\n help_text=_('Upload a csv file containing email addresses.')\n )\n\n def __init__(self, *args, **kwargs):\n labels = kwargs.pop('labels', None)\n super().__init__(*args, **kwargs)\n\n if labels:\n self.fields['add_users'].label = labels[0]\n self.fields['add_users_upload'].label = labels[1]\n\n def clean(self):\n cleaned_data = super().clean()\n add_users = self.data.get('add_users')\n add_users_upload = self.files.get('add_users_upload')\n if not self.errors and not add_users and not add_users_upload:\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n\n\nclass TopicForm(ProjectDashboardForm):\n\n class Meta:\n 
model = Project\n fields = ['topics']\n required_for_project_publish = ['topics']\n", "path": "liqd_product/apps/projects/forms.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass ParticipantsComponent(DashboardComponent):\n identifier = 'participants'\n weight = 30\n label = _('Participants')\n\n def is_effective(self, project):\n return not project.is_draft and project.is_private\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-participants-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/participants/$',\n views.DashboardProjectParticipantsView.as_view(component=self),\n 'dashboard-participants-edit'\n )]\n\n\nclass ModeratorsComponent(DashboardComponent):\n identifier = 'moderators'\n weight = 32\n label = _('Moderators')\n\n def is_effective(self, project):\n return True\n\n def get_base_url(self, project):\n return reverse('a4dashboard:dashboard-moderators-edit', kwargs={\n 'project_slug': project.slug\n })\n\n def get_urls(self):\n return [(\n r'^projects/(?P<project_slug>[-\\w_]+)/moderators/$',\n views.DashboardProjectModeratorsView.as_view(component=self),\n 'dashboard-moderators-edit'\n )]\n\n\ncomponents.register_project(ModeratorsComponent())\ncomponents.register_project(ParticipantsComponent())\n", "path": "liqd_product/apps/projects/dashboard.py"}, {"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom liqd_product.apps.users import fields as user_fields\n\nfrom .models import ModeratorInvite\nfrom .models import ParticipantInvite\n\nUser = get_user_model()\n\n\nclass InviteForm(forms.ModelForm):\n accept = forms.CharField(required=False)\n reject = forms.CharField(required=False)\n\n def clean(self):\n data = self.data\n if 'accept' not in data and 'reject' not in data:\n raise ValidationError('Reject or accept')\n return data\n\n def is_accepted(self):\n data = self.data\n return 'accept' in data and 'reject' not in data\n\n\nclass ParticipantInviteForm(InviteForm):\n\n class Meta:\n model = ParticipantInvite\n fields = ['accept', 'reject']\n\n\nclass ModeratorInviteForm(InviteForm):\n\n class Meta:\n model = ModeratorInvite\n fields = ['accept', 'reject']\n\n\nclass InviteUsersFromEmailForm(forms.Form):\n add_users = user_fields.CommaSeparatedEmailField(\n required=False,\n label=_('Invite users via email')\n )\n\n add_users_upload = user_fields.EmailFileField(\n required=False,\n label=_('Invite users via file upload'),\n help_text=_('Upload a csv file containing email addresses.')\n )\n\n def __init__(self, *args, **kwargs):\n labels = kwargs.pop('labels', None)\n super().__init__(*args, **kwargs)\n\n if labels:\n self.fields['add_users'].label = labels[0]\n self.fields['add_users_upload'].label = labels[1]\n\n def clean(self):\n cleaned_data = super().clean()\n add_users = self.data.get('add_users')\n add_users_upload = self.files.get('add_users_upload')\n if not self.errors and not add_users and not add_users_upload:\n raise ValidationError(\n _('Please enter email addresses or upload a file'))\n return cleaned_data\n", "path": "liqd_product/apps/projects/forms.py"}]} | 1,525 | 387 |
gh_patches_debug_13265 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-569 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove `integtest.sh` from all plugin repos
The [integtest.sh](https://github.com/opensearch-project/opensearch-build/blob/main/bundle-workflow/scripts/default/integtest.sh) tool contains the logic to run integration tests for a plugin. This logic is largely common across plugins, so it has been moved to the `opensearch-build` repo. Thus it can be removed from the individual plugin repos.
However, if a plugin requires custom logic that the standard tool doesn't provide, it can continue to maintain its own integtest.sh in its repo. In that case, when the integration tests are run, an integtest.sh found in the plugin repo takes precedence over the standard default integtest.sh in the `opensearch-build` repo. This precedence order is defined in ScriptFinder [here](https://github.com/opensearch-project/opensearch-build/blob/84f2fa1cf15abe314aee62dbd2cb39bf2c9bb65f/bundle-workflow/src/paths/script_finder.py#L65).
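For illustration, a minimal sketch of that precedence lookup is shown below. It is not the actual ScriptFinder implementation — the function and parameter names are illustrative assumptions — but it mirrors the behaviour described above and in `__find_script`: return the first candidate path that exists on disk.
```python
# Hedged sketch only: illustrates the precedence order described above.
# Names below are illustrative, not the actual opensearch-build API.
import os


def find_integ_test_script(component_name, git_dir,
                           component_scripts_path, default_scripts_path):
    # Candidate locations, highest precedence first: the plugin repo itself,
    # then the per-component override, then the shared default script.
    candidates = [
        os.path.join(git_dir, "integtest.sh"),
        os.path.join(git_dir, "scripts", "integtest.sh"),
        os.path.join(component_scripts_path, component_name, "integtest.sh"),
        os.path.join(default_scripts_path, "integtest.sh"),
    ]
    # Return the first candidate that actually exists on disk.
    return next((path for path in candidates if os.path.exists(path)), None)
```
(In the real tool, a missing script raises `ScriptNotFoundError` rather than returning `None`.)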
Action items:
Raise PRs on all plugin repos and remove integtest.sh
- [ ] index-management
- [ ] anomaly-detection
- [ ] alerting
- [ ] asynchronous-search
- [ ] k-NN
Changes will also need to be backported into 1.x branches where such branches exist.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/src/paths/script_finder.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 class ScriptFinder:
11 class ScriptNotFoundError(Exception):
12 def __init__(self, kind, paths):
13 self.kind = kind
14 self.paths = paths
15 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
16
17 component_scripts_path = os.path.realpath(
18 os.path.join(
19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components"
20 )
21 )
22
23 default_scripts_path = os.path.realpath(
24 os.path.join(
25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default"
26 )
27 )
28
29 """
30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
31
32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
33 it will look in the following locations, in order:
34 * Root of the Git repository
35 * /scripts/<script-name> in the Git repository
36 * <component_scripts_path>/<component_name>/<script-name>
37 * <default_scripts_path>/<script-name>
38
39 For install.sh scripts, given a component name, it will look in the following locations, in order:
40 * <component_scripts_path>/<component_name>/<script-name>
41 * <default_scripts_path>/<script-name>
42 """
43
44 @classmethod
45 def __find_script(cls, name, paths):
46 script = next(filter(lambda path: os.path.exists(path), paths), None)
47 if script is None:
48 raise ScriptFinder.ScriptNotFoundError(name, paths)
49 return script
50
51 @classmethod
52 def find_build_script(cls, component_name, git_dir):
53 paths = [
54 os.path.realpath(os.path.join(git_dir, "build.sh")),
55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")),
56 os.path.realpath(
57 os.path.join(cls.component_scripts_path, component_name, "build.sh")
58 ),
59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")),
60 ]
61
62 return cls.__find_script("build.sh", paths)
63
64 @classmethod
65 def find_integ_test_script(cls, component_name, git_dir):
66 paths = [
67 # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
68 # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
69 # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
70 os.path.realpath(
71 os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
72 ),
73 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")),
74 ]
75
76 return cls.__find_script("integtest.sh", paths)
77
78 @classmethod
79 def find_install_script(cls, component_name):
80 paths = [
81 os.path.realpath(
82 os.path.join(cls.component_scripts_path, component_name, "install.sh")
83 ),
84 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
85 ]
86
87 return cls.__find_script("install.sh", paths)
88
89 @classmethod
90 def find_bwc_test_script(cls, component_name, git_dir):
91 paths = [
92 os.path.realpath(os.path.join(git_dir, "bwctest.sh")),
93 os.path.realpath(os.path.join(git_dir, "scripts/bwctest.sh")),
94 os.path.realpath(
95 os.path.join(cls.component_scripts_path, component_name, "bwctest.sh")
96 ),
97 os.path.realpath(os.path.join(cls.default_scripts_path, "bwctest.sh")),
98 ]
99
100 return cls.__find_script("bwctest.sh", paths)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py
--- a/bundle-workflow/src/paths/script_finder.py
+++ b/bundle-workflow/src/paths/script_finder.py
@@ -64,9 +64,8 @@
@classmethod
def find_integ_test_script(cls, component_name, git_dir):
paths = [
- # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
- # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
- # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
+ os.path.realpath(os.path.join(git_dir, "integtest.sh")),
+ os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
os.path.realpath(
os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
),
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -64,9 +64,8 @@\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n- # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n- # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n- # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n+ os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n", "issue": "Remove `integtest.sh` from all plugin repos\nThe [integtest.sh](https://github.com/opensearch-project/opensearch-build/blob/main/bundle-workflow/scripts/default/integtest.sh) tool contains the logic to run integration tests for a plugin. This logic is mostly common across most plugins, so it has been moved to `opensearch-build` repo. Thus it can be removed from the individual plugin repos.\r\nHowever, if a plugin requires some custom logic to run integtests, which the standard tool doesn't provide, they can continue maintaining this integtest.sh in their own repo. In this case, when the integration tests are run, if a plugin has a integtest.sh tool in their repo, it gets precedence over the standard default integtest.sh in the `opensearch-build` repo. This precedence order logic is defined in ScriptFinder [here](https://github.com/opensearch-project/opensearch-build/blob/84f2fa1cf15abe314aee62dbd2cb39bf2c9bb65f/bundle-workflow/src/paths/script_finder.py#L65) \r\n\r\nAction items:\r\n\r\nRaise PRs on all plugin repos and remove integtest.sh \r\n- [ ] index-management\r\n- [ ] anomaly-detection,\r\n- [ ] alerting\r\n- [ ] asynchronous-search\r\n- [ ] k-NN\r\n\r\nChanges will need to be backported into 1.x branches if such exist, too.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. 
Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n\n @classmethod\n def find_bwc_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n ]\n\n return cls.__find_script(\"bwctest.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. 
Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n\n @classmethod\n def find_bwc_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"bwctest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/bwctest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"bwctest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"bwctest.sh\")),\n ]\n\n return cls.__find_script(\"bwctest.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]} | 1,586 | 214 |